diff --git a/CMakeLists.txt b/CMakeLists.txt index 4f0f8be5ac..08e9a08aa0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -15,6 +15,7 @@ option(BUILD_SERVER "Build dedicated server" ON) option(BUILD_CLIENT "Build client" ON) option(BUILD_RENDERER_GL1 "Build GL1 renderer" ON) option(BUILD_RENDERER_GL2 "Build GL2 renderer" ON) +option(BUILD_RENDERER_VULKAN "Build Vulkan renderer" OFF) option(BUILD_GAME_LIBRARIES "Build game module libraries" ON) option(BUILD_GAME_QVMS "Build game module qvms" ON) option(BUILD_STANDALONE "Build binaries for standalone games" OFF) @@ -36,6 +37,7 @@ option(USE_INTERNAL_JPEG "Use internal copy of libjpeg" ${USE_INTERNAL_LIBS}) option(USE_INTERNAL_OGG "Use internal copy of ogg" ${USE_INTERNAL_LIBS}) option(USE_INTERNAL_VORBIS "Use internal copy of vorbis" ${USE_INTERNAL_LIBS}) option(USE_INTERNAL_OPUS "Use internal copy of opus" ${USE_INTERNAL_LIBS}) +option(USE_INTERNAL_VULKAN_HEADERS "Use internal copy of Vulkan headers" ${USE_INTERNAL_LIBS}) # Release build by default, set externally if you want something else if(NOT CMAKE_CONFIGURATION_TYPES AND NOT CMAKE_BUILD_TYPE) @@ -99,6 +101,7 @@ include(libraries/all) include(server) include(renderer_gl1) include(renderer_gl2) +include(renderer_vulkan) include(client) include(basegame) include(missionpack) diff --git a/README.md b/README.md index 47a58f5d0d..5a5c451af2 100644 --- a/README.md +++ b/README.md @@ -134,6 +134,13 @@ For macOS, 7. Copy the resulting `ioquake3.app` in `/build/` to your `/Applications/ioquake3` folder. + To build with the Vulkan renderer, add `-DBUILD_RENDERER_VULKAN=ON` to the + cmake command in step 5. The Vulkan renderer requires MoltenVK at runtime: + + `brew install molten-vk` + + See `docs/vulkan-readme.md` for more details on macOS Vulkan setup. + For Emscripten, 1. Follow the installation instructions for the Emscripten SDK including setting up the environment with emsdk_env. https://emscripten.org/ @@ -163,6 +170,7 @@ The following CMake variables may be set, using `-D` on the command line. BUILD_CLIENT - build the 'ioquake3' client binary BUILD_RENDERER_OPENGL1 - build the opengl1 client / renderer library BUILD_RENDERER_OPENGL2 - build the opengl2 client / renderer library + BUILD_RENDERER_VULKAN - build the vulkan client / renderer library (default OFF) BUILD_GAME_LIBRARIES - build the game shared libraries BUILD_GAME_QVMS - build the game qvms BUILD_STANDALONE - build binaries suited for stand-alone games @@ -186,6 +194,8 @@ The following CMake variables may be set, using `-D` on the command line. 
USE_INTERNAL_OGG - build and link against internal ogg library USE_INTERNAL_VORBIS - build and link against internal Vorbis library USE_INTERNAL_OPUS - build and link against internal opus/opusfile libraries + USE_INTERNAL_VULKAN_HEADERS - use bundled Vulkan headers instead of system-provided + ones; only relevant when BUILD_RENDERER_VULKAN is ON EMSCRIPTEN_PRELOAD_FILE - set to 1 to package 'baseq3' (BASEGAME) directory containing pk3s and loose files as a single diff --git a/cmake/client.cmake b/cmake/client.cmake index 41dc93602e..fc67f26bf7 100644 --- a/cmake/client.cmake +++ b/cmake/client.cmake @@ -90,7 +90,8 @@ if(NOT USE_RENDERER_DLOPEN) target_sources(${CLIENT_BINARY} PRIVATE # These are never simultaneously populated ${RENDERER_GL1_BINARY_SOURCES} - ${RENDERER_GL2_BINARY_SOURCES}) + ${RENDERER_GL2_BINARY_SOURCES} + ${RENDERER_VULKAN_BINARY_SOURCES}) target_include_directories( ${CLIENT_BINARY} PRIVATE ${RENDERER_INCLUDE_DIRS}) target_compile_definitions( ${CLIENT_BINARY} PRIVATE ${RENDERER_DEFINITIONS}) diff --git a/cmake/libraries/freetype.cmake b/cmake/libraries/freetype.cmake index b7d5c98ccc..e6d95fb230 100644 --- a/cmake/libraries/freetype.cmake +++ b/cmake/libraries/freetype.cmake @@ -2,7 +2,7 @@ if(NOT USE_FREETYPE) return() endif() -if(NOT BUILD_RENDERER_GL1 AND NOT BUILD_RENDERER_GL2) +if(NOT BUILD_RENDERER_GL1 AND NOT BUILD_RENDERER_GL2 AND NOT BUILD_RENDERER_VULKAN) return() endif() diff --git a/cmake/libraries/jpeg.cmake b/cmake/libraries/jpeg.cmake index c3c7256e72..8ebae79267 100644 --- a/cmake/libraries/jpeg.cmake +++ b/cmake/libraries/jpeg.cmake @@ -1,4 +1,4 @@ -if(NOT BUILD_RENDERER_GL1 AND NOT BUILD_RENDERER_GL2) +if(NOT BUILD_RENDERER_GL1 AND NOT BUILD_RENDERER_GL2 AND NOT BUILD_RENDERER_VULKAN) return() endif() diff --git a/cmake/platforms/macos.cmake b/cmake/platforms/macos.cmake index ce681ae0ae..423e27537b 100644 --- a/cmake/platforms/macos.cmake +++ b/cmake/platforms/macos.cmake @@ -74,6 +74,11 @@ function(finish_macos_app) set_output_dirs(${RENDERER_GL2_BINARY} SUBDIRECTORY ${MACOS_APP_BINARY_DIR}) add_dependencies(${CLIENT_BINARY} ${RENDERER_GL2_BINARY}) endif() + + if(BUILD_RENDERER_VULKAN) + set_output_dirs(${RENDERER_VULKAN_BINARY} SUBDIRECTORY ${MACOS_APP_BINARY_DIR}) + add_dependencies(${CLIENT_BINARY} ${RENDERER_VULKAN_BINARY}) + endif() endif() endfunction() diff --git a/cmake/renderer_common.cmake b/cmake/renderer_common.cmake index 277bf46ad9..cc7f69d574 100644 --- a/cmake/renderer_common.cmake +++ b/cmake/renderer_common.cmake @@ -29,10 +29,23 @@ endif() if(USE_RENDERER_DLOPEN) list(APPEND RENDERER_DEFINITIONS USE_RENDERER_DLOPEN) -elseif(BUILD_RENDERER_GL1 AND BUILD_RENDERER_GL2) - message(FATAL_ERROR "Multiple static renderers enabled; choose one") -elseif(NOT BUILD_RENDERER_GL1 AND NOT BUILD_RENDERER_GL2) - message(FATAL_ERROR "Zero static renderers enabled; choose one") +else() + # Count how many static renderers are enabled + set(_STATIC_RENDERER_COUNT 0) + if(BUILD_RENDERER_GL1) + math(EXPR _STATIC_RENDERER_COUNT "${_STATIC_RENDERER_COUNT} + 1") + endif() + if(BUILD_RENDERER_GL2) + math(EXPR _STATIC_RENDERER_COUNT "${_STATIC_RENDERER_COUNT} + 1") + endif() + if(BUILD_RENDERER_VULKAN) + math(EXPR _STATIC_RENDERER_COUNT "${_STATIC_RENDERER_COUNT} + 1") + endif() + if(_STATIC_RENDERER_COUNT GREATER 1) + message(FATAL_ERROR "Multiple static renderers enabled; choose one") + elseif(_STATIC_RENDERER_COUNT EQUAL 0) + message(FATAL_ERROR "Zero static renderers enabled; choose one") + endif() endif() list(APPEND RENDERER_LIBRARIES 
${COMMON_LIBRARIES}) diff --git a/cmake/renderer_vulkan.cmake b/cmake/renderer_vulkan.cmake new file mode 100644 index 0000000000..26d71d7dd0 --- /dev/null +++ b/cmake/renderer_vulkan.cmake @@ -0,0 +1,137 @@ +if(NOT BUILD_CLIENT OR NOT BUILD_RENDERER_VULKAN) + return() +endif() + +include(utils/set_output_dirs) +include(renderer_common) + +set(RENDERER_VULKAN_SOURCES + ${SOURCE_DIR}/renderer_vulkan/glConfig.c + ${SOURCE_DIR}/renderer_vulkan/matrix_multiplication.c + ${SOURCE_DIR}/renderer_vulkan/RB_DrawNormals.c + ${SOURCE_DIR}/renderer_vulkan/RB_DrawTris.c + ${SOURCE_DIR}/renderer_vulkan/RB_ShowImages.c + ${SOURCE_DIR}/renderer_vulkan/RB_SurfaceAnim.c + ${SOURCE_DIR}/renderer_vulkan/R_DebugGraphics.c + ${SOURCE_DIR}/renderer_vulkan/RE_RegisterModel.c + ${SOURCE_DIR}/renderer_vulkan/R_FindShader.c + ${SOURCE_DIR}/renderer_vulkan/R_ImageBMP.c + ${SOURCE_DIR}/renderer_vulkan/R_ImageJPG.c + ${SOURCE_DIR}/renderer_vulkan/R_ImagePCX.c + ${SOURCE_DIR}/renderer_vulkan/R_ImagePNG.c + ${SOURCE_DIR}/renderer_vulkan/R_ImageProcess.c + ${SOURCE_DIR}/renderer_vulkan/R_ImageTGA.c + ${SOURCE_DIR}/renderer_vulkan/R_LerpTag.c + ${SOURCE_DIR}/renderer_vulkan/R_ListShader.c + ${SOURCE_DIR}/renderer_vulkan/R_LoadImage.c + ${SOURCE_DIR}/renderer_vulkan/R_LoadImage2.c + ${SOURCE_DIR}/renderer_vulkan/R_LoadMD3.c + ${SOURCE_DIR}/renderer_vulkan/R_LoadMDR.c + ${SOURCE_DIR}/renderer_vulkan/R_ModelBounds.c + ${SOURCE_DIR}/renderer_vulkan/R_Parser.c + ${SOURCE_DIR}/renderer_vulkan/R_PortalPlane.c + ${SOURCE_DIR}/renderer_vulkan/R_PrintMat.c + ${SOURCE_DIR}/renderer_vulkan/R_StretchRaw.c + ${SOURCE_DIR}/renderer_vulkan/ref_import.c + ${SOURCE_DIR}/renderer_vulkan/render_export.c + ${SOURCE_DIR}/renderer_vulkan/tr_animation.c + ${SOURCE_DIR}/renderer_vulkan/tr_backend.c + ${SOURCE_DIR}/renderer_vulkan/tr_bsp.c + ${SOURCE_DIR}/renderer_vulkan/tr_cmds.c + ${SOURCE_DIR}/renderer_vulkan/tr_Cull.c + ${SOURCE_DIR}/renderer_vulkan/tr_curve.c + ${SOURCE_DIR}/renderer_vulkan/tr_cvar.c + ${SOURCE_DIR}/renderer_vulkan/tr_flares.c + ${SOURCE_DIR}/renderer_vulkan/tr_fog.c + ${SOURCE_DIR}/renderer_vulkan/tr_fonts.c + ${SOURCE_DIR}/renderer_vulkan/tr_globals.c + ${SOURCE_DIR}/renderer_vulkan/tr_image.c + ${SOURCE_DIR}/renderer_vulkan/tr_init.c + ${SOURCE_DIR}/renderer_vulkan/tr_light.c + ${SOURCE_DIR}/renderer_vulkan/tr_main.c + ${SOURCE_DIR}/renderer_vulkan/tr_marks.c + ${SOURCE_DIR}/renderer_vulkan/tr_mesh.c + ${SOURCE_DIR}/renderer_vulkan/tr_model.c + ${SOURCE_DIR}/renderer_vulkan/tr_model_iqm.c + ${SOURCE_DIR}/renderer_vulkan/tr_noise.c + ${SOURCE_DIR}/renderer_vulkan/tr_scene.c + ${SOURCE_DIR}/renderer_vulkan/tr_shade.c + ${SOURCE_DIR}/renderer_vulkan/tr_shade_calc.c + ${SOURCE_DIR}/renderer_vulkan/tr_shader.c + ${SOURCE_DIR}/renderer_vulkan/tr_shadows.c + ${SOURCE_DIR}/renderer_vulkan/tr_sky.c + ${SOURCE_DIR}/renderer_vulkan/tr_surface.c + ${SOURCE_DIR}/renderer_vulkan/tr_world.c + ${SOURCE_DIR}/renderer_vulkan/vk_cmd.c + ${SOURCE_DIR}/renderer_vulkan/vk_create_window_SDL.c + ${SOURCE_DIR}/renderer_vulkan/vk_depth_attachment.c + ${SOURCE_DIR}/renderer_vulkan/vk_frame.c + ${SOURCE_DIR}/renderer_vulkan/vk_image.c + ${SOURCE_DIR}/renderer_vulkan/vk_image_sampler2.c + ${SOURCE_DIR}/renderer_vulkan/vk_init.c + ${SOURCE_DIR}/renderer_vulkan/vk_instance.c + ${SOURCE_DIR}/renderer_vulkan/vk_pipelines.c + ${SOURCE_DIR}/renderer_vulkan/vk_screenshot.c + ${SOURCE_DIR}/renderer_vulkan/vk_shade_geometry.c + ${SOURCE_DIR}/renderer_vulkan/vk_shaders.c + ${SOURCE_DIR}/renderer_vulkan/vk_swapchain.c +) + +# Pre-compiled SPIR-V shaders embedded 
as C byte arrays +set(RENDERER_VULKAN_SHADER_SOURCES + ${SOURCE_DIR}/renderer_vulkan/shaders/Compiled/multi_texture_clipping_plane_vert.c + ${SOURCE_DIR}/renderer_vulkan/shaders/Compiled/multi_texture_frag.c + ${SOURCE_DIR}/renderer_vulkan/shaders/Compiled/multi_texture_vert.c + ${SOURCE_DIR}/renderer_vulkan/shaders/Compiled/single_texture_clipping_plane_vert.c + ${SOURCE_DIR}/renderer_vulkan/shaders/Compiled/single_texture_frag.c + ${SOURCE_DIR}/renderer_vulkan/shaders/Compiled/single_texture_vert.c +) + +set(RENDERER_VULKAN_BASENAME renderer_vulkan) +set(RENDERER_VULKAN_BINARY ${RENDERER_VULKAN_BASENAME}) + +# The Vulkan renderer is self-contained: it has its own image loaders, font +# rendering, noise functions, and Com_Printf/Com_Error wrappers (in ref_import.c) +# rather than using renderercommon. It only needs puff.c for PNG decompression. +list(APPEND RENDERER_VULKAN_BINARY_SOURCES + ${RENDERER_VULKAN_SOURCES} + ${RENDERER_VULKAN_SHADER_SOURCES} + ${SOURCE_DIR}/renderercommon/puff.c + ${RENDERER_LIBRARY_SOURCES}) + +# Vulkan headers: use bundled copy or system-provided headers. +# The renderer loads Vulkan dynamically via SDL, so we only need the headers +# at compile time -- no link-time dependency on libvulkan. +if(USE_INTERNAL_VULKAN_HEADERS) + set(VULKAN_INCLUDE_DIRS ${SOURCE_DIR}/renderer_vulkan) +else() + find_path(VULKAN_INCLUDE_DIR vulkan/vulkan.h) + if(NOT VULKAN_INCLUDE_DIR) + message(FATAL_ERROR "System Vulkan headers not found. " + "Install vulkan-headers (e.g. 'brew install vulkan-headers' on macOS, " + "'sudo apt install libvulkan-dev' on Debian/Ubuntu, " + "'sudo dnf install vulkan-headers' on Fedora) " + "or set USE_INTERNAL_VULKAN_HEADERS=ON to use the bundled copy.") + endif() + set(VULKAN_INCLUDE_DIRS ${VULKAN_INCLUDE_DIR}) +endif() + +if(USE_RENDERER_DLOPEN) + # The Vulkan renderer's ref_import.c already provides Com_Printf and + # Com_Error, so we cannot include tr_subs.c from DYNAMIC_RENDERER_SOURCES. + # Include only q_shared.c and q_math.c directly. 
+ list(APPEND RENDERER_VULKAN_BINARY_SOURCES
+ ${SOURCE_DIR}/qcommon/q_shared.c
+ ${SOURCE_DIR}/qcommon/q_math.c)
+
+ add_library(${RENDERER_VULKAN_BINARY} SHARED ${RENDERER_VULKAN_BINARY_SOURCES})
+
+ target_link_libraries( ${RENDERER_VULKAN_BINARY} PRIVATE ${RENDERER_LIBRARIES})
+ target_include_directories( ${RENDERER_VULKAN_BINARY} PRIVATE ${RENDERER_INCLUDE_DIRS}
+ ${VULKAN_INCLUDE_DIRS})
+ target_compile_definitions( ${RENDERER_VULKAN_BINARY} PRIVATE ${RENDERER_DEFINITIONS})
+ target_compile_options( ${RENDERER_VULKAN_BINARY} PRIVATE ${RENDERER_COMPILE_OPTIONS})
+ target_link_options( ${RENDERER_VULKAN_BINARY} PRIVATE ${RENDERER_LINK_OPTIONS})
+
+ set_output_dirs(${RENDERER_VULKAN_BINARY})
+endif()
diff --git a/code/renderer_vulkan/RB_DrawNormals.c b/code/renderer_vulkan/RB_DrawNormals.c
new file mode 100644
index 0000000000..2557366774
--- /dev/null
+++ b/code/renderer_vulkan/RB_DrawNormals.c
@@ -0,0 +1,43 @@
+#include "tr_backend.h"
+#include "vk_shade_geometry.h"
+#include "tr_globals.h"
+#include "vk_pipelines.h"
+
+/*
+================
+Draws vertex normals for debugging
+================
+*/
+void RB_DrawNormals (shaderCommands_t* pTess, int numVertexes )
+{
+	// VULKAN
+	vec4_t xyz[SHADER_MAX_VERTEXES];
+	memcpy(xyz, pTess->xyz, numVertexes * sizeof(vec4_t));
+
+	memset(pTess->svars.colors, tr.identityLightByte, SHADER_MAX_VERTEXES * sizeof(color4ub_t));
+
+	int i = 0;
+	while (i < numVertexes)
+	{
+		int count = numVertexes - i;
+		if (count >= SHADER_MAX_VERTEXES/2 - 1)
+			count = SHADER_MAX_VERTEXES/2 - 1;
+
+		int k;
+		for (k = 0; k < count; k++)
+		{
+			VectorCopy(xyz[i + k], pTess->xyz[2*k]);
+			VectorMA(xyz[i + k], 2, pTess->normal[i + k], pTess->xyz[2*k + 1]);
+		}
+		pTess->numVertexes = 2 * count;
+		pTess->numIndexes = 0;
+
+		vk_UploadXYZI(pTess->xyz, pTess->numVertexes, NULL, 0);
+
+		updateMVP(backEnd.viewParms.isPortal, backEnd.projection2D, getptr_modelview_matrix());
+		vk_shade_geometry(g_stdPipelines.normals_debug_pipeline, VK_FALSE, DEPTH_RANGE_ZERO, VK_FALSE);
+
+		i += count;
+	}
+}
diff --git a/code/renderer_vulkan/RB_DrawNormals.h b/code/renderer_vulkan/RB_DrawNormals.h
new file mode 100644
index 0000000000..e0e646da65
--- /dev/null
+++ b/code/renderer_vulkan/RB_DrawNormals.h
@@ -0,0 +1,8 @@
+#ifndef RB_SHOW_NORMALS_H_
+#define RB_SHOW_NORMALS_H_
+
+struct shaderCommands_s;
+
+void RB_DrawNormals (struct shaderCommands_s* input , int numVertexes );
+
+#endif
diff --git a/code/renderer_vulkan/RB_DrawTris.c b/code/renderer_vulkan/RB_DrawTris.c
new file mode 100644
index 0000000000..750664c7e5
--- /dev/null
+++ b/code/renderer_vulkan/RB_DrawTris.c
@@ -0,0 +1,30 @@
+#include "RB_DrawTris.h"
+#include "tr_globals.h"
+#include "vk_shade_geometry.h"
+#include "vk_pipelines.h"
+#include "tr_backend.h"
+/*
+================
+Draws triangle outlines for debugging
+================
+*/
+void RB_DrawTris (shaderCommands_t * pInput)
+{
+	if (vk.features.fillModeNonSolid == VK_FALSE) {
+		static qboolean printed = qfalse;
+		if (!printed) {
+			ri.Printf(PRINT_WARNING, "RB_DrawTris: fillModeNonSolid not supported.\n");
+			printed = qtrue;
+		}
+		return;
+	}
+
+
+	updateCurDescriptor( tr.whiteImage->descriptor_set, 0);
+
+	// VULKAN
+
+	memset(pInput->svars.colors, 255, pInput->numVertexes * 4 );
+	VkPipeline pipeline = backEnd.viewParms.isMirror ?
g_stdPipelines.tris_mirror_debug_pipeline : g_stdPipelines.tris_debug_pipeline; + vk_shade_geometry(pipeline, VK_FALSE, DEPTH_RANGE_ZERO, VK_TRUE); +} diff --git a/code/renderer_vulkan/RB_DrawTris.h b/code/renderer_vulkan/RB_DrawTris.h new file mode 100644 index 0000000000..7b2bcece49 --- /dev/null +++ b/code/renderer_vulkan/RB_DrawTris.h @@ -0,0 +1,7 @@ +#ifndef RB_DRAWTRIS_H_ +#define RB_DRAWTRIS_H_ +struct shaderCommands_s; + +void RB_DrawTris (struct shaderCommands_s *input); + +#endif diff --git a/code/renderer_vulkan/RB_ShowImages.c b/code/renderer_vulkan/RB_ShowImages.c new file mode 100644 index 0000000000..93744743f4 --- /dev/null +++ b/code/renderer_vulkan/RB_ShowImages.c @@ -0,0 +1,85 @@ +#include "tr_backend.h" +#include "vk_shade_geometry.h" +#include "vk_pipelines.h" +#include "glConfig.h" + +/* +=============== +Draw all the images to the screen, on top of whatever was there. +This is used to test for texture thrashing. + +Also called by RE_EndRegistration +=============== +*/ +// TODO: move glConfig retated stuff to glConfig.c, + + +void RB_ShowImages(image_t ** const pImg, unsigned int N) +{ + + backEnd.projection2D = qtrue; + + const float black[4] = {0, 0, 0, 1}; + vk_clearColorAttachments(black); + + int width; + int height; + + R_GetWinResolution(&width, &height); + + const float w = width / 20; + const float h = height / 15; + + + tess.numIndexes = 6; + tess.numVertexes = 4; + + uint32_t i; + for (i = 0 ; i < N; ++i) + { + //image_t* image = tr.images[i]; + float x = i % 20 * w; + float y = i / 20 * h; + + tess.indexes[0] = 0; + tess.indexes[1] = 1; + tess.indexes[2] = 2; + tess.indexes[3] = 0; + tess.indexes[4] = 2; + tess.indexes[5] = 3; + + tess.xyz[0][0] = x; + tess.xyz[0][1] = y; + + tess.xyz[1][0] = x + w; + tess.xyz[1][1] = y; + + tess.xyz[2][0] = x + w; + tess.xyz[2][1] = y + h; + + tess.xyz[3][0] = x; + tess.xyz[3][1] = y + h; + + tess.svars.texcoords[0][0][0] = 0; + tess.svars.texcoords[0][0][1] = 0; + tess.svars.texcoords[0][1][0] = 1; + tess.svars.texcoords[0][1][1] = 0; + tess.svars.texcoords[0][2][0] = 1; + tess.svars.texcoords[0][2][1] = 1; + tess.svars.texcoords[0][3][0] = 0; + tess.svars.texcoords[0][3][1] = 1; + + memset( tess.svars.colors, 255, tess.numVertexes * 4 ); + + updateCurDescriptor( pImg[i]->descriptor_set, 0); + + vk_UploadXYZI(tess.xyz, 4, tess.indexes, 6); + + // updateMVP(backEnd.viewParms.isPortal, backEnd.projection2D, getptr_modelview_matrix()); + updateMVP( 0 , 1, getptr_modelview_matrix()); + + vk_shade_geometry(g_stdPipelines.images_debug_pipeline, VK_FALSE, DEPTH_RANGE_NORMAL, VK_TRUE); + } + tess.numIndexes = 0; + tess.numVertexes = 0; +} diff --git a/code/renderer_vulkan/RB_ShowImages.h b/code/renderer_vulkan/RB_ShowImages.h new file mode 100644 index 0000000000..a6a2230988 --- /dev/null +++ b/code/renderer_vulkan/RB_ShowImages.h @@ -0,0 +1,6 @@ +#ifndef RB_SHOW_IMAGES_H_ +#define RB_SHOW_IMAGES_H_ + +void RB_ShowImages(image_t ** const pImg, unsigned int N); + +#endif diff --git a/code/renderer_vulkan/RB_SurfaceAnim.c b/code/renderer_vulkan/RB_SurfaceAnim.c new file mode 100644 index 0000000000..e066640e7a --- /dev/null +++ b/code/renderer_vulkan/RB_SurfaceAnim.c @@ -0,0 +1,137 @@ +////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////// + +#include "tr_globals.h" +#include "RB_SurfaceAnim.h" +#include "tr_backend.h" + +void RB_MDRSurfaceAnim( mdrSurface_t *surface ) +{ + int j, k; + float backlerp; + + mdrBone_t bones[MDR_MAX_BONES], *bonePtr; + + 
+ // don't lerp if lerping off, or this is the only frame, or the last frame... + if (backEnd.currentEntity->e.oldframe == backEnd.currentEntity->e.frame) + { + backlerp = 0; // if backlerp is 0, lerping is off and frontlerp is never used + } + else + { + backlerp = backEnd.currentEntity->e.backlerp; + } + + mdrHeader_t* header = (mdrHeader_t *)((unsigned char *)surface + surface->ofsHeader); + + int frameSize = (size_t)( &((mdrFrame_t *)0)->bones[ header->numBones ] ); + + mdrFrame_t* frame = (mdrFrame_t *)((byte *)header + header->ofsFrames + backEnd.currentEntity->e.frame * frameSize ); + + mdrFrame_t* oldFrame = (mdrFrame_t *)((byte *)header + header->ofsFrames + backEnd.currentEntity->e.oldframe * frameSize ); + + RB_CHECKOVERFLOW( surface->numVerts, surface->numTriangles * 3 ); + + int* triangles = (int *) ((byte *)surface + surface->ofsTriangles); + int indexes = surface->numTriangles * 3; + int baseIndex = tess.numIndexes; + int baseVertex = tess.numVertexes; + + // Set up all triangles. + for (j = 0 ; j < indexes ; j++) + { + tess.indexes[baseIndex + j] = baseVertex + triangles[j]; + } + tess.numIndexes += indexes; + + // + // lerp all the needed bones + // + if ( !backlerp ) + { + // no lerping needed + bonePtr = frame->bones; + } + else + { + bonePtr = bones; + int nBones = header->numBones; + int n; + float tmp; + for ( n = 0 ; n < nBones; n++ ) + { + tmp = frame->bones[n].matrix[0][0]; + bones[n].matrix[0][0] = tmp + backlerp * (oldFrame->bones[n].matrix[0][0] - tmp); + tmp = frame->bones[n].matrix[0][1]; + bones[n].matrix[0][1] = tmp + backlerp * (oldFrame->bones[n].matrix[0][1] - tmp); + tmp = frame->bones[n].matrix[0][2]; + bones[n].matrix[0][2] = tmp + backlerp * (oldFrame->bones[n].matrix[0][2] - tmp); + tmp = frame->bones[n].matrix[0][3]; + bones[n].matrix[0][3] = tmp + backlerp * (oldFrame->bones[n].matrix[0][3] - tmp); + + tmp = frame->bones[n].matrix[1][0]; + bones[n].matrix[1][0] = tmp + backlerp * (oldFrame->bones[n].matrix[1][0] - tmp); + tmp = frame->bones[n].matrix[1][1]; + bones[n].matrix[1][1] = tmp + backlerp * (oldFrame->bones[n].matrix[1][1] - tmp); + tmp = frame->bones[n].matrix[1][2]; + bones[n].matrix[1][2] = tmp + backlerp * (oldFrame->bones[n].matrix[1][2] - tmp); + tmp = frame->bones[n].matrix[1][3]; + bones[n].matrix[1][3] = tmp + backlerp * (oldFrame->bones[n].matrix[1][3] - tmp); + + tmp = frame->bones[n].matrix[2][0]; + bones[n].matrix[2][0] = tmp + backlerp * (oldFrame->bones[n].matrix[2][0] - tmp); + tmp = frame->bones[n].matrix[2][1]; + bones[n].matrix[2][1] = tmp + backlerp * (oldFrame->bones[n].matrix[2][1] - tmp); + tmp = frame->bones[n].matrix[2][2]; + bones[n].matrix[2][2] = tmp + backlerp * (oldFrame->bones[n].matrix[2][2] - tmp); + tmp = frame->bones[n].matrix[2][3]; + bones[n].matrix[2][3] = tmp + backlerp * (oldFrame->bones[n].matrix[2][3] - tmp); + } + } + + // + // deform the vertexes by the lerped bones + // + mdrVertex_t* v = (mdrVertex_t *) ((unsigned char *)surface + surface->ofsVerts); + int numVerts = surface->numVerts; + for( j = 0; j < numVerts; j++ ) + { + float tempVert[3] = {0, 0, 0}; + float tempNormal[3] = {0, 0, 0}; + + mdrWeight_t *w = v->weights; + + for ( k = 0 ; k < v->numWeights ; k++, w++ ) + { + mdrBone_t* bone = bonePtr + w->boneIndex; + + tempVert[0] += w->boneWeight * ( DotProduct( bone->matrix[0], w->offset ) + bone->matrix[0][3] ); + tempVert[1] += w->boneWeight * ( DotProduct( bone->matrix[1], w->offset ) + bone->matrix[1][3] ); + tempVert[2] += w->boneWeight * ( DotProduct( bone->matrix[2], w->offset ) + 
bone->matrix[2][3] );
+
+			tempNormal[0] += w->boneWeight * DotProduct( bone->matrix[0], v->normal );
+			tempNormal[1] += w->boneWeight * DotProduct( bone->matrix[1], v->normal );
+			tempNormal[2] += w->boneWeight * DotProduct( bone->matrix[2], v->normal );
+		}
+
+		tess.xyz[baseVertex + j][0] = tempVert[0];
+		tess.xyz[baseVertex + j][1] = tempVert[1];
+		tess.xyz[baseVertex + j][2] = tempVert[2];
+
+		tess.normal[baseVertex + j][0] = tempNormal[0];
+		tess.normal[baseVertex + j][1] = tempNormal[1];
+		tess.normal[baseVertex + j][2] = tempNormal[2];
+
+		tess.texCoords[baseVertex + j][0][0] = v->texCoords[0];
+		tess.texCoords[baseVertex + j][0][1] = v->texCoords[1];
+
+		// suppress GCC strict-aliasing warning
+		{
+			mdrWeight_t* pTmp = &v->weights[v->numWeights];
+			v = (mdrVertex_t *) pTmp;
+		}
+	}
+
+	tess.numVertexes += surface->numVerts;
+}
diff --git a/code/renderer_vulkan/RB_SurfaceAnim.h b/code/renderer_vulkan/RB_SurfaceAnim.h
new file mode 100644
index 0000000000..13799f2b83
--- /dev/null
+++ b/code/renderer_vulkan/RB_SurfaceAnim.h
@@ -0,0 +1,5 @@
+#ifndef RB_SURFACEANIM_H
+#define RB_SURFACEANIM_H
+#include "tr_model.h"
+void RB_MDRSurfaceAnim( mdrSurface_t *surface );
+#endif
diff --git a/code/renderer_vulkan/README.md b/code/renderer_vulkan/README.md
new file mode 100644
index 0000000000..4255abdee4
--- /dev/null
+++ b/code/renderer_vulkan/README.md
@@ -0,0 +1,148 @@
+# Vulkan backend info
+* The code in this directory is borrowed from
+https://github.com/kennyalive/Quake-III-Arena-Kenny-Edition, converted from
+C++ to C so that it compiles here.
+
+* The vulkan folder is copied from the Vulkan SDK; not everything in it is
+used, and it needs cleaning up.
+
+* Help, documentation, and instructions are welcome.
+
+
+# Rendering
+
+## General setup.
+
+A single command buffer records all the commands. A single render pass specifies the color and depth-stencil attachments.
+The stencil buffer is used to render Q3's stencil shadows (cg\_shadows=2).
+
+## Geometry.
+The Quake 3 renderer prepares the geometry for each draw call in the tess.xyz and tess.indexes arrays.
+The OpenGL backend calls qglDrawElements to feed this geometry to the GPU.
+The Vulkan backend instead appends the data to geometry buffers bound to a host-visible memory chunk.
+By the time the command buffer is submitted to the queue at the end of the frame, the geometry buffers contain all the geometry needed to render it.
+Typically up to 500KB of vertex data and up to 100KB of index data are copied per frame.
+
+## Descriptor sets.
+A separate descriptor set is created for each image used by the renderer.
+Each descriptor set contains a single descriptor (a combined image sampler).
+Each draw call binds either one or two descriptor sets (two when a lightmap is available).
+Descriptor sets are updated only once, during initialization; there are no descriptor set updates during a frame.
+
+## Per-primitive uniform data.
+Vulkan guarantees a push-constant range of at least 128 bytes.
+An ordinary view uses 64 bytes to specify the MVP transform.
+Portal/mirror views use an additional 64 bytes to specify the eye transform and clipping plane.
+
+Pipeline layout: 2 descriptor sets plus a 128-byte push-constant range.
+
+## Pipelines.
+Standard pipelines are created when the renderer starts.
+They are used for skybox rendering, fog/dynamic light effects, shadow volumes and various debug features.
+Map-specific pipelines are created while parsing Q3 shaders during map load.
+For each Q3 shader we create three pipelines: one to render the regular view,
+and two additional pipelines for portal and mirror views.
+
+## Shaders.
+The shaders emulate the corresponding fixed-function functionality.
+The vertex shaders are unremarkable, except that for portal/mirror views
+we additionally compute the distance to the clipping plane.
+The fragment shaders do one or two texture lookups and modulate the result by the color.
+
+## Draw calls.
+vkCmdDrawIndexed is used to draw geometry in most cases. Additionally, a few
+debug features use vkCmdDraw to submit unindexed vertexes.
+
+## Code
+vk.h provides the interface that brings Vulkan support to the Q3 renderer.
+The interface is quite concise: about a dozen functions, divided into 3 categories:
+initialization functions, resource management functions and rendering setup functions.
+
+### Initialization:
+
+* vk\_initialize : initialize Vulkan backend
+* vk\_shutdown : shutdown Vulkan backend
+
+### Resource management:
+
+* images: vk\_create\_image/vk\_upload\_image\_data
+
+* descriptor sets: vk\_update\_descriptor\_set
+
+* samplers: vk\_find\_sampler
+
+* pipelines: vk\_find\_pipeline
+
+### Rendering setup:
+
+* vk\_clear\_attachments : clears the framebuffer's attachments.
+
+* vk\_bind\_geometry : called when we start drawing new geometry.
+
+* vk\_shade\_geometry : shades the geometry specified with vk\_bind\_geometry;
+can be called multiple times for Q3's multi-stage shaders.
+
+* vk\_begin\_frame/vk\_end\_frame : frame setup.
+
+* vk\_read\_pixels : takes a screenshot.
+
+A short sketch of how these calls fit together in a frame appears below.
+
+### Adjusting the intensity/gamma of the drawing window
+
+* Use r\_gamma in the pulldown console; it nonlinearly corrects the image
+before it is uploaded to the GPU:
+`\r_gamma 1.5` then `\vid_restart`
+
+* You can also use r\_intensity, which scales the intensity linearly.
+```
+# 1.5 ~ 2.0 gives acceptable results
+$ \r_intensity 1.8
+$ \vid_restart
+```
+
+* Why do it this way? The original gamma code changed the brightness of the
+entire desktop. That works on most newer machines but not on some others,
+and it is buggy and embarrassing when the program quits abnormally or
+stalls, leaving the desktop gamma altered.
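+### Frame sketch
+
+To make the rendering setup flow above concrete, here is a minimal sketch of
+one frame through this interface. The vk\_UploadXYZI, updateMVP and
+vk\_shade\_geometry call shapes match their uses elsewhere in this directory;
+vk\_begin\_frame/vk\_end\_frame are assumed here to take no arguments, and
+drawFrameSketch is a hypothetical helper -- see vk.h for the real prototypes.
+
+```c
+// One frame, schematically. Multi-stage Q3 shaders simply call
+// vk_shade_geometry once per stage with a different pipeline.
+static void drawFrameSketch(VkPipeline pipeline)
+{
+    vk_begin_frame();   // assumed signature: acquire image, begin command buffer
+
+    // For each surface: append tess.xyz / tess.indexes to the host-visible
+    // geometry buffers and update the push-constant MVP transform.
+    vk_UploadXYZI(tess.xyz, tess.numVertexes, tess.indexes, tess.numIndexes);
+    updateMVP(backEnd.viewParms.isPortal, backEnd.projection2D,
+              getptr_modelview_matrix());
+
+    // pipeline, portal-clipping flag, depth range, indexed draw
+    vk_shade_geometry(pipeline, VK_FALSE, DEPTH_RANGE_NORMAL, VK_TRUE);
+
+    vk_end_frame();     // assumed signature: submit command buffer and present
+}
+```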
+
+### New commands
+
+* pipelineList: list the pipelines we have created;
+* gpuMem: show image memory allocated on the GPU;
+* printOR: print the value of backEnd.or;
+* displayResoList: list the display resolutions your monitor supports.
+For example:
+```
+$ \displayResoList
+
+Mode 0: 320x240
+Mode 1: 400x300
+Mode 2: 512x384
+Mode 3: 640x480 (480p)
+Mode 4: 800x600
+Mode 5: 960x720
+Mode 6: 1024x768
+Mode 7: 1152x864
+Mode 8: 1280x1024
+Mode 9: 1600x1200
+Mode 10: 2048x1536
+Mode 11: 856x480
+Mode 12: 1280x720 (720p)
+Mode 13: 1280x768
+Mode 14: 1280x800
+Mode 15: 1280x960
+Mode 16: 1360x768
+Mode 17: 1366x768
+Mode 18: 1360x1024
+Mode 19: 1400x1050
+Mode 20: 1400x900
+Mode 21: 1600x900
+Mode 22: 1680x1050
+Mode 23: 1920x1080 (1080p)
+Mode 24: 1920x1200
+Mode 25: 1920x1440
+Mode 26: 2560x1080
+Mode 27: 2560x1600
+Mode 28: 3840x2160 (4K)
+
+$ \r_mode 12
+$ \vid_restart
+```
+
+
+### TODO:
+* get CPU, GPU, and memory usage
diff --git a/code/renderer_vulkan/RE_RegisterModel.c b/code/renderer_vulkan/RE_RegisterModel.c
new file mode 100644
index 0000000000..6f69faaaed
--- /dev/null
+++ b/code/renderer_vulkan/RE_RegisterModel.c
@@ -0,0 +1,142 @@
+#include "tr_local.h"
+#include "tr_model.h"
+#include "tr_globals.h"
+#include "ref_import.h"
+
+
+typedef struct
+{
+	char *ext;
+	qhandle_t (* ModelLoader)( const char * , model_t * );
+} modelExtToLoaderMap_t;
+
+// Note that the ordering indicates the order of preference used
+// when there are multiple models of different formats available
+static const modelExtToLoaderMap_t modelLoaders[ ] =
+{
+	{ "md3", R_RegisterMD3 },
+	{ "mdr", R_RegisterMDR },
+	{ "iqm", R_RegisterIQM }
+};
+
+static const uint32_t numModelLoaders = ARRAY_LEN(modelLoaders);
+
+
+/*
+====================
+Loads in a model for the given name
+
+Zero will be returned if the model fails to load.
+An entry will be retained for failed models as an
+optimization to prevent disk rescanning if they are
+asked for again.
+====================
+*/
+qhandle_t RE_RegisterModel( const char *name )
+{
+	ri.Printf( PRINT_ALL, "RegisterModel: %s.\n", name);
+
+	qboolean orgNameFailed = qfalse;
+	int orgLoader = -1;
+
+	if ( !name || !name[0] ) {
+		ri.Printf( PRINT_WARNING, "RE_RegisterModel: NULL name\n" );
+		return 0;
+	}
+
+	if ( strlen( name ) >= MAX_QPATH ) {
+		ri.Printf( PRINT_WARNING, "Model name exceeds MAX_QPATH\n" );
+		return 0;
+	}
+
+	//
+	// search the currently loaded models
+	//
+	qhandle_t hModel;
+
+	for ( hModel = 1; hModel < tr.numModels; hModel++ )
+	{
+		if ( 0 == strcmp( tr.models[hModel]->name, name ) )
+		{
+			if( tr.models[hModel]->type == MOD_BAD )
+			{
+				ri.Printf( PRINT_WARNING, "tr.models[%d]->type = MOD_BAD \n", hModel);
+				return 0;
+			}
+			return hModel;
+		}
+	}
+
+	// refuse to overflow the model table
+	if ( tr.numModels >= MAX_MOD_KNOWN )
+	{
+		ri.Printf( PRINT_WARNING, "RE_RegisterModel: MAX_MOD_KNOWN reached\n" );
+		return 0;
+	}
+
+	// allocate a new model_t
+	ri.Printf( PRINT_ALL, "Allocate memory for %s.\n", name);
+
+	model_t* mod = ri.Hunk_Alloc( sizeof( model_t ), h_low );
+
+	strncpy(mod->name, name, MAX_QPATH);
+	mod->index = tr.numModels;
+	mod->type = MOD_BAD;	// stays MOD_BAD until a loader succeeds
+	mod->numLods = 0;
+
+	tr.models[tr.numModels] = mod;
+	++tr.numModels;
+
+	// load the files
+
+	const char* dot = strrchr(name, '.');
+
+	if(dot != NULL)
+	{
+		if( (dot[1] == 'm') && (dot[2] == 'd') && (dot[3] == '3') )
+		{
+			hModel = R_RegisterMD3(name, mod);
+		}
+		else if( (dot[1] == 'm') && (dot[2] == 'd') && (dot[3] == 'r') )
+		{
+			hModel = R_RegisterMDR(name, mod);
+		}
+		else if( (dot[1] == 'i') && (dot[2] == 'q') && (dot[3] == 'm') )
+		{
+			hModel = R_RegisterIQM(name, mod);
+		}
+		else
+		{
+			ri.Printf( PRINT_WARNING, "%s: format not supported.\n", name);
+		}
+	}
+	else
+	{
+		ri.Printf( PRINT_WARNING, "RE_RegisterModel: %s has no extension; "
+			"trying all the supported model formats\n", name);
+
+		uint32_t i;
+		for( i = 0; i < numModelLoaders; i++ )
+		{
+			if (i == orgLoader)
+				continue;
+
+			char altName[ MAX_QPATH * 2 ] = {0};
+			snprintf( altName, sizeof (altName), "%s.%s", name, modelLoaders[ i ].ext );
+
+			// Load
+			hModel = modelLoaders[ i ].ModelLoader( altName, mod );
+
+			if( hModel )
+			{
+				if( orgNameFailed )
+				{
+					ri.Printf( PRINT_ALL, "WARNING: %s not present, using %s instead\n",
+						name, altName );
+				}
+
+				break;
+			}
+		}
+	}
+	return hModel;
+}
diff --git a/code/renderer_vulkan/R_DebugGraphics.c b/code/renderer_vulkan/R_DebugGraphics.c
new file mode 100644
index 0000000000..0e3ad63aab
--- /dev/null
+++ b/code/renderer_vulkan/R_DebugGraphics.c
@@ -0,0 +1,109 @@
+#include "tr_globals.h"
+#include "vk_shade_geometry.h"
+#include "vk_instance.h"
+#include "vk_image.h"
+#include "vk_pipelines.h"
+#include "tr_cvar.h"
+#include "tr_backend.h"
+#include "ref_import.h"
+#include "matrix_multiplication.h"
+
+
+void R_DebugPolygon( int color, int numPoints, float *points )
+{
+	if (numPoints < 3 || numPoints >= SHADER_MAX_VERTEXES/2)
+		return;
+	int i;
+	// In Vulkan we don't have a GL_POLYGON + GLS_POLYMODE_LINE equivalent,
+	// so we use lines to draw the polygon outlines. This has the additional
+	// implication that we must do manual backface culling to reject outlines
+	// that belong to back-facing polygons. The code assumes that polygons are convex.
+
+	// Backface culling.
+	float pa[3], pb[3], p[3];
+
+
+	const float* m = getptr_modelview_matrix();
+
+	// transform to eye space
+	Vec3Transform(points, m, pa);
+	Vec3Transform(&points[3], m, pb);
+	VectorSubtract(pb, pa, p);
+
+	float n[3];
+	for (i = 2; i < numPoints; i++)
+	{
+		Vec3Transform(&points[3*i], m, pb);
+		float q[3];
+		VectorSubtract(pb, pa, q);
+		CrossProduct(q, p, n);
+		if (VectorLength(n) > 1e-5)
+			break;
+	}
+	if (DotProduct(n, pa) >= 0)
+		return; // discard backfacing polygon
+
+	// Solid shade.
+	for (i = 0; i < numPoints; i++)
+	{
+		VectorCopy(&points[3*i], tess.xyz[i]);
+
+		tess.svars.colors[i][0] = (color&1) ? 255 : 0;
+		tess.svars.colors[i][1] = (color&2) ? 255 : 0;
+		tess.svars.colors[i][2] = (color&4) ?
255 : 0; + tess.svars.colors[i][3] = 255; + } + tess.numVertexes = numPoints; + + tess.numIndexes = 0; + for (i = 1; i < numPoints - 1; i++) + { + tess.indexes[tess.numIndexes + 0] = 0; + tess.indexes[tess.numIndexes + 1] = i; + tess.indexes[tess.numIndexes + 2] = i + 1; + tess.numIndexes += 3; + } + + vk_UploadXYZI(tess.xyz, tess.numVertexes, tess.indexes, tess.numIndexes); + + updateMVP(backEnd.viewParms.isPortal, backEnd.projection2D, getptr_modelview_matrix()); + + vk_shade_geometry(g_stdPipelines.surface_debug_pipeline_solid, VK_FALSE, DEPTH_RANGE_NORMAL, VK_TRUE); + + + // Outline. + memset(tess.svars.colors, tr.identityLightByte, numPoints * 2 * sizeof(color4ub_t)); + + for (i = 0; i < numPoints; i++) + { + VectorCopy(&points[3*i], tess.xyz[2*i]); + VectorCopy(&points[3*((i + 1) % numPoints)], tess.xyz[2*i + 1]); + } + tess.numVertexes = numPoints * 2; + tess.numIndexes = 0; + + vk_UploadXYZI(tess.xyz, tess.numVertexes, tess.indexes, 0); + + updateMVP(backEnd.viewParms.isPortal, backEnd.projection2D, getptr_modelview_matrix()); + vk_shade_geometry(g_stdPipelines.surface_debug_pipeline_outline, VK_FALSE, DEPTH_RANGE_ZERO, VK_FALSE); + + tess.numVertexes = 0; +} + +/* +==================== +R_DebugGraphics + +Visualization aid for movement clipping debugging +==================== +*/ +void R_DebugGraphics( void ) +{ + // the render thread can't make callbacks to the main thread + if ( tr.registered ) { + R_IssueRenderCommands( qfalse ); + } + + updateCurDescriptor( tr.whiteImage->descriptor_set, 0); + ri.CM_DrawDebugSurface( R_DebugPolygon ); +} diff --git a/code/renderer_vulkan/R_DebugGraphics.h b/code/renderer_vulkan/R_DebugGraphics.h new file mode 100644 index 0000000000..a4f3e31eba --- /dev/null +++ b/code/renderer_vulkan/R_DebugGraphics.h @@ -0,0 +1,5 @@ +#ifndef R_DEBUG_GRAPHICS_H_ +#define R_DEBUG_GRAPHICS_H_ + +void R_DebugGraphics(void); +#endif diff --git a/code/renderer_vulkan/R_FindShader.c b/code/renderer_vulkan/R_FindShader.c new file mode 100644 index 0000000000..8f9442819c --- /dev/null +++ b/code/renderer_vulkan/R_FindShader.c @@ -0,0 +1,678 @@ +#include "tr_local.h" +#include "vk_image.h" +#include "tr_cvar.h" +#include "ref_import.h" + +#include "R_PrintMat.h" +#include "R_Parser.h" +#include "tr_globals.h" +#include "tr_shader.h" + + +#define MAX_SHADERTEXT_HASH 2048 +static char** shaderTextHashTable[MAX_SHADERTEXT_HASH] ={ 0 }; + +#define FILE_HASH_SIZE 1024 +static shader_t* hashTable[FILE_HASH_SIZE] = {0}; + + +static char *s_shaderText = NULL; + +/* +================ +return a hash value for the filename +================ +*/ +static int generateHashValue( const char *fname, const int size ) +{ + int i = 0; + long hash = 0; + + while (fname[i] != '\0') + { + char letter = tolower(fname[i]); + if (letter =='.') break; // don't include extension + if (letter =='\\') letter = '/'; // damn path names + if (letter == PATH_SEP) letter = '/'; // damn path names + hash+=(long)(letter)*(i+119); + i++; + } + hash = (hash ^ (hash >> 10) ^ (hash >> 20)); + hash &= (size-1); + return hash; +} + + + +void R_ClearShaderHashTable(void) +{ + memset(hashTable, 0, sizeof(hashTable)); +} + + + +/* +==================== +FindShaderInShaderText + +Scans the combined text description of all the shader files for the given +shader name. If found, it will return a valid shader, return NULL if not found. 
+===================== +*/ +static char* FindShaderInShaderText( const char *shadername ) +{ + +// ri.Printf( PRINT_ALL, "FindShaderInShaderText: %s\n", shadername); + + int hash = generateHashValue(shadername, MAX_SHADERTEXT_HASH); + + int i; + for (i = 0; shaderTextHashTable[hash][i]; i++) + { + char* p = shaderTextHashTable[hash][i]; + char* token = R_ParseExt(&p, qtrue); + if ( !Q_stricmp( token, shadername ) ) + { + return p; + } + } + + char* p = s_shaderText; + + if ( !p ) { + return NULL; + } + + // look for label + while ( 1 ) + { + char* token = R_ParseExt( &p, qtrue ); + + if( token[0] == 0 ) + { + break; + } + + if ( !Q_stricmp( token, shadername ) ) + { + return p; + } + else + { + // skip the definition, tr_common + // -> R_SkipBracedSection ? + SkipBracedSection( &p , 0); + } + } + + return NULL; +} + + + +/* +=============== +R_FindShader + +Will always return a valid shader, but it might be the +default shader if the real one can't be found. + +In the interest of not requiring an explicit shader text entry to +be defined for every single image used in the game, three default +shader behaviors can be auto-created for any image: + +If lightmapIndex == LIGHTMAP_NONE, then the image will have +dynamic diffuse lighting applied to it, as apropriate for most +entity skin surfaces. + +If lightmapIndex == LIGHTMAP_2D, then the image will be used +for 2D rendering unless an explicit shader is found + +If lightmapIndex == LIGHTMAP_BY_VERTEX, then the image will use +the vertex rgba modulate values, as apropriate for misc_model +pre-lit surfaces. + +Other lightmapIndex values will have a lightmap stage created +and src*dest blending applied with the texture, as apropriate for +most world construction surfaces. + +=============== +*/ + +extern void setDefaultShader(void); + + +shader_t* R_FindShader( const char *name, int lightmapIndex, qboolean mipRawImage ) +{ + char strippedName[MAX_QPATH] = {0}; + + if ( name == NULL ) + { + ri.Printf( PRINT_WARNING, "Find Shader: name = NULL\n"); + return tr.defaultShader; + } + + + // use (fullbright) vertex lighting if the bsp file doesn't have lightmaps + if ( (lightmapIndex >= 0) && (lightmapIndex >= tr.numLightmaps) ) + { + lightmapIndex = LIGHTMAP_BY_VERTEX; + } + else if ( lightmapIndex < LIGHTMAP_2D ) + { + // negative lightmap indexes cause stray pointers (think tr.lightmaps[lightmapIndex]) + ri.Printf( PRINT_WARNING, "WARNING: shader '%s' has invalid lightmap index of %d\n", name, lightmapIndex ); + lightmapIndex = LIGHTMAP_BY_VERTEX; + } + + + R_StripExtension(name, strippedName, sizeof(strippedName)); + + int hash = generateHashValue(strippedName, FILE_HASH_SIZE); + + // + // see if the shader is already loaded + // + { + shader_t* sh = hashTable[hash]; + while ( sh ) + { + // NOTE: if there was no shader or image available with the name strippedName + // then a default shader is created with lightmapIndex == LIGHTMAP_NONE, so we + // have to check all default shaders otherwise for every call to R_findShader + // with that same strippedName a new default shader is created. 
+ if ( ( 0 == Q_stricmp(sh->name, strippedName) ) && (sh->lightmapIndex == lightmapIndex || sh->defaultShader) ) + { + // match found + return sh; + } + + sh = sh->next; + } + } + + R_SetTheShader( strippedName, lightmapIndex ); + + // + // attempt to define shader from an explicit parameter file + // + { + char* shaderText = FindShaderInShaderText( strippedName ); + if ( shaderText ) + { + // enable this when building a pak file to get a global list + // of all explicit shaders + if ( r_printShaders->integer ) { + ri.Printf( PRINT_ALL, "*SHADER* %s\n", name ); + } + + if ( !ParseShader( &shaderText ) ) + { + // had errors, so use default shader + R_SetDefaultShader( ); + ri.Printf( PRINT_WARNING, "ParseShader: %s had errors\n", strippedName ); + } + + return FinishShader(); + } + } + + + // if not defined in the in-memory shader descriptions, + // look for a single supported image file + + /* + char fileName[128] = {0}; + { + qboolean ptExist = qfalse; + int i = 0; + + while(name[i] != '\0') + { + fileName[i] = name[i]; + if(fileName[i] == '.') + { + ptExist = qtrue; + } + i++; + } + + // if path doesn't have an extension, then append + // the specified one (which should include the .) + + if(ptExist == qtrue) + fileName[i] = '\0'; + else + { + fileName[i++] = '.'; + fileName[i++] = 't'; + fileName[i++] = 'g'; + fileName[i++] = 'a'; + fileName[i] = '\0'; + } + } + */ + + + image_t* image = R_FindImageFile( name, mipRawImage, mipRawImage, mipRawImage ? GL_REPEAT : GL_CLAMP ); + + if(image != NULL) + { + // create the default shading commands + R_CreateDefaultShadingCmds(name, image); + } + else + { + setDefaultShader(); + } + + + return FinishShader(); +} + + + + + + +/* +==================== +This is the exported shader entry point for the rest of the system +It will always return an index that will be valid. 
+ +This should really only be used for explicit shaders, because there is no +way to ask for different implicit lighting modes (vertex, lightmap, etc) +==================== +*/ +qhandle_t RE_RegisterShader( const char *name ) +{ + + if ( strlen( name ) >= MAX_QPATH ) { + ri.Printf(PRINT_ALL, "Shader name exceeds MAX_QPATH\n" ); + return 0; + } + + shader_t* sh = R_FindShader( name, LIGHTMAP_2D, qtrue ); + + // we want to return 0 if the shader failed to + // load for some reason, but R_FindShader should + // still keep a name allocated for it, so if + // something calls RE_RegisterShader again with + // the same name, we don't try looking for it again + if ( sh->defaultShader ) { + return 0; + } + + return sh->index; +} + +/* +==================== +RE_RegisterShaderNoMip + +For menu graphics that should never be picmiped +==================== +*/ +qhandle_t RE_RegisterShaderNoMip( const char *name ) +{ + + if ( strlen( name ) >= MAX_QPATH ) { + ri.Printf(PRINT_ALL, "Shader name exceeds MAX_QPATH\n" ); + return 0; + } + + shader_t* sh = R_FindShader( name, LIGHTMAP_2D, qfalse ); + + // we want to return 0 if the shader failed to + // load for some reason, but R_FindShader should + // still keep a name allocated for it, so if + // something calls RE_RegisterShader again with + // the same name, we don't try looking for it again + if ( sh->defaultShader ) { + return 0; + } + + return sh->index; +} + + +qhandle_t R_RegisterShaderFromImage(const char *name, int lightmapIndex, image_t *image, qboolean mipRawImage) +{ + + int hash = generateHashValue(name, FILE_HASH_SIZE); + + // + // see if the shader is already loaded + // + shader_t* sh = hashTable[hash]; + while(sh) + { + // NOTE: if there was no shader or image available with the name strippedName + // then a default shader is created with lightmapIndex == LIGHTMAP_NONE, so we + // have to check all default shaders otherwise for every call to R_FindShader + // with that same strippedName a new default shader is created. 
+ if ( (sh->lightmapIndex == lightmapIndex || sh->defaultShader) && + // index by name + !Q_stricmp(sh->name, name)) { + // match found + return sh->index; + } + + sh = sh->next; + } + + + R_SetTheShader( name, lightmapIndex ); + + + // + // create the default shading commands + // + R_CreateDefaultShadingCmds(name, image); + + sh = FinishShader(); + return sh->index; +} + + +/* +==================== +ScanAndLoadShaderFiles + +Finds and loads all .shader files, combining them into +a single large text block that can be scanned for shader names +===================== +*/ + +static void BuildSingleLargeBuffer(char* buffers[], const int nShaderFiles, const int sum) +{ + // build single large buffer + s_shaderText = ri.Hunk_Alloc( sum + nShaderFiles*2, h_low ); + s_shaderText[ 0 ] = '\0'; + + char* textEnd = s_shaderText; + int n = nShaderFiles - 1; + // free in reverse order, so the temp files are all dumped + for ( n = nShaderFiles - 1; n >= 0 ; n-- ) + { + if ( buffers[n] ) + { + strcat( textEnd, buffers[n] ); + strcat( textEnd, "\n" ); + + textEnd += strlen(buffers[n]) + 1; + + ri.FS_FreeFile( buffers[n] ); + } + } +} + + +static void Shader_DoSimpleCheck(char* name, char* p) +{ + char* pBuf = p; + + R_BeginParseSession(name); + + while(1) + { + char* token = R_ParseExt(&p, qtrue); + if(0 == *token) + break; + char shaderName[64]={0}; + strncpy(shaderName, token, sizeof(shaderName)); + + int shaderLine = R_GetCurrentParseLine(); + + token = R_ParseExt(&p, qtrue); + if(token[0] != '{' || token[1] != '\0') + { + ri.Printf(PRINT_WARNING, "WARNING: Ignoring shader file %s. Shader \"%s\" on line %d missing opening brace", + name, shaderName, shaderLine); + if (token[0]) + { + ri.Printf(PRINT_WARNING, " (found \"%s\" on line %d)", token, R_GetCurrentParseLine()); + } + ri.Printf(PRINT_WARNING, ".\n"); + ri.FS_FreeFile(pBuf); + pBuf = NULL; + break; + } + + if(!SkipBracedSection(&p, 1)) + { + ri.Printf(PRINT_WARNING, "WARNING: Ignoring shader file %s. 
Shader \"%s\" on line %d missing closing brace.\n",
+				name, shaderName, shaderLine);
+			ri.FS_FreeFile(pBuf);
+			pBuf = NULL;
+			break;
+		}
+	}
+
+}
+
+
+static void SetShaderTextHashTableSizes( void )
+{
+	int shaderTextHashTableSizes[MAX_SHADERTEXT_HASH];
+	memset(shaderTextHashTableSizes, 0, sizeof(shaderTextHashTableSizes));
+
+	int size = 0;
+
+	char* p = s_shaderText;
+	// look for shader names
+	while ( 1 )
+	{
+		char* token = R_ParseExt( &p, qtrue );
+		if ( token[0] == 0 )
+		{
+			break;
+		}
+
+		int hash = generateHashValue(token, MAX_SHADERTEXT_HASH);
+		shaderTextHashTableSizes[hash]++;
+		size++;
+		SkipBracedSection(&p, 0);
+	}
+
+	size += MAX_SHADERTEXT_HASH;
+
+
+	char* hashMem = (char*)ri.Hunk_Alloc( size * sizeof(char *), h_low );
+
+	int i;
+	for (i = 0; i < MAX_SHADERTEXT_HASH; i++)
+	{
+		shaderTextHashTable[i] = (char **) hashMem;
+		hashMem += (shaderTextHashTableSizes[i] + 1) * sizeof(char *);
+	}
+
+	memset(shaderTextHashTableSizes, 0, sizeof(shaderTextHashTableSizes));
+
+	p = s_shaderText;
+	// look for shader names
+	while ( 1 )
+	{
+		char* oldp = p;
+		char* token = R_ParseExt( &p, qtrue );
+
+		if ( token[0] == 0 ) {
+			break;
+		}
+
+		int hash = generateHashValue(token, MAX_SHADERTEXT_HASH);
+		shaderTextHashTable[hash][shaderTextHashTableSizes[hash]++] = oldp;
+
+		SkipBracedSection(&p, 0);
+	}
+
+}
+
+
+#define MAX_SHADER_FILES 4096
+void ScanAndLoadShaderFiles( void )
+{
+	ri.Printf( PRINT_ALL, "ScanAndLoadShaderFiles\n" );
+
+	char *buffers[MAX_SHADER_FILES] = {0};
+	int numShaderFiles = 0;
+
+
+	// scan for shader files
+	char** shaderFiles = ri.FS_ListFiles( "scripts", ".shader", &numShaderFiles );
+
+	if ( !shaderFiles || !numShaderFiles )
+	{
+		ri.Printf( PRINT_WARNING, "WARNING: no shader files found\n" );
+		return;
+	}
+
+	if ( numShaderFiles > MAX_SHADER_FILES ) {
+		numShaderFiles = MAX_SHADER_FILES;
+		ri.Printf( PRINT_WARNING, "numShaderFiles > MAX_SHADER_FILES\n" );
+	}
+
+	// load and parse shader files
+	long sum = 0;
+	int i;
+	for ( i = 0; i < numShaderFiles; i++ )
+	{
+		char filename[128] = {0};
+
+		snprintf( filename, sizeof( filename ), "scripts/%s", shaderFiles[i] );
+		ri.Printf( PRINT_ALL, "...loading '%s'\n", filename );
+		long summand = ri.FS_ReadFile( filename, (void**)&buffers[i] );
+
+		if ( !buffers[i] )
+			ri.Error( ERR_DROP, "Couldn't load %s", filename );
+
+		// Do a simple check on the shader structure in that file
+		// to make sure one bad shader file cannot fuck up all other shaders.
+		Shader_DoSimpleCheck(filename, buffers[i]);
+
+		if (buffers[i])
+			sum += summand;
+	}
+
+	// build single large buffer
+	BuildSingleLargeBuffer(buffers, numShaderFiles, sum);
+
+	R_Compress( s_shaderText );
+
+
+	// free up memory
+	ri.FS_FreeFileList( shaderFiles );
+
+
+	SetShaderTextHashTableSizes();
+
+	return;
+
+}
+
+
+/*
+====================
+RE_RemapShader
+
+Points all loaded shaders with the given name at a replacement shader,
+so surfaces can be retargeted at runtime. The optional timeOffset
+shifts the remapped shader's animation time.
+====================
+*/
+
+void RE_RemapShader(const char *shaderName, const char *newShaderName, const char *timeOffset)
+{
+
+	shader_t* sh2 = tr.defaultShader;
+
+	//R_FindShaderByName( newShaderName );
+	{
+		char strippedName2[MAX_QPATH];
+		R_StripExtension( newShaderName, strippedName2, sizeof(strippedName2) );
+
+		int hash2 = generateHashValue(strippedName2, FILE_HASH_SIZE);
+
+		// see if the shader is already loaded
+		shader_t* pSh = hashTable[hash2];
+
+		while ( pSh )
+		{
+			// NOTE: if there was no shader or image available with the name strippedName
+			// then a default shader is created with lightmapIndex == LIGHTMAP_NONE, so we
+			// have to check all default shaders otherwise for every call to R_FindShader
+			// with that same strippedName a new default shader is created.
+			if (Q_stricmp(pSh->name, strippedName2) == 0)
+			{
+				// match found
+				sh2 = pSh;
+				break;
+			}
+			pSh=pSh->next;
+		}
+
+		if (sh2 == tr.defaultShader)
+		{
+			qhandle_t h;
+			//h = RE_RegisterShaderLightMap(newShaderName, 0);
+
+			pSh = R_FindShader( newShaderName, 0, qtrue );
+
+			if ( pSh->defaultShader )
+			{
+				h = 0;
+			}
+			else
+			{
+				h = pSh->index;
+			}
+
+			sh2 = R_GetShaderByHandle(h);
+
+			if( (sh2 == tr.defaultShader) || (sh2 == NULL) )
+			{
+				ri.Printf( PRINT_WARNING, "WARNING: R_RemapShader: shader %s not found\n", newShaderName );
+			}
+		}
+	}
+
+	char strippedName[MAX_QPATH];
+	R_StripExtension(shaderName, strippedName, sizeof(strippedName));
+	int hash = generateHashValue(strippedName, FILE_HASH_SIZE);
+	shader_t* sh = hashTable[hash];
+	// remap all the shaders with the given name,
+	// even though they might have different lightmaps
+
+	while( sh )
+	{
+		if (Q_stricmp(sh->name, strippedName) == 0)
+		{
+			if (sh != sh2)
+			{
+				sh->remappedShader = sh2;
+			}
+			else
+			{
+				sh->remappedShader = NULL;
+			}
+		}
+		sh = sh->next;
+	}
+
+	if (timeOffset)
+	{
+		sh2->timeOffset = atof(timeOffset);
+	}
+}
+
+
+
+
+void R_UpdateShaderHashTable(shader_t* newShader)
+{
+	int hash = generateHashValue(newShader->name, FILE_HASH_SIZE);
+	newShader->next = hashTable[hash];
+	hashTable[hash] = newShader;
+}
diff --git a/code/renderer_vulkan/R_ImageBMP.c b/code/renderer_vulkan/R_ImageBMP.c
new file mode 100644
index 0000000000..61f3e2774e
--- /dev/null
+++ b/code/renderer_vulkan/R_ImageBMP.c
@@ -0,0 +1,238 @@
+/*
+===========================================================================
+Copyright (C) 1999-2005 Id Software, Inc.
+
+This file is part of Quake III Arena source code.
+
+Quake III Arena source code is free software; you can redistribute it
+and/or modify it under the terms of the GNU General Public License as
+published by the Free Software Foundation; either version 2 of the License,
+or (at your option) any later version.
+
+Quake III Arena source code is distributed in the hope that it will be
+useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Quake III Arena source code; if not, write to the Free Software
+Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+===========================================================================
+*/
+
+#include "ref_import.h"
+#include "image_loader.h"
+
+
+typedef struct
+{
+	char id[2];
+	unsigned fileSize;
+	unsigned reserved0;
+	unsigned bitmapDataOffset;
+	unsigned bitmapHeaderSize;
+	unsigned width;
+	unsigned height;
+	unsigned short planes;
+	unsigned short bitsPerPixel;
+	unsigned compression;
+	unsigned bitmapDataSize;
+	unsigned hRes;
+	unsigned vRes;
+	unsigned colors;
+	unsigned importantColors;
+	unsigned char palette[256][4];
+} BMPHeader_t;
+
+void R_LoadBMP( const char *name, byte **pic, int *width, int *height )
+{
+	int columns, rows;
+	unsigned numPixels;
+	byte *pixbuf;
+	int row, column;
+	byte *buf_p;
+	byte *end;
+
+	char * buffer;
+	int length;
+	BMPHeader_t bmpHeader;
+	byte *bmpRGBA;
+
+	*pic = NULL;
+
+	if(width)
+		*width = 0;
+
+	if(height)
+		*height = 0;
+
+	//
+	// load the file
+	//
+	length = ri.FS_ReadFile( name, (void**)&buffer );
+	if (!buffer || length < 0) {
+		return;
+	}
+
+	if (length < 54)
+	{
+		ri.Error( ERR_DROP, "LoadBMP: header too short (%s)", name );
+	}
+
+	buf_p = (unsigned char*)buffer;
+	end = (unsigned char*)buffer + length;
+
+	bmpHeader.id[0] = *buf_p++;
+	bmpHeader.id[1] = *buf_p++;
+	bmpHeader.fileSize = LittleLong( * ( int * ) buf_p );
+	buf_p += 4;
+	bmpHeader.reserved0 = LittleLong( * ( int * ) buf_p );
+	buf_p += 4;
+	bmpHeader.bitmapDataOffset = LittleLong( * ( int * ) buf_p );
+	buf_p += 4;
+	bmpHeader.bitmapHeaderSize = LittleLong( * ( int * ) buf_p );
+	buf_p += 4;
+	bmpHeader.width = LittleLong( * ( int * ) buf_p );
+	buf_p += 4;
+	bmpHeader.height = LittleLong( * ( int * ) buf_p );
+	buf_p += 4;
+	bmpHeader.planes = LittleShort( * ( short * ) buf_p );
+	buf_p += 2;
+	bmpHeader.bitsPerPixel = LittleShort( * ( short * ) buf_p );
+	buf_p += 2;
+	bmpHeader.compression = LittleLong( * ( int * ) buf_p );
+	buf_p += 4;
+	bmpHeader.bitmapDataSize = LittleLong( * ( int * ) buf_p );
+	buf_p += 4;
+	bmpHeader.hRes = LittleLong( * ( int * ) buf_p );
+	buf_p += 4;
+	bmpHeader.vRes = LittleLong( * ( int * ) buf_p );
+	buf_p += 4;
+	bmpHeader.colors = LittleLong( * ( int * ) buf_p );
+	buf_p += 4;
+	bmpHeader.importantColors = LittleLong( * ( int * ) buf_p );
+	buf_p += 4;
+
+	if ( bmpHeader.bitsPerPixel == 8 )
+	{
+		if (buf_p + sizeof(bmpHeader.palette) > end)
+			ri.Error( ERR_DROP, "LoadBMP: header too short (%s)", name );
+
+		memcpy( bmpHeader.palette, buf_p, sizeof( bmpHeader.palette ) );
+	}
+
+	if ((unsigned char*)buffer + bmpHeader.bitmapDataOffset > end)
+	{
+		ri.Error( ERR_DROP, "LoadBMP: invalid offset value in header (%s)", name );
+	}
+
+	buf_p = (unsigned char*)buffer + bmpHeader.bitmapDataOffset;
+
+	if ( bmpHeader.id[0] != 'B' || bmpHeader.id[1] != 'M' )
+	{
+		ri.Error( ERR_DROP, "LoadBMP: only Windows-style BMP files supported (%s)", name );
+	}
+	if ( bmpHeader.fileSize != length )
+	{
+		ri.Error( ERR_DROP, "LoadBMP: header size does not match file size (%u vs. %u) (%s)", bmpHeader.fileSize, length, name );
+	}
+	if ( bmpHeader.compression != 0 )
+	{
+		ri.Error( ERR_DROP, "LoadBMP: only uncompressed BMP files supported (%s)", name );
+	}
+	if ( bmpHeader.bitsPerPixel < 8 )
+	{
+		ri.Error( ERR_DROP, "LoadBMP: monochrome and 4-bit BMP files not supported (%s)", name );
+	}
+
+	switch ( bmpHeader.bitsPerPixel )
+	{
+		case 8:
+		case 16:
+		case 24:
+		case 32:
+			break;
+		default:
+			ri.Error( ERR_DROP, "LoadBMP: illegal pixel_size '%hu' in file '%s'", bmpHeader.bitsPerPixel, name );
+			break;
+	}
+
+	columns = bmpHeader.width;
+	rows = bmpHeader.height;
+	if ( rows < 0 )
+		rows = -rows;
+	numPixels = columns * rows;
+
+	if(columns <= 0 || !rows || numPixels > 0x1FFFFFFF // 4*1FFFFFFF == 0x7FFFFFFC < 0x7FFFFFFF
+	    || ((numPixels * 4) / columns) / 4 != rows)
+	{
+		ri.Error (ERR_DROP, "LoadBMP: %s has an invalid image size", name);
+	}
+	if(buf_p + numPixels*bmpHeader.bitsPerPixel/8 > end)
+	{
+		ri.Error (ERR_DROP, "LoadBMP: file truncated (%s)", name);
+	}
+
+	if ( width )
+		*width = columns;
+	if ( height )
+		*height = rows;
+
+	bmpRGBA = ri.Malloc( numPixels * 4 );
+	*pic = bmpRGBA;
+
+
+	for ( row = rows-1; row >= 0; row-- )
+	{
+		pixbuf = bmpRGBA + row*columns*4;
+
+		for ( column = 0; column < columns; column++ )
+		{
+			unsigned char red, green, blue, alpha;
+			int palIndex;
+			unsigned short shortPixel;
+
+			switch ( bmpHeader.bitsPerPixel )
+			{
+			case 8:
+				palIndex = *buf_p++;
+				*pixbuf++ = bmpHeader.palette[palIndex][2];
+				*pixbuf++ = bmpHeader.palette[palIndex][1];
+				*pixbuf++ = bmpHeader.palette[palIndex][0];
+				*pixbuf++ = 0xff;
+				break;
+			case 16:
+				// read the source pixel from buf_p, not the output buffer
+				shortPixel = * ( unsigned short * ) buf_p;
+				buf_p += 2;
+				*pixbuf++ = ( shortPixel & ( 31 << 10 ) ) >> 7;
+				*pixbuf++ = ( shortPixel & ( 31 << 5 ) ) >> 2;
+				*pixbuf++ = ( shortPixel & ( 31 ) ) << 3;
+				*pixbuf++ = 0xff;
+				break;
+
+			case 24:
+				blue = *buf_p++;
+				green = *buf_p++;
+				red = *buf_p++;
+				*pixbuf++ = red;
+				*pixbuf++ = green;
+				*pixbuf++ = blue;
+				*pixbuf++ = 255;
+				break;
+			case 32:
+				blue = *buf_p++;
+				green = *buf_p++;
+				red = *buf_p++;
+				alpha = *buf_p++;
+				*pixbuf++ = red;
+				*pixbuf++ = green;
+				*pixbuf++ = blue;
+				*pixbuf++ = alpha;
+				break;
+			}
+		}
+	}
+
+	ri.FS_FreeFile( buffer );
+
+}
diff --git a/code/renderer_vulkan/R_ImageJPG.c b/code/renderer_vulkan/R_ImageJPG.c
new file mode 100644
index 0000000000..3c912c7038
--- /dev/null
+++ b/code/renderer_vulkan/R_ImageJPG.c
@@ -0,0 +1,473 @@
+/*
+===========================================================================
+Copyright (C) 1999-2005 Id Software, Inc.
+
+This file is part of Quake III Arena source code.
+
+Quake III Arena source code is free software; you can redistribute it
+and/or modify it under the terms of the GNU General Public License as
+published by the Free Software Foundation; either version 2 of the License,
+or (at your option) any later version.
+
+Quake III Arena source code is distributed in the hope that it will be
+useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Quake III Arena source code; if not, write to the Free Software
+Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+===========================================================================
+*/
+
+#include <setjmp.h>
+#include "tr_local.h"
+#include "ref_import.h"
+
+/*
+ * Include file for users of JPEG library.
+
+void R_LoadJPG(const char *filename, unsigned char **pic, int *width, int *height)
+{
+ /* This struct contains the JPEG decompression parameters and pointers to
+ * working space (which is allocated as needed by the JPEG library).
+ */
+ struct jpeg_decompress_struct cinfo = {NULL};
+ /* We use our private extension JPEG error handler.
+ * Note that this struct must live as long as the main JPEG parameter
+ * struct, to avoid dangling-pointer problems.
+ */
+ q_jpeg_error_mgr_t jerr;
+ /* More stuff */
+ JSAMPARRAY buffer; /* Output row buffer */
+ unsigned int row_stride; /* physical row width in output buffer */
+ unsigned int pixelcount, memcount;
+ unsigned int sindex, dindex;
+ byte *out;
+ int len;
+ char * fbuffer;
+ byte *buf;
+
+ /* Read the input file before doing anything else, so that the setjmp()
+ * error recovery below can assume the data is in memory.
+ */
+
+ len = ri.FS_ReadFile ( filename, (void**)&fbuffer);
+ if (!fbuffer || len < 0) {
+ return;
+ }
+
+ /* Step 1: allocate and initialize JPEG decompression object */
+
+ /* We have to set up the error handler first, in case the initialization
+ * step fails. (Unlikely, but it could happen if you are out of memory.)
+ * This routine fills in the contents of struct jerr, and returns jerr's
+ * address which we place into the link field in cinfo.
+ */
+ cinfo.err = jpeg_std_error(&jerr.pub);
+ cinfo.err->error_exit = R_JPGErrorExit;
+ cinfo.err->output_message = R_JPGOutputMessage;
+
+ /* Establish the setjmp return context for R_JPGErrorExit to use.
*/ + if (setjmp(jerr.setjmp_buffer)) + { + /* If we get here, the JPEG code has signaled an error. + * We need to clean up the JPEG object, close the input file, and return. + */ + jpeg_destroy_decompress(&cinfo); + ri.FS_FreeFile(fbuffer); + + /* Append the filename to the error for easier debugging */ + ri.Printf(PRINT_ALL, ", loading file %s\n", filename); + return; + } + + /* Now we can initialize the JPEG decompression object. */ + jpeg_create_decompress(&cinfo); + + /* Step 2: specify data source (eg, a file) */ + + jpeg_mem_src(&cinfo, (unsigned char*)fbuffer, len); + + /* Step 3: read file parameters with jpeg_read_header() */ + + (void) jpeg_read_header(&cinfo, TRUE); + /* We can ignore the return value from jpeg_read_header since + * (a) suspension is not possible with the stdio data source, and + * (b) we passed TRUE to reject a tables-only JPEG file as an error. + * See libjpeg.doc for more info. + */ + + /* Step 4: set parameters for decompression */ + + /* + * Make sure it always converts images to RGB color space. This will + * automatically convert 8-bit greyscale images to RGB as well. + */ + cinfo.out_color_space = JCS_RGB; + + /* Step 5: Start decompressor */ + + (void) jpeg_start_decompress(&cinfo); + /* We can ignore the return value since suspension is not possible + * with the stdio data source. + */ + + /* We may need to do some setup of our own at this point before reading + * the data. After jpeg_start_decompress() we have the correct scaled + * output image dimensions available, as well as the output colormap + * if we asked for color quantization. + * In this example, we need to make an output work buffer of the right size. + */ + /* JSAMPLEs per row in output buffer */ + + pixelcount = cinfo.output_width * cinfo.output_height; + + if(!cinfo.output_width || !cinfo.output_height + || ((pixelcount * 4) / cinfo.output_width) / 4 != cinfo.output_height + || pixelcount > 0x1FFFFFFF || cinfo.output_components != 3 + ) + { + // Free the memory to make sure we don't leak memory + ri.FS_FreeFile (fbuffer); + jpeg_destroy_decompress(&cinfo); + + ri.Error(ERR_DROP, "LoadJPG: %s has an invalid image format: %dx%d*4=%d, components: %d", filename, + cinfo.output_width, cinfo.output_height, pixelcount * 4, cinfo.output_components); + } + + memcount = pixelcount * 4; + row_stride = cinfo.output_width * cinfo.output_components; + + out = ri.Malloc(memcount); + + *width = cinfo.output_width; + *height = cinfo.output_height; + + /* Step 6: while (scan lines remain to be read) */ + /* jpeg_read_scanlines(...); */ + + /* Here we use the library's state variable cinfo.output_scanline as the + * loop counter, so that we don't have to keep track ourselves. + */ + while (cinfo.output_scanline < cinfo.output_height) { + /* jpeg_read_scanlines expects an array of pointers to scanlines. + * Here the array is only one element long, but you could ask for + * more than one scanline at a time if that's more convenient. 
+ */ + buf = ((out+(row_stride*cinfo.output_scanline))); + buffer = &buf; + (void) jpeg_read_scanlines(&cinfo, buffer, 1); + } + + buf = out; + + // Expand from RGB to RGBA + sindex = pixelcount * cinfo.output_components; + dindex = memcount; + + do + { + buf[--dindex] = 255; + buf[--dindex] = buf[--sindex]; + buf[--dindex] = buf[--sindex]; + buf[--dindex] = buf[--sindex]; + } while(sindex); + + *pic = out; + + /* Step 7: Finish decompression */ + + jpeg_finish_decompress(&cinfo); + /* We can ignore the return value since suspension is not possible + * with the stdio data source. + */ + + /* Step 8: Release JPEG decompression object */ + + /* This is an important step since it will release a good deal of memory. */ + jpeg_destroy_decompress(&cinfo); + + /* After finish_decompress, we can close the input file. + * Here we postpone it until after no more JPEG errors are possible, + * so as to simplify the setjmp error logic above. (Actually, I don't + * think that jpeg_destroy can do an error exit, but why assume anything...) + */ + ri.FS_FreeFile (fbuffer); + + /* At this point you may want to check to see whether any corrupt-data + * warnings occurred (test whether jerr.pub.num_warnings is nonzero). + */ + + /* And we're done! */ +} + + +/* Expanded data destination object for stdio output */ + +typedef struct { + struct jpeg_destination_mgr pub; /* public fields */ + + byte* outfile; /* target stream */ + int size; +} my_destination_mgr; + +typedef my_destination_mgr * my_dest_ptr; + + +/* + * Initialize destination --- called by jpeg_start_compress + * before any data is actually written. + */ + +static void +init_destination (j_compress_ptr cinfo) +{ + my_dest_ptr dest = (my_dest_ptr) cinfo->dest; + + dest->pub.next_output_byte = dest->outfile; + dest->pub.free_in_buffer = dest->size; +} + + +/* + * Empty the output buffer --- called whenever buffer fills up. + * + * In typical applications, this should write the entire output buffer + * (ignoring the current state of next_output_byte & free_in_buffer), + * reset the pointer & count to the start of the buffer, and return TRUE + * indicating that the buffer has been dumped. + * + * In applications that need to be able to suspend compression due to output + * overrun, a FALSE return indicates that the buffer cannot be emptied now. + * In this situation, the compressor will return to its caller (possibly with + * an indication that it has not accepted all the supplied scanlines). The + * application should resume compression after it has made more room in the + * output buffer. Note that there are substantial restrictions on the use of + * suspension --- see the documentation. + * + * When suspending, the compressor will back up to a convenient restart point + * (typically the start of the current MCU). next_output_byte & free_in_buffer + * indicate where the restart point will be if the current call returns FALSE. + * Data beyond this point will be regenerated after resumption, so do not + * write it out when emptying the buffer externally. + */ + +static boolean +empty_output_buffer (j_compress_ptr cinfo) +{ + my_dest_ptr dest = (my_dest_ptr) cinfo->dest; + + jpeg_destroy_compress(cinfo); + + // Make crash fatal or we would probably leak memory. + ri.Error(ERR_FATAL, "Output buffer for encoded JPEG image has insufficient size of %d bytes", + dest->size); + + return FALSE; +} + +/* + * Terminate destination --- called by jpeg_finish_compress + * after all data has been written. Usually needs to flush buffer. 
+ * + * NB: *not* called by jpeg_abort or jpeg_destroy; surrounding + * application must deal with any cleanup that should happen even + * for error exit. + */ + +static void term_destination(j_compress_ptr cinfo) +{ +} + + +/* + * Prepare for output to a stdio stream. + * The caller must have already opened the stream, and is responsible + * for closing it after finishing compression. + */ + +static void +jpegDest (j_compress_ptr cinfo, byte* outfile, int size) +{ + my_dest_ptr dest; + + /* The destination object is made permanent so that multiple JPEG images + * can be written to the same file without re-executing jpeg_stdio_dest. + * This makes it dangerous to use this manager and a different destination + * manager serially with the same JPEG object, because their private object + * sizes may be different. Caveat programmer. + */ + if (cinfo->dest == NULL) { /* first time for this JPEG object? */ + cinfo->dest = (struct jpeg_destination_mgr *) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_PERMANENT, + sizeof(my_destination_mgr)); + } + + dest = (my_dest_ptr) cinfo->dest; + dest->pub.init_destination = init_destination; + dest->pub.empty_output_buffer = empty_output_buffer; + dest->pub.term_destination = term_destination; + dest->outfile = outfile; + dest->size = size; +} + +/* +================= +SaveJPGToBuffer + +Encodes JPEG from image in image_buffer and writes to buffer. +Expects RGB input data +================= +*/ +size_t RE_SaveJPGToBuffer(byte *buffer, size_t bufSize, int quality, + int image_width, int image_height, byte *image_buffer, int padding) +{ + struct jpeg_compress_struct cinfo; + q_jpeg_error_mgr_t jerr; + JSAMPROW row_pointer[1]; /* pointer to JSAMPLE row[s] */ + my_dest_ptr dest; + int row_stride; /* physical row width in image buffer */ + size_t outcount; + + /* Step 1: allocate and initialize JPEG compression object */ + cinfo.err = jpeg_std_error(&jerr.pub); + cinfo.err->error_exit = R_JPGErrorExit; + cinfo.err->output_message = R_JPGOutputMessage; + + /* Establish the setjmp return context for R_JPGErrorExit to use. */ + if (setjmp(jerr.setjmp_buffer)) + { + /* If we get here, the JPEG code has signaled an error. + * We need to clean up the JPEG object and return. + */ + jpeg_destroy_compress(&cinfo); + + ri.Printf(PRINT_ALL, "\n"); + return 0; + } + + /* Now we can initialize the JPEG compression object. */ + jpeg_create_compress(&cinfo); + + /* Step 2: specify data destination (eg, a file) */ + /* Note: steps 2 and 3 can be done in either order. */ + jpegDest(&cinfo, buffer, bufSize); + + /* Step 3: set parameters for compression */ + cinfo.image_width = image_width; /* image width and height, in pixels */ + cinfo.image_height = image_height; + cinfo.input_components = 3; /* # of color components per pixel */ + cinfo.in_color_space = JCS_RGB; /* colorspace of input image */ + + jpeg_set_defaults(&cinfo); + jpeg_set_quality(&cinfo, quality, TRUE /* limit to baseline-JPEG values */); + /* If quality is set high, disable chroma subsampling */ + if (quality >= 85) { + cinfo.comp_info[0].h_samp_factor = 1; + cinfo.comp_info[0].v_samp_factor = 1; + } + + /* Step 4: Start compressor */ + jpeg_start_compress(&cinfo, TRUE); + + /* Step 5: while (scan lines remain to be written) */ + /* jpeg_write_scanlines(...); */ + row_stride = image_width * cinfo.input_components + padding; /* JSAMPLEs per row in image_buffer */ + + while (cinfo.next_scanline < cinfo.image_height) { + /* jpeg_write_scanlines expects an array of pointers to scanlines. 
+ * Here the array is only one element long, but you could pass + * more than one scanline at a time if that's more convenient. + */ + row_pointer[0] = &image_buffer[((cinfo.image_height-1)*row_stride)-cinfo.next_scanline * row_stride]; + (void) jpeg_write_scanlines(&cinfo, row_pointer, 1); + } + + /* Step 6: Finish compression */ + jpeg_finish_compress(&cinfo); + + dest = (my_dest_ptr) cinfo.dest; + outcount = dest->size - dest->pub.free_in_buffer; + + /* Step 7: release JPEG compression object */ + jpeg_destroy_compress(&cinfo); + + /* And we're done! */ + return outcount; +} + +void RE_SaveJPG(char * filename, int quality, int image_width, int image_height, byte *image_buffer, int padding) +{ + byte *out; + size_t bufSize; + + bufSize = image_width * image_height * 3; + out = ri.Hunk_AllocateTempMemory(bufSize); + + bufSize = RE_SaveJPGToBuffer(out, bufSize, quality, image_width, image_height, image_buffer, padding); + ri.FS_WriteFile(filename, out, bufSize); + + ri.Hunk_FreeTempMemory(out); +} diff --git a/code/renderer_vulkan/R_ImageJPG.h b/code/renderer_vulkan/R_ImageJPG.h new file mode 100644 index 0000000000..b6c3c9d60f --- /dev/null +++ b/code/renderer_vulkan/R_ImageJPG.h @@ -0,0 +1,7 @@ +#ifndef R_IMAGEJPG_H_ +#define R_IMAGEJPG_H_ + +void RE_SaveJPG(char * filename, int quality, int image_width, int image_height, unsigned char *image_buffer, int padding); +size_t RE_SaveJPGToBuffer(unsigned char *buffer, size_t bufSize, int quality, int image_width, int image_height, unsigned char *image_buffer, int padding); + +#endif diff --git a/code/renderer_vulkan/R_ImagePCX.c b/code/renderer_vulkan/R_ImagePCX.c new file mode 100644 index 0000000000..c13f90ddaf --- /dev/null +++ b/code/renderer_vulkan/R_ImagePCX.c @@ -0,0 +1,170 @@ +/* +=========================================================================== +Copyright (C) 1999-2005 Id Software, Inc. + 2008 Ludwig Nussel + +This file is part of Quake III Arena source code. + +Quake III Arena source code is free software; you can redistribute it +and/or modify it under the terms of the GNU General Public License as +published by the Free Software Foundation; either version 2 of the License, +or (at your option) any later version. + +Quake III Arena source code is distributed in the hope that it will be +useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+
+You should have received a copy of the GNU General Public License
+along with Quake III Arena source code; if not, write to the Free Software
+Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+===========================================================================
+*/
+
+#include "ref_import.h"
+#include "image_loader.h"
+/*
+========================================================================
+
+PCX files are used for 8 bit images
+
+========================================================================
+*/
+
+typedef struct {
+ char manufacturer;
+ char version;
+ char encoding;
+ char bits_per_pixel;
+ unsigned short xmin,ymin,xmax,ymax;
+ unsigned short hres,vres;
+ unsigned char palette[48];
+ char reserved;
+ char color_planes;
+ unsigned short bytes_per_line;
+ unsigned short palette_type;
+ unsigned short hscreensize, vscreensize;
+ char filler[54];
+ char data[];
+} pcx_t;
+
+void R_LoadPCX ( const char *filename, byte **pic, int *width, int *height)
+{
+ char* raw;
+ unsigned char dataByte = 0, runLength = 0;
+ unsigned char *out, *pix;
+ unsigned short w, h;
+ unsigned char *pic8;
+ unsigned char *palette;
+ unsigned int i;
+ unsigned size = 0;
+
+ if (width)
+ *width = 0;
+ if (height)
+ *height = 0;
+
+ *pic = NULL;
+
+ //
+ // load the file
+ //
+ int len = ri.FS_ReadFile( filename, (void**)&raw );
+ if (!raw || len < 0) {
+ return;
+ }
+
+ if((unsigned)len < sizeof(pcx_t))
+ {
+ ri.Printf (PRINT_ALL, "PCX truncated: %s\n", filename);
+ ri.FS_FreeFile (raw);
+ return;
+ }
+
+ //
+ // parse the PCX file
+ //
+ pcx_t* pcx = (pcx_t *)raw;
+ unsigned char* end = (unsigned char*)raw+len;
+
+ w = LittleShort(pcx->xmax)+1;
+ h = LittleShort(pcx->ymax)+1;
+ size = w*h;
+
+ if (pcx->manufacturer != 0x0a
+ || pcx->version != 5
+ || pcx->encoding != 1
+ || pcx->color_planes != 1
+ || pcx->bits_per_pixel != 8
+ || w >= 1024
+ || h >= 1024)
+ {
+ ri.Printf (PRINT_ALL, "Bad or unsupported pcx file %s (%dx%d@%d)\n", filename, w, h, pcx->bits_per_pixel);
+ ri.FS_FreeFile (pcx);
+ return;
+ }
+
+ pix = pic8 = ri.Malloc ( size );
+
+ raw = pcx->data;
+ // FIXME: should use bytes_per_line but original q3 didn't do that either
+ while(pix < pic8+size)
+ {
+ if(runLength > 0) {
+ *pix++ = dataByte;
+ --runLength;
+ continue;
+ }
+
+ if((unsigned char*)raw+1 > end)
+ break;
+ dataByte = *raw++;
+
+ if((dataByte & 0xC0) == 0xC0)
+ {
+ if((unsigned char*)raw+1 > end)
+ break;
+ runLength = dataByte & 0x3F;
+ dataByte = *raw++;
+ }
+ else
+ runLength = 1;
+ }
+
+ if(pix < pic8+size)
+ {
+ ri.Printf (PRINT_ALL, "PCX file truncated: %s\n", filename);
+ ri.FS_FreeFile (pcx);
+ ri.Free (pic8);
+ return;
+ }
+
+ // The palette is the last 769 bytes of the file: a 0x0c marker byte
+ // followed by 256 RGB triplets.
+ if (end - (unsigned char*)raw < 769 || end[-769] != 0x0c)
+ {
+ ri.Printf (PRINT_ALL, "PCX missing palette: %s\n", filename);
+ ri.FS_FreeFile (pcx);
+ ri.Free (pic8);
+ return;
+ }
+
+ palette = end-768;
+
+ pix = out = ri.Malloc(4 * size );
+ for (i = 0 ; i < size ; i++)
+ {
+ unsigned char p = pic8[i];
+ pix[0] = palette[p*3];
+ pix[1] = palette[p*3 + 1];
+ pix[2] = palette[p*3 + 2];
+ pix[3] = 255;
+ pix += 4;
+ }
+
+ if (width)
+ *width = w;
+ if (height)
+ *height = h;
+
+ *pic = out;
+
+ ri.FS_FreeFile (pcx);
+ ri.Free (pic8);
+}
diff --git a/code/renderer_vulkan/R_ImagePNG.c b/code/renderer_vulkan/R_ImagePNG.c
new file mode 100644
index 0000000000..6c4b0fa4ea
--- /dev/null
+++ b/code/renderer_vulkan/R_ImagePNG.c
@@ -0,0 +1,2483 @@
+/*
+===========================================================================
+ioquake3 png decoder
+Copyright (C) 2007,2008 Joerg Dietrich
+
+This
program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +as published by the Free Software Foundation; either version 2 +of the License, or (at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +=========================================================================== +*/ + +#include "../renderercommon/puff.h" + +#include "ref_import.h" +#include "image_loader.h" +// we could limit the png size to a lower value here +#ifndef INT_MAX +#define INT_MAX 0x1fffffff +#endif + +/* +================= +PNG LOADING +================= +*/ + +/* + * Quake 3 image format : RGBA + */ + +#define Q3IMAGE_BYTESPERPIXEL (4) + +/* + * PNG specifications + */ + +/* + * The first 8 Bytes of every PNG-File are a fixed signature + * to identify the file as a PNG. + */ + +#define PNG_Signature "\x89\x50\x4E\x47\xD\xA\x1A\xA" +#define PNG_Signature_Size (8) + +/* + * After the signature diverse chunks follow. + * A chunk consists of a header and if Length + * is bigger than 0 a body and a CRC of the body follow. + */ + +struct PNG_ChunkHeader +{ + uint32_t Length; + uint32_t Type; +}; + +#define PNG_ChunkHeader_Size (8) + +typedef uint32_t PNG_ChunkCRC; + +#define PNG_ChunkCRC_Size (4) + +/* + * We use the following ChunkTypes. + * All others are ignored. + */ + +#define MAKE_CHUNKTYPE(a,b,c,d) (((a) << 24) | ((b) << 16) | ((c) << 8) | ((d))) + +#define PNG_ChunkType_IHDR MAKE_CHUNKTYPE('I', 'H', 'D', 'R') +#define PNG_ChunkType_PLTE MAKE_CHUNKTYPE('P', 'L', 'T', 'E') +#define PNG_ChunkType_IDAT MAKE_CHUNKTYPE('I', 'D', 'A', 'T') +#define PNG_ChunkType_IEND MAKE_CHUNKTYPE('I', 'E', 'N', 'D') +#define PNG_ChunkType_tRNS MAKE_CHUNKTYPE('t', 'R', 'N', 'S') + +/* + * Per specification the first chunk after the signature SHALL be IHDR. + */ + +struct PNG_Chunk_IHDR +{ + uint32_t Width; + uint32_t Height; + uint8_t BitDepth; + uint8_t ColourType; + uint8_t CompressionMethod; + uint8_t FilterMethod; + uint8_t InterlaceMethod; +}; + +#define PNG_Chunk_IHDR_Size (13) + +/* + * ColourTypes + */ + +#define PNG_ColourType_Grey (0) +#define PNG_ColourType_True (2) +#define PNG_ColourType_Indexed (3) +#define PNG_ColourType_GreyAlpha (4) +#define PNG_ColourType_TrueAlpha (6) + +/* + * number of colour components + * + * Grey : 1 grey + * True : 1 R, 1 G, 1 B + * Indexed : 1 index + * GreyAlpha : 1 grey, 1 alpha + * TrueAlpha : 1 R, 1 G, 1 B, 1 alpha + */ + +#define PNG_NumColourComponents_Grey (1) +#define PNG_NumColourComponents_True (3) +#define PNG_NumColourComponents_Indexed (1) +#define PNG_NumColourComponents_GreyAlpha (2) +#define PNG_NumColourComponents_TrueAlpha (4) + +/* + * For the different ColourTypes + * different BitDepths are specified. + */ + +#define PNG_BitDepth_1 ( 1) +#define PNG_BitDepth_2 ( 2) +#define PNG_BitDepth_4 ( 4) +#define PNG_BitDepth_8 ( 8) +#define PNG_BitDepth_16 (16) + +/* + * Only one valid CompressionMethod is standardized. + */ + +#define PNG_CompressionMethod_0 (0) + +/* + * Only one valid FilterMethod is currently standardized. 
+ */ + +#define PNG_FilterMethod_0 (0) + +/* + * This FilterMethod defines 5 FilterTypes + */ + +#define PNG_FilterType_None (0) +#define PNG_FilterType_Sub (1) +#define PNG_FilterType_Up (2) +#define PNG_FilterType_Average (3) +#define PNG_FilterType_Paeth (4) + +/* + * Two InterlaceMethods are standardized : + * 0 - NonInterlaced + * 1 - Interlaced + */ + +#define PNG_InterlaceMethod_NonInterlaced (0) +#define PNG_InterlaceMethod_Interlaced (1) + +/* + * The Adam7 interlace method uses 7 passes. + */ + +#define PNG_Adam7_NumPasses (7) + +/* + * The compressed data starts with a header ... + */ + +struct PNG_ZlibHeader +{ + uint8_t CompressionMethod; + uint8_t Flags; +}; + +#define PNG_ZlibHeader_Size (2) + +/* + * ... and is followed by a check value + */ + +#define PNG_ZlibCheckValue_Size (4) + +/* + * Some support functions for buffered files follow. + */ + +/* + * buffered file representation + */ + +struct BufferedFile +{ + byte *Buffer; + int Length; + byte *Ptr; + int BytesLeft; +}; + +/* + * Read a file into a buffer. + */ + +static struct BufferedFile *ReadBufferedFile(const char *name) +{ + struct BufferedFile *BF; + + char* buffer; + + /* + * input verification + */ + + if(!name) + { + return(NULL); + } + + /* + * Allocate control struct. + */ + + BF = ri.Malloc(sizeof(struct BufferedFile)); + if(!BF) + { + return(NULL); + } + + /* + * Initialize the structs components. + */ + + BF->Length = 0; + BF->Buffer = NULL; + BF->Ptr = NULL; + BF->BytesLeft = 0; + + /* + * Read the file. + */ + + BF->Length = ri.FS_ReadFile(name, (void**)&buffer); + BF->Buffer = (unsigned char*)buffer; + + /* + * Did we get it? Is it big enough? + */ + + if(!(BF->Buffer && (BF->Length > 0))) + { + ri.Free(BF); + + return(NULL); + } + + /* + * Set the pointers and counters. + */ + + BF->Ptr = BF->Buffer; + BF->BytesLeft = BF->Length; + + return(BF); +} + +/* + * Close a buffered file. + */ + +static void CloseBufferedFile(struct BufferedFile *BF) +{ + if(BF) + { + if(BF->Buffer) + { + ri.FS_FreeFile(BF->Buffer); + } + + ri.Free(BF); + } +} + +/* + * Get a pointer to the requested bytes. + */ + +static void *BufferedFileRead(struct BufferedFile *BF, unsigned Length) +{ + void *RetVal; + + /* + * input verification + */ + + if(!(BF && Length)) + { + return(NULL); + } + + /* + * not enough bytes left + */ + + if(Length > BF->BytesLeft) + { + return(NULL); + } + + /* + * the pointer to the requested data + */ + + RetVal = BF->Ptr; + + /* + * Raise the pointer and counter. + */ + + BF->Ptr += Length; + BF->BytesLeft -= Length; + + return(RetVal); +} + +/* + * Rewind the buffer. + */ + +static qboolean BufferedFileRewind(struct BufferedFile *BF, unsigned Offset) +{ + unsigned BytesRead; + + /* + * input verification + */ + + if(!BF) + { + return(qfalse); + } + + /* + * special trick to rewind to the beginning of the buffer + */ + + if(Offset == (unsigned)-1) + { + BF->Ptr = BF->Buffer; + BF->BytesLeft = BF->Length; + + return(qtrue); + } + + /* + * How many bytes do we have already read? + */ + + BytesRead = BF->Ptr - BF->Buffer; + + /* + * We can only rewind to the beginning of the BufferedFile. + */ + + if(Offset > BytesRead) + { + return(qfalse); + } + + /* + * lower the pointer and counter. + */ + + BF->Ptr -= Offset; + BF->BytesLeft += Offset; + + return(qtrue); +} + +/* + * Skip some bytes. + */ + +static qboolean BufferedFileSkip(struct BufferedFile *BF, unsigned Offset) +{ + /* + * input verification + */ + + if(!BF) + { + return(qfalse); + } + + /* + * We can only skip to the end of the BufferedFile. 
+ */ + + if(Offset > BF->BytesLeft) + { + return(qfalse); + } + + /* + * lower the pointer and counter. + */ + + BF->Ptr += Offset; + BF->BytesLeft -= Offset; + + return(qtrue); +} + +/* + * Find a chunk + */ + +static qboolean FindChunk(struct BufferedFile *BF, uint32_t ChunkType) +{ + struct PNG_ChunkHeader *CH; + + uint32_t Length; + uint32_t Type; + + /* + * input verification + */ + + if(!BF) + { + return(qfalse); + } + + /* + * cycle trough the chunks + */ + + while(qtrue) + { + /* + * Read the chunk-header. + */ + + CH = BufferedFileRead(BF, PNG_ChunkHeader_Size); + if(!CH) + { + return(qfalse); + } + + /* + * Do not swap the original types + * they might be needed later. + */ + + Length = BigLong(CH->Length); + Type = BigLong(CH->Type); + + /* + * We found it! + */ + + if(Type == ChunkType) + { + /* + * Rewind to the start of the chunk. + */ + + BufferedFileRewind(BF, PNG_ChunkHeader_Size); + + break; + } + else + { + /* + * Skip the rest of the chunk. + */ + + if(Length) + { + if(!BufferedFileSkip(BF, Length + PNG_ChunkCRC_Size)) + { + return(qfalse); + } + } + } + } + + return(qtrue); +} + +/* + * Decompress all IDATs + */ + +static uint32_t DecompressIDATs(struct BufferedFile *BF, uint8_t **Buffer) +{ + uint8_t *DecompressedData; + uint32_t DecompressedDataLength; + + uint8_t *CompressedData; + uint8_t *CompressedDataPtr; + uint32_t CompressedDataLength; + + struct PNG_ChunkHeader *CH; + + uint32_t Length; + uint32_t Type; + + int BytesToRewind; + + int32_t puffResult; + uint8_t *puffDest; + uint32_t puffDestLen; + uint8_t *puffSrc; + uint32_t puffSrcLen; + + /* + * input verification + */ + + if(!(BF && Buffer)) + { + return(-1); + } + + /* + * some zeroing + */ + + DecompressedData = NULL; + *Buffer = DecompressedData; + + CompressedData = NULL; + CompressedDataLength = 0; + + BytesToRewind = 0; + + /* + * Find the first IDAT chunk. + */ + + if(!FindChunk(BF, PNG_ChunkType_IDAT)) + { + return(-1); + } + + /* + * Count the size of the uncompressed data + */ + + while(qtrue) + { + /* + * Read chunk header + */ + + CH = BufferedFileRead(BF, PNG_ChunkHeader_Size); + if(!CH) + { + /* + * Rewind to the start of this adventure + * and return unsuccessfull + */ + + BufferedFileRewind(BF, BytesToRewind); + + return(-1); + } + + /* + * Length and Type of chunk + */ + + Length = BigLong(CH->Length); + Type = BigLong(CH->Type); + + /* + * We have reached the end of the IDAT chunks + */ + + if(!(Type == PNG_ChunkType_IDAT)) + { + BufferedFileRewind(BF, PNG_ChunkHeader_Size); + + break; + } + + /* + * Add chunk header to count. 
+ */ + + BytesToRewind += PNG_ChunkHeader_Size; + + /* + * Skip to next chunk + */ + + if(Length) + { + if(!BufferedFileSkip(BF, Length + PNG_ChunkCRC_Size)) + { + BufferedFileRewind(BF, BytesToRewind); + + return(-1); + } + + BytesToRewind += Length + PNG_ChunkCRC_Size; + CompressedDataLength += Length; + } + } + + BufferedFileRewind(BF, BytesToRewind); + + CompressedData = ri.Malloc(CompressedDataLength); + if(!CompressedData) + { + return(-1); + } + + CompressedDataPtr = CompressedData; + + /* + * Collect the compressed Data + */ + + while(qtrue) + { + /* + * Read chunk header + */ + + CH = BufferedFileRead(BF, PNG_ChunkHeader_Size); + if(!CH) + { + ri.Free(CompressedData); + + return(-1); + } + + /* + * Length and Type of chunk + */ + + Length = BigLong(CH->Length); + Type = BigLong(CH->Type); + + /* + * We have reached the end of the IDAT chunks + */ + + if(!(Type == PNG_ChunkType_IDAT)) + { + BufferedFileRewind(BF, PNG_ChunkHeader_Size); + + break; + } + + /* + * Copy the Data + */ + + if(Length) + { + uint8_t *OrigCompressedData; + + OrigCompressedData = BufferedFileRead(BF, Length); + if(!OrigCompressedData) + { + ri.Free(CompressedData); + + return(-1); + } + + if(!BufferedFileSkip(BF, PNG_ChunkCRC_Size)) + { + ri.Free(CompressedData); + + return(-1); + } + + memcpy(CompressedDataPtr, OrigCompressedData, Length); + CompressedDataPtr += Length; + } + } + + /* + * Let puff() calculate the decompressed data length. + */ + + puffDest = NULL; + puffDestLen = 0; + + /* + * The zlib header and checkvalue don't belong to the compressed data. + */ + + puffSrc = CompressedData + PNG_ZlibHeader_Size; + puffSrcLen = CompressedDataLength - PNG_ZlibHeader_Size - PNG_ZlibCheckValue_Size; + + /* + * first puff() to calculate the size of the uncompressed data + */ + + puffResult = puff(puffDest, &puffDestLen, puffSrc, &puffSrcLen); + if(!((puffResult == 0) && (puffDestLen > 0))) + { + ri.Free(CompressedData); + + return(-1); + } + + /* + * Allocate the buffer for the uncompressed data. + */ + + DecompressedData = ri.Malloc(puffDestLen); + if(!DecompressedData) + { + ri.Free(CompressedData); + + return(-1); + } + + /* + * Set the input again in case something was changed by the last puff() . + */ + + puffDest = DecompressedData; + puffSrc = CompressedData + PNG_ZlibHeader_Size; + puffSrcLen = CompressedDataLength - PNG_ZlibHeader_Size - PNG_ZlibCheckValue_Size; + + /* + * decompression puff() + */ + + puffResult = puff(puffDest, &puffDestLen, puffSrc, &puffSrcLen); + + /* + * The compressed data is not needed anymore. + */ + + ri.Free(CompressedData); + + /* + * Check if the last puff() was successfull. + */ + + if(!((puffResult == 0) && (puffDestLen > 0))) + { + ri.Free(DecompressedData); + + return(-1); + } + + /* + * Set the output of this function. + */ + + DecompressedDataLength = puffDestLen; + *Buffer = DecompressedData; + + return(DecompressedDataLength); +} + +/* + * the Paeth predictor + */ + +static uint8_t PredictPaeth(uint8_t a, uint8_t b, uint8_t c) +{ + /* + * a == Left + * b == Up + * c == UpLeft + */ + + uint8_t Pr; + int p; + int pa, pb, pc; + + p = ((int) a) + ((int) b) - ((int) c); + pa = abs(p - ((int) a)); + pb = abs(p - ((int) b)); + pc = abs(p - ((int) c)); + + if((pa <= pb) && (pa <= pc)) + { + Pr = a; + } + else if(pb <= pc) + { + Pr = b; + } + else + { + Pr = c; + } + + return(Pr); + +} + +/* + * Reverse the filters. 
+ */ + +static qboolean UnfilterImage(uint8_t *DecompressedData, + uint32_t ImageHeight, + uint32_t BytesPerScanline, + uint32_t BytesPerPixel) +{ + uint8_t *DecompPtr; + uint8_t FilterType; + uint8_t *PixelLeft, *PixelUp, *PixelUpLeft; + uint32_t w, h, p; + + /* + * some zeros for the filters + */ + + uint8_t Zeros[8] = {0, 0, 0, 0, 0, 0, 0, 0}; + + /* + * input verification + */ + + if(!(DecompressedData && BytesPerPixel)) + { + return(qfalse); + } + + /* + * ImageHeight and BytesPerScanline can be zero in small interlaced images. + */ + + if((!ImageHeight) || (!BytesPerScanline)) + { + return(qtrue); + } + + /* + * Set the pointer to the start of the decompressed Data. + */ + + DecompPtr = DecompressedData; + + /* + * Un-filtering is done in place. + */ + + /* + * Go trough all scanlines. + */ + + for(h = 0; h < ImageHeight; h++) + { + /* + * Every scanline starts with a FilterType byte. + */ + + FilterType = *DecompPtr; + DecompPtr++; + + /* + * Left pixel of the first byte in a scanline is zero. + */ + + PixelLeft = Zeros; + + /* + * Set PixelUp to previous line only if we are on the second line or above. + * + * Plus one byte for the FilterType + */ + + if(h > 0) + { + PixelUp = DecompPtr - (BytesPerScanline + 1); + } + else + { + PixelUp = Zeros; + } + + /* + * The pixel left to the first pixel of the previous scanline is zero too. + */ + + PixelUpLeft = Zeros; + + /* + * Cycle trough all pixels of the scanline. + */ + + for(w = 0; w < (BytesPerScanline / BytesPerPixel); w++) + { + /* + * Cycle trough the bytes of the pixel. + */ + + for(p = 0; p < BytesPerPixel; p++) + { + switch(FilterType) + { + case PNG_FilterType_None : + { + /* + * The byte is unfiltered. + */ + + break; + } + + case PNG_FilterType_Sub : + { + DecompPtr[p] += PixelLeft[p]; + + break; + } + + case PNG_FilterType_Up : + { + DecompPtr[p] += PixelUp[p]; + + break; + } + + case PNG_FilterType_Average : + { + DecompPtr[p] += ((uint8_t) ((((uint16_t) PixelLeft[p]) + ((uint16_t) PixelUp[p])) / 2)); + + break; + } + + case PNG_FilterType_Paeth : + { + DecompPtr[p] += PredictPaeth(PixelLeft[p], PixelUp[p], PixelUpLeft[p]); + + break; + } + + default : + { + return(qfalse); + } + } + } + + PixelLeft = DecompPtr; + + /* + * We only have an upleft pixel if we are on the second line or above. + */ + + if(h > 0) + { + PixelUpLeft = DecompPtr - (BytesPerScanline + 1); + } + + /* + * Skip to the next pixel. + */ + + DecompPtr += BytesPerPixel; + + /* + * We only have a previous line if we are on the second line and above. + */ + + if(h > 0) + { + PixelUp = DecompPtr - (BytesPerScanline + 1); + } + } + } + + return(qtrue); +} + +/* + * Convert a raw input pixel to Quake 3 RGA format. 
+ */ + +static qboolean ConvertPixel(struct PNG_Chunk_IHDR *IHDR, + byte *OutPtr, + uint8_t *DecompPtr, + qboolean HasTransparentColour, + uint8_t *TransparentColour, + uint8_t *OutPal) +{ + /* + * input verification + */ + + if(!(IHDR && OutPtr && DecompPtr && TransparentColour && OutPal)) + { + return(qfalse); + } + + switch(IHDR->ColourType) + { + case PNG_ColourType_Grey : + { + switch(IHDR->BitDepth) + { + case PNG_BitDepth_1 : + case PNG_BitDepth_2 : + case PNG_BitDepth_4 : + { + uint8_t Step; + uint8_t GreyValue; + + Step = 0xFF / ((1 << IHDR->BitDepth) - 1); + + GreyValue = DecompPtr[0] * Step; + + OutPtr[0] = GreyValue; + OutPtr[1] = GreyValue; + OutPtr[2] = GreyValue; + OutPtr[3] = 0xFF; + + /* + * Grey supports full transparency for one specified colour + */ + + if(HasTransparentColour) + { + if(TransparentColour[1] == DecompPtr[0]) + { + OutPtr[3] = 0x00; + } + } + + + break; + } + + case PNG_BitDepth_8 : + case PNG_BitDepth_16 : + { + OutPtr[0] = DecompPtr[0]; + OutPtr[1] = DecompPtr[0]; + OutPtr[2] = DecompPtr[0]; + OutPtr[3] = 0xFF; + + /* + * Grey supports full transparency for one specified colour + */ + + if(HasTransparentColour) + { + if(IHDR->BitDepth == PNG_BitDepth_8) + { + if(TransparentColour[1] == DecompPtr[0]) + { + OutPtr[3] = 0x00; + } + } + else + { + if((TransparentColour[0] == DecompPtr[0]) && (TransparentColour[1] == DecompPtr[1])) + { + OutPtr[3] = 0x00; + } + } + } + + break; + } + + default : + { + return(qfalse); + } + } + + break; + } + + case PNG_ColourType_True : + { + switch(IHDR->BitDepth) + { + case PNG_BitDepth_8 : + { + OutPtr[0] = DecompPtr[0]; + OutPtr[1] = DecompPtr[1]; + OutPtr[2] = DecompPtr[2]; + OutPtr[3] = 0xFF; + + /* + * True supports full transparency for one specified colour + */ + + if(HasTransparentColour) + { + if((TransparentColour[1] == DecompPtr[0]) && + (TransparentColour[3] == DecompPtr[1]) && + (TransparentColour[5] == DecompPtr[2])) + { + OutPtr[3] = 0x00; + } + } + + break; + } + + case PNG_BitDepth_16 : + { + /* + * We use only the upper byte. + */ + + OutPtr[0] = DecompPtr[0]; + OutPtr[1] = DecompPtr[2]; + OutPtr[2] = DecompPtr[4]; + OutPtr[3] = 0xFF; + + /* + * True supports full transparency for one specified colour + */ + + if(HasTransparentColour) + { + if((TransparentColour[0] == DecompPtr[0]) && (TransparentColour[1] == DecompPtr[1]) && + (TransparentColour[2] == DecompPtr[2]) && (TransparentColour[3] == DecompPtr[3]) && + (TransparentColour[4] == DecompPtr[4]) && (TransparentColour[5] == DecompPtr[5])) + { + OutPtr[3] = 0x00; + } + } + + break; + } + + default : + { + return(qfalse); + } + } + + break; + } + + case PNG_ColourType_Indexed : + { + OutPtr[0] = OutPal[DecompPtr[0] * Q3IMAGE_BYTESPERPIXEL + 0]; + OutPtr[1] = OutPal[DecompPtr[0] * Q3IMAGE_BYTESPERPIXEL + 1]; + OutPtr[2] = OutPal[DecompPtr[0] * Q3IMAGE_BYTESPERPIXEL + 2]; + OutPtr[3] = OutPal[DecompPtr[0] * Q3IMAGE_BYTESPERPIXEL + 3]; + + break; + } + + case PNG_ColourType_GreyAlpha : + { + switch(IHDR->BitDepth) + { + case PNG_BitDepth_8 : + { + OutPtr[0] = DecompPtr[0]; + OutPtr[1] = DecompPtr[0]; + OutPtr[2] = DecompPtr[0]; + OutPtr[3] = DecompPtr[1]; + + break; + } + + case PNG_BitDepth_16 : + { + /* + * We use only the upper byte. 
+ */ + + OutPtr[0] = DecompPtr[0]; + OutPtr[1] = DecompPtr[0]; + OutPtr[2] = DecompPtr[0]; + OutPtr[3] = DecompPtr[2]; + + break; + } + + default : + { + return(qfalse); + } + } + + break; + } + + case PNG_ColourType_TrueAlpha : + { + switch(IHDR->BitDepth) + { + case PNG_BitDepth_8 : + { + OutPtr[0] = DecompPtr[0]; + OutPtr[1] = DecompPtr[1]; + OutPtr[2] = DecompPtr[2]; + OutPtr[3] = DecompPtr[3]; + + break; + } + + case PNG_BitDepth_16 : + { + /* + * We use only the upper byte. + */ + + OutPtr[0] = DecompPtr[0]; + OutPtr[1] = DecompPtr[2]; + OutPtr[2] = DecompPtr[4]; + OutPtr[3] = DecompPtr[6]; + + break; + } + + default : + { + return(qfalse); + } + } + + break; + } + + default : + { + return(qfalse); + } + } + + return(qtrue); +} + + +/* + * Decode a non-interlaced image. + */ + +static qboolean DecodeImageNonInterlaced(struct PNG_Chunk_IHDR *IHDR, + byte *OutBuffer, + uint8_t *DecompressedData, + uint32_t DecompressedDataLength, + qboolean HasTransparentColour, + uint8_t *TransparentColour, + uint8_t *OutPal) +{ + uint32_t IHDR_Width; + uint32_t IHDR_Height; + uint32_t BytesPerScanline, BytesPerPixel, PixelsPerByte; + uint32_t w, h, p; + byte *OutPtr; + uint8_t *DecompPtr; + + /* + * input verification + */ + + if(!(IHDR && OutBuffer && DecompressedData && DecompressedDataLength && TransparentColour && OutPal)) + { + return(qfalse); + } + + /* + * byte swapping + */ + + IHDR_Width = BigLong(IHDR->Width); + IHDR_Height = BigLong(IHDR->Height); + + /* + * information for un-filtering + */ + + switch(IHDR->ColourType) + { + case PNG_ColourType_Grey : + { + switch(IHDR->BitDepth) + { + case PNG_BitDepth_1 : + case PNG_BitDepth_2 : + case PNG_BitDepth_4 : + { + BytesPerPixel = 1; + PixelsPerByte = 8 / IHDR->BitDepth; + + break; + } + + case PNG_BitDepth_8 : + case PNG_BitDepth_16 : + { + BytesPerPixel = (IHDR->BitDepth / 8) * PNG_NumColourComponents_Grey; + PixelsPerByte = 1; + + break; + } + + default : + { + return(qfalse); + } + } + + break; + } + + case PNG_ColourType_True : + { + switch(IHDR->BitDepth) + { + case PNG_BitDepth_8 : + case PNG_BitDepth_16 : + { + BytesPerPixel = (IHDR->BitDepth / 8) * PNG_NumColourComponents_True; + PixelsPerByte = 1; + + break; + } + + default : + { + return(qfalse); + } + } + + break; + } + + case PNG_ColourType_Indexed : + { + switch(IHDR->BitDepth) + { + case PNG_BitDepth_1 : + case PNG_BitDepth_2 : + case PNG_BitDepth_4 : + { + BytesPerPixel = 1; + PixelsPerByte = 8 / IHDR->BitDepth; + + break; + } + + case PNG_BitDepth_8 : + { + BytesPerPixel = PNG_NumColourComponents_Indexed; + PixelsPerByte = 1; + + break; + } + + default : + { + return(qfalse); + } + } + + break; + } + + case PNG_ColourType_GreyAlpha : + { + switch(IHDR->BitDepth) + { + case PNG_BitDepth_8 : + case PNG_BitDepth_16 : + { + BytesPerPixel = (IHDR->BitDepth / 8) * PNG_NumColourComponents_GreyAlpha; + PixelsPerByte = 1; + + break; + } + + default : + { + return(qfalse); + } + } + + break; + } + + case PNG_ColourType_TrueAlpha : + { + switch(IHDR->BitDepth) + { + case PNG_BitDepth_8 : + case PNG_BitDepth_16 : + { + BytesPerPixel = (IHDR->BitDepth / 8) * PNG_NumColourComponents_TrueAlpha; + PixelsPerByte = 1; + + break; + } + + default : + { + return(qfalse); + } + } + + break; + } + + default : + { + return(qfalse); + } + } + + /* + * Calculate the size of one scanline + */ + + BytesPerScanline = (IHDR_Width * BytesPerPixel + (PixelsPerByte - 1)) / PixelsPerByte; + + /* + * Check if we have enough data for the whole image. 
+ */ + + if(!(DecompressedDataLength == ((BytesPerScanline + 1) * IHDR_Height))) + { + return(qfalse); + } + + /* + * Unfilter the image. + */ + + if(!UnfilterImage(DecompressedData, IHDR_Height, BytesPerScanline, BytesPerPixel)) + { + return(qfalse); + } + + /* + * Set the working pointers to the beginning of the buffers. + */ + + OutPtr = OutBuffer; + DecompPtr = DecompressedData; + + /* + * Create the output image. + */ + + for(h = 0; h < IHDR_Height; h++) + { + /* + * Count the pixels on the scanline for those multipixel bytes + */ + + uint32_t CurrPixel; + + /* + * skip FilterType + */ + + DecompPtr++; + + /* + * Reset the pixel count. + */ + + CurrPixel = 0; + + for(w = 0; w < (BytesPerScanline / BytesPerPixel); w++) + { + if(PixelsPerByte > 1) + { + uint8_t Mask; + uint32_t Shift; + uint8_t SinglePixel; + + for(p = 0; p < PixelsPerByte; p++) + { + if(CurrPixel < IHDR_Width) + { + Mask = (1 << IHDR->BitDepth) - 1; + Shift = (PixelsPerByte - 1 - p) * IHDR->BitDepth; + + SinglePixel = ((DecompPtr[0] & (Mask << Shift)) >> Shift); + + if(!ConvertPixel(IHDR, OutPtr, &SinglePixel, HasTransparentColour, TransparentColour, OutPal)) + { + return(qfalse); + } + + OutPtr += Q3IMAGE_BYTESPERPIXEL; + CurrPixel++; + } + } + + } + else + { + if(!ConvertPixel(IHDR, OutPtr, DecompPtr, HasTransparentColour, TransparentColour, OutPal)) + { + return(qfalse); + } + + + OutPtr += Q3IMAGE_BYTESPERPIXEL; + } + + DecompPtr += BytesPerPixel; + } + } + + return(qtrue); +} + +/* + * Decode an interlaced image. + */ + +static qboolean DecodeImageInterlaced(struct PNG_Chunk_IHDR *IHDR, + byte *OutBuffer, + uint8_t *DecompressedData, + uint32_t DecompressedDataLength, + qboolean HasTransparentColour, + uint8_t *TransparentColour, + uint8_t *OutPal) +{ + uint32_t IHDR_Width; + uint32_t IHDR_Height; + uint32_t BytesPerScanline[PNG_Adam7_NumPasses], BytesPerPixel, PixelsPerByte; + uint32_t PassWidth[PNG_Adam7_NumPasses], PassHeight[PNG_Adam7_NumPasses]; + uint32_t WSkip[PNG_Adam7_NumPasses], WOffset[PNG_Adam7_NumPasses], HSkip[PNG_Adam7_NumPasses], HOffset[PNG_Adam7_NumPasses]; + uint32_t w, h, p, a; + byte *OutPtr; + uint8_t *DecompPtr; + uint32_t TargetLength; + + /* + * input verification + */ + + if(!(IHDR && OutBuffer && DecompressedData && DecompressedDataLength && TransparentColour && OutPal)) + { + return(qfalse); + } + + /* + * byte swapping + */ + + IHDR_Width = BigLong(IHDR->Width); + IHDR_Height = BigLong(IHDR->Height); + + /* + * Skip and Offset for the passes. + */ + + WSkip[0] = 8; + WOffset[0] = 0; + HSkip[0] = 8; + HOffset[0] = 0; + + WSkip[1] = 8; + WOffset[1] = 4; + HSkip[1] = 8; + HOffset[1] = 0; + + WSkip[2] = 4; + WOffset[2] = 0; + HSkip[2] = 8; + HOffset[2] = 4; + + WSkip[3] = 4; + WOffset[3] = 2; + HSkip[3] = 4; + HOffset[3] = 0; + + WSkip[4] = 2; + WOffset[4] = 0; + HSkip[4] = 4; + HOffset[4] = 2; + + WSkip[5] = 2; + WOffset[5] = 1; + HSkip[5] = 2; + HOffset[5] = 0; + + WSkip[6] = 1; + WOffset[6] = 0; + HSkip[6] = 2; + HOffset[6] = 1; + + /* + * Calculate the sizes of the passes. 
+ */ + + PassWidth[0] = (IHDR_Width + 7) / 8; + PassHeight[0] = (IHDR_Height + 7) / 8; + + PassWidth[1] = (IHDR_Width + 3) / 8; + PassHeight[1] = (IHDR_Height + 7) / 8; + + PassWidth[2] = (IHDR_Width + 3) / 4; + PassHeight[2] = (IHDR_Height + 3) / 8; + + PassWidth[3] = (IHDR_Width + 1) / 4; + PassHeight[3] = (IHDR_Height + 3) / 4; + + PassWidth[4] = (IHDR_Width + 1) / 2; + PassHeight[4] = (IHDR_Height + 1) / 4; + + PassWidth[5] = (IHDR_Width + 0) / 2; + PassHeight[5] = (IHDR_Height + 1) / 2; + + PassWidth[6] = (IHDR_Width + 0) / 1; + PassHeight[6] = (IHDR_Height + 0) / 2; + + /* + * information for un-filtering + */ + + switch(IHDR->ColourType) + { + case PNG_ColourType_Grey : + { + switch(IHDR->BitDepth) + { + case PNG_BitDepth_1 : + case PNG_BitDepth_2 : + case PNG_BitDepth_4 : + { + BytesPerPixel = 1; + PixelsPerByte = 8 / IHDR->BitDepth; + + break; + } + + case PNG_BitDepth_8 : + case PNG_BitDepth_16 : + { + BytesPerPixel = (IHDR->BitDepth / 8) * PNG_NumColourComponents_Grey; + PixelsPerByte = 1; + + break; + } + + default : + { + return(qfalse); + } + } + + break; + } + + case PNG_ColourType_True : + { + switch(IHDR->BitDepth) + { + case PNG_BitDepth_8 : + case PNG_BitDepth_16 : + { + BytesPerPixel = (IHDR->BitDepth / 8) * PNG_NumColourComponents_True; + PixelsPerByte = 1; + + break; + } + + default : + { + return(qfalse); + } + } + + break; + } + + case PNG_ColourType_Indexed : + { + switch(IHDR->BitDepth) + { + case PNG_BitDepth_1 : + case PNG_BitDepth_2 : + case PNG_BitDepth_4 : + { + BytesPerPixel = 1; + PixelsPerByte = 8 / IHDR->BitDepth; + + break; + } + + case PNG_BitDepth_8 : + { + BytesPerPixel = PNG_NumColourComponents_Indexed; + PixelsPerByte = 1; + + break; + } + + default : + { + return(qfalse); + } + } + + break; + } + + case PNG_ColourType_GreyAlpha : + { + switch(IHDR->BitDepth) + { + case PNG_BitDepth_8 : + case PNG_BitDepth_16 : + { + BytesPerPixel = (IHDR->BitDepth / 8) * PNG_NumColourComponents_GreyAlpha; + PixelsPerByte = 1; + + break; + } + + default : + { + return(qfalse); + } + } + + break; + } + + case PNG_ColourType_TrueAlpha : + { + switch(IHDR->BitDepth) + { + case PNG_BitDepth_8 : + case PNG_BitDepth_16 : + { + BytesPerPixel = (IHDR->BitDepth / 8) * PNG_NumColourComponents_TrueAlpha; + PixelsPerByte = 1; + + break; + } + + default : + { + return(qfalse); + } + } + + break; + } + + default : + { + return(qfalse); + } + } + + /* + * Calculate the size of the scanlines per pass + */ + + for(a = 0; a < PNG_Adam7_NumPasses; a++) + { + BytesPerScanline[a] = (PassWidth[a] * BytesPerPixel + (PixelsPerByte - 1)) / PixelsPerByte; + } + + /* + * Calculate the size of all passes + */ + + TargetLength = 0; + + for(a = 0; a < PNG_Adam7_NumPasses; a++) + { + TargetLength += ((BytesPerScanline[a] + (BytesPerScanline[a] ? 1 : 0)) * PassHeight[a]); + } + + /* + * Check if we have enough data for the whole image. + */ + + if(!(DecompressedDataLength == TargetLength)) + { + return(qfalse); + } + + /* + * Unfilter the image. + */ + + DecompPtr = DecompressedData; + + for(a = 0; a < PNG_Adam7_NumPasses; a++) + { + if(!UnfilterImage(DecompPtr, PassHeight[a], BytesPerScanline[a], BytesPerPixel)) + { + return(qfalse); + } + + DecompPtr += ((BytesPerScanline[a] + (BytesPerScanline[a] ? 1 : 0)) * PassHeight[a]); + } + + /* + * Set the working pointers to the beginning of the buffers. + */ + + DecompPtr = DecompressedData; + + /* + * Create the output image. 
+ */ + + for(a = 0; a < PNG_Adam7_NumPasses; a++) + { + for(h = 0; h < PassHeight[a]; h++) + { + /* + * Count the pixels on the scanline for those multipixel bytes + */ + + uint32_t CurrPixel; + + /* + * skip FilterType + * but only when the pass has a width bigger than zero + */ + + if(BytesPerScanline[a]) + { + DecompPtr++; + } + + /* + * Reset the pixel count. + */ + + CurrPixel = 0; + + for(w = 0; w < (BytesPerScanline[a] / BytesPerPixel); w++) + { + if(PixelsPerByte > 1) + { + uint8_t Mask; + uint32_t Shift; + uint8_t SinglePixel; + + for(p = 0; p < PixelsPerByte; p++) + { + if(CurrPixel < PassWidth[a]) + { + Mask = (1 << IHDR->BitDepth) - 1; + Shift = (PixelsPerByte - 1 - p) * IHDR->BitDepth; + + SinglePixel = ((DecompPtr[0] & (Mask << Shift)) >> Shift); + + OutPtr = OutBuffer + (((((h * HSkip[a]) + HOffset[a]) * IHDR_Width) + ((CurrPixel * WSkip[a]) + WOffset[a])) * Q3IMAGE_BYTESPERPIXEL); + + if(!ConvertPixel(IHDR, OutPtr, &SinglePixel, HasTransparentColour, TransparentColour, OutPal)) + { + return(qfalse); + } + + CurrPixel++; + } + } + + } + else + { + OutPtr = OutBuffer + (((((h * HSkip[a]) + HOffset[a]) * IHDR_Width) + ((w * WSkip[a]) + WOffset[a])) * Q3IMAGE_BYTESPERPIXEL); + + if(!ConvertPixel(IHDR, OutPtr, DecompPtr, HasTransparentColour, TransparentColour, OutPal)) + { + return(qfalse); + } + } + + DecompPtr += BytesPerPixel; + } + } + } + + return(qtrue); +} + +/* + * The PNG loader + */ + +void R_LoadPNG(const char *name, byte **pic, int *width, int *height) +{ + struct BufferedFile *ThePNG; + byte *OutBuffer; + uint8_t *Signature; + struct PNG_ChunkHeader *CH; + uint32_t ChunkHeaderLength; + uint32_t ChunkHeaderType; + struct PNG_Chunk_IHDR *IHDR; + uint32_t IHDR_Width; + uint32_t IHDR_Height; + PNG_ChunkCRC *CRC; + uint8_t *InPal; + uint8_t *DecompressedData; + uint32_t DecompressedDataLength; + uint32_t i; + + /* + * palette with 256 RGBA entries + */ + + uint8_t OutPal[1024]; + + /* + * transparent colour from the tRNS chunk + */ + + qboolean HasTransparentColour = qfalse; + uint8_t TransparentColour[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; + + /* + * input verification + */ + + if(!(name && pic)) + { + return; + } + + /* + * Zero out return values. + */ + + *pic = NULL; + + if(width) + { + *width = 0; + } + + if(height) + { + *height = 0; + } + + /* + * Read the file. + */ + + ThePNG = ReadBufferedFile(name); + if(!ThePNG) + { + return; + } + + /* + * Read the siganture of the file. + */ + + Signature = BufferedFileRead(ThePNG, PNG_Signature_Size); + if(!Signature) + { + CloseBufferedFile(ThePNG); + + return; + } + + /* + * Is it a PNG? + */ + + if(memcmp(Signature, PNG_Signature, PNG_Signature_Size)) + { + CloseBufferedFile(ThePNG); + + return; + } + + /* + * Read the first chunk-header. + */ + + CH = BufferedFileRead(ThePNG, PNG_ChunkHeader_Size); + if(!CH) + { + CloseBufferedFile(ThePNG); + + return; + } + + /* + * PNG multi-byte types are in Big Endian + */ + + ChunkHeaderLength = BigLong(CH->Length); + ChunkHeaderType = BigLong(CH->Type); + + /* + * Check if the first chunk is an IHDR. + */ + + if(!((ChunkHeaderType == PNG_ChunkType_IHDR) && (ChunkHeaderLength == PNG_Chunk_IHDR_Size))) + { + CloseBufferedFile(ThePNG); + + return; + } + + /* + * Read the IHDR. 
+ */ + + IHDR = BufferedFileRead(ThePNG, PNG_Chunk_IHDR_Size); + if(!IHDR) + { + CloseBufferedFile(ThePNG); + + return; + } + + /* + * Read the CRC for IHDR + */ + + CRC = BufferedFileRead(ThePNG, PNG_ChunkCRC_Size); + if(!CRC) + { + CloseBufferedFile(ThePNG); + + return; + } + + /* + * Here we could check the CRC if we wanted to. + */ + + /* + * multi-byte type swapping + */ + + IHDR_Width = BigLong(IHDR->Width); + IHDR_Height = BigLong(IHDR->Height); + + /* + * Check if Width and Height are valid. + */ + + if(!((IHDR_Width > 0) && (IHDR_Height > 0)) + || IHDR_Width > INT_MAX / Q3IMAGE_BYTESPERPIXEL / IHDR_Height) + { + CloseBufferedFile(ThePNG); + + ri.Printf( PRINT_WARNING, "%s: invalid image size\n", name ); + + return; + } + + /* + * Do we need to check if the dimensions of the image are valid for Quake3? + */ + + /* + * Check if CompressionMethod and FilterMethod are valid. + */ + + if(!((IHDR->CompressionMethod == PNG_CompressionMethod_0) && (IHDR->FilterMethod == PNG_FilterMethod_0))) + { + CloseBufferedFile(ThePNG); + + return; + } + + /* + * Check if InterlaceMethod is valid. + */ + + if(!((IHDR->InterlaceMethod == PNG_InterlaceMethod_NonInterlaced) || (IHDR->InterlaceMethod == PNG_InterlaceMethod_Interlaced))) + { + CloseBufferedFile(ThePNG); + + return; + } + + /* + * Read palette for an indexed image. + */ + + if(IHDR->ColourType == PNG_ColourType_Indexed) + { + /* + * We need the palette first. + */ + + if(!FindChunk(ThePNG, PNG_ChunkType_PLTE)) + { + CloseBufferedFile(ThePNG); + + return; + } + + /* + * Read the chunk-header. + */ + + CH = BufferedFileRead(ThePNG, PNG_ChunkHeader_Size); + if(!CH) + { + CloseBufferedFile(ThePNG); + + return; + } + + /* + * PNG multi-byte types are in Big Endian + */ + + ChunkHeaderLength = BigLong(CH->Length); + ChunkHeaderType = BigLong(CH->Type); + + /* + * Check if the chunk is a PLTE. + */ + + if(!(ChunkHeaderType == PNG_ChunkType_PLTE)) + { + CloseBufferedFile(ThePNG); + + return; + } + + /* + * Check if Length is divisible by 3 + */ + + if(ChunkHeaderLength % 3) + { + CloseBufferedFile(ThePNG); + + return; + } + + /* + * Read the raw palette data + */ + + InPal = BufferedFileRead(ThePNG, ChunkHeaderLength); + if(!InPal) + { + CloseBufferedFile(ThePNG); + + return; + } + + /* + * Read the CRC for the palette + */ + + CRC = BufferedFileRead(ThePNG, PNG_ChunkCRC_Size); + if(!CRC) + { + CloseBufferedFile(ThePNG); + + return; + } + + /* + * Set some default values. + */ + + for(i = 0; i < 256; i++) + { + OutPal[i * Q3IMAGE_BYTESPERPIXEL + 0] = 0x00; + OutPal[i * Q3IMAGE_BYTESPERPIXEL + 1] = 0x00; + OutPal[i * Q3IMAGE_BYTESPERPIXEL + 2] = 0x00; + OutPal[i * Q3IMAGE_BYTESPERPIXEL + 3] = 0xFF; + } + + /* + * Convert to the Quake3 RGBA-format. + */ + + for(i = 0; i < (ChunkHeaderLength / 3); i++) + { + OutPal[i * Q3IMAGE_BYTESPERPIXEL + 0] = InPal[i*3+0]; + OutPal[i * Q3IMAGE_BYTESPERPIXEL + 1] = InPal[i*3+1]; + OutPal[i * Q3IMAGE_BYTESPERPIXEL + 2] = InPal[i*3+2]; + OutPal[i * Q3IMAGE_BYTESPERPIXEL + 3] = 0xFF; + } + } + + /* + * transparency information is sometimes stored in a tRNS chunk + */ + + /* + * Let's see if there is a tRNS chunk + */ + + if(FindChunk(ThePNG, PNG_ChunkType_tRNS)) + { + uint8_t *Trans; + + /* + * Read the chunk-header. + */ + + CH = BufferedFileRead(ThePNG, PNG_ChunkHeader_Size); + if(!CH) + { + CloseBufferedFile(ThePNG); + + return; + } + + /* + * PNG multi-byte types are in Big Endian + */ + + ChunkHeaderLength = BigLong(CH->Length); + ChunkHeaderType = BigLong(CH->Type); + + /* + * Check if the chunk is a tRNS. 
+ */ + + if(!(ChunkHeaderType == PNG_ChunkType_tRNS)) + { + CloseBufferedFile(ThePNG); + + return; + } + + /* + * Read the transparency information. + */ + + Trans = BufferedFileRead(ThePNG, ChunkHeaderLength); + if(!Trans) + { + CloseBufferedFile(ThePNG); + + return; + } + + /* + * Read the CRC. + */ + + CRC = BufferedFileRead(ThePNG, PNG_ChunkCRC_Size); + if(!CRC) + { + CloseBufferedFile(ThePNG); + + return; + } + + /* + * Only for Grey, True and Indexed ColourType should tRNS exist. + */ + + switch(IHDR->ColourType) + { + case PNG_ColourType_Grey : + { + if(ChunkHeaderLength != 2) + { + CloseBufferedFile(ThePNG); + + return; + } + + HasTransparentColour = qtrue; + + /* + * Grey can have one colour which is completely transparent. + * This colour is always stored in 16 bits. + */ + + TransparentColour[0] = Trans[0]; + TransparentColour[1] = Trans[1]; + + break; + } + + case PNG_ColourType_True : + { + if(ChunkHeaderLength != 6) + { + CloseBufferedFile(ThePNG); + + return; + } + + HasTransparentColour = qtrue; + + /* + * True can have one colour which is completely transparent. + * This colour is always stored in 16 bits. + */ + + TransparentColour[0] = Trans[0]; + TransparentColour[1] = Trans[1]; + TransparentColour[2] = Trans[2]; + TransparentColour[3] = Trans[3]; + TransparentColour[4] = Trans[4]; + TransparentColour[5] = Trans[5]; + + break; + } + + case PNG_ColourType_Indexed : + { + /* + * Maximum of 256 one byte transparency entries. + */ + + if(ChunkHeaderLength > 256) + { + CloseBufferedFile(ThePNG); + + return; + } + + HasTransparentColour = qtrue; + + /* + * alpha values for palette entries + */ + + for(i = 0; i < ChunkHeaderLength; i++) + { + OutPal[i * Q3IMAGE_BYTESPERPIXEL + 3] = Trans[i]; + } + + break; + } + + /* + * All other ColourTypes should not have tRNS chunks + */ + + default : + { + CloseBufferedFile(ThePNG); + + return; + } + } + } + + /* + * Rewind to the start of the file. + */ + + if(!BufferedFileRewind(ThePNG, -1)) + { + CloseBufferedFile(ThePNG); + + return; + } + + /* + * Skip the signature + */ + + if(!BufferedFileSkip(ThePNG, PNG_Signature_Size)) + { + CloseBufferedFile(ThePNG); + + return; + } + + /* + * Decompress all IDAT chunks + */ + + DecompressedDataLength = DecompressIDATs(ThePNG, &DecompressedData); + if(!(DecompressedDataLength && DecompressedData)) + { + CloseBufferedFile(ThePNG); + + return; + } + + /* + * Allocate output buffer. + */ + + OutBuffer = ri.Malloc(IHDR_Width * IHDR_Height * Q3IMAGE_BYTESPERPIXEL); + if(!OutBuffer) + { + ri.Free(DecompressedData); + CloseBufferedFile(ThePNG); + + return; + } + + /* + * Interlaced and Non-interlaced images need to be handled differently. + */ + + switch(IHDR->InterlaceMethod) + { + case PNG_InterlaceMethod_NonInterlaced : + { + if(!DecodeImageNonInterlaced(IHDR, OutBuffer, DecompressedData, DecompressedDataLength, HasTransparentColour, TransparentColour, OutPal)) + { + ri.Free(OutBuffer); + ri.Free(DecompressedData); + CloseBufferedFile(ThePNG); + + return; + } + + break; + } + + case PNG_InterlaceMethod_Interlaced : + { + if(!DecodeImageInterlaced(IHDR, OutBuffer, DecompressedData, DecompressedDataLength, HasTransparentColour, TransparentColour, OutPal)) + { + ri.Free(OutBuffer); + ri.Free(DecompressedData); + CloseBufferedFile(ThePNG); + + return; + } + + break; + } + + default : + { + ri.Free(OutBuffer); + ri.Free(DecompressedData); + CloseBufferedFile(ThePNG); + + return; + } + } + + /* + * update the pointer to the image data + */ + + *pic = OutBuffer; + + /* + * Fill width and height. 
+ */
+
+	if(width)
+	{
+		*width = IHDR_Width;
+	}
+
+	if(height)
+	{
+		*height = IHDR_Height;
+	}
+
+	/*
+	 *  DecompressedData is not needed anymore.
+	 */
+
+	ri.Free(DecompressedData);
+
+	/*
+	 *  We have all data, so close the file.
+	 */
+
+	CloseBufferedFile(ThePNG);
+}
diff --git a/code/renderer_vulkan/R_ImageProcess.c b/code/renderer_vulkan/R_ImageProcess.c
new file mode 100644
index 0000000000..fecf9b3feb
--- /dev/null
+++ b/code/renderer_vulkan/R_ImageProcess.c
@@ -0,0 +1,534 @@
+#include "tr_cvar.h"
+#include "ref_import.h"
+#include "vk_image.h"
+#include "R_ImageProcess.h"
+
+
+static unsigned char s_intensitytable[256];
+static unsigned char s_gammatable[256];
+
+/*
+void R_GammaCorrect(unsigned char* buffer, const unsigned int Size)
+{
+	unsigned int i;
+
+	for( i = 0; i < Size; i++ )
+	{
+		buffer[i] = s_gammatable[buffer[i]];
+	}
+}
+*/
+
+void R_SetColorMappings( void )
+{
+	int i, j;
+	int inf;
+	int shift = 0;
+
+	for (i = 0; i < 256; i++)
+	{
+		s_intensitytable[i] = s_gammatable[i] = i;
+	}
+
+	float g = r_gamma->value;
+
+	for ( i = 0; i < 256; i++ ) {
+		if ( g == 1 ) {
+			inf = i;
+		} else {
+			inf = 255 * pow ( i/255.0f, 1.0f / g ) + 0.5f;
+		}
+		inf <<= shift;
+		if (inf < 0) {
+			inf = 0;
+		}
+		else if (inf > 255) {
+			inf = 255;
+		}
+		s_gammatable[i] = inf;
+	}
+
+
+	if ( r_intensity->value <= 1 ) {
+		ri.Cvar_Set( "r_intensity", "1" );
+	}
+
+	for (i=0 ; i<256 ; i++)
+	{
+		j = i * r_intensity->value;
+		if (j > 255) {
+			j = 255;
+		}
+		s_intensitytable[i] = j;
+	}
+}
+
+
+/*
+================
+Scale up the pixel values in a texture to increase the lighting range
+================
+*/
+void R_LightScaleTexture (unsigned char* dst, unsigned char* in, unsigned int nBytes)
+{
+	unsigned int i;
+
+	if ( 0 )
+	{
+		// run every byte through both the intensity and the gamma table
+		for (i = 0; i < nBytes; i++)
+		{
+			dst[i] = s_gammatable[s_intensitytable[in[i]]];
+		}
+	}
+	else
+	{
+		// scale only the RGB channels with the intensity table, leave alpha untouched
+		for (i = 0; i < nBytes; i += 4)
+		{
+			dst[i+0] = s_intensitytable[in[i+0]];
+			dst[i+1] = s_intensitytable[in[i+1]];
+			dst[i+2] = s_intensitytable[in[i+2]];
+			dst[i+3] = in[i+3];
+		}
+	}
+}
+
+
+/*
+================
+R_BlendOverTexture
+
+Apply a color blend over a set of pixels
+================
+*/
+static void R_BlendOverTexture( unsigned char* data, const unsigned int pixelCount, const unsigned int blend[4] )
+{
+	unsigned int i;
+	const unsigned int inverseAlpha = 255 - blend[3];
+	const unsigned int bR = blend[0] * blend[3];
+	const unsigned int bG = blend[1] * blend[3];
+	const unsigned int bB = blend[2] * blend[3];
+
+	for ( i = 0; i < pixelCount; i++, data += 4 )
+	{
+		data[0] = ( data[0] * inverseAlpha + bR ) >> 9;
+		data[1] = ( data[1] * inverseAlpha + bG ) >> 9;
+		data[2] = ( data[2] * inverseAlpha + bB ) >> 9;
+	}
+}
+
+
+/*
+================
+R_MipMap
+
+Operates in place, quartering the size of the texture, no error checking
+================
+*/
+void R_MipMap(const unsigned char* in, uint32_t width, uint32_t height, unsigned char* out)
+{
+	if ( (width == 1) && (height == 1) )
+	{
+		out[0] = in[0];
+		return;
+	}
+	uint32_t i;
+
+	const unsigned int row = width * 4;
+	width >>= 1;
+	height >>= 1;
+
+	if ( (width == 0) || (height == 0) )
+	{
+		width += height;	// get largest
+		for (i = 0; i < width; i++, out += 4, in += 8)
+		{
+			out[0] = ( in[0] + in[4] )>>1;
+			out[1] = ( in[1] + in[5] )>>1;
+			out[2] = ( in[2] + in[6] )>>1;
+			out[3] = ( in[3] + in[7] )>>1;
+		}
+	}
+	else
+	{
+		for (i = 0; i < height; i++, in += row)
+		{
+			uint32_t j;
+			for (j = 0; j < width; j++, out += 4, in += 8)
+			{
+				out[0] = (in[0] + in[4] + in[row  ] + in[row+4])>>2;
+				out[1] = (in[1] + in[5] + in[row+1] + in[row+5])>>2;
+				out[2] = (in[2] + in[6] + in[row+2] + in[row+6])>>2;
+				out[3] = (in[3] + in[7] + in[row+3] + in[row+7])>>2;
+			}
+		}
+	}
+}
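R_MipMap above reduces a texture by averaging each 2x2 block of texels with truncating integer math; the one-row or one-column case averages pairs with >>1 instead. A minimal standalone sketch of the per-channel kernel (illustrative names, not renderer code):

    #include <stdio.h>

    int main(void)
    {
        /* one channel of a 2x2 texel block, averaged the way R_MipMap does:
           sum the four samples and truncate with >>2 */
        const unsigned char a = 10, b = 20, c = 30, d = 41;
        unsigned char mip = (unsigned char)((a + b + c + d) >> 2);

        printf("%u\n", mip);   /* prints 25: (10+20+30+41)/4, truncated */
        return 0;
    }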
+
+
+/*
+================
+R_MipMap2
+
+Operates in place, quartering the size of the texture
+Proper linear filter, no error checking
+================
+*/
+void R_MipMap2(const unsigned char* in, uint32_t inWidth, uint32_t inHeight, unsigned char* out)
+{
+	int i, j;
+
+	if ( (inWidth == 1) && (inHeight == 1) )
+	{
+		out[0] = in[0];
+		return;
+	}
+	//ri.Printf (PRINT_ALL, "\n---R_MipMap2---\n");
+	// Not a run-time function; it can be used where best image quality is wanted.
+
+	unsigned int outWidth = inWidth >> 1;
+	unsigned int outHeight = inHeight >> 1;
+	unsigned int nBytes = outWidth * outHeight * 4;
+
+	unsigned char * temp = (unsigned char *)malloc( nBytes );
+
+	const unsigned int inWidthMask = inWidth - 1;
+	const unsigned int inHeightMask = inHeight - 1;
+
+	for ( i = 0 ; i < outHeight ; i++ )
+	{
+		for ( j = 0 ; j < outWidth ; j++ )
+		{
+			unsigned int outIndex = i * outWidth + j;
+			unsigned int k;
+			for ( k = 0 ; k < 4 ; k++ )
+			{
+				unsigned int r0 = ((i*2-1) & inHeightMask) * inWidth;
+				unsigned int r1 = ((i*2  ) & inHeightMask) * inWidth;
+				unsigned int r2 = ((i*2+1) & inHeightMask) * inWidth;
+				unsigned int r3 = ((i*2+2) & inHeightMask) * inWidth;
+
+				unsigned int c0 = ((j*2-1) & inWidthMask);
+				unsigned int c1 = ((j*2  ) & inWidthMask);
+				unsigned int c2 = ((j*2+1) & inWidthMask);
+				unsigned int c3 = ((j*2+2) & inWidthMask);
+
+
+				unsigned int total =
+					1 * in[(r0 + c0) * 4 + k] +
+					2 * in[(r0 + c1) * 4 + k] +
+					2 * in[(r0 + c2) * 4 + k] +
+					1 * in[(r0 + c3) * 4 + k] +
+
+					2 * in[(r1 + c0) * 4 + k] +
+					4 * in[(r1 + c1) * 4 + k] +
+					4 * in[(r1 + c2) * 4 + k] +
+					2 * in[(r1 + c3) * 4 + k] +
+
+					2 * in[(r2 + c0) * 4 + k] +
+					4 * in[(r2 + c1) * 4 + k] +
+					4 * in[(r2 + c2) * 4 + k] +
+					2 * in[(r2 + c3) * 4 + k] +
+
+					1 * in[(r3 + c0) * 4 + k] +
+					2 * in[(r3 + c1) * 4 + k] +
+					2 * in[(r3 + c2) * 4 + k] +
+					1 * in[(r3 + c3) * 4 + k] ;
+
+				temp[4*outIndex + k] = total / 36;
+			}
+		}
+	}
+
+	memcpy( out, temp, nBytes );
+	free( temp );
+}
+
+/*
+================
+
+Used to resample images in a more general fashion than quartering.
+
+This will only be filtered properly if the resampled size
+is greater than half the original size.
+
+If a larger shrinking is needed, use the mipmap function before or after.
+================
+*/
+
+void ResampleTexture(unsigned char * pOut, const unsigned int inwidth, const unsigned int inheight,
+	const unsigned char *pIn, const unsigned int outwidth, const unsigned int outheight)
+{
+	unsigned int i, j;
+	unsigned int p1[2048], p2[2048];
+
+	// printf("inwidth: %d \t outwidth: %d \n", inwidth, outwidth);
+
+	unsigned int fracstep = (inwidth << 16)/outwidth;
+
+	unsigned int frac1 = fracstep>>2;
+	unsigned int frac2 = 3*(fracstep>>2);
+
+	for(i = 0; i < outwidth; i++)
+	{
+		p1[i] = 4*(frac1>>16);
+		frac1 += fracstep;
+
+		p2[i] = 4*(frac2>>16);
+		frac2 += fracstep;
+	}
+
+
+	for (i = 0; i < outheight; i++)
+	{
+		const unsigned char* inrow1 = pIn + 4 * inwidth * (unsigned int)((i + 0.25f) * inheight / outheight);
+		const unsigned char* inrow2 = pIn + 4 * inwidth * (unsigned int)((i + 0.75f) * inheight / outheight);
+
+		for (j = 0; j < outwidth; j++)
+		{
+			const unsigned char* pix1 = inrow1 + p1[j];
+			const unsigned char* pix2 = inrow1 + p2[j];
+			const unsigned char* pix3 = inrow2 + p1[j];
+			const unsigned char* pix4 = inrow2 + p2[j];
+
+			unsigned char* pCurPix = pOut + j * 4;
+
+			pCurPix[0] = (pix1[0] + pix2[0] + pix3[0] + pix4[0])>>2;
+			pCurPix[1] = (pix1[1] + pix2[1] + pix3[1] + pix4[1])>>2;
+			pCurPix[2] = (pix1[2] + pix2[2] + pix3[2] + pix4[2])>>2;
+			pCurPix[3] = (pix1[3] + pix2[3] + pix3[3] + pix4[3])>>2;
+		}
+
+		pOut += outwidth*4;
+	}
+}
+
+
+void GetScaledDimension(const unsigned int width, const unsigned int height, unsigned int * const outW, unsigned int * const outH, int isPicMip)
+{
+	const unsigned int max_texture_size = 2048;
+
+	unsigned int scaled_width, scaled_height;
+
+	for(scaled_width = max_texture_size; scaled_width > width; scaled_width>>=1)
+		;
+
+	for (scaled_height = max_texture_size; scaled_height > height; scaled_height>>=1)
+		;
+
+	// perform optional picmip operation
+	if ( isPicMip )
+	{
+		scaled_width >>= r_picmip->integer;
+		scaled_height >>= r_picmip->integer;
+	}
+
+	// clamp to minimum size
+	if (scaled_width == 0) {
+		scaled_width = 1;
+	}
+	if (scaled_height == 0) {
+		scaled_height = 1;
+	}
+
+	*outW = scaled_width;
+	*outH = scaled_height;
+}
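GetScaledDimension rounds each dimension down to the nearest power of two no larger than the input, then applies the optional picmip right-shift. A small self-contained sketch of the same rounding rule (scale_dim is a hypothetical helper, not part of the patch):

    #include <stdio.h>

    /* Same rule as GetScaledDimension above: the largest power of two
       that is <= the input dimension, then shifted right by picmip. */
    static unsigned int scale_dim(unsigned int dim, int picmip)
    {
        unsigned int scaled;
        for (scaled = 2048; scaled > dim; scaled >>= 1)
            ;
        scaled >>= picmip;
        return scaled ? scaled : 1;   /* clamp to the 1-texel minimum */
    }

    int main(void)
    {
        /* a 640x480 source with r_picmip 1 comes out as 256x128 */
        printf("%u x %u\n", scale_dim(640, 1), scale_dim(480, 1));
        return 0;
    }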
+///////////////////////////////////////////////////////////////////////////////////////////////
+// DEBUG HELPER FUNCTIONS ...
+///////////////////////////////////////////////////////////////////////////////////////////////
+
+void imsave(char *fileName, unsigned char* buffer2, unsigned int width, unsigned int height);
+void fsWriteFile( const char *qpath, const void *buffer, int size );
+
+
+void fsWriteFile( const char *qpath, const void *buffer, int size )
+{
+	unsigned char* buf = (unsigned char *)buffer;
+
+	FILE * f = fopen( qpath, "wb" );
+	if ( !f )
+	{
+		fprintf(stderr, "Failed to open %s\n", qpath );
+		return;
+	}
+
+	int remaining = size;
+	int tries = 0;
+	int block = 0;
+	int written = 0;
+
+	while (remaining)
+	{
+		block = remaining;
+		written = fwrite (buf, 1, block, f);
+		if (written == 0)
+		{
+			if (!tries)
+			{
+				tries = 1;
+			}
+			else
+			{
+				fprintf(stderr, "FS_Write: 0 bytes written\n" );
+				fclose(f);
+				return;
+			}
+		}
+
+		if (written == -1)
+		{
+			fprintf(stderr, "FS_Write: -1 bytes written\n" );
+			fclose(f);
+			return;
+		}
+
+		remaining -= written;
+		buf += written;
+	}
+
+	//FS_Write( buffer, size, f );
+
+	fclose(f);
+}
+
+
+void imsave(char *fileName, unsigned char* buffer2, unsigned int width, unsigned int height)
+{
+	const unsigned int cnPixels = width * height;
+
+	unsigned char* buffer = (unsigned char*) malloc( cnPixels * 3 + 18);
+
+	memset (buffer, 0, 18);
+	buffer[2] = 2;		// uncompressed type
+	buffer[12] = width & 255;
+	buffer[13] = width >> 8;
+	buffer[14] = height & 255;
+	buffer[15] = height >> 8;
+	buffer[16] = 24;	// pixel size
+
+	unsigned char* buffer_ptr = buffer + 18;
+	unsigned char* buffer2_ptr = buffer2;
+
+	unsigned int i;
+	for (i = 0; i < cnPixels; i++)
+	{
+		buffer_ptr[0] = buffer2_ptr[0];
+		buffer_ptr[1] = buffer2_ptr[1];
+		buffer_ptr[2] = buffer2_ptr[2];
+		buffer_ptr += 3;
+		buffer2_ptr += 4;
+	}
+
+
+	// swap rgb to bgr
+	const unsigned int c = 18 + width * height * 3;
+	for (i = 18; i < c; i += 3)
+	{
+		unsigned char tmp = buffer[i];
+		buffer[i] = buffer[i+2];
+		buffer[i+2] = tmp;
+	}
+
+	fsWriteFile(fileName, buffer, c);
+
+	free( buffer );
+}
+
+	if(!columns || !rows || numPixels > 0x7FFFFFFF || numPixels / columns / 4 != rows)
+	{
+		ri.Error (ERR_DROP, "LoadTGA: %s has an invalid image size", name);
+	}
+
+
+	targa_rgba = ri.Malloc (numPixels);
+
+	if (targa_header.id_length != 0)
+	{
+		if (buf_p + targa_header.id_length > end)
+			ri.Error( ERR_DROP, "LoadTGA: header too short (%s)", name );
+
+		buf_p += targa_header.id_length;	// skip TARGA image comment
+	}
+
+	if ( targa_header.image_type==2 || targa_header.image_type == 3 )
+	{
+		if(buf_p + columns*rows*targa_header.pixel_size/8 > end)
+		{
+			ri.Error (ERR_DROP, "LoadTGA: file truncated (%s)", name);
+		}
+
+		// Uncompressed RGB or gray scale image
+		for(row = rows-1; row >= 0; row--)
+		{
+			pixbuf = targa_rgba + row*columns*4;
+			for(column = 0; column < columns; column++)
+			{
+				unsigned char red,green,blue,alphabyte;
+				switch (targa_header.pixel_size)
+				{
+				case 8:
+					blue = *buf_p++;
+					green = blue;
+					red = blue;
+					*pixbuf++ = red;
+					*pixbuf++ = green;
+					*pixbuf++ = blue;
+					*pixbuf++ = 255;
+					break;
+				case 24:
+					blue = *buf_p++;
+					green = *buf_p++;
+					red = *buf_p++;
+					*pixbuf++ = red;
+					*pixbuf++ = green;
+					*pixbuf++ = blue;
+					*pixbuf++ = 255;
+					break;
+				case 32:
+					blue = *buf_p++;
+					green = *buf_p++;
+					red = *buf_p++;
+					alphabyte = *buf_p++;
+					*pixbuf++ = red;
+					*pixbuf++ = green;
+					*pixbuf++ = blue;
+					*pixbuf++ = alphabyte;
+					break;
+				default:
+					ri.Error( ERR_DROP, "LoadTGA: illegal pixel_size '%d' in file '%s'", targa_header.pixel_size, name );
+					break;
+				}
+			}
+		}
+	}
+	else if (targa_header.image_type == 10) {   // Runlength encoded RGB images
+		unsigned char red,green,blue,alphabyte,packetHeader,packetSize,j;
+
+		red = 0;
+		green = 0;
+		blue = 0;
+		alphabyte = 0xff;
+
+		for(row = rows-1; row >= 0; row--) {
+			pixbuf = targa_rgba + row*columns*4;
+			for(column = 0; column < columns; ) {
+				if(buf_p + 1 > end)
+					ri.Error (ERR_DROP, "LoadTGA: file truncated (%s)", name);
+				packetHeader= *buf_p++;
+				packetSize = 1 + (packetHeader & 0x7f);
+				if (packetHeader & 0x80) {        // run-length packet
+					if(buf_p + targa_header.pixel_size/8 > end)
+						ri.Error (ERR_DROP, "LoadTGA: file truncated (%s)", name);
+					switch (targa_header.pixel_size) {
+					case 24:
+						blue = *buf_p++;
+						green = *buf_p++;
+						red = *buf_p++;
+						alphabyte = 255;
+						break;
+					case 32:
+						blue = *buf_p++;
+						green = *buf_p++;
+						red = *buf_p++;
+						alphabyte = *buf_p++;
+						break;
+					default:
+						ri.Error( ERR_DROP, "LoadTGA: illegal pixel_size '%d' in file '%s'", targa_header.pixel_size, name );
+						break;
+					}
+
+					for(j = 0; j < packetSize; j++) {
+						*pixbuf++=red;
+						*pixbuf++=green;
+						*pixbuf++=blue;
+						*pixbuf++=alphabyte;
+						column++;
+						if (column == columns) { // run spans across rows
+							column = 0;
+							if (row > 0)
+								row--;
+							else
+								goto breakOut;
+							pixbuf = targa_rgba + row*columns*4;
+						}
+					}
+				}
+				else {                            // non run-length packet
+
+					if(buf_p + targa_header.pixel_size/8*packetSize > end)
+						ri.Error (ERR_DROP, "LoadTGA: file truncated (%s)", name);
+					for(j = 0; j < packetSize; j++) {
+						switch (targa_header.pixel_size) {
+						case 24:
+							blue = *buf_p++;
+							green = *buf_p++;
+							red = *buf_p++;
+							*pixbuf++ = red;
+							*pixbuf++ = green;
+							*pixbuf++ = blue;
+							*pixbuf++ = 255;
+							break;
+						case 32:
+							blue = *buf_p++;
+							green = *buf_p++;
+							red = *buf_p++;
+							alphabyte = *buf_p++;
+							*pixbuf++ = red;
+							*pixbuf++ = green;
+							*pixbuf++ = blue;
+							*pixbuf++ = alphabyte;
+							break;
+						default:
+							ri.Error( ERR_DROP, "LoadTGA: illegal pixel_size '%d' in file '%s'", targa_header.pixel_size, name );
+							break;
+						}
+						column++;
+						if (column == columns) { // pixel packet run spans across rows
+							column = 0;
+							if (row > 0)
+								row--;
else + goto breakOut; + pixbuf = targa_rgba + row*columns*4; + } + } + } + } + breakOut:; + } + } + +#if 0 + // TTimo: this is the chunk of code to ensure a behavior that meets TGA specs + // bit 5 set => top-down + if (targa_header.attributes & 0x20) { + unsigned char *flip = (unsigned char*)malloc (columns*4); + unsigned char *src, *dst; + + for (row = 0; row < rows/2; row++) { + src = targa_rgba + row * 4 * columns; + dst = targa_rgba + (rows - row - 1) * 4 * columns; + + memcpy (flip, src, columns*4); + memcpy (src, dst, columns*4); + memcpy (dst, flip, columns*4); + } + free (flip); + } +#endif + // instead we just print a warning + if (targa_header.attributes & 0x20) { + ri.Printf( PRINT_WARNING, "WARNING: '%s' TGA file header declares top-down image, ignoring\n", name); + } + + if (width) + *width = columns; + if (height) + *height = rows; + + *pic = targa_rgba; + + ri.FS_FreeFile (buffer); +} diff --git a/code/renderer_vulkan/R_LerpTag.c b/code/renderer_vulkan/R_LerpTag.c new file mode 100644 index 0000000000..57da5c4271 --- /dev/null +++ b/code/renderer_vulkan/R_LerpTag.c @@ -0,0 +1,177 @@ +#include "tr_local.h" +#include "tr_model.h" +#include "ref_import.h" + +static md3Tag_t *R_GetTag( md3Header_t *mod, int frame, const char *tagName ) { + md3Tag_t *tag; + int i; + + if ( frame >= mod->numFrames ) { + // it is possible to have a bad frame while changing models, so don't error + frame = mod->numFrames - 1; + } + + tag = (md3Tag_t *)((byte *)mod + mod->ofsTags) + frame * mod->numTags; + for ( i = 0 ; i < mod->numTags ; i++, tag++ ) { + if ( !strcmp( tag->name, tagName ) ) { + return tag; // found it + } + } + + return NULL; +} + +static md3Tag_t *R_GetAnimTag( mdrHeader_t *mod, int framenum, const char *tagName, md3Tag_t * dest) +{ + int i, j, k; + int frameSize; + mdrFrame_t *frame; + mdrTag_t *tag; + + if ( framenum >= mod->numFrames ) + { + // it is possible to have a bad frame while changing models, so don't error + framenum = mod->numFrames - 1; + } + + tag = (mdrTag_t *)((byte *)mod + mod->ofsTags); + for ( i = 0 ; i < mod->numTags ; i++, tag++ ) + { + if ( !strcmp( tag->name, tagName ) ) + { + Q_strncpyz(dest->name, tag->name, sizeof(dest->name)); + + // uncompressed model... 
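+			// The (mdrFrame_t *)0 cast below is the classic offsetof trick:
+			// taking the address of bones[numBones] on a null base yields the
+			// byte size of a frame carrying numBones bones, i.e. the stride
+			// used to index into the frame array.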
+ // + frameSize = (intptr_t)( &((mdrFrame_t *)0)->bones[ mod->numBones ] ); + frame = (mdrFrame_t *)((byte *)mod + mod->ofsFrames + framenum * frameSize ); + + for (j = 0; j < 3; j++) + { + for (k = 0; k < 3; k++) + dest->axis[j][k]=frame->bones[tag->boneIndex].matrix[k][j]; + } + + dest->origin[0]=frame->bones[tag->boneIndex].matrix[0][3]; + dest->origin[1]=frame->bones[tag->boneIndex].matrix[1][3]; + dest->origin[2]=frame->bones[tag->boneIndex].matrix[2][3]; + + return dest; + } + } + + return NULL; +} + +static void Matrix34Multiply_OnlySetOrigin( float *a, float *b, float *out ) { + out[ 3] = a[0] * b[3] + a[1] * b[7] + a[ 2] * b[11] + a[ 3]; + out[ 7] = a[4] * b[3] + a[5] * b[7] + a[ 6] * b[11] + a[ 7]; + out[11] = a[8] * b[3] + a[9] * b[7] + a[10] * b[11] + a[11]; +} + + + +static void ComputeJointMats( iqmData_t *data, int frame, int oldframe, + float backlerp, float *mat ) { + float *mat1; + int i; + + ComputePoseMats( data, frame, oldframe, backlerp, mat ); + + for( i = 0; i < data->num_joints; i++ ) { + float outmat[12]; + mat1 = mat + 12 * i; + + memcpy(outmat, mat1, sizeof(outmat)); + + Matrix34Multiply_OnlySetOrigin( outmat, data->jointMats + 12 * i, mat1 ); + } +} + + +static int R_IQMLerpTag( orientation_t *tag, iqmData_t *data, + int startFrame, int endFrame, + float frac, const char *tagName ) { + float jointMats[IQM_MAX_JOINTS * 12]; + int joint; + char *names = data->names; + + // get joint number by reading the joint names + for( joint = 0; joint < data->num_joints; joint++ ) { + if( !strcmp( tagName, names ) ) + break; + names += strlen( names ) + 1; + } + if( joint >= data->num_joints ) { + memset(tag, 0, sizeof(orientation_t)); + + return qfalse; + } + + ComputeJointMats( data, startFrame, endFrame, frac, jointMats ); + + tag->axis[0][0] = jointMats[12 * joint + 0]; + tag->axis[1][0] = jointMats[12 * joint + 1]; + tag->axis[2][0] = jointMats[12 * joint + 2]; + tag->origin[0] = jointMats[12 * joint + 3]; + tag->axis[0][1] = jointMats[12 * joint + 4]; + tag->axis[1][1] = jointMats[12 * joint + 5]; + tag->axis[2][1] = jointMats[12 * joint + 6]; + tag->origin[1] = jointMats[12 * joint + 7]; + tag->axis[0][2] = jointMats[12 * joint + 8]; + tag->axis[1][2] = jointMats[12 * joint + 9]; + tag->axis[2][2] = jointMats[12 * joint + 10]; + tag->origin[2] = jointMats[12 * joint + 11]; + + return qtrue; +} + + +int RE_LerpTag( orientation_t *tag, qhandle_t handle, int startFrame, int endFrame, + float frac, const char *tagName ) +{ + + // ri.Printf(PRINT_ALL, "R_LerpTag\n"); + + md3Tag_t *start, *end; + md3Tag_t start_space, end_space; + model_t* model = R_GetModelByHandle( handle ); + if ( !model->md3[0] ) + { + if(model->type == MOD_MDR) + { + start = R_GetAnimTag((mdrHeader_t *) model->modelData, startFrame, tagName, &start_space); + end = R_GetAnimTag((mdrHeader_t *) model->modelData, endFrame, tagName, &end_space); + } + else if( model->type == MOD_IQM ) { + return R_IQMLerpTag( tag, model->modelData, + startFrame, endFrame, + frac, tagName ); + } else { + start = end = NULL; + } + } + else + { + start = R_GetTag( model->md3[0], startFrame, tagName ); + end = R_GetTag( model->md3[0], endFrame, tagName ); + } + + if ( !start || !end ) { + memset(tag, 0, sizeof(orientation_t)); + return qfalse; + } + + + uint32_t i; + for ( i = 0 ; i < 3 ; i++ ) { + tag->origin[i] = start->origin[i] + (end->origin[i] - start->origin[i]) * frac; + tag->axis[0][i] = start->axis[0][i] + (end->axis[0][i] - start->axis[0][i]) * frac; + tag->axis[1][i] = start->axis[1][i] + (end->axis[1][i] - 
start->axis[1][i]) * frac;
+		tag->axis[2][i] = start->axis[2][i] + (end->axis[2][i] - start->axis[2][i]) * frac;
+	}
+	VectorNormalize( tag->axis[0] );
+	VectorNormalize( tag->axis[1] );
+	VectorNormalize( tag->axis[2] );
+	return qtrue;
+}
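The loop above is a plain per-component linear interpolation, out = start + (end - start) * frac, followed by VectorNormalize calls because blended axis vectors are generally shorter than unit length. A tiny standalone sketch of the origin half (LerpOrigin is a hypothetical name, not renderer API):

    #include <stdio.h>

    typedef float vec3_t[3];

    /* Blend two positions the way RE_LerpTag blends tag origins. */
    static void LerpOrigin(const vec3_t start, const vec3_t end, float frac, vec3_t out)
    {
        int i;
        for (i = 0; i < 3; i++)
            out[i] = start[i] + (end[i] - start[i]) * frac;
    }

    int main(void)
    {
        vec3_t a = {0.0f, 0.0f, 0.0f}, b = {10.0f, 20.0f, 30.0f}, r;
        LerpOrigin(a, b, 0.25f, r);           /* a quarter of the way: 2.5 5 7.5 */
        printf("%g %g %g\n", r[0], r[1], r[2]);
        return 0;
    }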
diff --git a/code/renderer_vulkan/R_ListShader.c b/code/renderer_vulkan/R_ListShader.c
new file mode 100644
index 0000000000..cabf1d315e
--- /dev/null
+++ b/code/renderer_vulkan/R_ListShader.c
@@ -0,0 +1,64 @@
+#include "tr_globals.h"
+#include "tr_shader.h"
+#include "ref_import.h"
+
+/*
+===============
+Dump information on all valid shaders to the console.
+A second argument will cause it to print in sorted order.
+===============
+*/
+void R_ShaderList_f (void)
+{
+	int i;
+	int count = 0;
+	shader_t* pShader;
+
+	ri.Printf (PRINT_ALL, "-----------------------\n");
+
+	for ( i = 0 ; i < tr.numShaders ; i++ )
+	{
+		if ( ri.Cmd_Argc() > 1 )
+			pShader = tr.sortedShaders[i];
+		else
+			pShader = tr.shaders[i];
+
+		ri.Printf( PRINT_ALL, "%i ", pShader->numUnfoggedPasses );
+
+		if (pShader->lightmapIndex >= 0 )
+			ri.Printf (PRINT_ALL, "L ");
+		else
+			ri.Printf (PRINT_ALL, "  ");
+
+
+		if ( pShader->multitextureEnv == GL_ADD )
+			ri.Printf( PRINT_ALL, "MT(a) " );
+		else if ( pShader->multitextureEnv == GL_MODULATE )
+			ri.Printf( PRINT_ALL, "MT(m) " );
+		else
+			ri.Printf( PRINT_ALL, "      " );
+
+
+		if ( pShader->explicitlyDefined )
+			ri.Printf( PRINT_ALL, "E " );
+		else
+			ri.Printf( PRINT_ALL, "  " );
+
+
+		if ( !pShader->isSky )
+			ri.Printf( PRINT_ALL, "gen " );
+		else
+			ri.Printf( PRINT_ALL, "sky " );
+
+
+		if ( pShader->defaultShader )
+			ri.Printf (PRINT_ALL, ": %s (DEFAULTED)\n", pShader->name);
+		else
+			ri.Printf (PRINT_ALL, ": %s\n", pShader->name);
+
+		count++;
+	}
+	ri.Printf (PRINT_ALL, "%i total shaders\n", count);
+	ri.Printf (PRINT_ALL, "------------------\n");
+}
+
diff --git a/code/renderer_vulkan/R_LoadImage.c b/code/renderer_vulkan/R_LoadImage.c
new file mode 100644
index 0000000000..6cf38ff09e
--- /dev/null
+++ b/code/renderer_vulkan/R_LoadImage.c
@@ -0,0 +1,131 @@
+#include "tr_local.h"
+#include "ref_import.h"
+#include "image_loader.h"
+
+// Description: Loads any of the supported image types into
+// a canonical 32 bit format.
+
+
+typedef struct
+{
+	char *ext;
+	void (*ImageLoader)( const char *, unsigned char **, int *, int * );
+} imageExtToLoaderMap_t;
+
+// Note that the ordering indicates the order of preference used
+// when there are multiple images of different formats available
+static const imageExtToLoaderMap_t imageLoaders[6] =
+{
+	{ "tga",  R_LoadTGA },
+	{ "jpg",  R_LoadJPG },
+	{ "jpeg", R_LoadJPG },
+	{ "png",  R_LoadPNG },
+	{ "pcx",  R_LoadPCX },
+	{ "bmp",  R_LoadBMP }
+};
+
+static const int numImageLoaders = 6;
+
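The table above drives both the preferred-loader lookup and the any-format fallback in R_LoadImage below; supporting another format would be one more row plus an updated count. A self-contained sketch of the same table-driven dispatch idea, with illustrative names only:

    #include <stdio.h>
    #include <string.h>

    typedef struct {
        const char *ext;
        int (*load)(const char *path);   /* returns non-zero on success */
    } loader_t;

    static int load_tga(const char *p) { printf("trying tga: %s\n", p); return 0; }
    static int load_jpg(const char *p) { printf("trying jpg: %s\n", p); return 1; }

    int main(void)
    {
        const loader_t loaders[] = { { "tga", load_tga }, { "jpg", load_jpg } };
        const int n = (int)(sizeof(loaders) / sizeof(loaders[0]));
        const char *ext = "tga";
        int i, first = -1;

        /* preferred loader first, matched by extension */
        for (i = 0; i < n; i++) {
            if (strcmp(ext, loaders[i].ext) == 0) {
                first = i;
                if (loaders[i].load("shot.tga"))
                    return 0;
            }
        }

        /* then fall back to every other supported format */
        for (i = 0; i < n; i++) {
            if (i != first && loaders[i].load("shot.jpg"))
                return 0;
        }
        return 1;
    }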
\n", + name, localName); + *width = (uint32_t)iwidth; + *height = (uint32_t)iheight; + return; + } + } + } + + ri.Printf( PRINT_WARNING, "%s not present.\n", name); + + // try again without the extension + +} diff --git a/code/renderer_vulkan/R_LoadImage2.c b/code/renderer_vulkan/R_LoadImage2.c new file mode 100644 index 0000000000..6fe24e1ddf --- /dev/null +++ b/code/renderer_vulkan/R_LoadImage2.c @@ -0,0 +1,737 @@ +#include "tr_local.h" +#include "ref_import.h" + +static void* q3_stbi_malloc(size_t size) { + return ri.Malloc((int)size); +} +static void q3_stbi_free(void* p) { + ri.Free(p); +} +static void* q3_stbi_realloc(void* p, size_t old_size, size_t new_size) { + if (p == NULL) + return q3_stbi_malloc(new_size); + + void* p_new; + if (old_size < new_size) { + p_new = q3_stbi_malloc(new_size); + memcpy(p_new, p, old_size); + q3_stbi_free(p); + } else { + p_new = p; + } + return p_new; +} +#define STBI_MALLOC q3_stbi_malloc +#define STBI_FREE q3_stbi_free +#define STBI_REALLOC_SIZED q3_stbi_realloc +#define STB_IMAGE_IMPLEMENTATION +// stb_image.h uses 'ri' as a local variable name which shadows +// the global refimport_t ri declared in ref_import.h +#if defined(__GNUC__) || defined(__clang__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wshadow" +#endif +#include "stb_image.h" +#if defined(__GNUC__) || defined(__clang__) +#pragma GCC diagnostic pop +#endif + + + +static void LoadTGA( const char* name, unsigned char** pic, uint32_t* width, uint32_t* height); +static void LoadBMP( const char* name, unsigned char** pic, uint32_t* width, uint32_t* height); +static void LoadJPG( const char* name, unsigned char** pic, uint32_t* width, uint32_t* height); +static void LoadPCX32 ( const char* filename, unsigned char** pic, uint32_t* width, uint32_t* height); + + + +/* + Loads any of the supported image types into a cannonical 32 bit format. +*/ +void R_LoadImage2(const char *name, unsigned char **pic, uint32_t* width, uint32_t* height) +{ + + const int len = (int)strlen(name); + if (len<5) + { + ri.Printf( PRINT_WARNING, "R_LoadImage2: try to loading %s ? 
\n", name ); + return; + } + + // point to '.', .jped are assume not exist + const char* const pPnt = (char*)name + len - 4; + + if(pPnt[0] == '.') + { // have a extension + if( ( (pPnt[1] == 't') && (pPnt[2] == 'g') && (pPnt[3] == 'a') ) || + ( (pPnt[1] == 'T') && (pPnt[2] == 'G') && (pPnt[3] == 'A') ) ) + { + LoadTGA( name, pic, width, height ); + + if (NULL == *pic) + { + // try jpg in place of tga + char altname[128] = {0}; + strncpy( altname, name, 128 ); + char* pt = altname + len - 4; + + pt[1] = 'j'; + pt[2] = 'p'; + pt[3] = 'g'; + + LoadJPG( altname, pic, width, height ); + } + } + else if ( ( (pPnt[1] == 'j') && (pPnt[2] == 'p') && (pPnt[3] == 'g') ) || + ( (pPnt[1] == 'J') && (pPnt[2] == 'P') && (pPnt[3] == 'G') ) ) + { + LoadJPG( name, pic, width, height ); + } + else if ( ( (pPnt[1] == 'b') && (pPnt[2] == 'm') && (pPnt[3] == 'p') ) || + ( (pPnt[1] == 'B') && (pPnt[2] == 'M') && (pPnt[3] == 'P') ) ) + + { + LoadBMP( name, pic, width, height ); + } + else if ( ( (pPnt[1] == 'p') && (pPnt[2] == 'c') && (pPnt[3] == 'x') ) || + ( (pPnt[1] == 'P') && (pPnt[2] == 'C') && (pPnt[3] == 'X') ) ) + { + LoadPCX32( name, pic, width, height ); + } + } + else + { // without a extension + // Try and find a suitable match using all the image formats supported + char altname[128] = {0}; + strcpy( altname, name ); + char* pt = altname + len; + pt[0] = '.'; + pt[1] = 't'; + pt[2] = 'g'; + pt[3] = 'a'; + pt[4] = '\0'; + + LoadTGA( altname, pic, width, height ); + + if (NULL == *pic) + { + // try jpg in place of tga + pt[0] = '.'; + pt[1] = 'j'; + pt[2] = 'p'; + pt[3] = 'g'; + pt[4] = '\0'; + + LoadJPG( altname, pic, width, height ); + } + + if( *pic == NULL ) + { + ri.Printf( PRINT_WARNING, "%s not present.\n", name); + } + // else + // ri.Printf( PRINT_ALL, "%s without a extension, using %s instead. 
\n", name, altname); + + } +} + + + + +// ====================================== // +// ====================================== // + +typedef struct _TargaHeader { + unsigned char id_length, colormap_type, image_type; + unsigned short colormap_index, colormap_length; + unsigned char colormap_size; + unsigned short x_origin, y_origin, width, height; + unsigned char pixel_size, attributes; +} TargaHeader; + + + +static void LoadTGA( const char* name, unsigned char** pic, uint32_t* width, uint32_t* height) +{ + int columns, rows, numPixels; + unsigned char* pixbuf; + int row, column; + + char* buffer; + TargaHeader targa_header; + + *pic = NULL; + + // + // load the file + // + ri.FS_ReadFile(name, (void**)&buffer); + if (!buffer) { + return; + } + + char* buf_p = buffer; + + targa_header.id_length = *buf_p++; + targa_header.colormap_type = *buf_p++; + targa_header.image_type = *buf_p++; + + targa_header.colormap_index = LittleShort ( *(short *)buf_p ); + buf_p += 2; + targa_header.colormap_length = LittleShort ( *(short *)buf_p ); + buf_p += 2; + targa_header.colormap_size = *buf_p++; + targa_header.x_origin = LittleShort ( *(short *)buf_p ); + buf_p += 2; + targa_header.y_origin = LittleShort ( *(short *)buf_p ); + buf_p += 2; + targa_header.width = LittleShort ( *(short *)buf_p ); + buf_p += 2; + targa_header.height = LittleShort ( *(short *)buf_p ); + buf_p += 2; + targa_header.pixel_size = *buf_p++; + targa_header.attributes = *buf_p++; + + if (targa_header.image_type!=2 + && targa_header.image_type!=10 + && targa_header.image_type != 3 ) + { + ri.Error (ERR_DROP, "LoadTGA: Only type 2 (RGB), 3 (gray), and 10 (RGB) TGA images supported\n"); + } + + if ( targa_header.colormap_type != 0 ) + { + ri.Error( ERR_DROP, "LoadTGA: colormaps not supported\n" ); + } + + if ( ( targa_header.pixel_size != 32 && targa_header.pixel_size != 24 ) && targa_header.image_type != 3 ) + { + ri.Error (ERR_DROP, "LoadTGA: Only 32 or 24 bit images supported (no colormaps)\n"); + } + + columns = targa_header.width; + rows = targa_header.height; + numPixels = columns * rows; + + if (width) + *width = columns; + if (height) + *height = rows; + + unsigned char* targa_rgba = (unsigned char*) ri.Malloc (numPixels*4); + *pic = targa_rgba; + + if (targa_header.id_length != 0) + buf_p += targa_header.id_length; // skip TARGA image comment + + if ( targa_header.image_type==2 || targa_header.image_type == 3 ) + { + // Uncompressed RGB or gray scale image + for(row=rows-1; row>=0; row--) + { + pixbuf = targa_rgba + row*columns*4; + for(column=0; column=0; row--) { + pixbuf = targa_rgba + row*columns*4; + for(column=0; column0) + row--; + else + goto breakOut; + pixbuf = targa_rgba + row*columns*4; + } + } + } + else { // non run-length packet + for(j=0;j0) + row--; + else + goto breakOut; + pixbuf = targa_rgba + row*columns*4; + } + } + } + } + breakOut:; + } + } + +#if 0 + // TTimo: this is the chunk of code to ensure a behavior that meets TGA specs + // bk0101024 - fix from Leonardo + // bit 5 set => top-down + if (targa_header.attributes & 0x20) { + unsigned char *flip = (unsigned char*)malloc (columns*4); + unsigned char *src, *dst; + + for (row = 0; row < rows/2; row++) { + src = targa_rgba + row * 4 * columns; + dst = targa_rgba + (rows - row - 1) * 4 * columns; + + memcpy (flip, src, columns*4); + memcpy (src, dst, columns*4); + memcpy (dst, flip, columns*4); + } + free (flip); + } +#endif + // instead we just print a warning + if (targa_header.attributes & 0x20) { + ri.Printf( PRINT_WARNING, "WARNING: '%s' TGA file 
header declares top-down image, ignoring\n", name); + } + + ri.FS_FreeFile (buffer); +} + + +static void LoadJPG( const char* name, unsigned char** pic, uint32_t* width, uint32_t* height) +{ + char* fbuffer; + int len = ri.FS_ReadFile(name, (void**)&fbuffer); + if (!fbuffer) { + return; + } + + int components; + *pic = stbi_load_from_memory((unsigned char*)fbuffer, len, (int*)width, (int*)height, &components, STBI_rgb_alpha); + if (*pic == NULL) { + ri.FS_FreeFile(fbuffer); + return; + } + + // clear all the alphas to 255 + { + unsigned int i; + unsigned char* buf = *pic; + + unsigned int nBytes = 4 * (*width) * (*height); + for (i = 3; i < nBytes; i += 4) + { + buf[i] = 255; + } + } + ri.FS_FreeFile(fbuffer); +} + + + +/* +========================================================= + +BMP LOADING + +========================================================= +*/ +typedef struct +{ + char id[2]; + unsigned long fileSize; + unsigned long reserved0; + unsigned long bitmapDataOffset; + unsigned long bitmapHeaderSize; + unsigned long width; + unsigned long height; + unsigned short planes; + unsigned short bitsPerPixel; + unsigned long compression; + unsigned long bitmapDataSize; + unsigned long hRes; + unsigned long vRes; + unsigned long colors; + unsigned long importantColors; + unsigned char palette[256][4]; +} BMPHeader_t; + + + + +static void LoadBMP( const char *name, unsigned char **pic, uint32_t *width, uint32_t *height ) +{ + int columns, rows, numPixels; + unsigned char *pixbuf; + int row, column; + char* buf_p; + char* buffer; + + int length; + BMPHeader_t bmpHeader; + unsigned char *bmpRGBA; + + *pic = NULL; + + // + // load the file + // + length = ri.FS_ReadFile(name, (void**)&buffer); + if (!buffer) { + return; + } + + buf_p = buffer; + + bmpHeader.id[0] = *buf_p++; + bmpHeader.id[1] = *buf_p++; + bmpHeader.fileSize = LittleLong( * ( long * ) buf_p ); + buf_p += 4; + bmpHeader.reserved0 = LittleLong( * ( long * ) buf_p ); + buf_p += 4; + bmpHeader.bitmapDataOffset = LittleLong( * ( long * ) buf_p ); + buf_p += 4; + bmpHeader.bitmapHeaderSize = LittleLong( * ( long * ) buf_p ); + buf_p += 4; + bmpHeader.width = LittleLong( * ( long * ) buf_p ); + buf_p += 4; + bmpHeader.height = LittleLong( * ( long * ) buf_p ); + buf_p += 4; + bmpHeader.planes = LittleShort( * ( short * ) buf_p ); + buf_p += 2; + bmpHeader.bitsPerPixel = LittleShort( * ( short * ) buf_p ); + buf_p += 2; + bmpHeader.compression = LittleLong( * ( long * ) buf_p ); + buf_p += 4; + bmpHeader.bitmapDataSize = LittleLong( * ( long * ) buf_p ); + buf_p += 4; + bmpHeader.hRes = LittleLong( * ( long * ) buf_p ); + buf_p += 4; + bmpHeader.vRes = LittleLong( * ( long * ) buf_p ); + buf_p += 4; + bmpHeader.colors = LittleLong( * ( long * ) buf_p ); + buf_p += 4; + bmpHeader.importantColors = LittleLong( * ( long * ) buf_p ); + buf_p += 4; + + memcpy( bmpHeader.palette, buf_p, sizeof( bmpHeader.palette ) ); + + if ( bmpHeader.bitsPerPixel == 8 ) + buf_p += 1024; + + if ( bmpHeader.id[0] != 'B' && bmpHeader.id[1] != 'M' ) + { + ri.Error( ERR_DROP, "LoadBMP: only Windows-style BMP files supported (%s)\n", name ); + } + if ( bmpHeader.fileSize != length ) + { + ri.Error( ERR_DROP, "LoadBMP: header size does not match file size (%ld vs. 
%d) (%s)\n", bmpHeader.fileSize, length, name ); + } + if ( bmpHeader.compression != 0 ) + { + ri.Error( ERR_DROP, "LoadBMP: only uncompressed BMP files supported (%s)\n", name ); + } + if ( bmpHeader.bitsPerPixel < 8 ) + { + ri.Error( ERR_DROP, "LoadBMP: monochrome and 4-bit BMP files not supported (%s)\n", name ); + } + + columns = bmpHeader.width; + rows = bmpHeader.height; + if ( rows < 0 ) + rows = -rows; + numPixels = columns * rows; + + if ( width ) + *width = columns; + if ( height ) + *height = rows; + + bmpRGBA = (unsigned char*) ri.Malloc( numPixels * 4 ); + *pic = bmpRGBA; + + + for ( row = rows-1; row >= 0; row-- ) + { + pixbuf = bmpRGBA + row*columns*4; + + for ( column = 0; column < columns; column++ ) + { + unsigned char red, green, blue, alpha; + int palIndex; + unsigned short shortPixel; + + switch ( bmpHeader.bitsPerPixel ) + { + case 8: + palIndex = *buf_p++; + *pixbuf++ = bmpHeader.palette[palIndex][2]; + *pixbuf++ = bmpHeader.palette[palIndex][1]; + *pixbuf++ = bmpHeader.palette[palIndex][0]; + *pixbuf++ = 0xff; + break; + case 16: + shortPixel = * ( unsigned short * ) pixbuf; + pixbuf += 2; + *pixbuf++ = ( shortPixel & ( 31 << 10 ) ) >> 7; + *pixbuf++ = ( shortPixel & ( 31 << 5 ) ) >> 2; + *pixbuf++ = ( shortPixel & ( 31 ) ) << 3; + *pixbuf++ = 0xff; + break; + + case 24: + blue = *buf_p++; + green = *buf_p++; + red = *buf_p++; + *pixbuf++ = red; + *pixbuf++ = green; + *pixbuf++ = blue; + *pixbuf++ = 255; + break; + case 32: + blue = *buf_p++; + green = *buf_p++; + red = *buf_p++; + alpha = *buf_p++; + *pixbuf++ = red; + *pixbuf++ = green; + *pixbuf++ = blue; + *pixbuf++ = alpha; + break; + default: + ri.Error( ERR_DROP, "LoadBMP: illegal pixel_size '%d' in file '%s'\n", bmpHeader.bitsPerPixel, name ); + break; + } + } + } + + ri.FS_FreeFile( buffer ); + +} + + +typedef struct { + char manufacturer; + char version; + char encoding; + char bits_per_pixel; + unsigned short xmin,ymin,xmax,ymax; + unsigned short hres,vres; + unsigned char palette[48]; + char reserved; + char color_planes; + unsigned short bytes_per_line; + unsigned short palette_type; + char filler[58]; + char data; // unbounded +} pcx_t; + +static void LoadPCX ( const char *filename, unsigned char **pic, unsigned char **palette, uint32_t *width, uint32_t *height) +{ + char* raw; + pcx_t *pcx; + int x, y; + int len; + int dataByte, runLength; + unsigned char *out, *pix; + int xmax, ymax; + + *pic = NULL; + *palette = NULL; + + // + // load the file + // + len = ri.FS_ReadFile(filename, (void**)&raw); + if (!raw) { + return; + } + + // + // parse the PCX file + // + pcx = (pcx_t *)raw; + raw = &pcx->data; + + xmax = LittleShort(pcx->xmax); + ymax = LittleShort(pcx->ymax); + + if (pcx->manufacturer != 0x0a + || pcx->version != 5 + || pcx->encoding != 1 + || pcx->bits_per_pixel != 8 + || xmax >= 1024 + || ymax >= 1024) + { + ri.Printf (PRINT_ALL, "Bad pcx file %s (%i x %i) (%i x %i)\n", filename, xmax+1, ymax+1, pcx->xmax, pcx->ymax); + return; + } + + out = (unsigned char*) ri.Malloc ( (ymax+1) * (xmax+1) ); + + *pic = out; + + pix = out; + + if (palette) + { + *palette = (unsigned char*) ri.Malloc(768); + memcpy (*palette, (unsigned char *)pcx + len - 768, 768); + } + + if (width) + *width = xmax+1; + if (height) + *height = ymax+1; +// FIXME: use bytes_per_line here? 
+ + for (y=0 ; y<=ymax ; y++, pix += xmax+1) + { + for (x=0 ; x<=xmax ; ) + { + dataByte = *raw++; + + if((dataByte & 0xC0) == 0xC0) + { + runLength = dataByte & 0x3F; + dataByte = *raw++; + } + else + runLength = 1; + + while(runLength-- > 0) + pix[x++] = dataByte; + } + + } + + if ( raw - (char *)pcx > len) + { + ri.Printf (PRINT_DEVELOPER, "PCX file %s was malformed", filename); + ri.Free (*pic); + *pic = NULL; + } + + ri.FS_FreeFile (pcx); +} + + +static void LoadPCX32 ( const char *filename, unsigned char **pic, uint32_t *width, uint32_t *height) +{ + unsigned char *palette; + unsigned char *pic8; + int i, c, p; + unsigned char *pic32; + + LoadPCX (filename, &pic8, &palette, width, height); + if (!pic8) { + *pic = NULL; + return; + } + + c = (*width) * (*height); + pic32 = *pic = (unsigned char*) ri.Malloc(4 * c ); + for (i = 0 ; i < c ; i++) { + p = pic8[i]; + pic32[0] = palette[p*3]; + pic32[1] = palette[p*3 + 1]; + pic32[2] = palette[p*3 + 2]; + pic32[3] = 255; + pic32 += 4; + } + + ri.Free (pic8); + ri.Free (palette); +} diff --git a/code/renderer_vulkan/R_LoadMD3.c b/code/renderer_vulkan/R_LoadMD3.c new file mode 100644 index 0000000000..b861332556 --- /dev/null +++ b/code/renderer_vulkan/R_LoadMD3.c @@ -0,0 +1,270 @@ + +#include "tr_local.h" +#include "tr_model.h" +#include "ref_import.h" +#include "tr_shader.h" +#define LL(x) x=LittleLong(x) + + +static qboolean R_LoadMD3 (model_t *mod, int lod, void *buffer, const char *mod_name ) +{ + int i, j; + md3Header_t *pinmodel; + md3Frame_t *frame; + md3Surface_t *surf; + md3Shader_t *shader; + md3Triangle_t *tri; + md3St_t *st; + md3XyzNormal_t *xyz; + md3Tag_t *tag; + int version; + int size; + + pinmodel = (md3Header_t *)buffer; + + version = LittleLong (pinmodel->version); + if (version != MD3_VERSION) { + ri.Printf( PRINT_WARNING, "R_LoadMD3: %s has wrong version (%i should be %i)\n", + mod_name, version, MD3_VERSION); + return qfalse; + } + + mod->type = MOD_MESH; + size = LittleLong(pinmodel->ofsEnd); + mod->dataSize += size; + mod->md3[lod] = ri.Hunk_Alloc( size, h_low ); + + memcpy (mod->md3[lod], buffer, LittleLong(pinmodel->ofsEnd) ); + + LL(mod->md3[lod]->ident); + LL(mod->md3[lod]->version); + LL(mod->md3[lod]->numFrames); + LL(mod->md3[lod]->numTags); + LL(mod->md3[lod]->numSurfaces); + LL(mod->md3[lod]->ofsFrames); + LL(mod->md3[lod]->ofsTags); + LL(mod->md3[lod]->ofsSurfaces); + LL(mod->md3[lod]->ofsEnd); + + if ( mod->md3[lod]->numFrames < 1 ) { + ri.Printf( PRINT_WARNING, "R_LoadMD3: %s has no frames\n", mod_name ); + return qfalse; + } + + // swap all the frames + frame = (md3Frame_t *) ( (byte *)mod->md3[lod] + mod->md3[lod]->ofsFrames ); + for ( i = 0 ; i < mod->md3[lod]->numFrames ; i++, frame++) { + frame->radius = LittleFloat( frame->radius ); + for ( j = 0 ; j < 3 ; j++ ) { + frame->bounds[0][j] = LittleFloat( frame->bounds[0][j] ); + frame->bounds[1][j] = LittleFloat( frame->bounds[1][j] ); + frame->localOrigin[j] = LittleFloat( frame->localOrigin[j] ); + } + } + + // swap all the tags + tag = (md3Tag_t *) ( (byte *)mod->md3[lod] + mod->md3[lod]->ofsTags ); + for ( i = 0 ; i < mod->md3[lod]->numTags * mod->md3[lod]->numFrames ; i++, tag++) { + for ( j = 0 ; j < 3 ; j++ ) { + tag->origin[j] = LittleFloat( tag->origin[j] ); + tag->axis[0][j] = LittleFloat( tag->axis[0][j] ); + tag->axis[1][j] = LittleFloat( tag->axis[1][j] ); + tag->axis[2][j] = LittleFloat( tag->axis[2][j] ); + } + } + + // swap all the surfaces + surf = (md3Surface_t *) ( (byte *)mod->md3[lod] + mod->md3[lod]->ofsSurfaces ); + for ( i = 0 ; i 
< mod->md3[lod]->numSurfaces ; i++) { + + LL(surf->ident); + LL(surf->flags); + LL(surf->numFrames); + LL(surf->numShaders); + LL(surf->numTriangles); + LL(surf->ofsTriangles); + LL(surf->numVerts); + LL(surf->ofsShaders); + LL(surf->ofsSt); + LL(surf->ofsXyzNormals); + LL(surf->ofsEnd); + + if ( surf->numVerts >= SHADER_MAX_VERTEXES ) { + ri.Printf(PRINT_WARNING, "R_LoadMD3: %s has more than %i verts on %s (%i).\n", + mod_name, SHADER_MAX_VERTEXES - 1, surf->name[0] ? surf->name : "a surface", + surf->numVerts ); + return qfalse; + } + if ( surf->numTriangles*3 >= SHADER_MAX_INDEXES ) { + ri.Printf(PRINT_WARNING, "R_LoadMD3: %s has more than %i triangles on %s (%i).\n", + mod_name, ( SHADER_MAX_INDEXES / 3 ) - 1, surf->name[0] ? surf->name : "a surface", + surf->numTriangles ); + return qfalse; + } + + // change to surface identifier + surf->ident = SF_MD3; + + // lowercase the surface name so skin compares are faster + Q_strlwr( surf->name ); + + // strip off a trailing _1 or _2 + // this is a crutch for q3data being a mess + j = strlen( surf->name ); + if ( j > 2 && surf->name[j-2] == '_' ) { + surf->name[j-2] = 0; + } + + // register the shaders + shader = (md3Shader_t *) ( (byte *)surf + surf->ofsShaders ); + for ( j = 0 ; j < surf->numShaders ; j++, shader++ ) { + shader_t* sh = R_FindShader( shader->name, LIGHTMAP_NONE, qtrue ); + + if ( sh->defaultShader ) { + shader->shaderIndex = 0; + } else { + shader->shaderIndex = sh->index; + } + } + + // swap all the triangles + tri = (md3Triangle_t *) ( (byte *)surf + surf->ofsTriangles ); + for ( j = 0 ; j < surf->numTriangles ; j++, tri++ ) { + LL(tri->indexes[0]); + LL(tri->indexes[1]); + LL(tri->indexes[2]); + } + + // swap all the ST + st = (md3St_t *) ( (byte *)surf + surf->ofsSt ); + for ( j = 0 ; j < surf->numVerts ; j++, st++ ) { + st->st[0] = LittleFloat( st->st[0] ); + st->st[1] = LittleFloat( st->st[1] ); + } + + // swap all the XyzNormals + xyz = (md3XyzNormal_t *) ( (byte *)surf + surf->ofsXyzNormals ); + for ( j = 0 ; j < surf->numVerts * surf->numFrames ; j++, xyz++ ) + { + xyz->xyz[0] = LittleShort( xyz->xyz[0] ); + xyz->xyz[1] = LittleShort( xyz->xyz[1] ); + xyz->xyz[2] = LittleShort( xyz->xyz[2] ); + + xyz->normal = LittleShort( xyz->normal ); + } + + + // find the next surface + surf = (md3Surface_t *)( (byte *)surf + surf->ofsEnd ); + } + + return qtrue; +} + + + +qhandle_t R_RegisterMD3(const char* name, model_t* mod) +{ + + char* buf; + int numLoaded = 0; + + ri.FS_ReadFile( name, (void**)&buf ); + + if( NULL != buf) + { + qboolean loaded = qfalse; + +#if defined( Q3_BIG_ENDIAN ) + int ident = LittleLong(*(int *)buf); +#else + int ident = *(int *)buf; +#endif + if (ident == MD3_IDENT) + loaded = R_LoadMD3(mod, 0, buf, name); + else + ri.Printf(PRINT_WARNING,"R_RegisterMD3: unknown fileid for %s\n", name); + + ri.FS_FreeFile(buf); + + if(loaded) + { + mod->numLods++; + numLoaded++; + } + else + { + ri.Printf(PRINT_WARNING, "R_RegisterMD3: couldn't load %s\n", name); + + mod->type = MOD_BAD; + return 0; + } + } + else + { + ri.Printf(PRINT_WARNING, "R_RegisterMD3: failed loading %s from disk. 
\n", name); + } + + + char filename[MAX_QPATH] = {0}; + strcpy(filename, name); + char* const dot = strrchr(filename, '.'); + *dot = 0; + + uint32_t lod; + for (lod = 1; lod < MD3_MAX_LODS; lod++) + { + qboolean loaded = qfalse; + + char namebuf[MAX_QPATH+20] = {0}; + snprintf(namebuf, sizeof(namebuf), "%s_%d.md3", filename, lod); + + ri.FS_ReadFile( namebuf, (void**)&buf ); + if(!buf) + continue; + +#if defined( Q3_BIG_ENDIAN ) + int ident = LittleLong(*(int *)buf); +#else + int ident = *(int *)buf; +#endif + if (ident == MD3_IDENT) + loaded = R_LoadMD3(mod, lod, buf, name); + else + ri.Printf(PRINT_WARNING, "R_RegisterMD3: unknown fileid for %s\n", name); + + ri.FS_FreeFile(buf); + + if(loaded) + { + mod->numLods++; + numLoaded++; + } + else + break; + } + + if(numLoaded) + return mod->index; + else + ri.Printf(PRINT_WARNING, "R_RegisterMD3: couldn't load %s\n", name); + + +/* + if(numLoaded) + { + // duplicate into higher lod spots that weren't loaded, + // in case the user changes r_lodbias on the fly + for(lod--; lod >= 0; lod--) + { + mod->numLods++; + mod->md3[lod] = mod->md3[lod + 1]; + } + + return mod->index; + } +*/ + + mod->type = MOD_BAD; + return 0; +} diff --git a/code/renderer_vulkan/R_LoadMDR.c b/code/renderer_vulkan/R_LoadMDR.c new file mode 100644 index 0000000000..a2e47f61b5 --- /dev/null +++ b/code/renderer_vulkan/R_LoadMDR.c @@ -0,0 +1,527 @@ +#include "tr_local.h" +#include "tr_model.h" +#include "ref_import.h" +#include "tr_shader.h" + +#define LL(x) x=LittleLong(x) + + +/* +============================================================= + +UNCOMPRESSING BONES + +============================================================= +*/ + +#define MC_BITS_X (16) +#define MC_BITS_Y (16) +#define MC_BITS_Z (16) +#define MC_BITS_VECT (16) + +#define MC_SCALE_X (1.0f/64) +#define MC_SCALE_Y (1.0f/64) +#define MC_SCALE_Z (1.0f/64) + +#define MC_MASK_X ((1<<(MC_BITS_X))-1) +#define MC_MASK_Y ((1<<(MC_BITS_Y))-1) +#define MC_MASK_Z ((1<<(MC_BITS_Z))-1) +#define MC_MASK_VECT ((1<<(MC_BITS_VECT))-1) + +#define MC_SCALE_VECT (1.0f/(float)((1<<(MC_BITS_VECT-1))-2)) + +#define MC_POS_X (0) +#define MC_SHIFT_X (0) + +#define MC_POS_Y ((((MC_BITS_X))/8)) +#define MC_SHIFT_Y ((((MC_BITS_X)%8))) + +#define MC_POS_Z ((((MC_BITS_X+MC_BITS_Y))/8)) +#define MC_SHIFT_Z ((((MC_BITS_X+MC_BITS_Y)%8))) + +#define MC_POS_V11 ((((MC_BITS_X+MC_BITS_Y+MC_BITS_Z))/8)) +#define MC_SHIFT_V11 ((((MC_BITS_X+MC_BITS_Y+MC_BITS_Z)%8))) + +#define MC_POS_V12 ((((MC_BITS_X+MC_BITS_Y+MC_BITS_Z+MC_BITS_VECT))/8)) +#define MC_SHIFT_V12 ((((MC_BITS_X+MC_BITS_Y+MC_BITS_Z+MC_BITS_VECT)%8))) + +#define MC_POS_V13 ((((MC_BITS_X+MC_BITS_Y+MC_BITS_Z+MC_BITS_VECT*2))/8)) +#define MC_SHIFT_V13 ((((MC_BITS_X+MC_BITS_Y+MC_BITS_Z+MC_BITS_VECT*2)%8))) + +#define MC_POS_V21 ((((MC_BITS_X+MC_BITS_Y+MC_BITS_Z+MC_BITS_VECT*3))/8)) +#define MC_SHIFT_V21 ((((MC_BITS_X+MC_BITS_Y+MC_BITS_Z+MC_BITS_VECT*3)%8))) + +#define MC_POS_V22 ((((MC_BITS_X+MC_BITS_Y+MC_BITS_Z+MC_BITS_VECT*4))/8)) +#define MC_SHIFT_V22 ((((MC_BITS_X+MC_BITS_Y+MC_BITS_Z+MC_BITS_VECT*4)%8))) + +#define MC_POS_V23 ((((MC_BITS_X+MC_BITS_Y+MC_BITS_Z+MC_BITS_VECT*5))/8)) +#define MC_SHIFT_V23 ((((MC_BITS_X+MC_BITS_Y+MC_BITS_Z+MC_BITS_VECT*5)%8))) + +#define MC_POS_V31 ((((MC_BITS_X+MC_BITS_Y+MC_BITS_Z+MC_BITS_VECT*6))/8)) +#define MC_SHIFT_V31 ((((MC_BITS_X+MC_BITS_Y+MC_BITS_Z+MC_BITS_VECT*6)%8))) + +#define MC_POS_V32 ((((MC_BITS_X+MC_BITS_Y+MC_BITS_Z+MC_BITS_VECT*7))/8)) +#define MC_SHIFT_V32 ((((MC_BITS_X+MC_BITS_Y+MC_BITS_Z+MC_BITS_VECT*7)%8))) + +#define MC_POS_V33 
((((MC_BITS_X+MC_BITS_Y+MC_BITS_Z+MC_BITS_VECT*8))/8)) +#define MC_SHIFT_V33 ((((MC_BITS_X+MC_BITS_Y+MC_BITS_Z+MC_BITS_VECT*8)%8))) + +void MC_UnCompress(float mat[3][4], const unsigned char* comp) +{ + int val; + + val=(int)((unsigned short *)(comp))[0]; + val-=1<<(MC_BITS_X-1); + mat[0][3]=((float)(val))*MC_SCALE_X; + + val=(int)((unsigned short *)(comp))[1]; + val-=1<<(MC_BITS_Y-1); + mat[1][3]=((float)(val))*MC_SCALE_Y; + + val=(int)((unsigned short *)(comp))[2]; + val-=1<<(MC_BITS_Z-1); + mat[2][3]=((float)(val))*MC_SCALE_Z; + + val=(int)((unsigned short *)(comp))[3]; + val-=1<<(MC_BITS_VECT-1); + mat[0][0]=((float)(val))*MC_SCALE_VECT; + + val=(int)((unsigned short *)(comp))[4]; + val-=1<<(MC_BITS_VECT-1); + mat[0][1]=((float)(val))*MC_SCALE_VECT; + + val=(int)((unsigned short *)(comp))[5]; + val-=1<<(MC_BITS_VECT-1); + mat[0][2]=((float)(val))*MC_SCALE_VECT; + + + val=(int)((unsigned short *)(comp))[6]; + val-=1<<(MC_BITS_VECT-1); + mat[1][0]=((float)(val))*MC_SCALE_VECT; + + val=(int)((unsigned short *)(comp))[7]; + val-=1<<(MC_BITS_VECT-1); + mat[1][1]=((float)(val))*MC_SCALE_VECT; + + val=(int)((unsigned short *)(comp))[8]; + val-=1<<(MC_BITS_VECT-1); + mat[1][2]=((float)(val))*MC_SCALE_VECT; + + + val=(int)((unsigned short *)(comp))[9]; + val-=1<<(MC_BITS_VECT-1); + mat[2][0]=((float)(val))*MC_SCALE_VECT; + + val=(int)((unsigned short *)(comp))[10]; + val-=1<<(MC_BITS_VECT-1); + mat[2][1]=((float)(val))*MC_SCALE_VECT; + + val=(int)((unsigned short *)(comp))[11]; + val-=1<<(MC_BITS_VECT-1); + mat[2][2]=((float)(val))*MC_SCALE_VECT; +} + + + +qboolean R_LoadMDR( model_t *mod, void *buffer, int filesize, const char *mod_name ) +{ + int i, j, k, l; + mdrHeader_t *pinmodel, *mdr; + mdrFrame_t *frame; + mdrLOD_t *lod, *curlod; + mdrSurface_t *surf, *cursurf; + mdrTriangle_t *tri, *curtri; + mdrVertex_t *v, *curv; + mdrWeight_t *weight, *curweight; + mdrTag_t *tag, *curtag; + int size; + shader_t *sh; + + pinmodel = (mdrHeader_t *)buffer; + + pinmodel->version = LittleLong(pinmodel->version); + if (pinmodel->version != MDR_VERSION) + { + ri.Printf(PRINT_WARNING, "R_LoadMDR: %s has wrong version (%i should be %i)\n", mod_name, pinmodel->version, MDR_VERSION); + return qfalse; + } + + size = LittleLong(pinmodel->ofsEnd); + + if(size > filesize) + { + ri.Printf(PRINT_WARNING, "R_LoadMDR: Header of %s is broken. Wrong filesize declared!\n", mod_name); + return qfalse; + } + + mod->type = MOD_MDR; + + LL(pinmodel->numFrames); + LL(pinmodel->numBones); + LL(pinmodel->ofsFrames); + + // This is a model that uses some type of compressed Bones. We don't want to uncompress every bone for each rendered frame + // over and over again, we'll uncompress it in this function already, so we must adjust the size of the target mdr. + if(pinmodel->ofsFrames < 0) + { + // mdrFrame_t is larger than mdrCompFrame_t: + size += pinmodel->numFrames * sizeof(frame->name); + // now add enough space for the uncompressed bones. + size += pinmodel->numFrames * pinmodel->numBones * ((sizeof(mdrBone_t) - sizeof(mdrCompBone_t))); + } + + // simple bounds check + if(pinmodel->numBones < 0 || + sizeof(*mdr) + pinmodel->numFrames * (sizeof(*frame) + (pinmodel->numBones - 1) * sizeof(*frame->bones)) > size) + { + ri.Printf(PRINT_WARNING, "R_LoadMDR: %s has broken structure.\n", mod_name); + return qfalse; + } + + mod->dataSize += size; + mod->modelData = mdr = ri.Hunk_Alloc( size, h_low ); + + // Copy all the values over from the file and fix endian issues in the process, if necessary. 
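+	// On little-endian hosts the LittleLong/LittleShort wrappers compile to
+	// no-ops, so this field-by-field copy doubles as the byte-swap pass for
+	// big-endian builds.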
+ + mdr->ident = LittleLong(pinmodel->ident); + mdr->version = pinmodel->version; // Don't need to swap byte order on this one, we already did above. + Q_strncpyz(mdr->name, pinmodel->name, sizeof(mdr->name)); + mdr->numFrames = pinmodel->numFrames; + mdr->numBones = pinmodel->numBones; + mdr->numLODs = LittleLong(pinmodel->numLODs); + mdr->numTags = LittleLong(pinmodel->numTags); + // We don't care about the other offset values, we'll generate them ourselves while loading. + + mod->numLods = mdr->numLODs; + + if ( mdr->numFrames < 1 ) + { + ri.Printf(PRINT_WARNING, "R_LoadMDR: %s has no frames\n", mod_name); + return qfalse; + } + + /* The first frame will be put into the first free space after the header */ + frame = (mdrFrame_t *)(mdr + 1); + mdr->ofsFrames = (int)((byte *) frame - (byte *) mdr); + + if (pinmodel->ofsFrames < 0) + { + mdrCompFrame_t *cframe; + + // compressed model... + cframe = (mdrCompFrame_t *)((byte *) pinmodel - pinmodel->ofsFrames); + + for(i = 0; i < mdr->numFrames; i++) + { + for(j = 0; j < 3; j++) + { + frame->bounds[0][j] = LittleFloat(cframe->bounds[0][j]); + frame->bounds[1][j] = LittleFloat(cframe->bounds[1][j]); + frame->localOrigin[j] = LittleFloat(cframe->localOrigin[j]); + } + + frame->radius = LittleFloat(cframe->radius); + frame->name[0] = '\0'; // No name supplied in the compressed version. + + for(j = 0; j < mdr->numBones; j++) + { +#if defined( Q3_BIG_ENDIAN ) + for(k = 0; k < (sizeof(cframe->bones[j].Comp) / 2); k++) + { + // Do swapping for the uncompressing functions. They seem to use shorts + // values only, so I assume this will work. Never tested it on other + // platforms, though. + + ((unsigned short *)(cframe->bones[j].Comp))[k] = + LittleShort( ((unsigned short *)(cframe->bones[j].Comp))[k] ); + } +#endif + /* Now do the actual uncompressing */ + MC_UnCompress(frame->bones[j].matrix, cframe->bones[j].Comp); + } + + // Next Frame... + // cframe = (mdrCompFrame_t *) &cframe->bones[j]; + // frame = (mdrFrame_t *) &frame->bones[j]; + // this suppress GCC strict-aliasing warning + { + // Next Frame... + mdrCompBone_t *p = &(cframe->bones[j]); + cframe = (mdrCompFrame_t *) p; + } + + { + mdrBone_t *p = &frame->bones[j]; + frame = (mdrFrame_t *) p; + } + } + } + else + { + mdrFrame_t *curframe; + + // uncompressed model... + // + + curframe = (mdrFrame_t *)((byte *) pinmodel + pinmodel->ofsFrames); + + // swap all the frames + for ( i = 0 ; i < mdr->numFrames ; i++) + { + for(j = 0; j < 3; j++) + { + frame->bounds[0][j] = LittleFloat(curframe->bounds[0][j]); + frame->bounds[1][j] = LittleFloat(curframe->bounds[1][j]); + frame->localOrigin[j] = LittleFloat(curframe->localOrigin[j]); + } + + frame->radius = LittleFloat(curframe->radius); + Q_strncpyz(frame->name, curframe->name, sizeof(frame->name)); + + // suppress GCC strict-aliasing warning +#if defined( Q3_BIG_ENDIAN ) + for (j = 0; j < (int) (mdr->numBones * sizeof(mdrBone_t) / 4); j++) + { + ((float *)frame->bones)[j]=FloatSwap(&((float *)curframe->bones)[j]); + } +#else + for (j = 0; j < mdr->numBones; j++) + { + frame->bones[j] = curframe->bones[j]; + } +#endif + + //curframe = (mdrFrame_t *) &curframe->bones[mdr->numBones]; + //frame = (mdrFrame_t *) &frame->bones[mdr->numBones]; + // suppress GCC strict-aliasing warning + { + mdrBone_t* p = &curframe->bones[mdr->numBones]; + curframe = (mdrFrame_t *) p; + } + + { + mdrBone_t* p = &frame->bones[mdr->numBones]; + frame = (mdrFrame_t *) p; + } + + } + } + + // frame should now point to the first free address after all frames. 
+ lod = (mdrLOD_t *) frame; + mdr->ofsLODs = (int) ((byte *) lod - (byte *)mdr); + + curlod = (mdrLOD_t *)((byte *) pinmodel + LittleLong(pinmodel->ofsLODs)); + + // swap all the LOD's + for ( l = 0 ; l < mdr->numLODs ; l++) + { + // simple bounds check + if((byte *) (lod + 1) > (byte *) mdr + size) + { + ri.Printf(PRINT_WARNING, "R_LoadMDR: %s has broken structure.\n", mod_name); + return qfalse; + } + + lod->numSurfaces = LittleLong(curlod->numSurfaces); + + // swap all the surfaces + surf = (mdrSurface_t *) (lod + 1); + lod->ofsSurfaces = (int)((byte *) surf - (byte *) lod); + cursurf = (mdrSurface_t *) ((byte *)curlod + LittleLong(curlod->ofsSurfaces)); + + for ( i = 0 ; i < lod->numSurfaces ; i++) + { + // simple bounds check + if((byte *) (surf + 1) > (byte *) mdr + size) + { + ri.Printf(PRINT_WARNING, "R_LoadMDR: %s has broken structure.\n", mod_name); + return qfalse; + } + + // first do some copying stuff + + surf->ident = SF_MDR; + Q_strncpyz(surf->name, cursurf->name, sizeof(surf->name)); + Q_strncpyz(surf->shader, cursurf->shader, sizeof(surf->shader)); + + surf->ofsHeader = (byte *) mdr - (byte *) surf; + + surf->numVerts = LittleLong(cursurf->numVerts); + surf->numTriangles = LittleLong(cursurf->numTriangles); + // numBoneReferences and BoneReferences generally seem to be unused + + // now do the checks that may fail. + if ( surf->numVerts >= SHADER_MAX_VERTEXES ) + { + ri.Printf(PRINT_WARNING, "R_LoadMDR: %s has more than %i verts on %s (%i).\n", + mod_name, SHADER_MAX_VERTEXES - 1, surf->name[0] ? surf->name : "a surface", + surf->numVerts ); + return qfalse; + } + if ( surf->numTriangles*3 >= SHADER_MAX_INDEXES ) + { + ri.Printf(PRINT_WARNING, "R_LoadMDR: %s has more than %i triangles on %s (%i).\n", + mod_name, ( SHADER_MAX_INDEXES / 3 ) - 1, surf->name[0] ? surf->name : "a surface", + surf->numTriangles ); + return qfalse; + } + // lowercase the surface name so skin compares are faster + Q_strlwr( surf->name ); + + // register the shaders + sh = R_FindShader(surf->shader, LIGHTMAP_NONE, qtrue); + if ( sh->defaultShader ) { + surf->shaderIndex = 0; + } else { + surf->shaderIndex = sh->index; + } + + // now copy the vertexes. 
+ v = (mdrVertex_t *) (surf + 1); + surf->ofsVerts = (int)((byte *) v - (byte *) surf); + curv = (mdrVertex_t *) ((byte *)cursurf + LittleLong(cursurf->ofsVerts)); + + for(j = 0; j < surf->numVerts; j++) + { + LL(curv->numWeights); + + // simple bounds check + if(curv->numWeights < 0 || (byte *) (v + 1) + (curv->numWeights - 1) * sizeof(*weight) > (byte *) mdr + size) + { + ri.Printf(PRINT_WARNING, "R_LoadMDR: %s has broken structure.\n", mod_name); + return qfalse; + } + + v->normal[0] = LittleFloat(curv->normal[0]); + v->normal[1] = LittleFloat(curv->normal[1]); + v->normal[2] = LittleFloat(curv->normal[2]); + + v->texCoords[0] = LittleFloat(curv->texCoords[0]); + v->texCoords[1] = LittleFloat(curv->texCoords[1]); + + v->numWeights = curv->numWeights; + weight = &v->weights[0]; + curweight = &curv->weights[0]; + + // Now copy all the weights + for(k = 0; k < v->numWeights; k++) + { + weight->boneIndex = LittleLong(curweight->boneIndex); + weight->boneWeight = LittleFloat(curweight->boneWeight); + + weight->offset[0] = LittleFloat(curweight->offset[0]); + weight->offset[1] = LittleFloat(curweight->offset[1]); + weight->offset[2] = LittleFloat(curweight->offset[2]); + + weight++; + curweight++; + } + + v = (mdrVertex_t *) weight; + curv = (mdrVertex_t *) curweight; + } + + // we know the offset to the triangles now: + tri = (mdrTriangle_t *) v; + surf->ofsTriangles = (int)((byte *) tri - (byte *) surf); + curtri = (mdrTriangle_t *)((byte *) cursurf + LittleLong(cursurf->ofsTriangles)); + + // simple bounds check + if(surf->numTriangles < 0 || (byte *) (tri + surf->numTriangles) > (byte *) mdr + size) + { + ri.Printf(PRINT_WARNING, "R_LoadMDR: %s has broken structure.\n", mod_name); + return qfalse; + } + + for(j = 0; j < surf->numTriangles; j++) + { + tri->indexes[0] = LittleLong(curtri->indexes[0]); + tri->indexes[1] = LittleLong(curtri->indexes[1]); + tri->indexes[2] = LittleLong(curtri->indexes[2]); + + tri++; + curtri++; + } + + // tri now points to the end of the surface. + surf->ofsEnd = (byte *) tri - (byte *) surf; + surf = (mdrSurface_t *) tri; + + // find the next surface. + cursurf = (mdrSurface_t *) ((byte *) cursurf + LittleLong(cursurf->ofsEnd)); + } + + // surf points to the next lod now. + lod->ofsEnd = (int)((byte *) surf - (byte *) lod); + lod = (mdrLOD_t *) surf; + + // find the next LOD. + curlod = (mdrLOD_t *)((byte *) curlod + LittleLong(curlod->ofsEnd)); + } + + // lod points to the first tag now, so update the offset too. + tag = (mdrTag_t *) lod; + mdr->ofsTags = (int)((byte *) tag - (byte *) mdr); + curtag = (mdrTag_t *) ((byte *)pinmodel + LittleLong(pinmodel->ofsTags)); + + // simple bounds check + if(mdr->numTags < 0 || (byte *) (tag + mdr->numTags) > (byte *) mdr + size) + { + ri.Printf(PRINT_WARNING, "R_LoadMDR: %s has broken structure.\n", mod_name); + return qfalse; + } + + for (i = 0 ; i < mdr->numTags ; i++) + { + tag->boneIndex = LittleLong(curtag->boneIndex); + Q_strncpyz(tag->name, curtag->name, sizeof(tag->name)); + + tag++; + curtag++; + } + + // And finally we know the real offset to the end. + mdr->ofsEnd = (int)((byte *) tag - (byte *) mdr); + + // phew! we're done. 
+
+ return qtrue;
+}
+
+
+qhandle_t R_RegisterMDR(const char *name, model_t *mod)
+{
+
+ char* buf;
+
+ qboolean loaded = qfalse;
+ int filesize = ri.FS_ReadFile(name, (void**)&buf);
+ if(!buf)
+ {
+ mod->type = MOD_BAD;
+ return 0;
+ }
+
+
+#if defined( Q3_BIG_ENDIAN )
+ int ident = LittleLong(*(int *)buf);
+#else
+ int ident = *(int *)buf;
+#endif
+
+
+ if(ident == MDR_IDENT)
+ loaded = R_LoadMDR(mod, buf, filesize, name);
+
+ ri.FS_FreeFile(buf);
+
+ if(!loaded)
+ {
+ ri.Printf(PRINT_WARNING,"R_RegisterMDR: couldn't load mdr file %s\n", name);
+ mod->type = MOD_BAD;
+ return 0;
+ }
+
+ return mod->index;
+} diff --git a/code/renderer_vulkan/R_ModelBounds.c b/code/renderer_vulkan/R_ModelBounds.c new file mode 100644 index 0000000000..111a71b3d3 --- /dev/null +++ b/code/renderer_vulkan/R_ModelBounds.c @@ -0,0 +1,48 @@ +#include "tr_local.h"
+#include "tr_model.h"
+
+
+void RE_ModelBounds( qhandle_t handle, vec3_t mins, vec3_t maxs )
+{
+ model_t* model = R_GetModelByHandle( handle );
+
+ if(model->type == MOD_BRUSH)
+ {
+ VectorCopy( model->bmodel->bounds[0], mins );
+ VectorCopy( model->bmodel->bounds[1], maxs );
+ return;
+ }
+ else if (model->type == MOD_MESH)
+ {
+ md3Header_t* header = model->md3[0];
+ md3Frame_t* frame = (md3Frame_t *) ((unsigned char *)header + header->ofsFrames);
+
+ VectorCopy( frame->bounds[0], mins );
+ VectorCopy( frame->bounds[1], maxs );
+ return;
+ }
+ else if (model->type == MOD_MDR)
+ {
+ mdrHeader_t* header = (mdrHeader_t *)model->modelData;
+ mdrFrame_t* frame = (mdrFrame_t *) ((unsigned char *)header + header->ofsFrames);
+
+ VectorCopy( frame->bounds[0], mins );
+ VectorCopy( frame->bounds[1], maxs );
+
+ return;
+ }
+ else if(model->type == MOD_IQM)
+ {
+ iqmData_t* iqmData = model->modelData;
+
+ if(iqmData->bounds)
+ {
+ VectorCopy(iqmData->bounds, mins);
+ VectorCopy(iqmData->bounds + 3, maxs);
+ return;
+ }
+ }
+
+ VectorClear( mins );
+ VectorClear( maxs );
+} diff --git a/code/renderer_vulkan/R_Parser.c b/code/renderer_vulkan/R_Parser.c new file mode 100644 index 0000000000..1b9c0c2875 --- /dev/null +++ b/code/renderer_vulkan/R_Parser.c @@ -0,0 +1,256 @@ +#include "tr_local.h"
+#include "ref_import.h"
+
+/*
+============================================================================
+
+ PARSING
+
+These parsing functions were split out of q_shared.c so that the renderer
+is self-contained and does not interfere with the game code.
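+The entry points are R_BeginParseSession()/R_GetCurrentParseLine() for error
+reporting, R_ParseExt() for tokenizing and R_Compress() for stripping comments.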
+
+============================================================================
+*/
+
+
+static char r_parsename[512];
+static int r_lines;
+static int r_tokenline;
+
+void R_BeginParseSession(const char* name)
+{
+ r_lines = 1;
+ r_tokenline = 0;
+ snprintf(r_parsename, sizeof(r_parsename), "%s", name);
+}
+
+int R_GetCurrentParseLine( void )
+{
+ if ( r_tokenline )
+ {
+ return r_tokenline;
+ }
+
+ return r_lines;
+}
+
+
+
+int R_Compress( char *data_p )
+{
+ qboolean newline = qfalse;
+ qboolean whitespace = qfalse;
+
+ char* in = data_p;
+ char* out = data_p;
+
+ if (in)
+ {
+ int c;
+ while ((c = *in) != 0)
+ {
+ // skip double slash comments
+ if ( c == '/' && in[1] == '/' )
+ {
+ while (*in && *in != '\n') {
+ in++;
+ }
+ // skip /* */ comments
+ }
+ else if ( c == '/' && in[1] == '*' ) {
+ while ( *in && ( *in != '*' || in[1] != '/' ) )
+ in++;
+ if ( *in )
+ in += 2;
+ // record when we hit a newline
+ }
+ else if ( c == '\n' || c == '\r' ) {
+ newline = qtrue;
+ in++;
+ // record when we hit whitespace
+ }
+ else if ( (c == ' ') || (c == '\t') )
+ {
+ whitespace = qtrue;
+ in++;
+ // an actual token
+ }
+ else
+ {
+ // if we have a pending newline, emit it (and it counts as whitespace)
+ if (newline)
+ {
+ *out++ = '\n';
+ newline = qfalse;
+ whitespace = qfalse;
+ }
+ if (whitespace)
+ {
+ *out++ = ' ';
+ whitespace = qfalse;
+ }
+
+ // copy quoted strings unmolested
+ if (c == '"') {
+ *out++ = c;
+ in++;
+ while (1) {
+ c = *in;
+ if (c && c != '"') {
+ *out++ = c;
+ in++;
+ } else {
+ break;
+ }
+ }
+ if (c == '"') {
+ *out++ = c;
+ in++;
+ }
+ } else {
+ *out = c;
+ out++;
+ in++;
+ }
+ }
+ }
+
+ *out = 0;
+ }
+ return out - data_p;
+}
+
+
+/*
+==============
+Parse a token out of a string.
+Will never return NULL, just empty strings.
+
+If "allowLineBreaks" is qfalse then an empty
+string will be returned if the next token is a newline.
+==============
+*/
+char* R_ParseExt(char** data_p, qboolean allowLineBreaks)
+{
+
+ unsigned int len = 0;
+ char *data = *data_p;
+
+ unsigned char c;
+ static char r_token[MAX_TOKEN_CHARS] = {0};
+ r_token[0] = 0;
+ r_tokenline = 0;
+
+ // make sure incoming data is valid
+ if( !data )
+ {
+ *data_p = NULL;
+ return r_token;
+ }
+
+
+ while( 1 )
+ {
+ // skip whitespace
+ //data = SkipWhitespace( data, &hasNewLines );
+
+ while( (c = *data) <= ' ')
+ {
+ if( c == '\n' )
+ {
+ r_lines++;
+ if( allowLineBreaks == qfalse )
+ {
+ *data_p = data;
+ return r_token;
+ }
+ }
+ else if( c == 0 )
+ {
+ *data_p = NULL;
+ return r_token;
+ }
+
+ data++;
+ }
+
+ // skip double slash comments
+ if(data[0] == '/')
+ {
+ if(data[1] == '/')
+ {
+ data += 2;
+ while (*data && (*data != '\n'))
+ {
+ data++;
+ }
+ }
+ else if( data[1] == '*' )
+ { // skip /* */ comments
+ data += 2;
+ // skip until the closing */, tolerating an unterminated comment
+ while( data[0] && ( (data[0] != '*') || (data[1] != '/') ) )
+ {
+ if ( data[0] == '\n' )
+ {
+ r_lines++;
+ }
+ data++;
+ }
+ if( data[0] )
+ data += 2;
+ }
+ else
+ break;
+ }
+ else
+ break;
+ }
+
+ // token starts on this line
+ r_tokenline = r_lines;
+
+ // handle quoted strings
+ if (c == '\"')
+ {
+ data++;
+ while (1)
+ {
+ c = *data++;
+ if (c == '\"' || !c)
+ {
+ r_token[len] = 0;
+ *data_p = data;
+ return r_token;
+ }
+ else if ( c == '\n' )
+ {
+ r_lines++;
+ }
+
+ if (len < MAX_TOKEN_CHARS - 1)
+ {
+ r_token[len] = c;
+ len++;
+ }
+ }
+ }
+
+ // parse a regular word
+ do
+ {
+ if (len < MAX_TOKEN_CHARS - 1)
+ {
+ r_token[len++] = c;
+ }
+
+ c = *(++data);
+ } while(c > ' ');
+
+ r_token[len] = 0;
+
+ *data_p = data;
+ return r_token;
+} diff --git a/code/renderer_vulkan/R_Parser.h b/code/renderer_vulkan/R_Parser.h new file mode 100644 index 0000000000..60140a7214 --- /dev/null +++ b/code/renderer_vulkan/R_Parser.h @@ -0,0 +1,10 @@ +#ifndef R_PARSER_H_
+#define R_PARSER_H_
+
+char* R_ParseExt(char** data_p, qboolean allowLineBreaks);
+int R_Compress( char *data_p );
+int R_GetCurrentParseLine( void );
+void R_BeginParseSession(const char* name);
+
+
+#endif diff --git a/code/renderer_vulkan/R_PortalPlane.c b/code/renderer_vulkan/R_PortalPlane.c new file mode 100644 index 0000000000..44fea6c13b --- /dev/null +++ b/code/renderer_vulkan/R_PortalPlane.c @@ -0,0 +1,31 @@ +#include "R_PortalPlane.h"
+
+static struct rplane_s g_portalPlane; // clip anything behind this if mirroring
+
+inline static float DotProduct( const float v1[3], const float v2[3] )
+{
+ return v1[0]*v2[0] + v1[1]*v2[1] + v1[2]*v2[2];
+}
+
+
+void R_SetupPortalPlane(const float axis[3][3], const float origin[3])
+{
+ // VectorSubtract( vec3_origin, pCamera->axis[0], g_portalPlane.normal );
+ // g_portalPlane.dist = DotProduct( pCamera->origin, g_portalPlane.normal );
+
+ g_portalPlane.normal[0] = - axis[0][0];
+ g_portalPlane.normal[1] = - axis[0][1];
+ g_portalPlane.normal[2] = - axis[0][2];
+ g_portalPlane.dist = - origin[0] * axis[0][0]
+ - origin[1] * axis[0][1]
+ - origin[2] * axis[0][2];
+}
+
+
+void R_TransformPlane(const float R[3][3], const float T[3], struct rplane_s* pDstPlane)
+{
+ pDstPlane->normal[0] = DotProduct (R[0], g_portalPlane.normal);
+ pDstPlane->normal[1] = DotProduct (R[1], g_portalPlane.normal);
+ pDstPlane->normal[2] = DotProduct (R[2], g_portalPlane.normal);
+ pDstPlane->dist = DotProduct (T, g_portalPlane.normal) - g_portalPlane.dist;
+} diff --git a/code/renderer_vulkan/R_PortalPlane.h b/code/renderer_vulkan/R_PortalPlane.h new file mode 100644 index 0000000000..7badefc318 --- /dev/null +++ b/code/renderer_vulkan/R_PortalPlane.h @@ -0,0 +1,16 @@ +#ifndef R_PORTAL_PLANE_H_
+#define R_PORTAL_PLANE_H_
+
+
+struct rplane_s {
+ float normal[3];
+ float dist;
+};
+
+// extern struct rplane_s g_portalPlane; // clip anything behind this if mirroring
+
+
+void R_SetupPortalPlane(const float axis[3][3], const float origin[3]);
+void R_TransformPlane(const float R[3][3], const float T[3], struct rplane_s* pDstPlane);
+
+#endif diff --git a/code/renderer_vulkan/R_PrintMat.c b/code/renderer_vulkan/R_PrintMat.c new file mode 100644 index 0000000000..09d52b0d0c --- /dev/null +++ b/code/renderer_vulkan/R_PrintMat.c @@ -0,0 +1,42 @@ +#include <stdio.h> /* the original header names did not survive extraction; <stdio.h>/<stdlib.h> assumed */
+#include <stdlib.h>
+#include "R_PrintMat.h"
+#include "ref_import.h"
+
+
+/*
+ * ===============================================================================
+ * Filename: R_PrintMat.c
+ *
+ * Description: print matrix values or buffers in a readable form, for debugging
+ * Author: SuiJingfeng, 
18949883232@163.com + * =============================================================================== + */ + +void printMat1x3f(const char* name, const float src[3]) +{ + ri.Printf(PRINT_ALL, "\n float %s[3] = {%f, %f, %f};\n", + name, src[0], src[1], src[2]); +} + +void printMat1x4f(const char* name, const float src[4]) +{ + ri.Printf(PRINT_ALL, "\n float %s[4] = {%f, %f, %f, %f};\n", + name, src[0], src[1], src[2], src[3]); +} + +void printMat3x3f(const char* name, const float src[3][3]) +{ + ri.Printf(PRINT_ALL, + "\n float %s[3][3] = {\n%f, %f, %f, \n%f, %f, %f, \n%f, %f, %f };\n", name, + src[0][0], src[0][1], src[0][2], + src[1][0], src[1][1], src[1][2], + src[2][0], src[2][1], src[2][2]); +} + +void printMat4x4f(const char* name, const float src[16]) +{ + ri.Printf(PRINT_ALL, "\n float %s[16] = {\n %f, %f, %f, %f,\n %f, %f, %f, %f, \n %f, %f, %f, %f, \n %f, %f, %f, %f};\n", name, + src[0], src[1], src[2], src[3], src[4], src[5], src[6], src[7], + src[8], src[9], src[10], src[11], src[12], src[13], src[14], src[15] ); +} diff --git a/code/renderer_vulkan/R_PrintMat.h b/code/renderer_vulkan/R_PrintMat.h new file mode 100644 index 0000000000..e594875b53 --- /dev/null +++ b/code/renderer_vulkan/R_PrintMat.h @@ -0,0 +1,21 @@ +/* + * ================================================================================== + * + * Filename: R_PrintMat.h + * + * Description: print matrix values for debug + * Author: Sui Jingfeng (), 18949883232@163.com + * ================================================================================== + */ + +#ifndef R_PRINT_MAT_H_ +#define R_PRINT_MAT_H_ + +// data print helper +void printMat1x3f(const char* name, const float src[3]); +void printMat1x4f(const char* name, const float src[4]); +void printMat3x3f(const char* name, const float src[3][3]); +void printMat4x4f(const char* name, const float src[16]); + + +#endif diff --git a/code/renderer_vulkan/R_StretchRaw.c b/code/renderer_vulkan/R_StretchRaw.c new file mode 100644 index 0000000000..570e234a12 --- /dev/null +++ b/code/renderer_vulkan/R_StretchRaw.c @@ -0,0 +1,43 @@ +#include "ref_import.h" +#include "tr_globals.h" + + +extern void RE_UploadCinematic (int w, int h, int cols, int rows, const byte *data, int client, qboolean dirty); +extern void RE_StretchPic ( float x, float y, float w, float h, + float s1, float t1, float s2, float t2, qhandle_t hShader ); + + +/* +============= +FIXME: not exactly backend +Stretches a raw 32 bit power of 2 bitmap image over the given screen rectangle. +Used for cinematics. 
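+The half-texel offsets passed to RE_StretchPic below sample at texel centers,
+which avoids filtering artifacts at the borders of the scratch image.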
+
+=============
+*/
+void RE_StretchRaw (int x, int y, int w, int h, int cols, int rows, const unsigned char *data, int client, qboolean dirty)
+{
+ int i, j;
+
+ if ( !tr.registered ) {
+ return;
+ }
+
+ // make sure rows and cols are powers of 2
+ for ( i = 0 ; ( 1 << i ) < cols ; i++ )
+ {
+ ;
+ }
+ for ( j = 0 ; ( 1 << j ) < rows ; j++ )
+ {
+ ;
+ }
+
+ if ( ( 1 << i ) != cols || ( 1 << j ) != rows) {
+ ri.Error (ERR_DROP, "Draw_StretchRaw: size not a power of 2: %i by %i", cols, rows);
+ }
+
+ RE_UploadCinematic(w, h, cols, rows, data, client, dirty);
+
+ tr.cinematicShader->stages[0]->bundle[0].image[0] = tr.scratchImage[client];
+ RE_StretchPic(x, y, w, h, 0.5f / cols, 0.5f / rows, 1.0f - 0.5f / cols, 1.0f - 0.5f / rows, tr.cinematicShader->index);
+} diff --git a/code/renderer_vulkan/VKimpl.h b/code/renderer_vulkan/VKimpl.h new file mode 100644 index 0000000000..5efb9352c6 --- /dev/null +++ b/code/renderer_vulkan/VKimpl.h @@ -0,0 +1,26 @@ +#ifndef VKIMPL_H_
+#define VKIMPL_H_
+
+
+/*
+====================================================================
+
+IMPLEMENTATION SPECIFIC FUNCTIONS
+
+====================================================================
+*/
+
+
+#define VK_NO_PROTOTYPES
+#include "vulkan/vulkan.h"
+
+void vk_createWindow(void);
+void vk_destroyWindow(void);
+
+void vk_getInstanceProcAddrImpl(void);
+
+void vk_createSurfaceImpl(void);
+
+void vk_minimizeWindow( void );
+
+#endif diff --git a/code/renderer_vulkan/glConfig.c b/code/renderer_vulkan/glConfig.c new file mode 100644 index 0000000000..b4e45b9a91 --- /dev/null +++ b/code/renderer_vulkan/glConfig.c @@ -0,0 +1,346 @@ +#include "tr_local.h"
+#include "ref_import.h"
+#include "tr_cvar.h"
+
+#include "../renderercommon/tr_types.h"
+
+#include "VKimpl.h"
+#include "vk_instance.h"
+#include "vk_image.h"
+// Nothing outside this file should modify glConfig; it is kept local
+// because it belongs to OpenGL rather than Vulkan, and is retained
+// only for backward compatibility.
+static glconfig_t glConfig;
+
+
+static cvar_t* r_customwidth;
+static cvar_t* r_customheight;
+static cvar_t* r_customaspect;
+
+typedef struct vidmode_s
+{
+ const char *description;
+ int width, height;
+ float pixelAspect; // pixel width / height
+} vidmode_t;
+
+
+static const vidmode_t r_vidModes[] =
+{
+ { "Mode 0: 320x240", 320, 240, 1 },
+ { "Mode 1: 400x300", 400, 300, 1 },
+ { "Mode 2: 512x384", 512, 384, 1 },
+ { "Mode 3: 640x480 (480p)", 640, 480, 1 },
+ { "Mode 4: 800x600", 800, 600, 1 },
+ { "Mode 5: 960x720", 960, 720, 1 },
+ { "Mode 6: 1024x768", 1024, 768, 1 },
+ { "Mode 7: 1152x864", 1152, 864, 1 },
+ { "Mode 8: 1280x1024", 1280, 1024, 1 },
+ { "Mode 9: 1600x1200", 1600, 1200, 1 },
+ { "Mode 10: 2048x1536", 2048, 1536, 1 },
+ { "Mode 11: 856x480", 856, 480, 1 }, // Q3 MODES END HERE AND EXTENDED MODES BEGIN
+ { "Mode 12: 1280x720 (720p)", 1280, 720, 1 },
+ { "Mode 13: 1280x768", 1280, 768, 1 },
+ { "Mode 14: 1280x800", 1280, 800, 1 },
+ { "Mode 15: 1280x960", 1280, 960, 1 },
+ { "Mode 16: 1360x768", 1360, 768, 1 },
+ { "Mode 17: 1366x768", 1366, 768, 1 }, // yes there are some out there on that extra 6
+ { "Mode 18: 1360x1024", 1360, 1024, 1 },
+ { "Mode 19: 1400x1050", 1400, 1050, 1 },
+ { "Mode 20: 1400x900", 1400, 900, 1 },
+ { "Mode 21: 1600x900", 1600, 900, 1 },
+ { "Mode 22: 1680x1050", 1680, 1050, 1 },
+ { "Mode 23: 1920x1080 (1080p)", 1920, 1080, 1 },
+ { "Mode 24: 1920x1200", 1920, 1200, 1 },
+ { "Mode 25: 1920x1440", 1920, 1440, 1 },
+ { "Mode 26: 2560x1080", 2560, 1080, 1 },
+ { "Mode 27: 2560x1600", 2560, 1600, 1 },
+ { 
"Mode 28: 3840x2160 (4K)", 3840, 2160, 1 } +}; +static const int s_numVidModes = 29; + + +void R_DisplayResolutionList_f( void ) +{ + int i; + + ri.Printf( PRINT_ALL, "\n" ); + for ( i = 0; i < s_numVidModes; i++ ) + { + ri.Printf( PRINT_ALL, "%s\n", r_vidModes[i].description ); + } + ri.Printf( PRINT_ALL, "\n" ); +} + + +void R_SetWinMode(int mode, unsigned int width, unsigned int height, unsigned int hz) +{ + + if ( mode < -2 || mode >= s_numVidModes) { + mode = 3; + } + + if (mode == -2) + { + // use desktop video resolution + glConfig.vidWidth = width; + glConfig.vidHeight = height; + glConfig.windowAspect = (float)width / (float)height; + glConfig.displayFrequency = hz; + glConfig.isFullscreen = 1; + } + else if ( mode == -1 ) + { + glConfig.vidWidth = r_customwidth->integer; + glConfig.vidHeight = r_customheight->integer; + glConfig.windowAspect = r_customaspect->value; + glConfig.displayFrequency = 60; + glConfig.isFullscreen = 0; + } + else + { + glConfig.vidWidth = r_vidModes[mode].width; + glConfig.vidHeight = r_vidModes[mode].height; + glConfig.windowAspect = (float)r_vidModes[mode].width / ( r_vidModes[mode].height * r_vidModes[mode].pixelAspect ); + glConfig.displayFrequency = 60; + glConfig.isFullscreen = 0; + + } + + ri.Printf(PRINT_ALL, " MODE: %d, %d x %d, refresh rate: %dhz\n", + mode, glConfig.vidWidth, glConfig.vidHeight, glConfig.displayFrequency); +} + +void R_GetWinResolution(int* w, int* h) +{ + *w = glConfig.vidWidth; + *h = glConfig.vidHeight; +} + +void R_GetWinResolutionF(float* w, float* h) +{ + *w = glConfig.vidWidth; + *h = glConfig.vidHeight; +} + +void R_InitDisplayResolution( void ) +{ + // leilei - -2 is so convenient for modern day PCs + r_mode = ri.Cvar_Get( "r_mode", "-2", CVAR_ARCHIVE | CVAR_LATCH ); + r_customwidth = ri.Cvar_Get( "r_customwidth", "960", CVAR_ARCHIVE | CVAR_LATCH ); + r_customheight = ri.Cvar_Get( "r_customheight", "540", CVAR_ARCHIVE | CVAR_LATCH ); + r_customaspect = ri.Cvar_Get( "r_customaspect", "1.78", CVAR_ARCHIVE | CVAR_LATCH ); +} + +void R_glConfigClear(void) +{ + memset(&glConfig, 0, sizeof(glConfig)); +} + + +//IN: a pointer to the glConfig struct +void R_GetGlConfig(glconfig_t * const pCfg) +{ + *pCfg = glConfig; +} + + +void R_glConfigInit(void) +{ + ri.Printf(PRINT_ALL, "--- R_glConfigInit() ---\n"); + + // These values force the UI to disable driver selection + glConfig.driverType = GLDRV_ICD; + glConfig.hardwareType = GLHW_GENERIC; + + // Only using SDL_SetWindowBrightness to determine if hardware gamma is supported + glConfig.deviceSupportsGamma = qtrue; + + glConfig.textureEnvAddAvailable = 0; // not used + glConfig.textureCompression = TC_NONE; // not used + // init command buffers and SMP + glConfig.stereoEnabled = 0; + glConfig.smpActive = qfalse; // not used + + // hardcode it + glConfig.colorBits = 32; + glConfig.depthBits = 24; + glConfig.stencilBits = 8; +} + +// ================================================== +// ================================================== + +static void printDeviceExtensions(void) +{ + uint32_t nDevExts = 0; + + // To query the extensions available to a given physical device + VK_CHECK( qvkEnumerateDeviceExtensionProperties( vk.physical_device, NULL, &nDevExts, NULL) ); + + assert(nDevExts > 0); + + VkExtensionProperties* pDevExt = + (VkExtensionProperties *) malloc(sizeof(VkExtensionProperties) * nDevExts); + + qvkEnumerateDeviceExtensionProperties( + vk.physical_device, NULL, &nDevExts, pDevExt); + + + ri.Printf(PRINT_ALL, "--------- Total %d Device Extension Supported 
---------\n", nDevExts);
+ uint32_t i;
+ for (i = 0; i < nDevExts; ++i)
+ {
+ ri.Printf(PRINT_ALL, "%s\n", pDevExt[i].extensionName);
+ }
+
+ free(pDevExt);
+}

[... a span here was garbled in extraction; the loop above and the function opening below are reconstructed, with the name printInstanceExtensions and its 'setting' parameter inferred from the code that follows ...]

+void printInstanceExtensions(VkBool32 setting)
+{
+ uint32_t i;
+ uint32_t nInsExt = 0;
+
+ VK_CHECK(qvkEnumerateInstanceExtensionProperties( NULL, &nInsExt, NULL));
+ assert(nInsExt > 0);
+
+ VkExtensionProperties* pInsExt = (VkExtensionProperties *) malloc(sizeof(VkExtensionProperties) * nInsExt);
+
+ VK_CHECK(qvkEnumerateInstanceExtensionProperties( NULL, &nInsExt, pInsExt));
+
+ ri.Printf(PRINT_ALL, "\n");
+
+ ri.Printf(PRINT_ALL, "----- Total %d Instance Extension Supported -----\n", nInsExt);
+ for (i = 0; i < nInsExt; ++i)
+ {
+ ri.Printf(PRINT_ALL, "%s\n", pInsExt[i].extensionName );
+ }
+ ri.Printf(PRINT_ALL, "----- ------------------------------------- -----\n\n");
+
+ // =================================================================
+
+ // We enable every instance extension; whether that is reasonable is an
+ // open question. Copy the names into glConfig.extensions_string here,
+ // keeping this step separate from instance creation for clarity.
+ if( setting )
+ {
+ uint32_t indicator = 0;
+
+ for (i = 0; i < nInsExt; ++i)
+ {
+ uint32_t len = strlen(pInsExt[i].extensionName);
+ memcpy(glConfig.extensions_string + indicator,
+ pInsExt[i].extensionName, len);
+ indicator += len;
+ glConfig.extensions_string[indicator++] = ' ';
+ }
+
+ free(pInsExt);
+ }
+ // ==================================================================
+/*
+ uint32_t nDevExts = 0;
+
+ // To query the extensions available to a given physical device
+ VK_CHECK( qvkEnumerateDeviceExtensionProperties( vk.physical_device, NULL, &nDevExts, NULL) );
+
+ assert(nDevExts > 0);
+
+ VkExtensionProperties* pDevExt =
+ (VkExtensionProperties *) malloc(sizeof(VkExtensionProperties) * nDevExts);
+
+ qvkEnumerateDeviceExtensionProperties(
+ vk.physical_device, NULL, &nDevExts, pDevExt);
+
+
+ ri.Printf(PRINT_ALL, "---- Total %d Device Extension Supported ----\n", nDevExts);
+ uint32_t i;
+ for (i = 0; i < nDevExts; ++i)
+ {
+ ri.Printf(PRINT_ALL, "%s\n", pDevExt[i].extensionName);
+ }
+*/
+}

[... the remainder of glConfig.c and the header of the matrix_multiplication.c diff were lost in extraction; the file header below is reconstructed without its index hashes and line counts ...]

diff --git a/code/renderer_vulkan/matrix_multiplication.c b/code/renderer_vulkan/matrix_multiplication.c new file mode 100644 --- /dev/null +++ b/code/renderer_vulkan/matrix_multiplication.c +#if defined __x86_64__
+ #include <xmmintrin.h>
+#endif
+
+#include <math.h>
+#include <string.h>
+
+#include "ref_import.h"
+#include "matrix_multiplication.h"
+
+#define DEG2RAD( a ) ( ( (a) * M_PI ) / 180.0F )
+
+static const float s_Identity3x3[3][3] = {
+ { 1.0f, 0.0f, 0.0f },
+ { 0.0f, 1.0f, 0.0f },
+ { 0.0f, 0.0f, 1.0f }
+};
+
+static const float s_Identity4x4[16] = {
+ 1.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 1.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 1.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 1.0f
+};
+
+
+void Mat4Copy( const float in[16], float out[16] )
+{
+ memcpy(out, in, 64);
+}
+
+void Mat3x3Copy( float dst[3][3], const float src[3][3] )
+{
+ memcpy(dst, src, 36);
+}
+
+void VectorLerp( float a[3], float b[3], float lerp, float out[3])
+{
+ out[0] = a[0] + (b[0] - a[0]) * lerp;
+ out[1] = a[1] + (b[1] - a[1]) * lerp;
+ out[2] = a[2] + (b[2] - a[2]) * lerp;
+}
+
+void VectorNorm( float v[3] )
+{
+ float length = v[0]*v[0] + v[1]*v[1] + v[2]*v[2];
+
+ if(length != 0)
+ {
+ /* writing it this way allows gcc to recognize that rsqrt can be used */
+ length = 1.0f / sqrtf (length);
+ v[0] *= length;
+ v[1] *= length;
+ v[2] *= length;
+ }
+}
+
+float VectorLen( const float v[3] )
+{
+ return sqrtf(v[0]*v[0] + v[1]*v[1] + v[2]*v[2]);
+}
+
+
+void Mat4Identity( float out[16] )
+{
+ memcpy(out, s_Identity4x4, 64);
+}
+
+void Mat4Translation( float vec[3], float out[16] )
+{
+ memcpy(out, s_Identity4x4, 64);
+
+ out[12] = vec[0];
+ out[13] = vec[1];
+ out[14] = vec[2];
+}
+//
+// NOTE: out = B * A in matrix notation;
+// A, B and out are specified in column-major order (the OpenGL convention):
+// on the flat arrays this computes out[i*4 + j] = sum_k A[i*4 + k] * B[k*4 + j],
+// which is the matrix product B * A when the arrays are read column-major.
+//
+void myGlMultMatrix(const float A[16], const float B[16], float out[16])
+{
+ int i, j;
+ for ( i = 0 ; i < 4 ; i++ )
+ {
+ for ( j = 0 ; j < 4 ; j++ )
+ {
+ out[ i * 4 + j ] =
+ A [ i * 4 + 0 ] * B [ 0 * 4 + j ]
+ + A [ i * 4 + 1 ] * B [ 1 * 4 + j ]
+ + A [ i * 4 + 2 ] * B [ 2 * 4 + j ]
+ + A [ i * 4 + 3 ] * B [ 3 * 4 + j ];
+ }
+ }
+}
+
+
+void MatrixMultiply4x4(const float A[16], const float B[16], float out[16])
+{
+ out[0] = A[0]*B[0] + A[1]*B[4] + A[2]*B[8] + A[3]*B[12];
+ out[1] = A[0]*B[1] + A[1]*B[5] + A[2]*B[9] + A[3]*B[13];
+ out[2] = A[0]*B[2] + A[1]*B[6] + A[2]*B[10] + A[3]*B[14];
+ out[3] = A[0]*B[3] + A[1]*B[7] + A[2]*B[11] + A[3]*B[15];
+
+ out[4] = A[4]*B[0] + A[5]*B[4] + A[6]*B[8] + A[7]*B[12];
+ out[5] = A[4]*B[1] + A[5]*B[5] + A[6]*B[9] + A[7]*B[13];
+ out[6] = A[4]*B[2] + A[5]*B[6] + A[6]*B[10] + A[7]*B[14];
+ out[7] = A[4]*B[3] + A[5]*B[7] + A[6]*B[11] + A[7]*B[15];
+
+ out[8] = A[8]*B[0] + A[9]*B[4] + A[10]*B[8] + A[11]*B[12];
+ out[9] = A[8]*B[1] + A[9]*B[5] + A[10]*B[9] + A[11]*B[13];
+ out[10] = A[8]*B[2] + A[9]*B[6] + A[10]*B[10] + A[11]*B[14];
+ out[11] = A[8]*B[3] + A[9]*B[7] + A[10]*B[11] + A[11]*B[15];
+
+ out[12] = A[12]*B[0] + A[13]*B[4] + A[14]*B[8] + A[15]*B[12];
+ out[13] = A[12]*B[1] + A[13]*B[5] + A[14]*B[9] + A[15]*B[13];
+ out[14] = A[12]*B[2] + A[13]*B[6] + A[14]*B[10] + A[15]*B[14];
+ out[15] = A[12]*B[3] + A[13]*B[7] + A[14]*B[11] + A[15]*B[15];
+}
+
+/*
+ * NOTE: out = B * A in matrix notation.
+ * A, B and out are specified in column-major order;
+ * out must be 16-byte aligned.
+ */
+
+void MatrixMultiply4x4_SSE(const float A[16], const float B[16], float out[16])
+{
+#if defined __x86_64__
+ __m128 row1 = _mm_load_ps(&B[0]);
+ __m128 row2 = _mm_load_ps(&B[4]);
+ __m128 row3 = _mm_load_ps(&B[8]);
+ __m128 row4 = _mm_load_ps(&B[12]);
+
+ int i;
+ for(i=0; i<4; i++)
+ {
+ __m128 brod1 = _mm_set1_ps(A[4*i ]);
+ __m128 brod2 = _mm_set1_ps(A[4*i + 1]);
+ __m128 brod3 = _mm_set1_ps(A[4*i + 2]);
+ __m128 brod4 = _mm_set1_ps(A[4*i + 3]);
+
+ __m128 row = _mm_add_ps(
+ _mm_add_ps( _mm_mul_ps(brod1, row1), _mm_mul_ps(brod2, row2) ),
+ _mm_add_ps( _mm_mul_ps(brod3, row3), _mm_mul_ps(brod4, row4) )
+ );
+
+ _mm_store_ps(&out[4*i], row);
+ }
+#else
+ MatrixMultiply4x4( A, B, out);
+#endif
+}
+
+
+void Mat4Transform( const float in1[16], const float in2[4], float out[4] )
+{
+ // 16 mult, 12 plus
+
+ float a = in2[0];
+ float b = in2[1];
+ float c = in2[2];
+ float d = in2[3];
+
+ out[0] = in1[0] * a + in1[4] * b + in1[ 8] * c + in1[12] * d;
+ out[1] = in1[1] * a + in1[5] * b + in1[ 9] * c + in1[13] * d;
+ out[2] = in1[2] * a + in1[6] * b + in1[10] * c + in1[14] * d;
+ out[3] = in1[3] * a + in1[7] * b + in1[11] * c + in1[15] * d;
+}
+
+
+void Vec3Transform(const float Mat[16], const float v[3], float out[3])
+{
+ float x = v[0];
+ float y = v[1];
+ float z = v[2];
+
+ out[0] = Mat[0] * x + Mat[4] * y + Mat[ 8] * z + Mat[12];
+ out[1] = Mat[1] * x + Mat[5] * y + Mat[ 9] * z + Mat[13];
+ out[2] = Mat[2] * x + Mat[6] * y + Mat[10] * z + Mat[14];
+}
+
+
+// Unfortunately, this function does not seem to be faster than Mat4Transform.
+// Computes vector1x4 * mat4x4.
+void Mat4x1Transform_SSE( const float A[16], const float x[4], float out[4] )
+{
+ // 16 mult, 12 plus
+ //out[0] = A[0] * x[0] + A[4] * x[1] + A[ 8] * x[2] + A[12] * x[3];
+ //out[1] = A[1] * x[0] + A[5] * x[1] + A[ 9] * x[2] + A[13] * x[3];
+ //out[2] = A[2] * x[0] + A[6] * x[1] + A[10] * x[2] + A[14] * x[3];
+ //out[3] = A[3] * x[0] + A[7] * x[1] + A[11] * x[2] + A[15] * x[3];
+
+ 
// 4 mult + 3 plus + 4 broadcast + 8 load (4 _mm_set1_ps + 4 _mm_load_ps)
+ // + 1 store
+#if defined __x86_64__
+ __m128 r1 = _mm_mul_ps( _mm_set1_ps(x[0]), _mm_load_ps(A ) );
+ __m128 r2 = _mm_mul_ps( _mm_set1_ps(x[1]), _mm_load_ps(A+4 ) );
+ __m128 r3 = _mm_mul_ps( _mm_set1_ps(x[2]), _mm_load_ps(A+8 ) );
+ __m128 r4 = _mm_mul_ps( _mm_set1_ps(x[3]), _mm_load_ps(A+12) );
+
+ _mm_store_ps(out, _mm_add_ps( _mm_add_ps(r1, r2), _mm_add_ps(r3, r4) ) );
+#else
+ Mat4Transform(A, x, out);
+#endif
+}
+
+
+/*
+#define SHUFFLE_PARAM(x, y, z, w) ( x | y<<2 | z<<4 | w<<6 )
+#define _mm_replicate_x_ps(v) _mm_shuffle_ps(v, v, SHUFFLE_PARAM(0, 0, 0, 0))
+#define _mm_replicate_y_ps(v) _mm_shuffle_ps(v, v, SHUFFLE_PARAM(1, 1, 1, 1))
+#define _mm_replicate_z_ps(v) _mm_shuffle_ps(v, v, SHUFFLE_PARAM(2, 2, 2, 2))
+#define _mm_replicate_w_ps(v) _mm_shuffle_ps(v, v, SHUFFLE_PARAM(3, 3, 3, 3))
+
+
+void Vec4Transform_SSE( const float A[16], float v[4], float out[4] )
+{
+#if defined __x86_64__
+ __m128 x = _mm_load_ps(v);
+ // 16 mult, 12 plus
+ //out[0] = A[0] * x[0] + A[4] * x[1] + A[ 8] * x[2] + A[12] * x[3];
+ //out[1] = A[1] * x[0] + A[5] * x[1] + A[ 9] * x[2] + A[13] * x[3];
+ //out[2] = A[2] * x[0] + A[6] * x[1] + A[10] * x[2] + A[14] * x[3];
+ //out[3] = A[3] * x[0] + A[7] * x[1] + A[11] * x[2] + A[15] * x[3];
+
+ // 4 mult + 3 plus + 4 broadcast + 8 load (4 shuffles + 4 _mm_load_ps)
+ // + 1 store
+ __m128 r1 = _mm_mul_ps( _mm_replicate_x_ps( x ), _mm_load_ps(A) );
+ __m128 r2 = _mm_mul_ps( _mm_replicate_y_ps( x ), _mm_load_ps(A+4) );
+ __m128 r3 = _mm_mul_ps( _mm_replicate_z_ps( x ), _mm_load_ps(A+8) );
+ __m128 r4 = _mm_mul_ps( _mm_replicate_w_ps( x ), _mm_load_ps(A+12) );
+
+ _mm_store_ps(out, _mm_add_ps( _mm_add_ps( r1, r2 ), _mm_add_ps( r3, r4 ) ));
+#else
+ Mat4Transform(A, v, out);
+#endif
+}
+*/
+
+void Mat3x3Identity( float pMat[3][3] )
+{
+ memcpy(pMat, s_Identity3x3, 36);
+}
+
+
+void TransformModelToClip( const float src[3], const float* pMatModel, const float* pMatProj, float eye[4], float dst[4])
+{
+ int i;
+
+ for ( i = 0 ; i < 4 ; i++ )
+ {
+ eye[i] =
+ src[0] * pMatModel[ i + 0 * 4 ] +
+ src[1] * pMatModel[ i + 1 * 4 ] +
+ src[2] * pMatModel[ i + 2 * 4 ] +
+ 1 * pMatModel[ i + 3 * 4 ];
+ }
+
+ for ( i = 0 ; i < 4 ; i++ )
+ {
+ dst[i] =
+ eye[0] * pMatProj[ i + 0 * 4 ] +
+ eye[1] * pMatProj[ i + 1 * 4 ] +
+ eye[2] * pMatProj[ i + 2 * 4 ] +
+ eye[3] * pMatProj[ i + 3 * 4 ];
+ }
+}
+
+
+
+void VectorCross( const float v1[3], const float v2[3], float cross[3] )
+{
+ cross[0] = v1[1]*v2[2] - v1[2]*v2[1];
+ cross[1] = v1[2]*v2[0] - v1[0]*v2[2];
+ cross[2] = v1[0]*v2[1] - v1[1]*v2[0];
+}
+
+
+// fast vector normalize routine that does not check to make sure
+// that length != 0, nor does it return the length; uses rsqrt approximation
+void FastNormalize1f(float v[3])
+{
+ // writing it this way allows gcc to recognize that rsqrt can be used
+ float invLen = 1.0f / sqrtf(v[0]*v[0] + v[1]*v[1] + v[2]*v[2]);
+
+ v[0] = v[0] * invLen;
+ v[1] = v[1] * invLen;
+ v[2] = v[2] * invLen;
+}
+
+void FastNormalize2f( const float* v, float* out)
+{
+ // writing it this way allows gcc to recognize that rsqrt can be used
+ float invLen = 1.0f / sqrtf(v[0]*v[0] + v[1]*v[1] + v[2]*v[2]);
+
+ out[0] = v[0] * invLen;
+ out[1] = v[1] * invLen;
+ out[2] = v[2] * invLen;
+}
+
+// uses Rodrigues' rotation formula;
+// vec is not assumed to be a unit vector
+void PointRotateAroundVector(float* res, const float* vec, const float* p, const float degrees)
+{
+ float rad = DEG2RAD( degrees );
+ float cos_th = cos( rad );
+ 
float sin_th = sin( rad );
+ float k[3];
+
+ // writing it this way allows gcc to recognize that rsqrt can be used
+ float invLen = 1.0f / sqrtf(vec[0]*vec[0] + vec[1]*vec[1] + vec[2]*vec[2]);
+ k[0] = vec[0] * invLen;
+ k[1] = vec[1] * invLen;
+ k[2] = vec[2] * invLen;
+
+ float d = (1 - cos_th) * (p[0] * k[0] + p[1] * k[1] + p[2] * k[2]);
+
+ res[0] = sin_th * (k[1]*p[2] - k[2]*p[1]);
+ res[1] = sin_th * (k[2]*p[0] - k[0]*p[2]);
+ res[2] = sin_th * (k[0]*p[1] - k[1]*p[0]);
+
+ res[0] += cos_th * p[0] + d * k[0];
+ res[1] += cos_th * p[1] + d * k[1];
+ res[2] += cos_th * p[2] + d * k[2];
+}
+
+// vector k is assumed to be a unit vector
+void RotateAroundUnitVector(float* res, const float* k, const float* p, const float degrees)
+{
+ float rad = DEG2RAD( degrees );
+ float cos_th = cos( rad );
+ float sin_th = sin( rad );
+
+ float d = (1 - cos_th) * (p[0] * k[0] + p[1] * k[1] + p[2] * k[2]);
+
+ res[0] = sin_th * (k[1]*p[2] - k[2]*p[1]);
+ res[1] = sin_th * (k[2]*p[0] - k[0]*p[2]);
+ res[2] = sin_th * (k[0]*p[1] - k[1]*p[0]);
+
+ res[0] += cos_th * p[0] + d * k[0];
+ res[1] += cos_th * p[1] + d * k[1];
+ res[2] += cos_th * p[2] + d * k[2];
+}
+
+
+// note: src is NOT assumed to be normalized;
+// unit: normalized copy of src,
+// dst: a unit vector perpendicular to src
+void VectorPerp( const float src[3], float dst[3] )
+{
+ float unit[3];
+
+ float sqlen = src[0]*src[0] + src[1]*src[1] + src[2]*src[2];
+ if(0 == sqlen)
+ {
+ ri.Printf( PRINT_WARNING, "VectorPerp: zero vector input!\n");
+ return;
+ }
+
+ dst[1] = -src[0];
+ dst[2] = src[1];
+ dst[0] = src[2];
+ // this rotate-and-negate tries to produce a vector that is not colinear
+ // with the original; it cannot guarantee that, e.g. for
+ // src = (1/sqrt(3), 1/sqrt(3), -1/sqrt(3))
+ // it yields dst = (-1/sqrt(3), -1/sqrt(3), 1/sqrt(3)) = -src
+
+
+ float invLen = 1.0f / sqrtf(sqlen);
+ unit[0] = src[0] * invLen;
+ unit[1] = src[1] * invLen;
+ unit[2] = src[2] * invLen;
+
+
+ float d = DotProduct(unit, dst);
+ dst[0] -= d*unit[0];
+ dst[1] -= d*unit[1];
+ dst[2] -= d*unit[2];
+
+ // normalize the result
+ invLen = 1.0f / sqrtf(dst[0]*dst[0] + dst[1]*dst[1] + dst[2]*dst[2]);
+
+ dst[0] *= invLen;
+ dst[1] *= invLen;
+ dst[2] *= invLen;
+}
+
+// Given a forward vector, create two perpendicular vectors.
+// note: forward is NOT assumed to be normalized; it is normalized
+// internally, and the input itself is left unmodified. 
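+// The rotate-and-negate step below produces a candidate right vector, and a
+// projection against the normalized forward (Gram-Schmidt) then makes it
+// exactly perpendicular before it is normalized.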
+// right, up: perpendicular to forward
+float MakeTwoPerpVectors(const float forward[3], float right[3], float up[3])
+{
+
+ float sqLen = forward[0]*forward[0]+forward[1]*forward[1]+forward[2]*forward[2];
+ if(sqLen)
+ {
+ float nf[3] = {0, 0, 1};
+ float invLen = 1.0f / sqrtf(sqLen);
+ nf[0] = forward[0] * invLen;
+ nf[1] = forward[1] * invLen;
+ nf[2] = forward[2] * invLen;
+
+ // this rotate and negate produces a vector
+ // that is not colinear with forward
+ right[0] = forward[2];
+ right[1] = -forward[0];
+ right[2] = forward[1];
+
+ // project out the component along forward to make right perpendicular
+ // (right must be initialized before it is read here)
+ float adjlen = DotProduct(nf, right);
+ right[0] -= adjlen * nf[0];
+ right[1] -= adjlen * nf[1];
+ right[2] -= adjlen * nf[2];
+
+
+ invLen = 1.0f/sqrtf(right[0]*right[0]+right[1]*right[1]+right[2]*right[2]);
+ right[0] *= invLen;
+ right[1] *= invLen;
+ right[2] *= invLen;
+
+ // get the up vector with the right-hand rule
+ VectorCross(right, nf, up);
+
+ return (sqLen * invLen);
+ }
+ return 0;
+}
+
+
+void TransformModelToClip_SSE( const float src[3], const float pMatModel[16], const float pMatProj[16], float dst[4] )
+{
+#if defined __x86_64__
+
+ float AugSrc[4] = {src[0], src[1], src[2], 1.0f};
+
+
+ __m128 row1 = _mm_load_ps(&pMatProj[0]);
+ __m128 row2 = _mm_load_ps(&pMatProj[4]);
+ __m128 row3 = _mm_load_ps(&pMatProj[8]);
+ __m128 row4 = _mm_load_ps(&pMatProj[12]);
+
+ __m128 res[4];
+ int i;
+ for(i=0; i<4; i++)
+ {
+ __m128 brod1 = _mm_set1_ps(pMatModel[4*i ]);
+ __m128 brod2 = _mm_set1_ps(pMatModel[4*i + 1]);
+ __m128 brod3 = _mm_set1_ps(pMatModel[4*i + 2]);
+ __m128 brod4 = _mm_set1_ps(pMatModel[4*i + 3]);
+
+ __m128 scol = _mm_set1_ps(AugSrc[i]);
+
+ res[i] =_mm_mul_ps( _mm_add_ps(
+ _mm_add_ps( _mm_mul_ps(brod1, row1), _mm_mul_ps(brod2, row2) ),
+ _mm_add_ps( _mm_mul_ps(brod3, row3), _mm_mul_ps(brod4, row4) )
+ ), scol);
+ }
+
+
+ _mm_store_ps(dst, _mm_add_ps( _mm_add_ps(res[0], res[1]), _mm_add_ps(res[2], res[3]) ) );
+#else
+ float eye[4];
+ TransformModelToClip(src, pMatModel, pMatProj, eye, dst );
+#endif
+
+
+// print4f("AugSrc", AugSrc);
+// printMat4x4f("MatModel", pMatModel);
+// printMat4x4f("MatProj", pMatProj);
+// MatrixMultiply4x4_SSE(pMatModel, pMatProj, mvp);
+// Mat4x1Transform_SSE(mvp, AugSrc, dst);
+// print4f("dst", dst);
+}
+
+
+/*
+
+void TransformModelToClip_SSE2( const float x[3], const float pMatModel[16], const float pMatProj[16], float dst[4] )
+{
+
+ // 7/8 broadcast, 8 load, 7/8 mult, 6 add
+
+ __m128 row = _mm_add_ps(
+ _mm_add_ps( _mm_mul_ps( _mm_set1_ps(x[0]), _mm_load_ps(pMatModel ) ) ,
+ _mm_mul_ps( _mm_set1_ps(x[1]), _mm_load_ps(pMatModel+4 ) ) )
+ ,
+ _mm_add_ps( _mm_mul_ps( _mm_set1_ps(x[2]), _mm_load_ps(pMatModel+8 ) ) ,
+ _mm_load_ps(pMatModel+12) ) );
+ _mm_store_ps(dst, _mm_add_ps(
+ _mm_add_ps( _mm_mul_ps( _mm_set1_ps(row[0]), _mm_load_ps(pMatProj ) ) ,
+ _mm_mul_ps( _mm_set1_ps(row[1]), _mm_load_ps(pMatProj+4 ) ) )
+ ,
+ _mm_add_ps( _mm_mul_ps( _mm_set1_ps(row[2]), _mm_load_ps(pMatProj+8 ) ) ,
+ _mm_mul_ps( _mm_set1_ps(row[3]), _mm_load_ps(pMatProj+12) ) ) )
+ );
+
+
+// _mm_store_ps(dst, row);
+
+// Mat4x1Transform_SSE(pMatModel, AugSrc, eye);
+// Mat4x1Transform_SSE(pMatProj, eye, dst);
+}
+
+*/
+
+// ===============================================
+// not used now
+// ===============================================
+
+/*
+void Mat4SimpleInverse( const float in[16], float out[16])
+{
+ float v[3];
+ float invSqrLen;
+
+ VectorCopy(in + 0, v);
+ invSqrLen = 1.0f / DotProduct(v, v); VectorScale(v, invSqrLen, v);
+ out[ 0] = v[0]; out[ 4] = v[1]; out[ 8] = v[2]; out[12] = -DotProduct(v, &in[12]);
+
+ VectorCopy(in + 4, v);
+ 
invSqrLen = 1.0f / DotProduct(v, v); VectorScale(v, invSqrLen, v); + out[ 1] = v[0]; out[ 5] = v[1]; out[ 9] = v[2]; out[13] = -DotProduct(v, &in[12]); + + VectorCopy(in + 8, v); + invSqrLen = 1.0f / DotProduct(v, v); VectorScale(v, invSqrLen, v); + out[ 2] = v[0]; out[ 6] = v[1]; out[10] = v[2]; out[14] = -DotProduct(v, &in[12]); + + out[ 3] = 0.0f; out[ 7] = 0.0f; out[11] = 0.0f; out[15] = 1.0f; +} + + +void Mat4Dump( const float in[16] ) +{ + printf( "%3.5f %3.5f %3.5f %3.5f\n", in[ 0], in[ 4], in[ 8], in[12]); + printf( "%3.5f %3.5f %3.5f %3.5f\n", in[ 1], in[ 5], in[ 9], in[13]); + printf( "%3.5f %3.5f %3.5f %3.5f\n", in[ 2], in[ 6], in[10], in[14]); + printf( "%3.5f %3.5f %3.5f %3.5f\n", in[ 3], in[ 7], in[11], in[15]); +} + +void Mat4View(vec3_t axes[3], vec3_t origin, mat4_t out) +{ + out[0] = axes[0][0]; + out[1] = axes[1][0]; + out[2] = axes[2][0]; + out[3] = 0; + + out[4] = axes[0][1]; + out[5] = axes[1][1]; + out[6] = axes[2][1]; + out[7] = 0; + + out[8] = axes[0][2]; + out[9] = axes[1][2]; + out[10] = axes[2][2]; + out[11] = 0; + + out[12] = -DotProduct(origin, axes[0]); + out[13] = -DotProduct(origin, axes[1]); + out[14] = -DotProduct(origin, axes[2]); + out[15] = 1; +} +*/ diff --git a/code/renderer_vulkan/matrix_multiplication.h b/code/renderer_vulkan/matrix_multiplication.h new file mode 100644 index 0000000000..7119710a45 --- /dev/null +++ b/code/renderer_vulkan/matrix_multiplication.h @@ -0,0 +1,24 @@ +#ifndef MATRIX_MULTIPLICATION_H_ +#define MATRIX_MULTIPLICATION_H_ + + +void Mat4Copy( const float in[16], float out[16] ); +void Mat3x3Copy( float dst[3][3], const float src[3][3] ); +void VectorLerp( float a[3], float b[3], float lerp, float out[3]); +void VectorNorm( float v[3] ); +float VectorLen( const float v[3] ); + +void TransformModelToClip( const float src[3], const float *modelMatrix, const float *projectionMatrix, float eye[4], float dst[4] ); +void TransformModelToClip_SSE( const float src[3], const float pMatModel[16], const float pMatProj[16], float dst[4] ); +void Mat4Identity( float out[16] ); +void MatrixMultiply4x4_SSE(const float A[16], const float B[16], float out[16]); +void Mat4x1Transform_SSE( const float A[16], const float x[4], float out[4] ); +void Mat4Transform( const float in1[16], const float in2[4], float out[4] ); +void Mat4Translation( float vec[3], float out[16] ); +void Mat3x3Identity( float pMat[3][3] ); +void VectorPerp( const vec3_t src, vec3_t dst ); +float MakeTwoPerpVectors(const float forward[3], float right[3], float up[3]); + +void Vec3Transform(const float Mat[16], const float v[3], float out[3]); + +#endif diff --git a/code/renderer_vulkan/ref_import.c b/code/renderer_vulkan/ref_import.c new file mode 100644 index 0000000000..1b81659e0c --- /dev/null +++ b/code/renderer_vulkan/ref_import.c @@ -0,0 +1,158 @@ +#include "../qcommon/q_shared.h" +#include "../renderercommon/tr_public.h" + +extern refexport_t* R_Export(void); + +refimport_t ri; + + +/* +@@@@@@@@@@@@@@@@@@@@@ +GetRefAPI + +@@@@@@@@@@@@@@@@@@@@@ +*/ + +#ifdef USE_RENDERER_DLOPEN +Q_EXPORT refexport_t* QDECL GetRefAPI( int apiVersion, refimport_t *rimp ) +{ +#else +refexport_t* GetRefAPI(int apiVersion, refimport_t *rimp) +{ +#endif + + ri = *rimp; + + if( apiVersion != REF_API_VERSION ) + { + ri.Printf(PRINT_ALL, "Mismatched REF_API_VERSION: expected %i, got %i\n", REF_API_VERSION, apiVersion ); + return NULL; + } + + return R_Export(); +} + +// +// common function replacements for modular renderer +// +#ifdef USE_RENDERER_DLOPEN + +void QDECL Com_Printf( const char 
*msg, ... ) +{ + va_list argptr; + char text[1024]; + + va_start(argptr, msg); + Q_vsnprintf(text, sizeof(text), msg, argptr); + va_end(argptr); + + ri.Printf(PRINT_ALL, "%s", text); +} + +void QDECL Com_Error( int level, const char *error, ... ) +{ + va_list argptr; + char text[1024]; + + va_start(argptr, error); + Q_vsnprintf(text, sizeof(text), error, argptr); + va_end(argptr); + + ri.Error(level, "%s", text); +} + +#endif + + +// ======================================= +// ======================================= +// ======================================= + +char* R_SkipPath (char *pathname) +{ + char* last = pathname; + while (*pathname) + { + if (*pathname=='/') + last = pathname+1; + pathname++; + } + return last; +} + +/* +char* SkipPath(char *pathname) +{ + char* last = pathname; + char c; + do{ + c = *pathname; + if (c == '/') + last = pathname+1; + pathname++; + }while(c); + + return last; +} +*/ + +void R_StripExtension( const char *in, char *out, int destsize ) +{ + const char *dot = strrchr(in, '.'), *slash; + + if (dot && (!(slash = strrchr(in, '/')) || slash < dot)) + destsize = (destsize < dot-in+1 ? destsize : dot-in+1); + + if ( in == out && destsize > 1 ) + out[destsize-1] = '\0'; + else + Q_strncpyz(out, in, destsize); +} + + +/* +void stripExtension(const char *in, char *out, int destsize) +{ + const char *dot = strrchr(in, '.'); + const char *slash = strrchr(in, '/'); + + + if ((dot != NULL) && ( (slash == NULL) || (slash < dot) ) ) + { + int len = dot-in+1; + if(len <= destsize) + destsize = len; + else + ri.Printf( PRINT_WARNING, "stripExtension: dest size not enough!\n"); + } + + if(in != out) + strncpy(out, in, destsize-1); + + out[destsize-1] = '\0'; +} + + + +const char *R_GetExtension( const char *name ) +{ + const char *dot = strrchr(name, '.'), *slash; + if (dot && (!(slash = strrchr(name, '/')) || slash < dot)) + return dot + 1; + else + return ""; +} +*/ + +/* +char* GetExtension( const char *name ) +{ + char* dot = strrchr(name, '.'); + char* slash = strrchr(name, '/'); + + if ((dot != NULL) && ((slash == NULL) || (slash < dot) )) + return dot + 1; + else + return ""; +} +*/ diff --git a/code/renderer_vulkan/ref_import.h b/code/renderer_vulkan/ref_import.h new file mode 100644 index 0000000000..3258f9ae00 --- /dev/null +++ b/code/renderer_vulkan/ref_import.h @@ -0,0 +1,13 @@ +#ifndef REF_IMPORT_H_ +#define REF_IMPORT_H_ + +#include "../qcommon/q_shared.h" +#include "../renderercommon/tr_public.h" + +extern refimport_t ri; + +char* R_SkipPath (char *pathname); +void R_StripExtension( const char *in, char *out, int destsize ); +// const char* R_GetExtension( const char *name ); + +#endif diff --git a/code/renderer_vulkan/render_export.c b/code/renderer_vulkan/render_export.c new file mode 100644 index 0000000000..15386e3925 --- /dev/null +++ b/code/renderer_vulkan/render_export.c @@ -0,0 +1,65 @@ +/* +=========================================================================== +Copyright (C) 1999-2005 Id Software, Inc. + +This file is part of Quake III Arena source code. + +Quake III Arena source code is free software; you can redistribute it +and/or modify it under the terms of the GNU General Public License as +published by the Free Software Foundation; either version 2 of the License, +or (at your option) any later version. + +Quake III Arena source code is distributed in the hope that it will be +useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with Quake III Arena source code; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +=========================================================================== +*/ +#include "render_export.h" + +refexport_t* R_Export(void) +{ + static refexport_t re; + // memset(&re, 0, sizeof(re)); + + // the RE_ functions are Renderer Entry points + re.Shutdown = RE_Shutdown; + re.BeginRegistration = RE_BeginRegistration; + re.RegisterModel = RE_RegisterModel; + re.RegisterSkin = RE_RegisterSkin; + re.RegisterShader = RE_RegisterShader; + re.RegisterShaderNoMip = RE_RegisterShaderNoMip; + re.LoadWorld = RE_LoadWorldMap; + re.SetWorldVisData = RE_SetWorldVisData; + re.EndRegistration = RE_EndRegistration; + re.ClearScene = RE_ClearScene; + re.AddRefEntityToScene = RE_AddRefEntityToScene; + re.AddPolyToScene = RE_AddPolyToScene; + re.LightForPoint = RE_LightForPoint; + re.AddLightToScene = RE_AddLightToScene; + re.AddAdditiveLightToScene = RE_AddAdditiveLightToScene; + + re.RenderScene = RE_RenderScene; + re.SetColor = RE_SetColor; + re.DrawStretchPic = RE_StretchPic; + re.DrawStretchRaw = RE_StretchRaw; + re.UploadCinematic = RE_UploadCinematic; + + re.BeginFrame = RE_BeginFrame; + re.EndFrame = RE_EndFrame; + re.MarkFragments = RE_MarkFragments; + re.LerpTag = RE_LerpTag; + re.ModelBounds = RE_ModelBounds; + re.RegisterFont = RE_RegisterFont; + re.RemapShader = RE_RemapShader; + re.GetEntityToken = RE_GetEntityToken; + re.inPVS = RE_inPVS; + + re.TakeVideoFrame = RE_TakeVideoFrame; + + return &re; +} diff --git a/code/renderer_vulkan/render_export.h b/code/renderer_vulkan/render_export.h new file mode 100644 index 0000000000..a4b82a803e --- /dev/null +++ b/code/renderer_vulkan/render_export.h @@ -0,0 +1,57 @@ +#ifndef RENDER_EXPORT_H_ +#define RENDER_EXPORT_H_ + +#include "../qcommon/q_shared.h" +#include "../renderercommon/tr_types.h" +#include "../renderercommon/tr_public.h" + + +// Total 30 exported function + +void RE_Shutdown( qboolean destroyWindow ); +void RE_BeginRegistration( glconfig_t *glconfig ); + +qhandle_t RE_RegisterModel( const char *name ); +qhandle_t RE_RegisterSkin( const char *name ); +qhandle_t RE_RegisterShader( const char *name ); +qhandle_t RE_RegisterShaderNoMip( const char *name ); + +void RE_LoadWorldMap( const char *mapname ); +void RE_SetWorldVisData( const byte *vis ); +void RE_EndRegistration( void ); +void RE_ClearScene( void ); +void RE_AddRefEntityToScene( const refEntity_t *ent ); +void RE_AddPolyToScene( qhandle_t hShader , int numVerts, const polyVert_t *verts, int num ); + +int RE_LightForPoint( vec3_t point, vec3_t ambientLight, vec3_t directedLight, vec3_t lightDir ); +void RE_AddLightToScene( const vec3_t org, float intensity, float r, float g, float b ); +void RE_AddAdditiveLightToScene( const vec3_t org, float intensity, float r, float g, float b ); +void RE_RenderScene( const refdef_t *fd ); +void RE_SetColor( const float *rgba ); + +void RE_StretchPic ( float x, float y, float w, float h, float s1, float t1, float s2, float t2, qhandle_t hShader ); +void RE_StretchRaw (int x, int y, int w, int h, int cols, int rows, const unsigned char *data, int client, qboolean dirty); +void RE_UploadCinematic (int w, int h, int cols, int rows, const byte *data, int client, qboolean dirty); + +void RE_BeginFrame( stereoFrame_t stereoFrame ); +void RE_EndFrame( int *frontEndMsec, int 
*backEndMsec ); + + +// MARKERS, POLYGON PROJECTION ON WORLD POLYGONS +int RE_MarkFragments( int numPoints, const vec3_t *points, const vec3_t projection, + int maxPoints, vec3_t pointBuffer, int maxFragments, markFragment_t *fragmentBuffer ); + +int RE_LerpTag( orientation_t *tag, qhandle_t handle, int startFrame, int endFrame, + float frac, const char *tagName ); + +void RE_ModelBounds( qhandle_t handle, vec3_t mins, vec3_t maxs ); +void RE_RegisterFont(const char *fontName, int pointSize, fontInfo_t *font); + +void RE_RemapShader(const char *oldShader, const char *newShader, const char *timeOffset); + +qboolean RE_GetEntityToken( char *buffer, int size ); + +qboolean RE_inPVS( const vec3_t p1, const vec3_t p2 ); +void RE_TakeVideoFrame( int width, int height, unsigned char *captureBuffer, unsigned char *encodeBuffer, qboolean motionJpeg ); + +#endif diff --git a/code/renderer_vulkan/shaders/.gitignore b/code/renderer_vulkan/shaders/.gitignore new file mode 100644 index 0000000000..8335721d59 --- /dev/null +++ b/code/renderer_vulkan/shaders/.gitignore @@ -0,0 +1,3 @@ +bintoc +*.vspv +*.fspv diff --git a/code/renderer_vulkan/shaders/Compiled/multi_texture_clipping_plane_vert.c b/code/renderer_vulkan/shaders/Compiled/multi_texture_clipping_plane_vert.c new file mode 100644 index 0000000000..df8e63b997 --- /dev/null +++ b/code/renderer_vulkan/shaders/Compiled/multi_texture_clipping_plane_vert.c @@ -0,0 +1,208 @@ +unsigned char multi_texture_clipping_plane_vert_spv[] = { +0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x0B, 0x00, +0x08, 0x00, 0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x11, 0x00, +0x02, 0x00, 0x20, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x06, 0x00, +0x01, 0x00, 0x00, 0x00, 0x47, 0x4C, 0x53, 0x4C, 0x2E, 0x73, +0x74, 0x64, 0x2E, 0x34, 0x35, 0x30, 0x00, 0x00, 0x00, 0x00, +0x0E, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, +0x00, 0x00, 0x0F, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x00, 0x00, +0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, +0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, +0x38, 0x00, 0x00, 0x00, 0x3A, 0x00, 0x00, 0x00, 0x3E, 0x00, +0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x42, 0x00, 0x00, 0x00, +0x43, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00, 0x02, 0x00, +0x00, 0x00, 0xC2, 0x01, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00, +0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, +0x00, 0x00, 0x05, 0x00, 0x03, 0x00, 0x09, 0x00, 0x00, 0x00, +0x70, 0x00, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x0C, 0x00, +0x00, 0x00, 0x69, 0x6E, 0x5F, 0x70, 0x6F, 0x73, 0x69, 0x74, +0x69, 0x6F, 0x6E, 0x00, 0x05, 0x00, 0x06, 0x00, 0x16, 0x00, +0x00, 0x00, 0x67, 0x6C, 0x5F, 0x50, 0x65, 0x72, 0x56, 0x65, +0x72, 0x74, 0x65, 0x78, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, +0x06, 0x00, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x67, 0x6C, 0x5F, 0x50, 0x6F, 0x73, 0x69, 0x74, 0x69, 0x6F, +0x6E, 0x00, 0x06, 0x00, 0x07, 0x00, 0x16, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x43, 0x6C, 0x69, +0x70, 0x44, 0x69, 0x73, 0x74, 0x61, 0x6E, 0x63, 0x65, 0x00, +0x05, 0x00, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x1D, 0x00, 0x00, 0x00, +0x54, 0x72, 0x61, 0x6E, 0x73, 0x66, 0x6F, 0x72, 0x6D, 0x00, +0x00, 0x00, 0x06, 0x00, 0x08, 0x00, 0x1D, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x63, 0x6C, 0x69, 0x70, 0x5F, 0x73, +0x70, 0x61, 0x63, 0x65, 0x5F, 0x78, 0x66, 0x6F, 0x72, 0x6D, +0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x07, 0x00, 0x1D, 0x00, +0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x65, 
0x79, 0x65, 0x5F, +0x73, 0x70, 0x61, 0x63, 0x65, 0x5F, 0x78, 0x66, 0x6F, 0x72, +0x6D, 0x00, 0x06, 0x00, 0x07, 0x00, 0x1D, 0x00, 0x00, 0x00, +0x02, 0x00, 0x00, 0x00, 0x63, 0x6C, 0x69, 0x70, 0x70, 0x69, +0x6E, 0x67, 0x5F, 0x70, 0x6C, 0x61, 0x6E, 0x65, 0x00, 0x00, +0x05, 0x00, 0x03, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x38, 0x00, 0x00, 0x00, +0x66, 0x72, 0x61, 0x67, 0x5F, 0x63, 0x6F, 0x6C, 0x6F, 0x72, +0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x3A, 0x00, 0x00, 0x00, +0x69, 0x6E, 0x5F, 0x63, 0x6F, 0x6C, 0x6F, 0x72, 0x00, 0x00, +0x00, 0x00, 0x05, 0x00, 0x06, 0x00, 0x3E, 0x00, 0x00, 0x00, +0x66, 0x72, 0x61, 0x67, 0x5F, 0x74, 0x65, 0x78, 0x5F, 0x63, +0x6F, 0x6F, 0x72, 0x64, 0x30, 0x00, 0x05, 0x00, 0x06, 0x00, +0x40, 0x00, 0x00, 0x00, 0x69, 0x6E, 0x5F, 0x74, 0x65, 0x78, +0x5F, 0x63, 0x6F, 0x6F, 0x72, 0x64, 0x30, 0x00, 0x00, 0x00, +0x05, 0x00, 0x06, 0x00, 0x42, 0x00, 0x00, 0x00, 0x66, 0x72, +0x61, 0x67, 0x5F, 0x74, 0x65, 0x78, 0x5F, 0x63, 0x6F, 0x6F, +0x72, 0x64, 0x31, 0x00, 0x05, 0x00, 0x06, 0x00, 0x43, 0x00, +0x00, 0x00, 0x69, 0x6E, 0x5F, 0x74, 0x65, 0x78, 0x5F, 0x63, +0x6F, 0x6F, 0x72, 0x64, 0x31, 0x00, 0x00, 0x00, 0x47, 0x00, +0x04, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x16, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x16, 0x00, +0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, +0x03, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00, 0x16, 0x00, +0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x48, 0x00, 0x04, 0x00, +0x1D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, +0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x1D, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x1D, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x10, 0x00, +0x00, 0x00, 0x48, 0x00, 0x04, 0x00, 0x1D, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x48, 0x00, +0x05, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, +0x23, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x48, 0x00, +0x05, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, +0x07, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x48, 0x00, +0x05, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, +0x23, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00, 0x00, 0x47, 0x00, +0x03, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, +0x47, 0x00, 0x04, 0x00, 0x38, 0x00, 0x00, 0x00, 0x1E, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, +0x3A, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x01, 0x00, +0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x3E, 0x00, 0x00, 0x00, +0x1E, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x47, 0x00, +0x04, 0x00, 0x40, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, +0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x42, 0x00, +0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, +0x47, 0x00, 0x04, 0x00, 0x43, 0x00, 0x00, 0x00, 0x1E, 0x00, +0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x13, 0x00, 0x02, 0x00, +0x02, 0x00, 0x00, 0x00, 0x21, 0x00, 0x03, 0x00, 0x03, 0x00, +0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x16, 0x00, 0x03, 0x00, +0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x17, 0x00, +0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, +0x04, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x08, 0x00, +0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, +0x17, 0x00, 0x04, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x06, 0x00, +0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 
0x00, +0x0B, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0A, 0x00, +0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00, +0x0C, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2B, 0x00, +0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00, +0x00, 0x00, 0x80, 0x3F, 0x15, 0x00, 0x04, 0x00, 0x13, 0x00, +0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x2B, 0x00, 0x04, 0x00, 0x13, 0x00, 0x00, 0x00, 0x14, 0x00, +0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x04, 0x00, +0x15, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x14, 0x00, +0x00, 0x00, 0x1E, 0x00, 0x04, 0x00, 0x16, 0x00, 0x00, 0x00, +0x07, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x20, 0x00, +0x04, 0x00, 0x17, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, +0x16, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x17, 0x00, +0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, +0x15, 0x00, 0x04, 0x00, 0x19, 0x00, 0x00, 0x00, 0x20, 0x00, +0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, +0x19, 0x00, 0x00, 0x00, 0x1A, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x18, 0x00, 0x04, 0x00, 0x1B, 0x00, 0x00, 0x00, +0x07, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x18, 0x00, +0x04, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, +0x03, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x05, 0x00, 0x1D, 0x00, +0x00, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, +0x07, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x1E, 0x00, +0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x1D, 0x00, 0x00, 0x00, +0x3B, 0x00, 0x04, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x1F, 0x00, +0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, +0x20, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x1B, 0x00, +0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x25, 0x00, 0x00, 0x00, +0x03, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x2B, 0x00, +0x04, 0x00, 0x19, 0x00, 0x00, 0x00, 0x27, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x19, 0x00, +0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, +0x20, 0x00, 0x04, 0x00, 0x29, 0x00, 0x00, 0x00, 0x09, 0x00, +0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, +0x2D, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x1C, 0x00, +0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x36, 0x00, 0x00, 0x00, +0x03, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x3B, 0x00, +0x04, 0x00, 0x25, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, +0x03, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x39, 0x00, +0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, +0x3B, 0x00, 0x04, 0x00, 0x39, 0x00, 0x00, 0x00, 0x3A, 0x00, +0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, +0x3C, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x02, 0x00, +0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x3D, 0x00, 0x00, 0x00, +0x03, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x3B, 0x00, +0x04, 0x00, 0x3D, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x00, 0x00, +0x03, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x3F, 0x00, +0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x00, +0x3B, 0x00, 0x04, 0x00, 0x3F, 0x00, 0x00, 0x00, 0x40, 0x00, +0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, +0x3D, 0x00, 0x00, 0x00, 0x42, 0x00, 0x00, 0x00, 0x03, 0x00, +0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x3F, 0x00, 0x00, 0x00, +0x43, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x36, 0x00, +0x05, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xF8, 0x00, +0x02, 0x00, 0x05, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, +0x08, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x07, 0x00, +0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x0A, 0x00, 0x00, 0x00, +0x0D, 
0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x51, 0x00, +0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, +0x0D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x51, 0x00, +0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, +0x0D, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x51, 0x00, +0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, +0x0D, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x50, 0x00, +0x07, 0x00, 0x07, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, +0x0F, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x11, 0x00, +0x00, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, +0x09, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x41, 0x00, +0x05, 0x00, 0x20, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, +0x1F, 0x00, 0x00, 0x00, 0x1A, 0x00, 0x00, 0x00, 0x3D, 0x00, +0x04, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, +0x21, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x07, 0x00, +0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, +0x91, 0x00, 0x05, 0x00, 0x07, 0x00, 0x00, 0x00, 0x24, 0x00, +0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, +0x41, 0x00, 0x05, 0x00, 0x25, 0x00, 0x00, 0x00, 0x26, 0x00, +0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x1A, 0x00, 0x00, 0x00, +0x3E, 0x00, 0x03, 0x00, 0x26, 0x00, 0x00, 0x00, 0x24, 0x00, +0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x29, 0x00, 0x00, 0x00, +0x2A, 0x00, 0x00, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x28, 0x00, +0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, +0x2B, 0x00, 0x00, 0x00, 0x2A, 0x00, 0x00, 0x00, 0x3D, 0x00, +0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x00, 0x00, +0x09, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x2D, 0x00, +0x00, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x1F, 0x00, 0x00, 0x00, +0x27, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x1C, 0x00, +0x00, 0x00, 0x2F, 0x00, 0x00, 0x00, 0x2E, 0x00, 0x00, 0x00, +0x90, 0x00, 0x05, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x30, 0x00, +0x00, 0x00, 0x2C, 0x00, 0x00, 0x00, 0x2F, 0x00, 0x00, 0x00, +0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x31, 0x00, +0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x32, 0x00, +0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, +0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x33, 0x00, +0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, +0x50, 0x00, 0x07, 0x00, 0x07, 0x00, 0x00, 0x00, 0x34, 0x00, +0x00, 0x00, 0x31, 0x00, 0x00, 0x00, 0x32, 0x00, 0x00, 0x00, +0x33, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x94, 0x00, +0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x00, +0x2B, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x41, 0x00, +0x06, 0x00, 0x36, 0x00, 0x00, 0x00, 0x37, 0x00, 0x00, 0x00, +0x18, 0x00, 0x00, 0x00, 0x27, 0x00, 0x00, 0x00, 0x1A, 0x00, +0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x37, 0x00, 0x00, 0x00, +0x35, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x07, 0x00, +0x00, 0x00, 0x3B, 0x00, 0x00, 0x00, 0x3A, 0x00, 0x00, 0x00, +0x3E, 0x00, 0x03, 0x00, 0x38, 0x00, 0x00, 0x00, 0x3B, 0x00, +0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x3C, 0x00, 0x00, 0x00, +0x41, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x3E, 0x00, +0x03, 0x00, 0x3E, 0x00, 0x00, 0x00, 0x41, 0x00, 0x00, 0x00, +0x3D, 0x00, 0x04, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x44, 0x00, +0x00, 0x00, 0x43, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, +0x42, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0xFD, 0x00, +0x01, 0x00, 0x38, 0x00, 0x01, 0x00, }; +int multi_texture_clipping_plane_vert_spv_size = 2056; diff --git a/code/renderer_vulkan/shaders/Compiled/multi_texture_frag.c 
b/code/renderer_vulkan/shaders/Compiled/multi_texture_frag.c new file mode 100644 index 0000000000..d665eec8b7 --- /dev/null +++ b/code/renderer_vulkan/shaders/Compiled/multi_texture_frag.c @@ -0,0 +1,263 @@ +unsigned char multi_texture_frag_spv[] = { +0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x0B, 0x00, +0x08, 0x00, 0x6D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0B, 0x00, +0x06, 0x00, 0x01, 0x00, 0x00, 0x00, 0x47, 0x4C, 0x53, 0x4C, +0x2E, 0x73, 0x74, 0x64, 0x2E, 0x34, 0x35, 0x30, 0x00, 0x00, +0x00, 0x00, 0x0E, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x0A, 0x00, 0x04, 0x00, +0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E, +0x00, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x1B, 0x00, +0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x00, 0x00, +0x33, 0x00, 0x00, 0x00, 0x10, 0x00, 0x03, 0x00, 0x04, 0x00, +0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00, +0x02, 0x00, 0x00, 0x00, 0xC2, 0x01, 0x00, 0x00, 0x05, 0x00, +0x04, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E, +0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x08, 0x00, +0x00, 0x00, 0x63, 0x6C, 0x69, 0x70, 0x5F, 0x70, 0x6C, 0x61, +0x6E, 0x65, 0x00, 0x00, 0x05, 0x00, 0x06, 0x00, 0x0F, 0x00, +0x00, 0x00, 0x66, 0x72, 0x61, 0x67, 0x5F, 0x63, 0x6C, 0x69, +0x70, 0x5F, 0x64, 0x69, 0x73, 0x74, 0x00, 0x00, 0x05, 0x00, +0x04, 0x00, 0x19, 0x00, 0x00, 0x00, 0x63, 0x6F, 0x6C, 0x6F, +0x72, 0x5F, 0x61, 0x00, 0x05, 0x00, 0x05, 0x00, 0x1B, 0x00, +0x00, 0x00, 0x66, 0x72, 0x61, 0x67, 0x5F, 0x63, 0x6F, 0x6C, +0x6F, 0x72, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x20, 0x00, +0x00, 0x00, 0x74, 0x65, 0x78, 0x74, 0x75, 0x72, 0x65, 0x30, +0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x06, 0x00, 0x24, 0x00, +0x00, 0x00, 0x66, 0x72, 0x61, 0x67, 0x5F, 0x74, 0x65, 0x78, +0x5F, 0x63, 0x6F, 0x6F, 0x72, 0x64, 0x30, 0x00, 0x05, 0x00, +0x04, 0x00, 0x28, 0x00, 0x00, 0x00, 0x63, 0x6F, 0x6C, 0x6F, +0x72, 0x5F, 0x62, 0x00, 0x05, 0x00, 0x05, 0x00, 0x29, 0x00, +0x00, 0x00, 0x74, 0x65, 0x78, 0x74, 0x75, 0x72, 0x65, 0x31, +0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x06, 0x00, 0x2B, 0x00, +0x00, 0x00, 0x66, 0x72, 0x61, 0x67, 0x5F, 0x74, 0x65, 0x78, +0x5F, 0x63, 0x6F, 0x6F, 0x72, 0x64, 0x31, 0x00, 0x05, 0x00, +0x05, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x63, 0x6F, 0x6C, 0x6F, +0x72, 0x5F, 0x6F, 0x70, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, +0x05, 0x00, 0x33, 0x00, 0x00, 0x00, 0x6F, 0x75, 0x74, 0x5F, +0x63, 0x6F, 0x6C, 0x6F, 0x72, 0x00, 0x00, 0x00, 0x05, 0x00, +0x06, 0x00, 0x4A, 0x00, 0x00, 0x00, 0x61, 0x6C, 0x70, 0x68, +0x61, 0x5F, 0x74, 0x65, 0x73, 0x74, 0x5F, 0x66, 0x75, 0x6E, +0x63, 0x00, 0x47, 0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, +0x04, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, +0x03, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x1B, 0x00, +0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x47, 0x00, 0x04, 0x00, 0x20, 0x00, 0x00, 0x00, 0x22, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, +0x20, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x24, 0x00, 0x00, 0x00, +0x1E, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x47, 0x00, +0x04, 0x00, 0x29, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x29, 0x00, +0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x47, 0x00, 0x04, 0x00, 0x2B, 0x00, 0x00, 0x00, 0x1E, 0x00, +0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, +0x2E, 
0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, +0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x33, 0x00, 0x00, 0x00, +0x1E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, +0x04, 0x00, 0x4A, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x13, 0x00, 0x02, 0x00, 0x02, 0x00, +0x00, 0x00, 0x21, 0x00, 0x03, 0x00, 0x03, 0x00, 0x00, 0x00, +0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x02, 0x00, 0x06, 0x00, +0x00, 0x00, 0x15, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, +0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x32, 0x00, +0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x07, 0x00, +0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x34, 0x00, 0x06, 0x00, 0x06, 0x00, 0x00, 0x00, 0x0A, 0x00, +0x00, 0x00, 0xAB, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, +0x09, 0x00, 0x00, 0x00, 0x16, 0x00, 0x03, 0x00, 0x0D, 0x00, +0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, +0x0E, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0D, 0x00, +0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x0E, 0x00, 0x00, 0x00, +0x0F, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2B, 0x00, +0x04, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x17, 0x00, +0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, +0x20, 0x00, 0x04, 0x00, 0x18, 0x00, 0x00, 0x00, 0x07, 0x00, +0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, +0x1A, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x17, 0x00, +0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x1A, 0x00, 0x00, 0x00, +0x1B, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x19, 0x00, +0x09, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x03, 0x00, 0x1E, 0x00, +0x00, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, +0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1E, 0x00, +0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x1F, 0x00, 0x00, 0x00, +0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x00, +0x04, 0x00, 0x22, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, +0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x23, 0x00, +0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, +0x3B, 0x00, 0x04, 0x00, 0x23, 0x00, 0x00, 0x00, 0x24, 0x00, +0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, +0x1F, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x23, 0x00, 0x00, 0x00, +0x2B, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x32, 0x00, +0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x2E, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x06, 0x00, 0x06, 0x00, +0x00, 0x00, 0x2F, 0x00, 0x00, 0x00, 0xAB, 0x00, 0x00, 0x00, +0x2E, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x20, 0x00, +0x04, 0x00, 0x32, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, +0x17, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x32, 0x00, +0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, +0x17, 0x00, 0x04, 0x00, 0x34, 0x00, 0x00, 0x00, 0x0D, 0x00, +0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00, +0x3A, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x3A, 0x00, 0x00, 0x00, +0x3B, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x20, 0x00, +0x04, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, +0x0D, 0x00, 0x00, 0x00, 0x32, 0x00, 0x04, 0x00, 0x07, 0x00, +0x00, 0x00, 0x4A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x2B, 0x00, 0x04, 
0x00, 0x07, 0x00, 0x00, 0x00, 0x4B, 0x00, +0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x34, 0x00, 0x06, 0x00, +0x06, 0x00, 0x00, 0x00, 0x4C, 0x00, 0x00, 0x00, 0xAA, 0x00, +0x00, 0x00, 0x4A, 0x00, 0x00, 0x00, 0x4B, 0x00, 0x00, 0x00, +0x20, 0x00, 0x04, 0x00, 0x4F, 0x00, 0x00, 0x00, 0x03, 0x00, +0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, +0x07, 0x00, 0x00, 0x00, 0x57, 0x00, 0x00, 0x00, 0x02, 0x00, +0x00, 0x00, 0x34, 0x00, 0x06, 0x00, 0x06, 0x00, 0x00, 0x00, +0x58, 0x00, 0x00, 0x00, 0xAA, 0x00, 0x00, 0x00, 0x4A, 0x00, +0x00, 0x00, 0x57, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, +0x0D, 0x00, 0x00, 0x00, 0x5D, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x3F, 0x2B, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, +0x63, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x34, 0x00, +0x06, 0x00, 0x06, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, +0xAA, 0x00, 0x00, 0x00, 0x4A, 0x00, 0x00, 0x00, 0x63, 0x00, +0x00, 0x00, 0x36, 0x00, 0x05, 0x00, 0x02, 0x00, 0x00, 0x00, +0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, +0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x05, 0x00, 0x00, 0x00, +0x3B, 0x00, 0x04, 0x00, 0x18, 0x00, 0x00, 0x00, 0x19, 0x00, +0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, +0x18, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x07, 0x00, +0x00, 0x00, 0xF7, 0x00, 0x03, 0x00, 0x0C, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0xFA, 0x00, 0x04, 0x00, 0x0A, 0x00, +0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, +0xF8, 0x00, 0x02, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x3D, 0x00, +0x04, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, +0x0F, 0x00, 0x00, 0x00, 0xB8, 0x00, 0x05, 0x00, 0x06, 0x00, +0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, +0x11, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00, 0x0C, 0x00, +0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x0C, 0x00, 0x00, 0x00, +0xF5, 0x00, 0x07, 0x00, 0x06, 0x00, 0x00, 0x00, 0x13, 0x00, +0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, +0x12, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0xF7, 0x00, +0x03, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0xFA, 0x00, 0x04, 0x00, 0x13, 0x00, 0x00, 0x00, 0x14, 0x00, +0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, +0x14, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x01, 0x00, 0xF8, 0x00, +0x02, 0x00, 0x15, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, +0x17, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x1B, 0x00, +0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x1E, 0x00, 0x00, 0x00, +0x21, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x3D, 0x00, +0x04, 0x00, 0x22, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00, +0x24, 0x00, 0x00, 0x00, 0x57, 0x00, 0x05, 0x00, 0x17, 0x00, +0x00, 0x00, 0x26, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, +0x25, 0x00, 0x00, 0x00, 0x85, 0x00, 0x05, 0x00, 0x17, 0x00, +0x00, 0x00, 0x27, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, +0x26, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x19, 0x00, +0x00, 0x00, 0x27, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, +0x1E, 0x00, 0x00, 0x00, 0x2A, 0x00, 0x00, 0x00, 0x29, 0x00, +0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x22, 0x00, 0x00, 0x00, +0x2C, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x00, 0x00, 0x57, 0x00, +0x05, 0x00, 0x17, 0x00, 0x00, 0x00, 0x2D, 0x00, 0x00, 0x00, +0x2A, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x00, 0x00, 0x3E, 0x00, +0x03, 0x00, 0x28, 0x00, 0x00, 0x00, 0x2D, 0x00, 0x00, 0x00, +0xF7, 0x00, 0x03, 0x00, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xFA, 0x00, 0x04, 0x00, 0x2F, 0x00, 0x00, 0x00, +0x30, 0x00, 0x00, 0x00, 0x46, 0x00, 0x00, 0x00, 0xF8, 0x00, +0x02, 0x00, 0x30, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, +0x17, 0x00, 0x00, 0x00, 0x35, 
0x00, 0x00, 0x00, 0x19, 0x00, +0x00, 0x00, 0x4F, 0x00, 0x08, 0x00, 0x34, 0x00, 0x00, 0x00, +0x36, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x00, 0x35, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, +0x02, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x17, 0x00, +0x00, 0x00, 0x37, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, +0x4F, 0x00, 0x08, 0x00, 0x34, 0x00, 0x00, 0x00, 0x38, 0x00, +0x00, 0x00, 0x37, 0x00, 0x00, 0x00, 0x37, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, +0x00, 0x00, 0x81, 0x00, 0x05, 0x00, 0x34, 0x00, 0x00, 0x00, +0x39, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, 0x38, 0x00, +0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x3C, 0x00, 0x00, 0x00, +0x3D, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x3B, 0x00, +0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x0D, 0x00, 0x00, 0x00, +0x3E, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x00, 0x00, 0x41, 0x00, +0x05, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x3F, 0x00, 0x00, 0x00, +0x28, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x00, 0x00, 0x3D, 0x00, +0x04, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, +0x3F, 0x00, 0x00, 0x00, 0x85, 0x00, 0x05, 0x00, 0x0D, 0x00, +0x00, 0x00, 0x41, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x00, 0x00, +0x40, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x0D, 0x00, +0x00, 0x00, 0x42, 0x00, 0x00, 0x00, 0x39, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x0D, 0x00, +0x00, 0x00, 0x43, 0x00, 0x00, 0x00, 0x39, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x0D, 0x00, +0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x39, 0x00, 0x00, 0x00, +0x02, 0x00, 0x00, 0x00, 0x50, 0x00, 0x07, 0x00, 0x17, 0x00, +0x00, 0x00, 0x45, 0x00, 0x00, 0x00, 0x42, 0x00, 0x00, 0x00, +0x43, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x41, 0x00, +0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x33, 0x00, 0x00, 0x00, +0x45, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00, 0x31, 0x00, +0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x46, 0x00, 0x00, 0x00, +0x3D, 0x00, 0x04, 0x00, 0x17, 0x00, 0x00, 0x00, 0x47, 0x00, +0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, +0x17, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, 0x28, 0x00, +0x00, 0x00, 0x85, 0x00, 0x05, 0x00, 0x17, 0x00, 0x00, 0x00, +0x49, 0x00, 0x00, 0x00, 0x47, 0x00, 0x00, 0x00, 0x48, 0x00, +0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x33, 0x00, 0x00, 0x00, +0x49, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00, 0x31, 0x00, +0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x31, 0x00, 0x00, 0x00, +0xF7, 0x00, 0x03, 0x00, 0x4E, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xFA, 0x00, 0x04, 0x00, 0x4C, 0x00, 0x00, 0x00, +0x4D, 0x00, 0x00, 0x00, 0x56, 0x00, 0x00, 0x00, 0xF8, 0x00, +0x02, 0x00, 0x4D, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, +0x4F, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, 0x33, 0x00, +0x00, 0x00, 0x3B, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, +0x0D, 0x00, 0x00, 0x00, 0x51, 0x00, 0x00, 0x00, 0x50, 0x00, +0x00, 0x00, 0xB4, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, +0x52, 0x00, 0x00, 0x00, 0x51, 0x00, 0x00, 0x00, 0x11, 0x00, +0x00, 0x00, 0xF7, 0x00, 0x03, 0x00, 0x54, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0xFA, 0x00, 0x04, 0x00, 0x52, 0x00, +0x00, 0x00, 0x53, 0x00, 0x00, 0x00, 0x54, 0x00, 0x00, 0x00, +0xF8, 0x00, 0x02, 0x00, 0x53, 0x00, 0x00, 0x00, 0xFC, 0x00, +0x01, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x54, 0x00, 0x00, 0x00, +0xF9, 0x00, 0x02, 0x00, 0x4E, 0x00, 0x00, 0x00, 0xF8, 0x00, +0x02, 0x00, 0x56, 0x00, 0x00, 0x00, 0xF7, 0x00, 0x03, 0x00, +0x5A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFA, 0x00, +0x04, 0x00, 0x58, 0x00, 0x00, 0x00, 0x59, 0x00, 0x00, 0x00, +0x62, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 
0x00, 0x59, 0x00, +0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x4F, 0x00, 0x00, 0x00, +0x5B, 0x00, 0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x3B, 0x00, +0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x0D, 0x00, 0x00, 0x00, +0x5C, 0x00, 0x00, 0x00, 0x5B, 0x00, 0x00, 0x00, 0xBE, 0x00, +0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x5E, 0x00, 0x00, 0x00, +0x5C, 0x00, 0x00, 0x00, 0x5D, 0x00, 0x00, 0x00, 0xF7, 0x00, +0x03, 0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0xFA, 0x00, 0x04, 0x00, 0x5E, 0x00, 0x00, 0x00, 0x5F, 0x00, +0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, +0x5F, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x01, 0x00, 0xF8, 0x00, +0x02, 0x00, 0x60, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00, +0x5A, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x62, 0x00, +0x00, 0x00, 0xF7, 0x00, 0x03, 0x00, 0x66, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0xFA, 0x00, 0x04, 0x00, 0x64, 0x00, +0x00, 0x00, 0x65, 0x00, 0x00, 0x00, 0x66, 0x00, 0x00, 0x00, +0xF8, 0x00, 0x02, 0x00, 0x65, 0x00, 0x00, 0x00, 0x41, 0x00, +0x05, 0x00, 0x4F, 0x00, 0x00, 0x00, 0x67, 0x00, 0x00, 0x00, +0x33, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x00, 0x00, 0x3D, 0x00, +0x04, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x68, 0x00, 0x00, 0x00, +0x67, 0x00, 0x00, 0x00, 0xB8, 0x00, 0x05, 0x00, 0x06, 0x00, +0x00, 0x00, 0x69, 0x00, 0x00, 0x00, 0x68, 0x00, 0x00, 0x00, +0x5D, 0x00, 0x00, 0x00, 0xF7, 0x00, 0x03, 0x00, 0x6B, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFA, 0x00, 0x04, 0x00, +0x69, 0x00, 0x00, 0x00, 0x6A, 0x00, 0x00, 0x00, 0x6B, 0x00, +0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x6A, 0x00, 0x00, 0x00, +0xFC, 0x00, 0x01, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x6B, 0x00, +0x00, 0x00, 0xF9, 0x00, 0x02, 0x00, 0x66, 0x00, 0x00, 0x00, +0xF8, 0x00, 0x02, 0x00, 0x66, 0x00, 0x00, 0x00, 0xF9, 0x00, +0x02, 0x00, 0x5A, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, +0x5A, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00, 0x4E, 0x00, +0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x4E, 0x00, 0x00, 0x00, +0xFD, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00, }; +int multi_texture_frag_spv_size = 2608; diff --git a/code/renderer_vulkan/shaders/Compiled/multi_texture_vert.c b/code/renderer_vulkan/shaders/Compiled/multi_texture_vert.c new file mode 100644 index 0000000000..d85193899b --- /dev/null +++ b/code/renderer_vulkan/shaders/Compiled/multi_texture_vert.c @@ -0,0 +1,215 @@ +unsigned char multi_texture_vert_spv[] = { +0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x0B, 0x00, +0x08, 0x00, 0x47, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0B, 0x00, +0x06, 0x00, 0x01, 0x00, 0x00, 0x00, 0x47, 0x4C, 0x53, 0x4C, +0x2E, 0x73, 0x74, 0x64, 0x2E, 0x34, 0x35, 0x30, 0x00, 0x00, +0x00, 0x00, 0x0E, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x0E, 0x00, 0x00, 0x00, +0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E, +0x00, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x15, 0x00, +0x00, 0x00, 0x2A, 0x00, 0x00, 0x00, 0x3A, 0x00, 0x00, 0x00, +0x3C, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x42, 0x00, +0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x45, 0x00, 0x00, 0x00, +0x03, 0x00, 0x03, 0x00, 0x02, 0x00, 0x00, 0x00, 0xC2, 0x01, +0x00, 0x00, 0x05, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, +0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, +0x03, 0x00, 0x09, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00, 0x00, +0x05, 0x00, 0x05, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x69, 0x6E, +0x5F, 0x70, 0x6F, 0x73, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x00, +0x05, 0x00, 0x06, 0x00, 0x13, 0x00, 0x00, 0x00, 0x67, 0x6C, +0x5F, 0x50, 0x65, 0x72, 0x56, 0x65, 0x72, 0x74, 0x65, 0x78, +0x00, 
0x00, 0x00, 0x00, 0x06, 0x00, 0x06, 0x00, 0x13, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x50, +0x6F, 0x73, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x00, 0x05, 0x00, +0x03, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x05, 0x00, 0x05, 0x00, 0x1A, 0x00, 0x00, 0x00, 0x54, 0x72, +0x61, 0x6E, 0x73, 0x66, 0x6F, 0x72, 0x6D, 0x00, 0x00, 0x00, +0x06, 0x00, 0x08, 0x00, 0x1A, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x63, 0x6C, 0x69, 0x70, 0x5F, 0x73, 0x70, 0x61, +0x63, 0x65, 0x5F, 0x78, 0x66, 0x6F, 0x72, 0x6D, 0x00, 0x00, +0x00, 0x00, 0x06, 0x00, 0x07, 0x00, 0x1A, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x65, 0x79, 0x65, 0x5F, 0x73, 0x70, +0x61, 0x63, 0x65, 0x5F, 0x78, 0x66, 0x6F, 0x72, 0x6D, 0x00, +0x06, 0x00, 0x07, 0x00, 0x1A, 0x00, 0x00, 0x00, 0x02, 0x00, +0x00, 0x00, 0x63, 0x6C, 0x69, 0x70, 0x70, 0x69, 0x6E, 0x67, +0x5F, 0x70, 0x6C, 0x61, 0x6E, 0x65, 0x00, 0x00, 0x05, 0x00, +0x03, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x05, 0x00, 0x05, 0x00, 0x24, 0x00, 0x00, 0x00, 0x63, 0x6C, +0x69, 0x70, 0x5F, 0x70, 0x6C, 0x61, 0x6E, 0x65, 0x00, 0x00, +0x05, 0x00, 0x06, 0x00, 0x2A, 0x00, 0x00, 0x00, 0x66, 0x72, +0x61, 0x67, 0x5F, 0x63, 0x6C, 0x69, 0x70, 0x5F, 0x64, 0x69, +0x73, 0x74, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x3A, 0x00, +0x00, 0x00, 0x66, 0x72, 0x61, 0x67, 0x5F, 0x63, 0x6F, 0x6C, +0x6F, 0x72, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x3C, 0x00, +0x00, 0x00, 0x69, 0x6E, 0x5F, 0x63, 0x6F, 0x6C, 0x6F, 0x72, +0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x06, 0x00, 0x40, 0x00, +0x00, 0x00, 0x66, 0x72, 0x61, 0x67, 0x5F, 0x74, 0x65, 0x78, +0x5F, 0x63, 0x6F, 0x6F, 0x72, 0x64, 0x30, 0x00, 0x05, 0x00, +0x06, 0x00, 0x42, 0x00, 0x00, 0x00, 0x69, 0x6E, 0x5F, 0x74, +0x65, 0x78, 0x5F, 0x63, 0x6F, 0x6F, 0x72, 0x64, 0x30, 0x00, +0x00, 0x00, 0x05, 0x00, 0x06, 0x00, 0x44, 0x00, 0x00, 0x00, +0x66, 0x72, 0x61, 0x67, 0x5F, 0x74, 0x65, 0x78, 0x5F, 0x63, +0x6F, 0x6F, 0x72, 0x64, 0x31, 0x00, 0x05, 0x00, 0x06, 0x00, +0x45, 0x00, 0x00, 0x00, 0x69, 0x6E, 0x5F, 0x74, 0x65, 0x78, +0x5F, 0x63, 0x6F, 0x6F, 0x72, 0x64, 0x31, 0x00, 0x00, 0x00, +0x47, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x1E, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, +0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0B, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00, +0x13, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x48, 0x00, +0x04, 0x00, 0x1A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x05, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x1A, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x1A, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, +0x10, 0x00, 0x00, 0x00, 0x48, 0x00, 0x04, 0x00, 0x1A, 0x00, +0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, +0x48, 0x00, 0x05, 0x00, 0x1A, 0x00, 0x00, 0x00, 0x01, 0x00, +0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, +0x48, 0x00, 0x05, 0x00, 0x1A, 0x00, 0x00, 0x00, 0x01, 0x00, +0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, +0x48, 0x00, 0x05, 0x00, 0x1A, 0x00, 0x00, 0x00, 0x02, 0x00, +0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00, 0x00, +0x47, 0x00, 0x03, 0x00, 0x1A, 0x00, 0x00, 0x00, 0x02, 0x00, +0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x24, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, +0x04, 0x00, 0x2A, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, +0x03, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x3A, 0x00, +0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x47, 0x00, 0x04, 
0x00, 0x3C, 0x00, 0x00, 0x00, 0x1E, 0x00, +0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, +0x40, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x01, 0x00, +0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x42, 0x00, 0x00, 0x00, +0x1E, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, +0x04, 0x00, 0x44, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, +0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x45, 0x00, +0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, +0x13, 0x00, 0x02, 0x00, 0x02, 0x00, 0x00, 0x00, 0x21, 0x00, +0x03, 0x00, 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, +0x16, 0x00, 0x03, 0x00, 0x06, 0x00, 0x00, 0x00, 0x20, 0x00, +0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, +0x06, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x20, 0x00, +0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, +0x07, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x0A, 0x00, +0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, +0x20, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x01, 0x00, +0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, +0x0B, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x01, 0x00, +0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, +0x0E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3F, 0x1E, 0x00, +0x03, 0x00, 0x13, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, +0x20, 0x00, 0x04, 0x00, 0x14, 0x00, 0x00, 0x00, 0x03, 0x00, +0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, +0x14, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x03, 0x00, +0x00, 0x00, 0x15, 0x00, 0x04, 0x00, 0x16, 0x00, 0x00, 0x00, +0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2B, 0x00, +0x04, 0x00, 0x16, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x04, 0x00, 0x18, 0x00, +0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, +0x18, 0x00, 0x04, 0x00, 0x19, 0x00, 0x00, 0x00, 0x07, 0x00, +0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x05, 0x00, +0x1A, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x19, 0x00, +0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, +0x1B, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x1A, 0x00, +0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x1B, 0x00, 0x00, 0x00, +0x1C, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x20, 0x00, +0x04, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, +0x18, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x22, 0x00, +0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, +0x32, 0x00, 0x04, 0x00, 0x16, 0x00, 0x00, 0x00, 0x24, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x02, 0x00, +0x25, 0x00, 0x00, 0x00, 0x34, 0x00, 0x06, 0x00, 0x25, 0x00, +0x00, 0x00, 0x26, 0x00, 0x00, 0x00, 0xAB, 0x00, 0x00, 0x00, +0x24, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x20, 0x00, +0x04, 0x00, 0x29, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, +0x06, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x29, 0x00, +0x00, 0x00, 0x2A, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, +0x2B, 0x00, 0x04, 0x00, 0x16, 0x00, 0x00, 0x00, 0x2B, 0x00, +0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, +0x2C, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x07, 0x00, +0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x16, 0x00, 0x00, 0x00, +0x30, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x20, 0x00, +0x04, 0x00, 0x31, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, +0x19, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x22, 0x00, +0x00, 0x00, 0x3A, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, +0x20, 0x00, 0x04, 0x00, 0x3B, 0x00, 0x00, 0x00, 0x01, 0x00, +0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, +0x3B, 0x00, 0x00, 0x00, 0x3C, 
0x00, 0x00, 0x00, 0x01, 0x00, +0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x3E, 0x00, 0x00, 0x00, +0x06, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, +0x04, 0x00, 0x3F, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, +0x3E, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x3F, 0x00, +0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, +0x20, 0x00, 0x04, 0x00, 0x41, 0x00, 0x00, 0x00, 0x01, 0x00, +0x00, 0x00, 0x3E, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, +0x41, 0x00, 0x00, 0x00, 0x42, 0x00, 0x00, 0x00, 0x01, 0x00, +0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x3F, 0x00, 0x00, 0x00, +0x44, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x3B, 0x00, +0x04, 0x00, 0x41, 0x00, 0x00, 0x00, 0x45, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x36, 0x00, 0x05, 0x00, 0x02, 0x00, +0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x03, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x05, 0x00, +0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, +0x09, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x3D, 0x00, +0x04, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, +0x0C, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, +0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, +0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, +0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, +0x02, 0x00, 0x00, 0x00, 0x50, 0x00, 0x07, 0x00, 0x07, 0x00, +0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, +0x10, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x0E, 0x00, +0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x09, 0x00, 0x00, 0x00, +0x12, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x1D, 0x00, +0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, +0x17, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x18, 0x00, +0x00, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, +0x3D, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x20, 0x00, +0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x91, 0x00, 0x05, 0x00, +0x07, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x1F, 0x00, +0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, +0x22, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x15, 0x00, +0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, +0x23, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0xF7, 0x00, +0x03, 0x00, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0xFA, 0x00, 0x04, 0x00, 0x26, 0x00, 0x00, 0x00, 0x27, 0x00, +0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, +0x27, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x2C, 0x00, +0x00, 0x00, 0x2D, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, +0x2B, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x07, 0x00, +0x00, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x2D, 0x00, 0x00, 0x00, +0x3D, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x2F, 0x00, +0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, +0x31, 0x00, 0x00, 0x00, 0x32, 0x00, 0x00, 0x00, 0x1C, 0x00, +0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, +0x19, 0x00, 0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x32, 0x00, +0x00, 0x00, 0x90, 0x00, 0x05, 0x00, 0x0A, 0x00, 0x00, 0x00, +0x34, 0x00, 0x00, 0x00, 0x2F, 0x00, 0x00, 0x00, 0x33, 0x00, +0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, +0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, +0x36, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x01, 0x00, +0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, +0x37, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 
0x00, 0x02, 0x00, +0x00, 0x00, 0x50, 0x00, 0x07, 0x00, 0x07, 0x00, 0x00, 0x00, +0x38, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x00, 0x36, 0x00, +0x00, 0x00, 0x37, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00, +0x94, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x39, 0x00, +0x00, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, +0x3E, 0x00, 0x03, 0x00, 0x2A, 0x00, 0x00, 0x00, 0x39, 0x00, +0x00, 0x00, 0xF9, 0x00, 0x02, 0x00, 0x28, 0x00, 0x00, 0x00, +0xF8, 0x00, 0x02, 0x00, 0x28, 0x00, 0x00, 0x00, 0x3D, 0x00, +0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x00, 0x00, +0x3C, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x3A, 0x00, +0x00, 0x00, 0x3D, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, +0x3E, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00, 0x00, 0x42, 0x00, +0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x40, 0x00, 0x00, 0x00, +0x43, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x3E, 0x00, +0x00, 0x00, 0x46, 0x00, 0x00, 0x00, 0x45, 0x00, 0x00, 0x00, +0x3E, 0x00, 0x03, 0x00, 0x44, 0x00, 0x00, 0x00, 0x46, 0x00, +0x00, 0x00, 0xFD, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00, +}; +int multi_texture_vert_spv_size = 2120; diff --git a/code/renderer_vulkan/shaders/Compiled/single_texture_clipping_plane_vert.c b/code/renderer_vulkan/shaders/Compiled/single_texture_clipping_plane_vert.c new file mode 100644 index 0000000000..53dc3f5dcd --- /dev/null +++ b/code/renderer_vulkan/shaders/Compiled/single_texture_clipping_plane_vert.c @@ -0,0 +1,193 @@ +unsigned char single_texture_clipping_plane_vert_spv[] = { +0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x0B, 0x00, +0x08, 0x00, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x11, 0x00, +0x02, 0x00, 0x20, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x06, 0x00, +0x01, 0x00, 0x00, 0x00, 0x47, 0x4C, 0x53, 0x4C, 0x2E, 0x73, +0x74, 0x64, 0x2E, 0x34, 0x35, 0x30, 0x00, 0x00, 0x00, 0x00, +0x0E, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, +0x00, 0x00, 0x0F, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x00, 0x00, +0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, +0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, +0x38, 0x00, 0x00, 0x00, 0x3A, 0x00, 0x00, 0x00, 0x3E, 0x00, +0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00, +0x02, 0x00, 0x00, 0x00, 0xC2, 0x01, 0x00, 0x00, 0x05, 0x00, +0x04, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E, +0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x03, 0x00, 0x09, 0x00, +0x00, 0x00, 0x70, 0x00, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00, +0x0C, 0x00, 0x00, 0x00, 0x69, 0x6E, 0x5F, 0x70, 0x6F, 0x73, +0x69, 0x74, 0x69, 0x6F, 0x6E, 0x00, 0x05, 0x00, 0x06, 0x00, +0x16, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x50, 0x65, 0x72, +0x56, 0x65, 0x72, 0x74, 0x65, 0x78, 0x00, 0x00, 0x00, 0x00, +0x06, 0x00, 0x06, 0x00, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x67, 0x6C, 0x5F, 0x50, 0x6F, 0x73, 0x69, 0x74, +0x69, 0x6F, 0x6E, 0x00, 0x06, 0x00, 0x07, 0x00, 0x16, 0x00, +0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x67, 0x6C, 0x5F, 0x43, +0x6C, 0x69, 0x70, 0x44, 0x69, 0x73, 0x74, 0x61, 0x6E, 0x63, +0x65, 0x00, 0x05, 0x00, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x1D, 0x00, +0x00, 0x00, 0x54, 0x72, 0x61, 0x6E, 0x73, 0x66, 0x6F, 0x72, +0x6D, 0x00, 0x00, 0x00, 0x06, 0x00, 0x08, 0x00, 0x1D, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x6C, 0x69, 0x70, +0x5F, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5F, 0x78, 0x66, 0x6F, +0x72, 0x6D, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x07, 0x00, +0x1D, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x65, 0x79, +0x65, 0x5F, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5F, 
0x78, 0x66, +0x6F, 0x72, 0x6D, 0x00, 0x06, 0x00, 0x07, 0x00, 0x1D, 0x00, +0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x63, 0x6C, 0x69, 0x70, +0x70, 0x69, 0x6E, 0x67, 0x5F, 0x70, 0x6C, 0x61, 0x6E, 0x65, +0x00, 0x00, 0x05, 0x00, 0x03, 0x00, 0x1F, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x38, 0x00, +0x00, 0x00, 0x66, 0x72, 0x61, 0x67, 0x5F, 0x63, 0x6F, 0x6C, +0x6F, 0x72, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x3A, 0x00, +0x00, 0x00, 0x69, 0x6E, 0x5F, 0x63, 0x6F, 0x6C, 0x6F, 0x72, +0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x06, 0x00, 0x3E, 0x00, +0x00, 0x00, 0x66, 0x72, 0x61, 0x67, 0x5F, 0x74, 0x65, 0x78, +0x5F, 0x63, 0x6F, 0x6F, 0x72, 0x64, 0x00, 0x00, 0x05, 0x00, +0x06, 0x00, 0x40, 0x00, 0x00, 0x00, 0x69, 0x6E, 0x5F, 0x74, +0x65, 0x78, 0x5F, 0x63, 0x6F, 0x6F, 0x72, 0x64, 0x00, 0x00, +0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x0C, 0x00, 0x00, 0x00, +0x1E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0x00, +0x05, 0x00, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x0B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0x00, +0x05, 0x00, 0x16, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, +0x0B, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x47, 0x00, +0x03, 0x00, 0x16, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, +0x48, 0x00, 0x04, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, +0x1D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, +0x1D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x00, +0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x48, 0x00, 0x04, 0x00, +0x1D, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x05, 0x00, +0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x1D, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x40, 0x00, +0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x1D, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x10, 0x00, +0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x1D, 0x00, 0x00, 0x00, +0x02, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x70, 0x00, +0x00, 0x00, 0x47, 0x00, 0x03, 0x00, 0x1D, 0x00, 0x00, 0x00, +0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x38, 0x00, +0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x47, 0x00, 0x04, 0x00, 0x3A, 0x00, 0x00, 0x00, 0x1E, 0x00, +0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, +0x3E, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x01, 0x00, +0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x40, 0x00, 0x00, 0x00, +0x1E, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x13, 0x00, +0x02, 0x00, 0x02, 0x00, 0x00, 0x00, 0x21, 0x00, 0x03, 0x00, +0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x16, 0x00, +0x03, 0x00, 0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, +0x17, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x06, 0x00, +0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, +0x08, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x07, 0x00, +0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x0A, 0x00, 0x00, 0x00, +0x06, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x20, 0x00, +0x04, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, +0x0A, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x0B, 0x00, +0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, +0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00, 0x0E, 0x00, +0x00, 0x00, 0x00, 0x00, 0x80, 0x3F, 0x15, 0x00, 0x04, 0x00, +0x13, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x13, 0x00, 0x00, 0x00, +0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x1C, 0x00, +0x04, 0x00, 0x15, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 
+0x14, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x04, 0x00, 0x16, 0x00, +0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, +0x20, 0x00, 0x04, 0x00, 0x17, 0x00, 0x00, 0x00, 0x03, 0x00, +0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, +0x17, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x03, 0x00, +0x00, 0x00, 0x15, 0x00, 0x04, 0x00, 0x19, 0x00, 0x00, 0x00, +0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2B, 0x00, +0x04, 0x00, 0x19, 0x00, 0x00, 0x00, 0x1A, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x04, 0x00, 0x1B, 0x00, +0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, +0x18, 0x00, 0x04, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x07, 0x00, +0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x05, 0x00, +0x1D, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x1C, 0x00, +0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, +0x1E, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x1D, 0x00, +0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x1E, 0x00, 0x00, 0x00, +0x1F, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x20, 0x00, +0x04, 0x00, 0x20, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, +0x1B, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x25, 0x00, +0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, +0x2B, 0x00, 0x04, 0x00, 0x19, 0x00, 0x00, 0x00, 0x27, 0x00, +0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, +0x19, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x02, 0x00, +0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x29, 0x00, 0x00, 0x00, +0x09, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x20, 0x00, +0x04, 0x00, 0x2D, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, +0x1C, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x36, 0x00, +0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, +0x3B, 0x00, 0x04, 0x00, 0x25, 0x00, 0x00, 0x00, 0x38, 0x00, +0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, +0x39, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x07, 0x00, +0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x39, 0x00, 0x00, 0x00, +0x3A, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x17, 0x00, +0x04, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, +0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x3D, 0x00, +0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x00, +0x3B, 0x00, 0x04, 0x00, 0x3D, 0x00, 0x00, 0x00, 0x3E, 0x00, +0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, +0x3F, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x3C, 0x00, +0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x3F, 0x00, 0x00, 0x00, +0x40, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x36, 0x00, +0x05, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xF8, 0x00, +0x02, 0x00, 0x05, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, +0x08, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x07, 0x00, +0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x0A, 0x00, 0x00, 0x00, +0x0D, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x51, 0x00, +0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, +0x0D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x51, 0x00, +0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, +0x0D, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x51, 0x00, +0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, +0x0D, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x50, 0x00, +0x07, 0x00, 0x07, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, +0x0F, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x11, 0x00, +0x00, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, +0x09, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x41, 0x00, +0x05, 0x00, 0x20, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, +0x1F, 0x00, 
0x00, 0x00, 0x1A, 0x00, 0x00, 0x00, 0x3D, 0x00, +0x04, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, +0x21, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x07, 0x00, +0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, +0x91, 0x00, 0x05, 0x00, 0x07, 0x00, 0x00, 0x00, 0x24, 0x00, +0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, +0x41, 0x00, 0x05, 0x00, 0x25, 0x00, 0x00, 0x00, 0x26, 0x00, +0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x1A, 0x00, 0x00, 0x00, +0x3E, 0x00, 0x03, 0x00, 0x26, 0x00, 0x00, 0x00, 0x24, 0x00, +0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x29, 0x00, 0x00, 0x00, +0x2A, 0x00, 0x00, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x28, 0x00, +0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, +0x2B, 0x00, 0x00, 0x00, 0x2A, 0x00, 0x00, 0x00, 0x3D, 0x00, +0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x2C, 0x00, 0x00, 0x00, +0x09, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x2D, 0x00, +0x00, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x1F, 0x00, 0x00, 0x00, +0x27, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x1C, 0x00, +0x00, 0x00, 0x2F, 0x00, 0x00, 0x00, 0x2E, 0x00, 0x00, 0x00, +0x90, 0x00, 0x05, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x30, 0x00, +0x00, 0x00, 0x2C, 0x00, 0x00, 0x00, 0x2F, 0x00, 0x00, 0x00, +0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x31, 0x00, +0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x32, 0x00, +0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, +0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x33, 0x00, +0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, +0x50, 0x00, 0x07, 0x00, 0x07, 0x00, 0x00, 0x00, 0x34, 0x00, +0x00, 0x00, 0x31, 0x00, 0x00, 0x00, 0x32, 0x00, 0x00, 0x00, +0x33, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x94, 0x00, +0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x00, +0x2B, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x41, 0x00, +0x06, 0x00, 0x36, 0x00, 0x00, 0x00, 0x37, 0x00, 0x00, 0x00, +0x18, 0x00, 0x00, 0x00, 0x27, 0x00, 0x00, 0x00, 0x1A, 0x00, +0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x37, 0x00, 0x00, 0x00, +0x35, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x07, 0x00, +0x00, 0x00, 0x3B, 0x00, 0x00, 0x00, 0x3A, 0x00, 0x00, 0x00, +0x3E, 0x00, 0x03, 0x00, 0x38, 0x00, 0x00, 0x00, 0x3B, 0x00, +0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x3C, 0x00, 0x00, 0x00, +0x41, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x3E, 0x00, +0x03, 0x00, 0x3E, 0x00, 0x00, 0x00, 0x41, 0x00, 0x00, 0x00, +0xFD, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00, }; +int single_texture_clipping_plane_vert_spv_size = 1908; diff --git a/code/renderer_vulkan/shaders/Compiled/single_texture_frag.c b/code/renderer_vulkan/shaders/Compiled/single_texture_frag.c new file mode 100644 index 0000000000..4d0d980993 --- /dev/null +++ b/code/renderer_vulkan/shaders/Compiled/single_texture_frag.c @@ -0,0 +1,181 @@ +unsigned char single_texture_frag_spv[] = { +0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x0B, 0x00, +0x08, 0x00, 0x4D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0B, 0x00, +0x06, 0x00, 0x01, 0x00, 0x00, 0x00, 0x47, 0x4C, 0x53, 0x4C, +0x2E, 0x73, 0x74, 0x64, 0x2E, 0x34, 0x35, 0x30, 0x00, 0x00, +0x00, 0x00, 0x0E, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x09, 0x00, 0x04, 0x00, +0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E, +0x00, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x19, 0x00, +0x00, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, +0x10, 0x00, 0x03, 0x00, 0x04, 0x00, 0x00, 0x00, 0x07, 0x00, +0x00, 0x00, 0x03, 
0x00, 0x03, 0x00, 0x02, 0x00, 0x00, 0x00, +0xC2, 0x01, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00, 0x04, 0x00, +0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00, +0x05, 0x00, 0x05, 0x00, 0x08, 0x00, 0x00, 0x00, 0x63, 0x6C, +0x69, 0x70, 0x5F, 0x70, 0x6C, 0x61, 0x6E, 0x65, 0x00, 0x00, +0x05, 0x00, 0x06, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x66, 0x72, +0x61, 0x67, 0x5F, 0x63, 0x6C, 0x69, 0x70, 0x5F, 0x64, 0x69, +0x73, 0x74, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x19, 0x00, +0x00, 0x00, 0x6F, 0x75, 0x74, 0x5F, 0x63, 0x6F, 0x6C, 0x6F, +0x72, 0x00, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x1B, 0x00, +0x00, 0x00, 0x66, 0x72, 0x61, 0x67, 0x5F, 0x63, 0x6F, 0x6C, +0x6F, 0x72, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x20, 0x00, +0x00, 0x00, 0x74, 0x65, 0x78, 0x74, 0x75, 0x72, 0x65, 0x30, +0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x06, 0x00, 0x24, 0x00, +0x00, 0x00, 0x66, 0x72, 0x61, 0x67, 0x5F, 0x74, 0x65, 0x78, +0x5F, 0x63, 0x6F, 0x6F, 0x72, 0x64, 0x00, 0x00, 0x05, 0x00, +0x06, 0x00, 0x28, 0x00, 0x00, 0x00, 0x61, 0x6C, 0x70, 0x68, +0x61, 0x5F, 0x74, 0x65, 0x73, 0x74, 0x5F, 0x66, 0x75, 0x6E, +0x63, 0x00, 0x47, 0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, +0x04, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, +0x03, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x19, 0x00, +0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x47, 0x00, 0x04, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x1E, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, +0x20, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x20, 0x00, 0x00, 0x00, +0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x00, +0x04, 0x00, 0x24, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x28, 0x00, +0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x13, 0x00, 0x02, 0x00, 0x02, 0x00, 0x00, 0x00, 0x21, 0x00, +0x03, 0x00, 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, +0x14, 0x00, 0x02, 0x00, 0x06, 0x00, 0x00, 0x00, 0x15, 0x00, +0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x32, 0x00, 0x04, 0x00, 0x07, 0x00, +0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x2B, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x09, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x06, 0x00, +0x06, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0xAB, 0x00, +0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, +0x16, 0x00, 0x03, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x20, 0x00, +0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x0E, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x3B, 0x00, +0x04, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x0D, 0x00, +0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x17, 0x00, 0x04, 0x00, 0x17, 0x00, 0x00, 0x00, 0x0D, 0x00, +0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, +0x18, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x17, 0x00, +0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x18, 0x00, 0x00, 0x00, +0x19, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x20, 0x00, +0x04, 0x00, 0x1A, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, +0x17, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x1A, 0x00, +0x00, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, +0x19, 0x00, 0x09, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x0D, 0x00, +0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x1B, 0x00, 0x03, 0x00, +0x1E, 0x00, 0x00, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x20, 0x00, +0x04, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x1E, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x1F, 0x00, +0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x17, 0x00, 0x04, 0x00, 0x22, 0x00, 0x00, 0x00, 0x0D, 0x00, +0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, +0x23, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x22, 0x00, +0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x23, 0x00, 0x00, 0x00, +0x24, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x32, 0x00, +0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x07, 0x00, +0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, +0x34, 0x00, 0x06, 0x00, 0x06, 0x00, 0x00, 0x00, 0x2A, 0x00, +0x00, 0x00, 0xAA, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, +0x29, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00, 0x2D, 0x00, +0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x2B, 0x00, 0x04, 0x00, 0x2D, 0x00, 0x00, 0x00, 0x2E, 0x00, +0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, +0x2F, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x0D, 0x00, +0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, +0x37, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x34, 0x00, +0x06, 0x00, 0x06, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, +0xAA, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x37, 0x00, +0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x0D, 0x00, 0x00, 0x00, +0x3D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3F, 0x2B, 0x00, +0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00, 0x00, +0x03, 0x00, 0x00, 0x00, 0x34, 0x00, 0x06, 0x00, 0x06, 0x00, +0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0xAA, 0x00, 0x00, 0x00, +0x28, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00, 0x00, 0x36, 0x00, +0x05, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xF8, 0x00, +0x02, 0x00, 0x05, 0x00, 0x00, 0x00, 0xF7, 0x00, 0x03, 0x00, +0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFA, 0x00, +0x04, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, +0x0C, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x0B, 0x00, +0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x0D, 0x00, 0x00, 0x00, +0x10, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0xB8, 0x00, +0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, +0x10, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0xF9, 0x00, +0x02, 0x00, 0x0C, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, +0x0C, 0x00, 0x00, 0x00, 0xF5, 0x00, 0x07, 0x00, 0x06, 0x00, +0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, +0x05, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x0B, 0x00, +0x00, 0x00, 0xF7, 0x00, 0x03, 0x00, 0x15, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0xFA, 0x00, 0x04, 0x00, 0x13, 0x00, +0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, +0xF8, 0x00, 0x02, 0x00, 0x14, 0x00, 0x00, 0x00, 0xFC, 0x00, +0x01, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x15, 0x00, 0x00, 0x00, +0x3D, 0x00, 0x04, 0x00, 0x17, 0x00, 0x00, 0x00, 0x1C, 0x00, +0x00, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, +0x1E, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x20, 0x00, +0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x22, 0x00, 0x00, 0x00, +0x25, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x57, 0x00, +0x05, 0x00, 0x17, 0x00, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00, +0x21, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00, 0x85, 0x00, +0x05, 0x00, 0x17, 0x00, 0x00, 0x00, 0x27, 0x00, 0x00, 0x00, +0x1C, 0x00, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00, 0x3E, 0x00, +0x03, 0x00, 0x19, 0x00, 0x00, 0x00, 0x27, 
0x00, 0x00, 0x00, +0xF7, 0x00, 0x03, 0x00, 0x2C, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xFA, 0x00, 0x04, 0x00, 0x2A, 0x00, 0x00, 0x00, +0x2B, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, 0xF8, 0x00, +0x02, 0x00, 0x2B, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, +0x2F, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x19, 0x00, +0x00, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, +0x0D, 0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00, 0x30, 0x00, +0x00, 0x00, 0xB4, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, +0x32, 0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00, 0x11, 0x00, +0x00, 0x00, 0xF7, 0x00, 0x03, 0x00, 0x34, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0xFA, 0x00, 0x04, 0x00, 0x32, 0x00, +0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, +0xF8, 0x00, 0x02, 0x00, 0x33, 0x00, 0x00, 0x00, 0xFC, 0x00, +0x01, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x34, 0x00, 0x00, 0x00, +0xF9, 0x00, 0x02, 0x00, 0x2C, 0x00, 0x00, 0x00, 0xF8, 0x00, +0x02, 0x00, 0x36, 0x00, 0x00, 0x00, 0xF7, 0x00, 0x03, 0x00, +0x3A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFA, 0x00, +0x04, 0x00, 0x38, 0x00, 0x00, 0x00, 0x39, 0x00, 0x00, 0x00, +0x42, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x39, 0x00, +0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x2F, 0x00, 0x00, 0x00, +0x3B, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x2E, 0x00, +0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x0D, 0x00, 0x00, 0x00, +0x3C, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x00, 0x00, 0xBE, 0x00, +0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x00, 0x00, +0x3C, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x00, 0x00, 0xF7, 0x00, +0x03, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0xFA, 0x00, 0x04, 0x00, 0x3E, 0x00, 0x00, 0x00, 0x3F, 0x00, +0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, +0x3F, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x01, 0x00, 0xF8, 0x00, +0x02, 0x00, 0x40, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00, +0x3A, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x42, 0x00, +0x00, 0x00, 0xF7, 0x00, 0x03, 0x00, 0x46, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0xFA, 0x00, 0x04, 0x00, 0x44, 0x00, +0x00, 0x00, 0x45, 0x00, 0x00, 0x00, 0x46, 0x00, 0x00, 0x00, +0xF8, 0x00, 0x02, 0x00, 0x45, 0x00, 0x00, 0x00, 0x41, 0x00, +0x05, 0x00, 0x2F, 0x00, 0x00, 0x00, 0x47, 0x00, 0x00, 0x00, +0x19, 0x00, 0x00, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x3D, 0x00, +0x04, 0x00, 0x0D, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, +0x47, 0x00, 0x00, 0x00, 0xB8, 0x00, 0x05, 0x00, 0x06, 0x00, +0x00, 0x00, 0x49, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, +0x3D, 0x00, 0x00, 0x00, 0xF7, 0x00, 0x03, 0x00, 0x4B, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFA, 0x00, 0x04, 0x00, +0x49, 0x00, 0x00, 0x00, 0x4A, 0x00, 0x00, 0x00, 0x4B, 0x00, +0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x4A, 0x00, 0x00, 0x00, +0xFC, 0x00, 0x01, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x4B, 0x00, +0x00, 0x00, 0xF9, 0x00, 0x02, 0x00, 0x46, 0x00, 0x00, 0x00, +0xF8, 0x00, 0x02, 0x00, 0x46, 0x00, 0x00, 0x00, 0xF9, 0x00, +0x02, 0x00, 0x3A, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, +0x3A, 0x00, 0x00, 0x00, 0xF9, 0x00, 0x02, 0x00, 0x2C, 0x00, +0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x2C, 0x00, 0x00, 0x00, +0xFD, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00, }; +int single_texture_frag_spv_size = 1788; diff --git a/code/renderer_vulkan/shaders/Compiled/single_texture_vert.c b/code/renderer_vulkan/shaders/Compiled/single_texture_vert.c new file mode 100644 index 0000000000..1960c41023 --- /dev/null +++ b/code/renderer_vulkan/shaders/Compiled/single_texture_vert.c @@ -0,0 +1,200 @@ +unsigned char single_texture_vert_spv[] = { +0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x0B, 0x00, 
+0x08, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x11, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0B, 0x00, +0x06, 0x00, 0x01, 0x00, 0x00, 0x00, 0x47, 0x4C, 0x53, 0x4C, +0x2E, 0x73, 0x74, 0x64, 0x2E, 0x34, 0x35, 0x30, 0x00, 0x00, +0x00, 0x00, 0x0E, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x0C, 0x00, 0x00, 0x00, +0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E, +0x00, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, 0x15, 0x00, +0x00, 0x00, 0x2A, 0x00, 0x00, 0x00, 0x3A, 0x00, 0x00, 0x00, +0x3C, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x42, 0x00, +0x00, 0x00, 0x03, 0x00, 0x03, 0x00, 0x02, 0x00, 0x00, 0x00, +0xC2, 0x01, 0x00, 0x00, 0x05, 0x00, 0x04, 0x00, 0x04, 0x00, +0x00, 0x00, 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x00, 0x00, 0x00, +0x05, 0x00, 0x03, 0x00, 0x09, 0x00, 0x00, 0x00, 0x70, 0x00, +0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x0C, 0x00, 0x00, 0x00, +0x69, 0x6E, 0x5F, 0x70, 0x6F, 0x73, 0x69, 0x74, 0x69, 0x6F, +0x6E, 0x00, 0x05, 0x00, 0x06, 0x00, 0x13, 0x00, 0x00, 0x00, +0x67, 0x6C, 0x5F, 0x50, 0x65, 0x72, 0x56, 0x65, 0x72, 0x74, +0x65, 0x78, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x06, 0x00, +0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x6C, +0x5F, 0x50, 0x6F, 0x73, 0x69, 0x74, 0x69, 0x6F, 0x6E, 0x00, +0x05, 0x00, 0x03, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x1A, 0x00, 0x00, 0x00, +0x54, 0x72, 0x61, 0x6E, 0x73, 0x66, 0x6F, 0x72, 0x6D, 0x00, +0x00, 0x00, 0x06, 0x00, 0x08, 0x00, 0x1A, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x63, 0x6C, 0x69, 0x70, 0x5F, 0x73, +0x70, 0x61, 0x63, 0x65, 0x5F, 0x78, 0x66, 0x6F, 0x72, 0x6D, +0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x07, 0x00, 0x1A, 0x00, +0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x65, 0x79, 0x65, 0x5F, +0x73, 0x70, 0x61, 0x63, 0x65, 0x5F, 0x78, 0x66, 0x6F, 0x72, +0x6D, 0x00, 0x06, 0x00, 0x07, 0x00, 0x1A, 0x00, 0x00, 0x00, +0x02, 0x00, 0x00, 0x00, 0x63, 0x6C, 0x69, 0x70, 0x70, 0x69, +0x6E, 0x67, 0x5F, 0x70, 0x6C, 0x61, 0x6E, 0x65, 0x00, 0x00, +0x05, 0x00, 0x03, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x05, 0x00, 0x05, 0x00, 0x24, 0x00, 0x00, 0x00, +0x63, 0x6C, 0x69, 0x70, 0x5F, 0x70, 0x6C, 0x61, 0x6E, 0x65, +0x00, 0x00, 0x05, 0x00, 0x06, 0x00, 0x2A, 0x00, 0x00, 0x00, +0x66, 0x72, 0x61, 0x67, 0x5F, 0x63, 0x6C, 0x69, 0x70, 0x5F, +0x64, 0x69, 0x73, 0x74, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00, +0x3A, 0x00, 0x00, 0x00, 0x66, 0x72, 0x61, 0x67, 0x5F, 0x63, +0x6F, 0x6C, 0x6F, 0x72, 0x00, 0x00, 0x05, 0x00, 0x05, 0x00, +0x3C, 0x00, 0x00, 0x00, 0x69, 0x6E, 0x5F, 0x63, 0x6F, 0x6C, +0x6F, 0x72, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x06, 0x00, +0x40, 0x00, 0x00, 0x00, 0x66, 0x72, 0x61, 0x67, 0x5F, 0x74, +0x65, 0x78, 0x5F, 0x63, 0x6F, 0x6F, 0x72, 0x64, 0x00, 0x00, +0x05, 0x00, 0x06, 0x00, 0x42, 0x00, 0x00, 0x00, 0x69, 0x6E, +0x5F, 0x74, 0x65, 0x78, 0x5F, 0x63, 0x6F, 0x6F, 0x72, 0x64, +0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x0C, 0x00, +0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x48, 0x00, 0x05, 0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x47, 0x00, 0x03, 0x00, 0x13, 0x00, 0x00, 0x00, 0x02, 0x00, +0x00, 0x00, 0x48, 0x00, 0x04, 0x00, 0x1A, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x48, 0x00, +0x05, 0x00, 0x1A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0x00, +0x05, 0x00, 0x1A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x07, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x48, 0x00, +0x04, 0x00, 
0x1A, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, +0x05, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x1A, 0x00, +0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, +0x40, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x1A, 0x00, +0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, +0x10, 0x00, 0x00, 0x00, 0x48, 0x00, 0x05, 0x00, 0x1A, 0x00, +0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, +0x70, 0x00, 0x00, 0x00, 0x47, 0x00, 0x03, 0x00, 0x1A, 0x00, +0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, +0x24, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, +0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x2A, 0x00, 0x00, 0x00, +0x1E, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x47, 0x00, +0x04, 0x00, 0x3A, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, 0x3C, 0x00, +0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, +0x47, 0x00, 0x04, 0x00, 0x40, 0x00, 0x00, 0x00, 0x1E, 0x00, +0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x47, 0x00, 0x04, 0x00, +0x42, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x02, 0x00, +0x00, 0x00, 0x13, 0x00, 0x02, 0x00, 0x02, 0x00, 0x00, 0x00, +0x21, 0x00, 0x03, 0x00, 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, +0x00, 0x00, 0x16, 0x00, 0x03, 0x00, 0x06, 0x00, 0x00, 0x00, +0x20, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x07, 0x00, +0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, +0x20, 0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, 0x07, 0x00, +0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, +0x0A, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x03, 0x00, +0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x0B, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x3B, 0x00, +0x04, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x06, 0x00, +0x00, 0x00, 0x0E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3F, +0x1E, 0x00, 0x03, 0x00, 0x13, 0x00, 0x00, 0x00, 0x07, 0x00, +0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x14, 0x00, 0x00, 0x00, +0x03, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x3B, 0x00, +0x04, 0x00, 0x14, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, +0x03, 0x00, 0x00, 0x00, 0x15, 0x00, 0x04, 0x00, 0x16, 0x00, +0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, +0x2B, 0x00, 0x04, 0x00, 0x16, 0x00, 0x00, 0x00, 0x17, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x04, 0x00, +0x18, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x04, 0x00, +0x00, 0x00, 0x18, 0x00, 0x04, 0x00, 0x19, 0x00, 0x00, 0x00, +0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x1E, 0x00, +0x05, 0x00, 0x1A, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, +0x19, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x20, 0x00, +0x04, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, +0x1A, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x1B, 0x00, +0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, +0x20, 0x00, 0x04, 0x00, 0x1D, 0x00, 0x00, 0x00, 0x09, 0x00, +0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x20, 0x00, 0x04, 0x00, +0x22, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x07, 0x00, +0x00, 0x00, 0x32, 0x00, 0x04, 0x00, 0x16, 0x00, 0x00, 0x00, +0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, +0x02, 0x00, 0x25, 0x00, 0x00, 0x00, 0x34, 0x00, 0x06, 0x00, +0x25, 0x00, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00, 0xAB, 0x00, +0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, +0x20, 0x00, 0x04, 0x00, 0x29, 0x00, 0x00, 0x00, 0x03, 0x00, +0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, +0x29, 0x00, 0x00, 0x00, 0x2A, 0x00, 0x00, 0x00, 0x03, 0x00, +0x00, 0x00, 0x2B, 0x00, 
0x04, 0x00, 0x16, 0x00, 0x00, 0x00, +0x2B, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, +0x04, 0x00, 0x2C, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, +0x07, 0x00, 0x00, 0x00, 0x2B, 0x00, 0x04, 0x00, 0x16, 0x00, +0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, +0x20, 0x00, 0x04, 0x00, 0x31, 0x00, 0x00, 0x00, 0x09, 0x00, +0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, +0x22, 0x00, 0x00, 0x00, 0x3A, 0x00, 0x00, 0x00, 0x03, 0x00, +0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x3B, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x3B, 0x00, +0x04, 0x00, 0x3B, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x17, 0x00, 0x04, 0x00, 0x3E, 0x00, +0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, +0x20, 0x00, 0x04, 0x00, 0x3F, 0x00, 0x00, 0x00, 0x03, 0x00, +0x00, 0x00, 0x3E, 0x00, 0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, +0x3F, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x03, 0x00, +0x00, 0x00, 0x20, 0x00, 0x04, 0x00, 0x41, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x00, 0x00, 0x3B, 0x00, +0x04, 0x00, 0x41, 0x00, 0x00, 0x00, 0x42, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x36, 0x00, 0x05, 0x00, 0x02, 0x00, +0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x03, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, 0x05, 0x00, +0x00, 0x00, 0x3B, 0x00, 0x04, 0x00, 0x08, 0x00, 0x00, 0x00, +0x09, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x3D, 0x00, +0x04, 0x00, 0x0A, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, +0x0C, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, +0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, +0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, +0x01, 0x00, 0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, +0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x0D, 0x00, 0x00, 0x00, +0x02, 0x00, 0x00, 0x00, 0x50, 0x00, 0x07, 0x00, 0x07, 0x00, +0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x00, 0x00, +0x10, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x0E, 0x00, +0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x09, 0x00, 0x00, 0x00, +0x12, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x1D, 0x00, +0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, +0x17, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x18, 0x00, +0x00, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x1E, 0x00, 0x00, 0x00, +0x3D, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x20, 0x00, +0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x91, 0x00, 0x05, 0x00, +0x07, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0x1F, 0x00, +0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, +0x22, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x15, 0x00, +0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, +0x23, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, 0xF7, 0x00, +0x03, 0x00, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0xFA, 0x00, 0x04, 0x00, 0x26, 0x00, 0x00, 0x00, 0x27, 0x00, +0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0xF8, 0x00, 0x02, 0x00, +0x27, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, 0x2C, 0x00, +0x00, 0x00, 0x2D, 0x00, 0x00, 0x00, 0x1C, 0x00, 0x00, 0x00, +0x2B, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, 0x07, 0x00, +0x00, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x2D, 0x00, 0x00, 0x00, +0x3D, 0x00, 0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x2F, 0x00, +0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x41, 0x00, 0x05, 0x00, +0x31, 0x00, 0x00, 0x00, 0x32, 0x00, 0x00, 0x00, 0x1C, 0x00, +0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00, +0x19, 0x00, 0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x32, 0x00, +0x00, 0x00, 0x90, 0x00, 0x05, 0x00, 
0x0A, 0x00, 0x00, 0x00,
+0x34, 0x00, 0x00, 0x00, 0x2F, 0x00, 0x00, 0x00, 0x33, 0x00,
+0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+0x36, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x01, 0x00,
+0x00, 0x00, 0x51, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00,
+0x37, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x02, 0x00,
+0x00, 0x00, 0x50, 0x00, 0x07, 0x00, 0x07, 0x00, 0x00, 0x00,
+0x38, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x00, 0x36, 0x00,
+0x00, 0x00, 0x37, 0x00, 0x00, 0x00, 0x0E, 0x00, 0x00, 0x00,
+0x94, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x39, 0x00,
+0x00, 0x00, 0x2E, 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00,
+0x3E, 0x00, 0x03, 0x00, 0x2A, 0x00, 0x00, 0x00, 0x39, 0x00,
+0x00, 0x00, 0xF9, 0x00, 0x02, 0x00, 0x28, 0x00, 0x00, 0x00,
+0xF8, 0x00, 0x02, 0x00, 0x28, 0x00, 0x00, 0x00, 0x3D, 0x00,
+0x04, 0x00, 0x07, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x00, 0x00,
+0x3C, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x3A, 0x00,
+0x00, 0x00, 0x3D, 0x00, 0x00, 0x00, 0x3D, 0x00, 0x04, 0x00,
+0x3E, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00, 0x00, 0x42, 0x00,
+0x00, 0x00, 0x3E, 0x00, 0x03, 0x00, 0x40, 0x00, 0x00, 0x00,
+0x43, 0x00, 0x00, 0x00, 0xFD, 0x00, 0x01, 0x00, 0x38, 0x00,
+0x01, 0x00, };
+int single_texture_vert_spv_size = 1972;
diff --git a/code/renderer_vulkan/shaders/bintoc.c b/code/renderer_vulkan/shaders/bintoc.c
new file mode 100644
index 0000000000..38396dbe0a
--- /dev/null
+++ b/code/renderer_vulkan/shaders/bintoc.c
@@ -0,0 +1,26 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+int main(int argc, char** argv) {
+    if(argc != 3)
+        return 0;
+
+    char* fn = argv[1];
+    FILE* f = fopen(fn, "rb");
+    printf("unsigned char %s[] = {\n", argv[2]);
+    unsigned long n = 0;
+
+    while(!feof(f)) {
+        unsigned char c;
+        if(fread(&c, 1, 1, f) == 0) break;
+        printf("0x%.2X, ", (int)c);
+        ++n;
+        if(n % 10 == 0) printf("\n");
+    }
+
+    fclose(f);
+    printf("};\n");
+
+    printf("int %s_size = %ld;\n", argv[2], n);
+    return 0;
+}
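bintoc is the helper that generated the single_texture_vert_spv array above: it dumps a compiled .spv file as a C byte array plus an accompanying _size variable, so the shaders can be linked straight into the renderer binary; the compile scripts below drive glslangValidator and bintoc/bin2hex to regenerate these files. For context, here is a minimal sketch (not part of the patch; create_module is an illustrative helper) of how such an embedded blob would typically be wrapped in a VkShaderModule:

#include <stdlib.h>
#include <string.h>
#include <vulkan/vulkan.h>

extern unsigned char single_texture_vert_spv[];
extern int single_texture_vert_spv_size;

/* Illustrative helper, not from the patch: wraps an embedded SPIR-V byte
   array in a VkShaderModule. pCode must be uint32_t-aligned and codeSize is
   in bytes (a multiple of 4), so the bytes are copied into an aligned buffer. */
static VkShaderModule create_module(VkDevice device, const unsigned char *bytes, int size)
{
    uint32_t *code = (uint32_t *) malloc(size);
    memcpy(code, bytes, size);

    VkShaderModuleCreateInfo desc = { VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO };
    desc.codeSize = (size_t) size;
    desc.pCode = code;

    VkShaderModule module = VK_NULL_HANDLE;
    vkCreateShaderModule(device, &desc, NULL, &module);
    free(code); /* the implementation consumes the code during creation */
    return module;
}

/* e.g. create_module(device, single_texture_vert_spv, single_texture_vert_spv_size) */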
-x "./bintoc" ]] +then + gcc bintoc.c -o bintoc +fi + +find -type f -name "*.vert" | \ + while read f; do glslangValidator -V ${f} -o "Compiled/${f%.*}.vspv"; done + +find -type f -name "*.frag" | \ + while read f; do glslangValidator -V ${f} -o "Compiled/${f%.*}.fspv"; done + + + +find -type f -name "*.vspv" | \ + while read f; do ./bintoc ${f} `basename ${f%.*}`_vert_spv > ${f%.*}_vert.c; done + +find -type f -name "*.fspv" | \ + while read f; do ./bintoc ${f} `basename ${f%.*}`_frag_spv > ${f%.*}_frag.c; done diff --git a/code/renderer_vulkan/shaders/compile_hlsl.bat b/code/renderer_vulkan/shaders/compile_hlsl.bat new file mode 100644 index 0000000000..fed667664f --- /dev/null +++ b/code/renderer_vulkan/shaders/compile_hlsl.bat @@ -0,0 +1,82 @@ +@echo off +set "VSCMD_START_DIR=%CD%" +call "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\Common7\Tools\VsDevCmd.bat" + +set tools_dir=..\..\..\..\tools +set bin2hex=%tools_dir%\bin2hex.exe +set bin2hex_cpp=%tools_dir%\bin2hex.cpp + +if not exist %bin2hex% ( + cl.exe /EHsc /nologo /Fe%tools_dir%\ /Fo%tools_dir%\ %bin2hex_cpp% +) + +set PATH=%tools_dir%;%PATH% + +@rem single texture VS +fxc.exe /nologo /T vs_4_0 /E single_texture_vs /Fo shader.bin shaders.hlsl +%bin2hex% shader.bin single_texture_vs > hlsl_compiled/single_texture_vs.cpp +del shader.bin + +fxc.exe /nologo /T vs_4_0 /E single_texture_clipping_plane_vs /Fo shader.bin shaders.hlsl +%bin2hex% shader.bin single_texture_clipping_plane_vs > hlsl_compiled/single_texture_clipping_plane_vs.cpp +del shader.bin + +@rem multi texture VS +fxc.exe /nologo /T vs_4_0 /E multi_texture_vs /Fo shader.bin shaders.hlsl +%bin2hex% shader.bin multi_texture_vs > hlsl_compiled/multi_texture_vs.cpp +del shader.bin + +fxc.exe /nologo /T vs_4_0 /E multi_texture_clipping_plane_vs /Fo shader.bin shaders.hlsl +%bin2hex% shader.bin multi_texture_clipping_plane_vs > hlsl_compiled/multi_texture_clipping_plane_vs.cpp +del shader.bin + +@rem signle texture PS +fxc.exe /nologo /T ps_4_0 /E single_texture_ps /Fo shader.bin shaders.hlsl +%bin2hex% shader.bin single_texture_ps > hlsl_compiled/single_texture_ps.cpp +del shader.bin + +fxc.exe /nologo /T ps_4_0 /E single_texture_ps /Fo shader.bin shaders.hlsl /DALPHA_TEST_GT0 +%bin2hex% shader.bin single_texture_gt0_ps > hlsl_compiled/single_texture_gt0_ps.cpp +del shader.bin + +fxc.exe /nologo /T ps_4_0 /E single_texture_ps /Fo shader.bin shaders.hlsl /DALPHA_TEST_LT80 +%bin2hex% shader.bin single_texture_lt80_ps > hlsl_compiled/single_texture_lt80_ps.cpp +del shader.bin + +fxc.exe /nologo /T ps_4_0 /E single_texture_ps /Fo shader.bin shaders.hlsl /DALPHA_TEST_GE80 +%bin2hex% shader.bin single_texture_ge80_ps > hlsl_compiled/single_texture_ge80_ps.cpp +del shader.bin + +@rem multi texture mul PS +fxc.exe /nologo /T ps_4_0 /E multi_texture_mul_ps /Fo shader.bin shaders.hlsl +%bin2hex% shader.bin multi_texture_mul_ps > hlsl_compiled/multi_texture_mul_ps.cpp +del shader.bin + +fxc.exe /nologo /T ps_4_0 /E multi_texture_mul_ps /Fo shader.bin shaders.hlsl /DALPHA_TEST_GT0 +%bin2hex% shader.bin multi_texture_mul_gt0_ps > hlsl_compiled/multi_texture_mul_gt0_ps.cpp +del shader.bin + +fxc.exe /nologo /T ps_4_0 /E multi_texture_mul_ps /Fo shader.bin shaders.hlsl /DALPHA_TEST_LT80 +%bin2hex% shader.bin multi_texture_mul_lt80_ps > hlsl_compiled/multi_texture_mul_lt80_ps.cpp +del shader.bin + +fxc.exe /nologo /T ps_4_0 /E multi_texture_mul_ps /Fo shader.bin shaders.hlsl /DALPHA_TEST_GE80 +%bin2hex% shader.bin multi_texture_mul_ge80_ps > 
diff --git a/code/renderer_vulkan/shaders/compile_hlsl.bat b/code/renderer_vulkan/shaders/compile_hlsl.bat
new file mode 100644
index 0000000000..fed667664f
--- /dev/null
+++ b/code/renderer_vulkan/shaders/compile_hlsl.bat
@@ -0,0 +1,82 @@
+@echo off
+set "VSCMD_START_DIR=%CD%"
+call "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\Common7\Tools\VsDevCmd.bat"
+
+set tools_dir=..\..\..\..\tools
+set bin2hex=%tools_dir%\bin2hex.exe
+set bin2hex_cpp=%tools_dir%\bin2hex.cpp
+
+if not exist %bin2hex% (
+    cl.exe /EHsc /nologo /Fe%tools_dir%\ /Fo%tools_dir%\ %bin2hex_cpp%
+)
+
+set PATH=%tools_dir%;%PATH%
+
+@rem single texture VS
+fxc.exe /nologo /T vs_4_0 /E single_texture_vs /Fo shader.bin shaders.hlsl
+%bin2hex% shader.bin single_texture_vs > hlsl_compiled/single_texture_vs.cpp
+del shader.bin
+
+fxc.exe /nologo /T vs_4_0 /E single_texture_clipping_plane_vs /Fo shader.bin shaders.hlsl
+%bin2hex% shader.bin single_texture_clipping_plane_vs > hlsl_compiled/single_texture_clipping_plane_vs.cpp
+del shader.bin
+
+@rem multi texture VS
+fxc.exe /nologo /T vs_4_0 /E multi_texture_vs /Fo shader.bin shaders.hlsl
+%bin2hex% shader.bin multi_texture_vs > hlsl_compiled/multi_texture_vs.cpp
+del shader.bin
+
+fxc.exe /nologo /T vs_4_0 /E multi_texture_clipping_plane_vs /Fo shader.bin shaders.hlsl
+%bin2hex% shader.bin multi_texture_clipping_plane_vs > hlsl_compiled/multi_texture_clipping_plane_vs.cpp
+del shader.bin
+
+@rem single texture PS
+fxc.exe /nologo /T ps_4_0 /E single_texture_ps /Fo shader.bin shaders.hlsl
+%bin2hex% shader.bin single_texture_ps > hlsl_compiled/single_texture_ps.cpp
+del shader.bin
+
+fxc.exe /nologo /T ps_4_0 /E single_texture_ps /Fo shader.bin shaders.hlsl /DALPHA_TEST_GT0
+%bin2hex% shader.bin single_texture_gt0_ps > hlsl_compiled/single_texture_gt0_ps.cpp
+del shader.bin
+
+fxc.exe /nologo /T ps_4_0 /E single_texture_ps /Fo shader.bin shaders.hlsl /DALPHA_TEST_LT80
+%bin2hex% shader.bin single_texture_lt80_ps > hlsl_compiled/single_texture_lt80_ps.cpp
+del shader.bin
+
+fxc.exe /nologo /T ps_4_0 /E single_texture_ps /Fo shader.bin shaders.hlsl /DALPHA_TEST_GE80
+%bin2hex% shader.bin single_texture_ge80_ps > hlsl_compiled/single_texture_ge80_ps.cpp
+del shader.bin
+
+@rem multi texture mul PS
+fxc.exe /nologo /T ps_4_0 /E multi_texture_mul_ps /Fo shader.bin shaders.hlsl
+%bin2hex% shader.bin multi_texture_mul_ps > hlsl_compiled/multi_texture_mul_ps.cpp
+del shader.bin
+
+fxc.exe /nologo /T ps_4_0 /E multi_texture_mul_ps /Fo shader.bin shaders.hlsl /DALPHA_TEST_GT0
+%bin2hex% shader.bin multi_texture_mul_gt0_ps > hlsl_compiled/multi_texture_mul_gt0_ps.cpp
+del shader.bin
+
+fxc.exe /nologo /T ps_4_0 /E multi_texture_mul_ps /Fo shader.bin shaders.hlsl /DALPHA_TEST_LT80
+%bin2hex% shader.bin multi_texture_mul_lt80_ps > hlsl_compiled/multi_texture_mul_lt80_ps.cpp
+del shader.bin
+
+fxc.exe /nologo /T ps_4_0 /E multi_texture_mul_ps /Fo shader.bin shaders.hlsl /DALPHA_TEST_GE80
+%bin2hex% shader.bin multi_texture_mul_ge80_ps > hlsl_compiled/multi_texture_mul_ge80_ps.cpp
+del shader.bin
+
+@rem multi texture add PS
+fxc.exe /nologo /T ps_4_0 /E multi_texture_add_ps /Fo shader.bin shaders.hlsl
+%bin2hex% shader.bin multi_texture_add_ps > hlsl_compiled/multi_texture_add_ps.cpp
+del shader.bin
+
+fxc.exe /nologo /T ps_4_0 /E multi_texture_add_ps /Fo shader.bin shaders.hlsl /DALPHA_TEST_GT0
+%bin2hex% shader.bin multi_texture_add_gt0_ps > hlsl_compiled/multi_texture_add_gt0_ps.cpp
+del shader.bin
+
+fxc.exe /nologo /T ps_4_0 /E multi_texture_add_ps /Fo shader.bin shaders.hlsl /DALPHA_TEST_LT80
+%bin2hex% shader.bin multi_texture_add_lt80_ps > hlsl_compiled/multi_texture_add_lt80_ps.cpp
+del shader.bin
+
+fxc.exe /nologo /T ps_4_0 /E multi_texture_add_ps /Fo shader.bin shaders.hlsl /DALPHA_TEST_GE80
+%bin2hex% shader.bin multi_texture_add_ge80_ps > hlsl_compiled/multi_texture_add_ge80_ps.cpp
+del shader.bin
diff --git a/code/renderer_vulkan/shaders/multi_texture.frag b/code/renderer_vulkan/shaders/multi_texture.frag
new file mode 100644
index 0000000000..155d640f9d
--- /dev/null
+++ b/code/renderer_vulkan/shaders/multi_texture.frag
@@ -0,0 +1,36 @@
+#version 450
+
+layout(set = 0, binding = 0) uniform sampler2D texture0;
+layout(set = 1, binding = 0) uniform sampler2D texture1;
+
+layout(location = 0) in vec4 frag_color;
+layout(location = 1) in vec2 frag_tex_coord0;
+layout(location = 2) in vec2 frag_tex_coord1;
+layout(location = 3) in float frag_clip_dist;
+
+layout(location = 0) out vec4 out_color;
+
+layout (constant_id = 0) const int alpha_test_func = 0;
+layout (constant_id = 1) const int color_op = 0;
+layout (constant_id = 2) const int clip_plane = 0;
+
+void main() {
+    if (clip_plane != 0 && frag_clip_dist < 0.0) discard;
+
+    vec4 color_a = frag_color * texture(texture0, frag_tex_coord0);
+    vec4 color_b = texture(texture1, frag_tex_coord1);
+
+    if (color_op != 0)
+        out_color = vec4(color_a.rgb + color_b.rgb, color_a.a * color_b.a);
+    else {
+        out_color = color_a * color_b;
+    }
+
+    if (alpha_test_func == 1) {
+        if (out_color.a == 0.0f) discard;
+    } else if (alpha_test_func == 2) {
+        if (out_color.a >= 0.5f) discard;
+    } else if (alpha_test_func == 3) {
+        if (out_color.a < 0.5f) discard;
+    }
+}
diff --git a/code/renderer_vulkan/shaders/multi_texture.vert b/code/renderer_vulkan/shaders/multi_texture.vert
new file mode 100644
index 0000000000..334ef07c7c
--- /dev/null
+++ b/code/renderer_vulkan/shaders/multi_texture.vert
@@ -0,0 +1,37 @@
+#version 450
+
+// 128 bytes
+layout(push_constant) uniform Transform {
+    mat4x4 clip_space_xform;
+    // a single-precision floating-point matrix with 3 columns and 4 rows
+    mat3x4 eye_space_xform;
+    vec4 clipping_plane; // in eye space
+};
+
+layout(location = 0) in vec3 in_position;
+layout(location = 1) in vec4 in_color;
+layout(location = 2) in vec2 in_tex_coord0;
+layout(location = 3) in vec2 in_tex_coord1;
+
+layout(location = 0) out vec4 frag_color;
+layout(location = 1) out vec2 frag_tex_coord0;
+layout(location = 2) out vec2 frag_tex_coord1;
+layout(location = 3) out float frag_clip_dist;
+
+layout (constant_id = 2) const int clip_plane = 0;
+
+out gl_PerVertex {
+    vec4 gl_Position;
+};
+
+void main() {
+    vec4 p = vec4(in_position, 1.0);
+    gl_Position = clip_space_xform * p;
+
+    if (clip_plane != 0)
+        frag_clip_dist = dot(clipping_plane, vec4( p * eye_space_xform, 1.0));
+
+    frag_color = in_color;
+    frag_tex_coord0 = in_tex_coord0;
+    frag_tex_coord1 = in_tex_coord1;
+}
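The push_constant block above is exactly 128 bytes: a mat4x4 (64 bytes) at offset 0, a mat3x4 laid out as three vec4 columns (48 bytes) at offset 64, and a vec4 at offset 112. These match the 0x00/0x40/0x70 Offset decorations visible in the embedded SPIR-V earlier, and 128 bytes is the minimum maxPushConstantsSize that Vulkan guarantees on every implementation. A C-side mirror of the block and its upload, sketched with assumed names (not taken from the patch):

#include <vulkan/vulkan.h>

/* 128 bytes total, the spec-guaranteed minimum maxPushConstantsSize. */
typedef struct {
    float clip_space_xform[16]; /* mat4x4, offset 0 */
    float eye_space_xform[12];  /* mat3x4: three vec4 columns, offset 64 */
    float clipping_plane[4];    /* vec4 in eye space, offset 112 */
} Transform_Push_Constants;

static void push_transform(VkCommandBuffer cmd, VkPipelineLayout pipeline_layout,
                           const Transform_Push_Constants *xform)
{
    /* Assumes the pipeline layout declares a VkPushConstantRange with
       stageFlags = VK_SHADER_STAGE_VERTEX_BIT, offset 0, size 128. */
    vkCmdPushConstants(cmd, pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT,
                       0, sizeof(*xform), xform);
}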
diff --git a/code/renderer_vulkan/shaders/multi_texture_clipping_plane.vert b/code/renderer_vulkan/shaders/multi_texture_clipping_plane.vert
new file mode 100644
index 0000000000..b9f12ebf52
--- /dev/null
+++ b/code/renderer_vulkan/shaders/multi_texture_clipping_plane.vert
@@ -0,0 +1,34 @@
+#version 450
+
+// 128 bytes
+layout(push_constant) uniform Transform {
+    mat4x4 clip_space_xform;
+    // a single-precision floating-point matrix with 3 columns and 4 rows
+    mat3x4 eye_space_xform;
+    vec4 clipping_plane; // in eye space
+};
+
+layout(location = 0) in vec3 in_position;
+layout(location = 1) in vec4 in_color;
+layout(location = 2) in vec2 in_tex_coord0;
+layout(location = 3) in vec2 in_tex_coord1;
+
+layout(location = 0) out vec4 frag_color;
+layout(location = 1) out vec2 frag_tex_coord0;
+layout(location = 2) out vec2 frag_tex_coord1;
+
+out gl_PerVertex {
+    vec4 gl_Position;
+    float gl_ClipDistance[1];
+};
+
+void main() {
+    vec4 p = vec4(in_position, 1.0);
+
+    gl_Position = clip_space_xform * p;
+    gl_ClipDistance[0] = dot(clipping_plane, vec4( p * eye_space_xform, 1.0));
+
+    frag_color = in_color;
+    frag_tex_coord0 = in_tex_coord0;
+    frag_tex_coord1 = in_tex_coord1;
+}
diff --git a/code/renderer_vulkan/shaders/shaders.hlsl b/code/renderer_vulkan/shaders/shaders.hlsl
new file mode 100644
index 0000000000..e50639a41e
--- /dev/null
+++ b/code/renderer_vulkan/shaders/shaders.hlsl
@@ -0,0 +1,131 @@
+struct Single_Texture_PS_Data {
+    float4 position : SV_POSITION;
+    float4 color : COLOR;
+    float2 uv0 : TEXCOORD;
+};
+
+struct Multi_Texture_PS_Data {
+    float4 position : SV_POSITION;
+    float4 color : COLOR;
+    float2 uv0 : TEXCOORD0;
+    float2 uv1 : TEXCOORD1;
+};
+
+cbuffer Constants : register(b0) {
+    float4x4 clip_space_xform;
+    float4x3 eye_space_xform;
+    float4 clipping_plane; // in eye space
+};
+
+Texture2D texture0 : register(t0);
+SamplerState sampler0 : register(s0);
+
+Texture2D texture1 : register(t1);
+SamplerState sampler1 : register(s1);
+
+Single_Texture_PS_Data single_texture_vs(
+    float4 position : POSITION,
+    float4 color : COLOR,
+    float2 uv0 : TEXCOORD)
+{
+    Single_Texture_PS_Data ps_data;
+    ps_data.position = mul(clip_space_xform, position);
+    ps_data.color = color;
+    ps_data.uv0 = uv0;
+    return ps_data;
+}
+
+Single_Texture_PS_Data single_texture_clipping_plane_vs(
+    float4 position : POSITION,
+    float4 color : COLOR,
+    float2 uv0 : TEXCOORD,
+    out float clip : SV_ClipDistance)
+{
+    clip = dot(clipping_plane.xyz, mul(position, eye_space_xform)) + clipping_plane.w;
+
+    Single_Texture_PS_Data ps_data;
+    ps_data.position = mul(clip_space_xform, position);
+    ps_data.color = color;
+    ps_data.uv0 = uv0;
+    return ps_data;
+}
+
+Multi_Texture_PS_Data multi_texture_vs(
+    float4 position : POSITION,
+    float4 color : COLOR,
+    float2 uv0 : TEXCOORD0,
+    float2 uv1 : TEXCOORD1)
+{
+    Multi_Texture_PS_Data ps_data;
+    ps_data.position = mul(clip_space_xform, position);
+    ps_data.color = color;
+    ps_data.uv0 = uv0;
+    ps_data.uv1 = uv1;
+    return ps_data;
+}
+
+Multi_Texture_PS_Data multi_texture_clipping_plane_vs(
+    float4 position : POSITION,
+    float4 color : COLOR,
+    float2 uv0 : TEXCOORD0,
+    float2 uv1 : TEXCOORD1,
+    out float clip : SV_ClipDistance)
+{
+    clip = dot(clipping_plane.xyz, mul(position, eye_space_xform)) + clipping_plane.w;
+
+    Multi_Texture_PS_Data ps_data;
+    ps_data.position = mul(clip_space_xform, position);
+    ps_data.color = color;
+    ps_data.uv0 = uv0;
+    ps_data.uv1 = uv1;
+    return ps_data;
+}
+
+float4 single_texture_ps(Single_Texture_PS_Data data) : SV_TARGET {
+    float4 out_color = data.color * texture0.Sample(sampler0, data.uv0);
+
+#if
defined(ALPHA_TEST_GT0) + if (out_color.a == 0.0f) discard; +#elif defined(ALPHA_TEST_LT80) + if (out_color.a >= 0.5f) discard; +#elif defined(ALPHA_TEST_GE80) + if (out_color.a < 0.5f) discard; +#endif + + return out_color; +} + +float4 multi_texture_mul_ps(Multi_Texture_PS_Data data) : SV_TARGET { + float4 out_color = data.color * texture0.Sample(sampler0, data.uv0) * texture1.Sample(sampler1, data.uv1); + +#if defined(ALPHA_TEST_GT0) + if (out_color.a == 0.0f) discard; +#elif defined(ALPHA_TEST_LT80) + if (out_color.a >= 0.5f) discard; +#elif defined(ALPHA_TEST_GE80) + if (out_color.a < 0.5f) discard; +#endif + + return out_color; +} + +float4 multi_texture_add_ps(Multi_Texture_PS_Data data) : SV_TARGET { + float4 color_a = data.color * texture0.Sample(sampler0, data.uv0); + float4 color_b = texture1.Sample(sampler1, data.uv1); + + float4 out_color = float4( + color_a.r + color_b.r, + color_a.g + color_b.g, + color_a.b + color_b.b, + color_a.a * color_b.a); + +#if defined(ALPHA_TEST_GT0) + if (out_color.a == 0.0f) discard; +#elif defined(ALPHA_TEST_LT80) + if (out_color.a >= 0.5f) discard; +#elif defined(ALPHA_TEST_GE80) + if (out_color.a < 0.5f) discard; +#endif + + return out_color; +} diff --git a/code/renderer_vulkan/shaders/single_texture.frag b/code/renderer_vulkan/shaders/single_texture.frag new file mode 100644 index 0000000000..f20560e973 --- /dev/null +++ b/code/renderer_vulkan/shaders/single_texture.frag @@ -0,0 +1,26 @@ +#version 450 + +layout(set = 0, binding = 0) uniform sampler2D texture0; + +layout(location = 0) in vec4 frag_color; +layout(location = 1) in vec2 frag_tex_coord; +layout(location = 3) in float frag_clip_dist; + +layout(location = 0) out vec4 out_color; + +layout (constant_id = 0) const int alpha_test_func = 0; +layout (constant_id = 2) const int clip_plane = 0; + +void main() { + if (clip_plane != 0 && frag_clip_dist < 0.0) discard; + + out_color = frag_color * texture(texture0, frag_tex_coord); + + if (alpha_test_func == 1) { + if (out_color.a == 0.0f) discard; + } else if (alpha_test_func == 2) { + if (out_color.a >= 0.5f) discard; + } else if (alpha_test_func == 3) { + if (out_color.a < 0.5f) discard; + } +} diff --git a/code/renderer_vulkan/shaders/single_texture.vert b/code/renderer_vulkan/shaders/single_texture.vert new file mode 100644 index 0000000000..060dc7027f --- /dev/null +++ b/code/renderer_vulkan/shaders/single_texture.vert @@ -0,0 +1,34 @@ +#version 450 + +// 128 bytes +layout(push_constant) uniform Transform { + mat4x4 clip_space_xform; + // a single-precision floating-point matrix with 3 columns and 4 rows + mat3x4 eye_space_xform; + vec4 clipping_plane; // in eye space +}; + +layout(location = 0) in vec3 in_position; +layout(location = 1) in vec4 in_color; +layout(location = 2) in vec2 in_tex_coord; + +layout(location = 0) out vec4 frag_color; +layout(location = 1) out vec2 frag_tex_coord; +layout(location = 3) out float frag_clip_dist; + +layout (constant_id = 2) const int clip_plane = 0; + +out gl_PerVertex { + vec4 gl_Position; +}; + +void main() { + vec4 p = vec4(in_position, 1.0); + gl_Position = clip_space_xform * p; + + if (clip_plane != 0) + frag_clip_dist = dot(clipping_plane, vec4( p * eye_space_xform, 1.0)); + + frag_color = in_color; + frag_tex_coord = in_tex_coord; +} diff --git a/code/renderer_vulkan/shaders/single_texture_clipping_plane.vert b/code/renderer_vulkan/shaders/single_texture_clipping_plane.vert new file mode 100644 index 0000000000..e4b54cbb57 --- /dev/null +++ 
b/code/renderer_vulkan/shaders/single_texture_clipping_plane.vert @@ -0,0 +1,31 @@ +#version 450 + +// 128 bytes +layout(push_constant) uniform Transform { + mat4x4 clip_space_xform; + // a single-precision floating-point matrix with 3 columns and 4 rows + mat3x4 eye_space_xform; + vec4 clipping_plane; // in eye space +}; + +layout(location = 0) in vec3 in_position; +layout(location = 1) in vec4 in_color; +layout(location = 2) in vec2 in_tex_coord; + +layout(location = 0) out vec4 frag_color; +layout(location = 1) out vec2 frag_tex_coord; + +out gl_PerVertex { + vec4 gl_Position; + float gl_ClipDistance[1]; +}; + +void main() { + vec4 p = vec4(in_position, 1.0); + + gl_Position = clip_space_xform * p; + gl_ClipDistance[0] = dot(clipping_plane, vec4( p * eye_space_xform, 1.0)); + + frag_color = in_color; + frag_tex_coord = in_tex_coord; +} diff --git a/code/renderer_vulkan/stb_image.h b/code/renderer_vulkan/stb_image.h new file mode 100644 index 0000000000..9e61606007 --- /dev/null +++ b/code/renderer_vulkan/stb_image.h @@ -0,0 +1,7049 @@ +/* stb_image - v2.14 - public domain image loader - http://nothings.org/stb_image.h + no warranty implied; use at your own risk + + Do this: + #define STB_IMAGE_IMPLEMENTATION + before you include this file in *one* C or C++ file to create the implementation. + + // i.e. it should look like this: + #include ... + #include ... + #include ... + #define STB_IMAGE_IMPLEMENTATION + #include "stb_image.h" + + You can #define STBI_ASSERT(x) before the #include to avoid using assert.h. + And #define STBI_MALLOC, STBI_REALLOC, and STBI_FREE to avoid using malloc,realloc,free + + + QUICK NOTES: + Primarily of interest to game developers and other people who can + avoid problematic images and only need the trivial interface + + JPEG baseline & progressive (12 bpc/arithmetic not supported, same as stock IJG lib) + PNG 1/2/4/8/16-bit-per-channel + + TGA (not sure what subset, if a subset) + BMP non-1bpp, non-RLE + PSD (composited view only, no extra channels, 8/16 bit-per-channel) + + GIF (*comp always reports as 4-channel) + HDR (radiance rgbE format) + PIC (Softimage PIC) + PNM (PPM and PGM binary only) + + Animated GIF still needs a proper API, but here's one way to do it: + http://gist.github.com/urraka/685d9a6340b26b830d49 + + - decode from memory or through FILE (define STBI_NO_STDIO to remove code) + - decode from arbitrary I/O callbacks + - SIMD acceleration on x86/x64 (SSE2) and ARM (NEON) + + Full documentation under "DOCUMENTATION" below. + + +LICENSE + + See end of file for license information. + +RECENT REVISION HISTORY: + + 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs + 2.13 (2016-12-04) experimental 16-bit API, only for PNG so far; fixes + 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes + 2.11 (2016-04-02) 16-bit PNGS; enable SSE2 in non-gcc x64 + RGB-format JPEG; remove white matting in PSD; + allocate large structures on the stack; + correct channel count for PNG & BMP + 2.10 (2016-01-22) avoid warning introduced in 2.09 + 2.09 (2016-01-16) 16-bit TGA; comments in PNM files; STBI_REALLOC_SIZED + 2.08 (2015-09-13) fix to 2.07 cleanup, reading RGB PSD as RGBA + 2.07 (2015-09-13) partial animated GIF support + limited 16-bit PSD support + minor bugs, code cleanup, and compiler warnings + + See end of file for full revision history. 
+ + + ============================ Contributors ========================= + + Image formats Extensions, features + Sean Barrett (jpeg, png, bmp) Jetro Lauha (stbi_info) + Nicolas Schulz (hdr, psd) Martin "SpartanJ" Golini (stbi_info) + Jonathan Dummer (tga) James "moose2000" Brown (iPhone PNG) + Jean-Marc Lienher (gif) Ben "Disch" Wenger (io callbacks) + Tom Seddon (pic) Omar Cornut (1/2/4-bit PNG) + Thatcher Ulrich (psd) Nicolas Guillemot (vertical flip) + Ken Miller (pgm, ppm) Richard Mitton (16-bit PSD) + github:urraka (animated gif) Junggon Kim (PNM comments) + Daniel Gibson (16-bit TGA) + socks-the-fox (16-bit TGA) + Jeremy Sawicki (handle all ImageNet JPGs) + Optimizations & bugfixes + Fabian "ryg" Giesen + Arseny Kapoulkine + + Bug & warning fixes + Marc LeBlanc David Woo Guillaume George Martins Mozeiko + Christpher Lloyd Martin Golini Jerry Jansson Joseph Thomson + Dave Moore Roy Eltham Hayaki Saito Phil Jordan + Won Chun Luke Graham Johan Duparc Nathan Reed + the Horde3D community Thomas Ruf Ronny Chevalier Nick Verigakis + Janez Zemva John Bartholomew Michal Cichon github:svdijk + Jonathan Blow Ken Hamada Tero Hanninen Baldur Karlsson + Laurent Gomila Cort Stratton Sergio Gonzalez github:romigrou + Aruelien Pocheville Thibault Reuille Cass Everitt Matthew Gregan + Ryamond Barbiero Paul Du Bois Engin Manap github:snagar + Michaelangel007@github Oriol Ferrer Mesia Dale Weiler github:Zelex + Philipp Wiesemann Josh Tobin github:rlyeh github:grim210@github + Blazej Dariusz Roszkowski github:sammyhw + +*/ + +#ifndef STBI_INCLUDE_STB_IMAGE_H +#define STBI_INCLUDE_STB_IMAGE_H + +// DOCUMENTATION +// +// Limitations: +// - no 16-bit-per-channel PNG +// - no 12-bit-per-channel JPEG +// - no JPEGs with arithmetic coding +// - no 1-bit BMP +// - GIF always returns *comp=4 +// +// Basic usage (see HDR discussion below for HDR usage): +// int x,y,n; +// unsigned char *data = stbi_load(filename, &x, &y, &n, 0); +// // ... process data if not NULL ... +// // ... x = width, y = height, n = # 8-bit components per pixel ... +// // ... replace '0' with '1'..'4' to force that many components per pixel +// // ... but 'n' will always be the number that it would have been if you said 0 +// stbi_image_free(data) +// +// Standard parameters: +// int *x -- outputs image width in pixels +// int *y -- outputs image height in pixels +// int *channels_in_file -- outputs # of image components in image file +// int desired_channels -- if non-zero, # of image components requested in result +// +// The return value from an image loader is an 'unsigned char *' which points +// to the pixel data, or NULL on an allocation failure or if the image is +// corrupt or invalid. The pixel data consists of *y scanlines of *x pixels, +// with each pixel consisting of N interleaved 8-bit components; the first +// pixel pointed to is top-left-most in the image. There is no padding between +// image scanlines or between pixels, regardless of format. The number of +// components N is 'req_comp' if req_comp is non-zero, or *comp otherwise. +// If req_comp is non-zero, *comp has the number of components that _would_ +// have been output otherwise. E.g. if you set req_comp to 4, you will always +// get RGBA output, but you can check *comp to see if it's trivially opaque +// because e.g. there were only 3 channels in the source image. 
+// +// An output image with N components has the following components interleaved +// in this order in each pixel: +// +// N=#comp components +// 1 grey +// 2 grey, alpha +// 3 red, green, blue +// 4 red, green, blue, alpha +// +// If image loading fails for any reason, the return value will be NULL, +// and *x, *y, *comp will be unchanged. The function stbi_failure_reason() +// can be queried for an extremely brief, end-user unfriendly explanation +// of why the load failed. Define STBI_NO_FAILURE_STRINGS to avoid +// compiling these strings at all, and STBI_FAILURE_USERMSG to get slightly +// more user-friendly ones. +// +// Paletted PNG, BMP, GIF, and PIC images are automatically depalettized. +// +// =========================================================================== +// +// Philosophy +// +// stb libraries are designed with the following priorities: +// +// 1. easy to use +// 2. easy to maintain +// 3. good performance +// +// Sometimes I let "good performance" creep up in priority over "easy to maintain", +// and for best performance I may provide less-easy-to-use APIs that give higher +// performance, in addition to the easy to use ones. Nevertheless, it's important +// to keep in mind that from the standpoint of you, a client of this library, +// all you care about is #1 and #3, and stb libraries DO NOT emphasize #3 above all. +// +// Some secondary priorities arise directly from the first two, some of which +// make more explicit reasons why performance can't be emphasized. +// +// - Portable ("ease of use") +// - Small source code footprint ("easy to maintain") +// - No dependencies ("ease of use") +// +// =========================================================================== +// +// I/O callbacks +// +// I/O callbacks allow you to read from arbitrary sources, like packaged +// files or some other source. Data read from callbacks are processed +// through a small internal buffer (currently 128 bytes) to try to reduce +// overhead. +// +// The three functions you must define are "read" (reads some bytes of data), +// "skip" (skips some bytes of data), "eof" (reports if the stream is at the end). +// +// =========================================================================== +// +// SIMD support +// +// The JPEG decoder will try to automatically use SIMD kernels on x86 when +// supported by the compiler. For ARM Neon support, you must explicitly +// request it. +// +// (The old do-it-yourself SIMD API is no longer supported in the current +// code.) +// +// On x86, SSE2 will automatically be used when available based on a run-time +// test; if not, the generic C versions are used as a fall-back. On ARM targets, +// the typical path is to have separate builds for NEON and non-NEON devices +// (at least this is true for iOS and Android). Therefore, the NEON support is +// toggled by a build flag: define STBI_NEON to get NEON loops. +// +// If for some reason you do not want to use any of SIMD code, or if +// you have issues compiling it, you can disable it entirely by +// defining STBI_NO_SIMD. +// +// =========================================================================== +// +// HDR image support (disable by defining STBI_NO_HDR) +// +// stb_image now supports loading HDR images in general, and currently +// the Radiance .HDR file format, although the support is provided +// generically. 
You can still load any file through the existing interface; +// if you attempt to load an HDR file, it will be automatically remapped to +// LDR, assuming gamma 2.2 and an arbitrary scale factor defaulting to 1; +// both of these constants can be reconfigured through this interface: +// +// stbi_hdr_to_ldr_gamma(2.2f); +// stbi_hdr_to_ldr_scale(1.0f); +// +// (note, do not use _inverse_ constants; stbi_image will invert them +// appropriately). +// +// Additionally, there is a new, parallel interface for loading files as +// (linear) floats to preserve the full dynamic range: +// +// float *data = stbi_loadf(filename, &x, &y, &n, 0); +// +// If you load LDR images through this interface, those images will +// be promoted to floating point values, run through the inverse of +// constants corresponding to the above: +// +// stbi_ldr_to_hdr_scale(1.0f); +// stbi_ldr_to_hdr_gamma(2.2f); +// +// Finally, given a filename (or an open file or memory block--see header +// file for details) containing image data, you can query for the "most +// appropriate" interface to use (that is, whether the image is HDR or +// not), using: +// +// stbi_is_hdr(char *filename); +// +// =========================================================================== +// +// iPhone PNG support: +// +// By default we convert iphone-formatted PNGs back to RGB, even though +// they are internally encoded differently. You can disable this conversion +// by by calling stbi_convert_iphone_png_to_rgb(0), in which case +// you will always just get the native iphone "format" through (which +// is BGR stored in RGB). +// +// Call stbi_set_unpremultiply_on_load(1) as well to force a divide per +// pixel to remove any premultiplied alpha *only* if the image file explicitly +// says there's premultiplied data (currently only happens in iPhone images, +// and only if iPhone convert-to-rgb processing is on). +// +// =========================================================================== +// +// ADDITIONAL CONFIGURATION +// +// - You can suppress implementation of any of the decoders to reduce +// your code footprint by #defining one or more of the following +// symbols before creating the implementation. 
+//
+//   STBI_NO_JPEG
+//   STBI_NO_PNG
+//   STBI_NO_BMP
+//   STBI_NO_PSD
+//   STBI_NO_TGA
+//   STBI_NO_GIF
+//   STBI_NO_HDR
+//   STBI_NO_PIC
+//   STBI_NO_PNM   (.ppm and .pgm)
+//
+//   - You can request *only* certain decoders and suppress all other ones
+//     (this will be more forward-compatible, as addition of new decoders
+//     doesn't require you to disable them explicitly):
+//
+//   STBI_ONLY_JPEG
+//   STBI_ONLY_PNG
+//   STBI_ONLY_BMP
+//   STBI_ONLY_PSD
+//   STBI_ONLY_TGA
+//   STBI_ONLY_GIF
+//   STBI_ONLY_HDR
+//   STBI_ONLY_PIC
+//   STBI_ONLY_PNM   (.ppm and .pgm)
+//
+//   - If you use STBI_NO_PNG (or _ONLY_ without PNG), and you still
+//     want the zlib decoder to be available, #define STBI_SUPPORT_ZLIB
+//
+
+
+#ifndef STBI_NO_STDIO
+#include <stdio.h>
+#endif // STBI_NO_STDIO
+
+#define STBI_VERSION 1
+
+enum
+{
+   STBI_default = 0, // only used for req_comp
+
+   STBI_grey       = 1,
+   STBI_grey_alpha = 2,
+   STBI_rgb        = 3,
+   STBI_rgb_alpha  = 4
+};
+
+typedef unsigned char stbi_uc;
+typedef unsigned short stbi_us;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef STB_IMAGE_STATIC
+#define STBIDEF static
+#else
+#define STBIDEF extern
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// PRIMARY API - works on images of any type
+//
+
+//
+// load image by filename, open file, or memory buffer
+//
+
+typedef struct
+{
+   int      (*read)  (void *user,char *data,int size);   // fill 'data' with 'size' bytes.  return number of bytes actually read
+   void     (*skip)  (void *user,int n);                 // skip the next 'n' bytes, or 'unget' the last -n bytes if negative
+   int      (*eof)   (void *user);                       // returns nonzero if we are at end of file/data
+} stbi_io_callbacks;
+
+////////////////////////////////////
+//
+// 8-bits-per-channel interface
+//
+
+STBIDEF stbi_uc *stbi_load               (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels);
+STBIDEF stbi_uc *stbi_load_from_memory   (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels);
+STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels);
+
+#ifndef STBI_NO_STDIO
+STBIDEF stbi_uc *stbi_load_from_file     (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels);
+// for stbi_load_from_file, file pointer is left pointing immediately after image
+#endif
+
+////////////////////////////////////
+//
+// 16-bits-per-channel interface
+//
+
+STBIDEF stbi_us *stbi_load_16(char const *filename, int *x, int *y, int *channels_in_file, int desired_channels);
+#ifndef STBI_NO_STDIO
+STBIDEF stbi_us *stbi_load_from_file_16(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels);
+#endif
+// @TODO the other variants
+
+////////////////////////////////////
+//
+// float-per-channel interface
+//
+#ifndef STBI_NO_LINEAR
+   STBIDEF float *stbi_loadf                (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels);
+   STBIDEF float *stbi_loadf_from_memory    (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels);
+   STBIDEF float *stbi_loadf_from_callbacks (stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels);
+
+   #ifndef STBI_NO_STDIO
+   STBIDEF float *stbi_loadf_from_file      (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels);
+   #endif
+#endif
+
+#ifndef STBI_NO_HDR
+   STBIDEF void   stbi_hdr_to_ldr_gamma(float gamma);
+   STBIDEF void   stbi_hdr_to_ldr_scale(float scale);
+#endif // STBI_NO_HDR
+
+#ifndef STBI_NO_LINEAR + STBIDEF void stbi_ldr_to_hdr_gamma(float gamma); + STBIDEF void stbi_ldr_to_hdr_scale(float scale); +#endif // STBI_NO_LINEAR + +// stbi_is_hdr is always defined, but always returns false if STBI_NO_HDR +STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user); +STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len); +#ifndef STBI_NO_STDIO +STBIDEF int stbi_is_hdr (char const *filename); +STBIDEF int stbi_is_hdr_from_file(FILE *f); +#endif // STBI_NO_STDIO + + +// get a VERY brief reason for failure +// NOT THREADSAFE +STBIDEF const char *stbi_failure_reason (void); + +// free the loaded image -- this is just free() +STBIDEF void stbi_image_free (void *retval_from_stbi_load); + +// get image dimensions & components without fully decoding +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp); +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp); + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info (char const *filename, int *x, int *y, int *comp); +STBIDEF int stbi_info_from_file (FILE *f, int *x, int *y, int *comp); + +#endif + + + +// for image formats that explicitly notate that they have premultiplied alpha, +// we just return the colors as stored in the file. set this flag to force +// unpremultiplication. results are undefined if the unpremultiply overflow. +STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply); + +// indicate whether we should process iphone images back to canonical format, +// or just pass them through "as-is" +STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert); + +// flip the image vertically, so the first pixel in the output array is the bottom left +STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip); + +// ZLIB client - used by PNG, available for other purposes + +STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen); +STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header); +STBIDEF char *stbi_zlib_decode_malloc(const char *buffer, int len, int *outlen); +STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + +STBIDEF char *stbi_zlib_decode_noheader_malloc(const char *buffer, int len, int *outlen); +STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + + +#ifdef __cplusplus +} +#endif + +// +// +//// end header file ///////////////////////////////////////////////////// +#endif // STBI_INCLUDE_STB_IMAGE_H + +#ifdef STB_IMAGE_IMPLEMENTATION + +#if defined(STBI_ONLY_JPEG) || defined(STBI_ONLY_PNG) || defined(STBI_ONLY_BMP) \ + || defined(STBI_ONLY_TGA) || defined(STBI_ONLY_GIF) || defined(STBI_ONLY_PSD) \ + || defined(STBI_ONLY_HDR) || defined(STBI_ONLY_PIC) || defined(STBI_ONLY_PNM) \ + || defined(STBI_ONLY_ZLIB) + #ifndef STBI_ONLY_JPEG + #define STBI_NO_JPEG + #endif + #ifndef STBI_ONLY_PNG + #define STBI_NO_PNG + #endif + #ifndef STBI_ONLY_BMP + #define STBI_NO_BMP + #endif + #ifndef STBI_ONLY_PSD + #define STBI_NO_PSD + #endif + #ifndef STBI_ONLY_TGA + #define STBI_NO_TGA + #endif + #ifndef STBI_ONLY_GIF + #define STBI_NO_GIF + #endif + #ifndef STBI_ONLY_HDR + #define STBI_NO_HDR + #endif + #ifndef STBI_ONLY_PIC + #define STBI_NO_PIC + #endif + #ifndef STBI_ONLY_PNM + #define STBI_NO_PNM + #endif +#endif + 
+#if defined(STBI_NO_PNG) && !defined(STBI_SUPPORT_ZLIB) && !defined(STBI_NO_ZLIB)
+#define STBI_NO_ZLIB
+#endif
+
+
+#include <stdarg.h>
+#include <stddef.h> // ptrdiff_t on osx
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+
+#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR)
+#include <math.h>  // ldexp
+#endif
+
+#ifndef STBI_NO_STDIO
+#include <stdio.h>
+#endif
+
+#ifndef STBI_ASSERT
+#include <assert.h>
+#define STBI_ASSERT(x) assert(x)
+#endif
+
+
+#ifndef _MSC_VER
+   #ifdef __cplusplus
+   #define stbi_inline inline
+   #else
+   #define stbi_inline
+   #endif
+#else
+   #define stbi_inline __forceinline
+#endif
+
+
+#ifdef _MSC_VER
+typedef unsigned short stbi__uint16;
+typedef   signed short stbi__int16;
+typedef unsigned int   stbi__uint32;
+typedef   signed int   stbi__int32;
+#else
+#include <stdint.h>
+typedef uint16_t stbi__uint16;
+typedef int16_t  stbi__int16;
+typedef uint32_t stbi__uint32;
+typedef int32_t  stbi__int32;
+#endif
+
+// should produce compiler error if size is wrong
+typedef unsigned char validate_uint32[sizeof(stbi__uint32)==4 ? 1 : -1];
+
+#ifdef _MSC_VER
+#define STBI_NOTUSED(v)  (void)(v)
+#else
+#define STBI_NOTUSED(v)  (void)sizeof(v)
+#endif
+
+#ifdef _MSC_VER
+#define STBI_HAS_LROTL
+#endif
+
+#ifdef STBI_HAS_LROTL
+   #define stbi_lrot(x,y)  _lrotl(x,y)
+#else
+   #define stbi_lrot(x,y)  (((x) << (y)) | ((x) >> (32 - (y))))
+#endif
+
+#if defined(STBI_MALLOC) && defined(STBI_FREE) && (defined(STBI_REALLOC) || defined(STBI_REALLOC_SIZED))
+// ok
+#elif !defined(STBI_MALLOC) && !defined(STBI_FREE) && !defined(STBI_REALLOC) && !defined(STBI_REALLOC_SIZED)
+// ok
+#else
+#error "Must define all or none of STBI_MALLOC, STBI_FREE, and STBI_REALLOC (or STBI_REALLOC_SIZED)."
+#endif
+
+#ifndef STBI_MALLOC
+#define STBI_MALLOC(sz)           malloc(sz)
+#define STBI_REALLOC(p,newsz)     realloc(p,newsz)
+#define STBI_FREE(p)              free(p)
+#endif
+
+#ifndef STBI_REALLOC_SIZED
+#define STBI_REALLOC_SIZED(p,oldsz,newsz) STBI_REALLOC(p,newsz)
+#endif
+
+// x86/x64 detection
+#if defined(__x86_64__) || defined(_M_X64)
+#define STBI__X64_TARGET
+#elif defined(__i386) || defined(_M_IX86)
+#define STBI__X86_TARGET
+#endif
+
+#if defined(__GNUC__) && (defined(STBI__X86_TARGET) || defined(STBI__X64_TARGET)) && !defined(__SSE2__) && !defined(STBI_NO_SIMD)
+// NOTE: not clear do we actually need this for the 64-bit path?
+// gcc doesn't support sse2 intrinsics unless you compile with -msse2,
+// (but compiling with -msse2 allows the compiler to use SSE2 everywhere;
+// this is just broken and gcc are jerks for not fixing it properly
+// http://www.virtualdub.org/blog/pivot/entry.php?id=363 )
+#define STBI_NO_SIMD
+#endif
+
+#if defined(__MINGW32__) && defined(STBI__X86_TARGET) && !defined(STBI_MINGW_ENABLE_SSE2) && !defined(STBI_NO_SIMD)
+// Note that __MINGW32__ doesn't actually mean 32-bit, so we have to avoid STBI__X64_TARGET
+//
+// 32-bit MinGW wants ESP to be 16-byte aligned, but this is not in the
+// Windows ABI and VC++ as well as Windows DLLs don't maintain that invariant.
+// As a result, enabling SSE2 on 32-bit MinGW is dangerous when not
+// simultaneously enabling "-mstackrealign".
+//
+// See https://github.com/nothings/stb/issues/81 for more information.
+//
+// So default to no SSE2 on 32-bit MinGW. If you've read this far and added
+// -mstackrealign to your build settings, feel free to #define STBI_MINGW_ENABLE_SSE2.
+#define STBI_NO_SIMD
+#endif
+
+#if !defined(STBI_NO_SIMD) && (defined(STBI__X86_TARGET) || defined(STBI__X64_TARGET))
+#define STBI_SSE2
+#include <emmintrin.h>
+
+#ifdef _MSC_VER
+
+#if _MSC_VER >= 1400  // not VC6
+#include <intrin.h> // __cpuid
+static int stbi__cpuid3(void)
+{
+   int info[4];
+   __cpuid(info,1);
+   return info[3];
+}
+#else
+static int stbi__cpuid3(void)
+{
+   int res;
+   __asm {
+      mov  eax,1
+      cpuid
+      mov  res,edx
+   }
+   return res;
+}
+#endif
+
+#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name
+
+static int stbi__sse2_available(void)
+{
+   int info3 = stbi__cpuid3();
+   return ((info3 >> 26) & 1) != 0;
+}
+#else // assume GCC-style if not VC++
+#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16)))
+
+static int stbi__sse2_available(void)
+{
+#if defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__) >= 408 // GCC 4.8 or later
+   // GCC 4.8+ has a nice way to do this
+   return __builtin_cpu_supports("sse2");
+#else
+   // portable way to do this, preferably without using GCC inline ASM?
+   // just bail for now.
+   return 0;
+#endif
+}
+#endif
+#endif
+
+// ARM NEON
+#if defined(STBI_NO_SIMD) && defined(STBI_NEON)
+#undef STBI_NEON
+#endif
+
+#ifdef STBI_NEON
+#include <arm_neon.h>
+// assume GCC or Clang on ARM targets
+#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16)))
+#endif
+
+#ifndef STBI_SIMD_ALIGN
+#define STBI_SIMD_ALIGN(type, name) type name
+#endif
+
+///////////////////////////////////////////////
+//
+//  stbi__context struct and start_xxx functions
+
+// stbi__context structure is our basic context used by all images, so it
+// contains all the IO context, plus some basic image information
+typedef struct
+{
+   stbi__uint32 img_x, img_y;
+   int img_n, img_out_n;
+
+   stbi_io_callbacks io;
+   void *io_user_data;
+
+   int read_from_callbacks;
+   int buflen;
+   stbi_uc buffer_start[128];
+
+   stbi_uc *img_buffer, *img_buffer_end;
+   stbi_uc *img_buffer_original, *img_buffer_original_end;
+} stbi__context;
+
+
+static void stbi__refill_buffer(stbi__context *s);
+
+// initialize a memory-decode context
+static void stbi__start_mem(stbi__context *s, stbi_uc const *buffer, int len)
+{
+   s->io.read = NULL;
+   s->read_from_callbacks = 0;
+   s->img_buffer = s->img_buffer_original = (stbi_uc *) buffer;
+   s->img_buffer_end = s->img_buffer_original_end = (stbi_uc *) buffer+len;
+}
+
+// initialize a callback-based context
+static void stbi__start_callbacks(stbi__context *s, stbi_io_callbacks *c, void *user)
+{
+   s->io = *c;
+   s->io_user_data = user;
+   s->buflen = sizeof(s->buffer_start);
+   s->read_from_callbacks = 1;
+   s->img_buffer_original = s->buffer_start;
+   stbi__refill_buffer(s);
+   s->img_buffer_original_end = s->img_buffer_end;
+}
+
+#ifndef STBI_NO_STDIO
+
+static int stbi__stdio_read(void *user, char *data, int size)
+{
+   return (int) fread(data,1,size,(FILE*) user);
+}
+
+static void stbi__stdio_skip(void *user, int n)
+{
+   fseek((FILE*) user, n, SEEK_CUR);
+}
+
+static int stbi__stdio_eof(void *user)
+{
+   return feof((FILE*) user);
+}
+
+static stbi_io_callbacks stbi__stdio_callbacks =
+{
+   stbi__stdio_read,
+   stbi__stdio_skip,
+   stbi__stdio_eof,
+};
+
+static void stbi__start_file(stbi__context *s, FILE *f)
+{
+   stbi__start_callbacks(s, &stbi__stdio_callbacks, (void *) f);
+}
+
+//static void stop_file(stbi__context *s) { }
+
+#endif // !STBI_NO_STDIO
+
+static void stbi__rewind(stbi__context *s)
+{
+   // conceptually rewind SHOULD rewind to the beginning of the stream,
+   // but we just rewind to the beginning of the initial buffer, because
+   // we only use
it after doing 'test', which only ever looks at at most 92 bytes + s->img_buffer = s->img_buffer_original; + s->img_buffer_end = s->img_buffer_original_end; +} + +enum +{ + STBI_ORDER_RGB, + STBI_ORDER_BGR +}; + +typedef struct +{ + int bits_per_channel; + int num_channels; + int channel_order; +} stbi__result_info; + +#ifndef STBI_NO_JPEG +static int stbi__jpeg_test(stbi__context *s); +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNG +static int stbi__png_test(stbi__context *s); +static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_BMP +static int stbi__bmp_test(stbi__context *s); +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_TGA +static int stbi__tga_test(stbi__context *s); +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s); +static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc); +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_HDR +static int stbi__hdr_test(stbi__context *s); +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_test(stbi__context *s); +static void *stbi__pic_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_GIF +static int stbi__gif_test(stbi__context *s); +static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNM +static int stbi__pnm_test(stbi__context *s); +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +// this is not threadsafe +static const char *stbi__g_failure_reason; + +STBIDEF const char *stbi_failure_reason(void) +{ + return stbi__g_failure_reason; +} + +static int stbi__err(const char *str) +{ + stbi__g_failure_reason = str; + return 0; +} + +static void *stbi__malloc(size_t size) +{ + return STBI_MALLOC(size); +} + +// stb_image uses ints pervasively, including for offset calculations. +// therefore the largest decoded image size we can support with the +// current code, even on 64-bit targets, is INT_MAX. this is not a +// significant limitation for the intended use case. +// +// we do, however, need to make sure our size calculations don't +// overflow. hence a few helper functions for size calculations that +// multiply integers together, making sure that they're non-negative +// and no overflow occurs. 
+ +// return 1 if the sum is valid, 0 on overflow. +// negative terms are considered invalid. +static int stbi__addsizes_valid(int a, int b) +{ + if (b < 0) return 0; + // now 0 <= b <= INT_MAX, hence also + // 0 <= INT_MAX - b <= INTMAX. + // And "a + b <= INT_MAX" (which might overflow) is the + // same as a <= INT_MAX - b (no overflow) + return a <= INT_MAX - b; +} + +// returns 1 if the product is valid, 0 on overflow. +// negative factors are considered invalid. +static int stbi__mul2sizes_valid(int a, int b) +{ + if (a < 0 || b < 0) return 0; + if (b == 0) return 1; // mul-by-0 is always safe + // portable way to check for no overflows in a*b + return a <= INT_MAX/b; +} + +// returns 1 if "a*b + add" has no negative terms/factors and doesn't overflow +static int stbi__mad2sizes_valid(int a, int b, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__addsizes_valid(a*b, add); +} + +// returns 1 if "a*b*c + add" has no negative terms/factors and doesn't overflow +static int stbi__mad3sizes_valid(int a, int b, int c, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && + stbi__addsizes_valid(a*b*c, add); +} + +// returns 1 if "a*b*c*d + add" has no negative terms/factors and doesn't overflow +static int stbi__mad4sizes_valid(int a, int b, int c, int d, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && + stbi__mul2sizes_valid(a*b*c, d) && stbi__addsizes_valid(a*b*c*d, add); +} + +// mallocs with size overflow checking +static void *stbi__malloc_mad2(int a, int b, int add) +{ + if (!stbi__mad2sizes_valid(a, b, add)) return NULL; + return stbi__malloc(a*b + add); +} + +static void *stbi__malloc_mad3(int a, int b, int c, int add) +{ + if (!stbi__mad3sizes_valid(a, b, c, add)) return NULL; + return stbi__malloc(a*b*c + add); +} + +static void *stbi__malloc_mad4(int a, int b, int c, int d, int add) +{ + if (!stbi__mad4sizes_valid(a, b, c, d, add)) return NULL; + return stbi__malloc(a*b*c*d + add); +} + +// stbi__err - error +// stbi__errpf - error returning pointer to float +// stbi__errpuc - error returning pointer to unsigned char + +#ifdef STBI_NO_FAILURE_STRINGS + #define stbi__err(x,y) 0 +#elif defined(STBI_FAILURE_USERMSG) + #define stbi__err(x,y) stbi__err(y) +#else + #define stbi__err(x,y) stbi__err(x) +#endif + +#define stbi__errpf(x,y) ((float *)(size_t) (stbi__err(x,y)?NULL:NULL)) +#define stbi__errpuc(x,y) ((unsigned char *)(size_t) (stbi__err(x,y)?NULL:NULL)) + +STBIDEF void stbi_image_free(void *retval_from_stbi_load) +{ + STBI_FREE(retval_from_stbi_load); +} + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp); +#endif + +#ifndef STBI_NO_HDR +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp); +#endif + +static int stbi__vertically_flip_on_load = 0; + +STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip) +{ + stbi__vertically_flip_on_load = flag_true_if_should_flip; +} + +static void *stbi__load_main(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) +{ + memset(ri, 0, sizeof(*ri)); // make sure it's initialized if we add new fields + ri->bits_per_channel = 8; // default is 8 so most paths don't have to be changed + ri->channel_order = STBI_ORDER_RGB; // all current input & output are this, but this is here so we can add BGR order + ri->num_channels = 0; + + #ifndef STBI_NO_JPEG + if (stbi__jpeg_test(s)) return stbi__jpeg_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PNG 
+ if (stbi__png_test(s)) return stbi__png_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_BMP + if (stbi__bmp_test(s)) return stbi__bmp_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_GIF + if (stbi__gif_test(s)) return stbi__gif_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PSD + if (stbi__psd_test(s)) return stbi__psd_load(s,x,y,comp,req_comp, ri, bpc); + #endif + #ifndef STBI_NO_PIC + if (stbi__pic_test(s)) return stbi__pic_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PNM + if (stbi__pnm_test(s)) return stbi__pnm_load(s,x,y,comp,req_comp, ri); + #endif + + #ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + float *hdr = stbi__hdr_load(s, x,y,comp,req_comp, ri); + return stbi__hdr_to_ldr(hdr, *x, *y, req_comp ? req_comp : *comp); + } + #endif + + #ifndef STBI_NO_TGA + // test tga last because it's a crappy test! + if (stbi__tga_test(s)) + return stbi__tga_load(s,x,y,comp,req_comp, ri); + #endif + + return stbi__errpuc("unknown image type", "Image not of any known type, or corrupt"); +} + +static stbi_uc *stbi__convert_16_to_8(stbi__uint16 *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi_uc *reduced; + + reduced = (stbi_uc *) stbi__malloc(img_len); + if (reduced == NULL) return stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + reduced[i] = (stbi_uc)((orig[i] >> 8) & 0xFF); // top half of each byte is sufficient approx of 16->8 bit scaling + + STBI_FREE(orig); + return reduced; +} + +static stbi__uint16 *stbi__convert_8_to_16(stbi_uc *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi__uint16 *enlarged; + + enlarged = (stbi__uint16 *) stbi__malloc(img_len*2); + if (enlarged == NULL) return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + enlarged[i] = (stbi__uint16)((orig[i] << 8) + orig[i]); // replicate to high and low byte, maps 0->0, 255->0xffff + + STBI_FREE(orig); + return enlarged; +} + +static unsigned char *stbi__load_and_postprocess_8bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 8); + + if (result == NULL) + return NULL; + + if (ri.bits_per_channel != 8) { + STBI_ASSERT(ri.bits_per_channel == 16); + result = stbi__convert_16_to_8((stbi__uint16 *) result, *x, *y, req_comp == 0 ? *comp : req_comp); + ri.bits_per_channel = 8; + } + + // @TODO: move stbi__convert_format to here + + if (stbi__vertically_flip_on_load) { + int w = *x, h = *y; + int channels = req_comp ? req_comp : *comp; + int row,col,z; + stbi_uc *image = (stbi_uc *) result; + + // @OPTIMIZE: use a bigger temp buffer and memcpy multiple pixels at once + for (row = 0; row < (h>>1); row++) { + for (col = 0; col < w; col++) { + for (z = 0; z < channels; z++) { + stbi_uc temp = image[(row * w + col) * channels + z]; + image[(row * w + col) * channels + z] = image[((h - row - 1) * w + col) * channels + z]; + image[((h - row - 1) * w + col) * channels + z] = temp; + } + } + } + } + + return (unsigned char *) result; +} + +static stbi__uint16 *stbi__load_and_postprocess_16bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 16); + + if (result == NULL) + return NULL; + + if (ri.bits_per_channel != 16) { + STBI_ASSERT(ri.bits_per_channel == 8); + result = stbi__convert_8_to_16((stbi_uc *) result, *x, *y, req_comp == 0 ? 
*comp : req_comp); + ri.bits_per_channel = 16; + } + + // @TODO: move stbi__convert_format16 to here + // @TODO: special case RGB-to-Y (and RGBA-to-YA) for 8-bit-to-16-bit case to keep more precision + + if (stbi__vertically_flip_on_load) { + int w = *x, h = *y; + int channels = req_comp ? req_comp : *comp; + int row,col,z; + stbi__uint16 *image = (stbi__uint16 *) result; + + // @OPTIMIZE: use a bigger temp buffer and memcpy multiple pixels at once + for (row = 0; row < (h>>1); row++) { + for (col = 0; col < w; col++) { + for (z = 0; z < channels; z++) { + stbi__uint16 temp = image[(row * w + col) * channels + z]; + image[(row * w + col) * channels + z] = image[((h - row - 1) * w + col) * channels + z]; + image[((h - row - 1) * w + col) * channels + z] = temp; + } + } + } + } + + return (stbi__uint16 *) result; +} + +#ifndef STBI_NO_HDR +static void stbi__float_postprocess(float *result, int *x, int *y, int *comp, int req_comp) +{ + if (stbi__vertically_flip_on_load && result != NULL) { + int w = *x, h = *y; + int depth = req_comp ? req_comp : *comp; + int row,col,z; + float temp; + + // @OPTIMIZE: use a bigger temp buffer and memcpy multiple pixels at once + for (row = 0; row < (h>>1); row++) { + for (col = 0; col < w; col++) { + for (z = 0; z < depth; z++) { + temp = result[(row * w + col) * depth + z]; + result[(row * w + col) * depth + z] = result[((h - row - 1) * w + col) * depth + z]; + result[((h - row - 1) * w + col) * depth + z] = temp; + } + } + } + } +} +#endif + +#ifndef STBI_NO_STDIO + +static FILE *stbi__fopen(char const *filename, char const *mode) +{ + FILE *f; +#if defined(_MSC_VER) && _MSC_VER >= 1400 + if (0 != fopen_s(&f, filename, mode)) + f=0; +#else + f = fopen(filename, mode); +#endif + return f; +} + + +STBIDEF stbi_uc *stbi_load(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + unsigned char *result; + if (!f) return stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file(f,x,y,comp,req_comp); + fclose(f); + return result; +} + +STBIDEF stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *result; + stbi__context s; + stbi__start_file(&s,f); + result = stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; +} + +STBIDEF stbi__uint16 *stbi_load_from_file_16(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + stbi__uint16 *result; + stbi__context s; + stbi__start_file(&s,f); + result = stbi__load_and_postprocess_16bit(&s,x,y,comp,req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; +} + +STBIDEF stbi_us *stbi_load_16(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + stbi__uint16 *result; + if (!f) return (stbi_us *) stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file_16(f,x,y,comp,req_comp); + fclose(f); + return result; +} + + +#endif //!STBI_NO_STDIO + +STBIDEF stbi_uc *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); +} + +STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int 
*x, int *y, int *comp, int req_comp)
+{
+   stbi__context s;
+   stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user);
+   return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp);
+}
+
+#ifndef STBI_NO_LINEAR
+static float *stbi__loadf_main(stbi__context *s, int *x, int *y, int *comp, int req_comp)
+{
+   unsigned char *data;
+   #ifndef STBI_NO_HDR
+   if (stbi__hdr_test(s)) {
+      stbi__result_info ri;
+      float *hdr_data = stbi__hdr_load(s,x,y,comp,req_comp, &ri);
+      if (hdr_data)
+         stbi__float_postprocess(hdr_data,x,y,comp,req_comp);
+      return hdr_data;
+   }
+   #endif
+   data = stbi__load_and_postprocess_8bit(s, x, y, comp, req_comp);
+   if (data)
+      return stbi__ldr_to_hdr(data, *x, *y, req_comp ? req_comp : *comp);
+   return stbi__errpf("unknown image type", "Image not of any known type, or corrupt");
+}
+
+STBIDEF float *stbi_loadf_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp)
+{
+   stbi__context s;
+   stbi__start_mem(&s,buffer,len);
+   return stbi__loadf_main(&s,x,y,comp,req_comp);
+}
+
+STBIDEF float *stbi_loadf_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp)
+{
+   stbi__context s;
+   stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user);
+   return stbi__loadf_main(&s,x,y,comp,req_comp);
+}
+
+#ifndef STBI_NO_STDIO
+STBIDEF float *stbi_loadf(char const *filename, int *x, int *y, int *comp, int req_comp)
+{
+   float *result;
+   FILE *f = stbi__fopen(filename, "rb");
+   if (!f) return stbi__errpf("can't fopen", "Unable to open file");
+   result = stbi_loadf_from_file(f,x,y,comp,req_comp);
+   fclose(f);
+   return result;
+}
+
+STBIDEF float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *comp, int req_comp)
+{
+   stbi__context s;
+   stbi__start_file(&s,f);
+   return stbi__loadf_main(&s,x,y,comp,req_comp);
+}
+#endif // !STBI_NO_STDIO
+
+#endif // !STBI_NO_LINEAR
+
+// this is-hdr-or-not query is defined independent of whether STBI_NO_LINEAR is
+// defined, for API simplicity; if STBI_NO_LINEAR is defined, it always
+// reports false!
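+//
+// usage sketch (illustrative only; "scene.hdr" is a made-up path) -- the
+// query lets callers pick between the float and 8-bit loaders:
+//
+//    int w, h, n;
+//    if (stbi_is_hdr("scene.hdr")) {
+//       float *pix = stbi_loadf("scene.hdr", &w, &h, &n, 0); // linear float data
+//       if (pix) { /* ... use pix ... */ stbi_image_free(pix); }
+//    } else {
+//       unsigned char *pix = stbi_load("scene.hdr", &w, &h, &n, 0); // 8-bit data
+//       if (pix) { /* ... use pix ... */ stbi_image_free(pix); }
+//    }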
+ +STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len) +{ + #ifndef STBI_NO_HDR + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__hdr_test(&s); + #else + STBI_NOTUSED(buffer); + STBI_NOTUSED(len); + return 0; + #endif +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_is_hdr (char const *filename) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result=0; + if (f) { + result = stbi_is_hdr_from_file(f); + fclose(f); + } + return result; +} + +STBIDEF int stbi_is_hdr_from_file(FILE *f) +{ + #ifndef STBI_NO_HDR + stbi__context s; + stbi__start_file(&s,f); + return stbi__hdr_test(&s); + #else + STBI_NOTUSED(f); + return 0; + #endif +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user) +{ + #ifndef STBI_NO_HDR + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__hdr_test(&s); + #else + STBI_NOTUSED(clbk); + STBI_NOTUSED(user); + return 0; + #endif +} + +#ifndef STBI_NO_LINEAR +static float stbi__l2h_gamma=2.2f, stbi__l2h_scale=1.0f; + +STBIDEF void stbi_ldr_to_hdr_gamma(float gamma) { stbi__l2h_gamma = gamma; } +STBIDEF void stbi_ldr_to_hdr_scale(float scale) { stbi__l2h_scale = scale; } +#endif + +static float stbi__h2l_gamma_i=1.0f/2.2f, stbi__h2l_scale_i=1.0f; + +STBIDEF void stbi_hdr_to_ldr_gamma(float gamma) { stbi__h2l_gamma_i = 1/gamma; } +STBIDEF void stbi_hdr_to_ldr_scale(float scale) { stbi__h2l_scale_i = 1/scale; } + + +////////////////////////////////////////////////////////////////////////////// +// +// Common code used by all image loaders +// + +enum +{ + STBI__SCAN_load=0, + STBI__SCAN_type, + STBI__SCAN_header +}; + +static void stbi__refill_buffer(stbi__context *s) +{ + int n = (s->io.read)(s->io_user_data,(char*)s->buffer_start,s->buflen); + if (n == 0) { + // at end of file, treat same as if from memory, but need to handle case + // where s->img_buffer isn't pointing to safe memory, e.g. 
0-byte file + s->read_from_callbacks = 0; + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start+1; + *s->img_buffer = 0; + } else { + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start + n; + } +} + +stbi_inline static stbi_uc stbi__get8(stbi__context *s) +{ + if (s->img_buffer < s->img_buffer_end) + return *s->img_buffer++; + if (s->read_from_callbacks) { + stbi__refill_buffer(s); + return *s->img_buffer++; + } + return 0; +} + +stbi_inline static int stbi__at_eof(stbi__context *s) +{ + if (s->io.read) { + if (!(s->io.eof)(s->io_user_data)) return 0; + // if feof() is true, check if buffer = end + // special case: we've only got the special 0 character at the end + if (s->read_from_callbacks == 0) return 1; + } + + return s->img_buffer >= s->img_buffer_end; +} + +static void stbi__skip(stbi__context *s, int n) +{ + if (n < 0) { + s->img_buffer = s->img_buffer_end; + return; + } + if (s->io.read) { + int blen = (int) (s->img_buffer_end - s->img_buffer); + if (blen < n) { + s->img_buffer = s->img_buffer_end; + (s->io.skip)(s->io_user_data, n - blen); + return; + } + } + s->img_buffer += n; +} + +static int stbi__getn(stbi__context *s, stbi_uc *buffer, int n) +{ + if (s->io.read) { + int blen = (int) (s->img_buffer_end - s->img_buffer); + if (blen < n) { + int res, count; + + memcpy(buffer, s->img_buffer, blen); + + count = (s->io.read)(s->io_user_data, (char*) buffer + blen, n - blen); + res = (count == (n-blen)); + s->img_buffer = s->img_buffer_end; + return res; + } + } + + if (s->img_buffer+n <= s->img_buffer_end) { + memcpy(buffer, s->img_buffer, n); + s->img_buffer += n; + return 1; + } else + return 0; +} + +static int stbi__get16be(stbi__context *s) +{ + int z = stbi__get8(s); + return (z << 8) + stbi__get8(s); +} + +static stbi__uint32 stbi__get32be(stbi__context *s) +{ + stbi__uint32 z = stbi__get16be(s); + return (z << 16) + stbi__get16be(s); +} + +#if defined(STBI_NO_BMP) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) +// nothing +#else +static int stbi__get16le(stbi__context *s) +{ + int z = stbi__get8(s); + return z + (stbi__get8(s) << 8); +} +#endif + +#ifndef STBI_NO_BMP +static stbi__uint32 stbi__get32le(stbi__context *s) +{ + stbi__uint32 z = stbi__get16le(s); + return z + (stbi__get16le(s) << 16); +} +#endif + +#define STBI__BYTECAST(x) ((stbi_uc) ((x) & 255)) // truncate int to byte without warnings + + +////////////////////////////////////////////////////////////////////////////// +// +// generic converter from built-in img_n to req_comp +// individual types do this automatically as much as possible (e.g. jpeg +// does all cases internally since it needs to colorspace convert anyway, +// and it never has alpha, so very few cases ). 
png can automatically +// interleave an alpha=255 channel, but falls back to this for other cases +// +// assume data buffer is malloced, so malloc a new one and free that one +// only failure mode is malloc failing + +static stbi_uc stbi__compute_y(int r, int g, int b) +{ + return (stbi_uc) (((r*77) + (g*150) + (29*b)) >> 8); +} + +static unsigned char *stbi__convert_format(unsigned char *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i,j; + unsigned char *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (unsigned char *) stbi__malloc_mad3(req_comp, x, y, 0); + if (good == NULL) { + STBI_FREE(data); + return stbi__errpuc("outofmem", "Out of memory"); + } + + for (j=0; j < (int) y; ++j) { + unsigned char *src = data + j * x * img_n ; + unsigned char *dest = good + j * x * req_comp; + + #define STBI__COMBO(a,b) ((a)*8+(b)) + #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) { + STBI__CASE(1,2) { dest[0]=src[0], dest[1]=255; } break; + STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0], dest[3]=255; } break; + STBI__CASE(2,1) { dest[0]=src[0]; } break; + STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0], dest[3]=src[1]; } break; + STBI__CASE(3,4) { dest[0]=src[0],dest[1]=src[1],dest[2]=src[2],dest[3]=255; } break; + STBI__CASE(3,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; + STBI__CASE(3,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]), dest[1] = 255; } break; + STBI__CASE(4,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; + STBI__CASE(4,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]), dest[1] = src[3]; } break; + STBI__CASE(4,3) { dest[0]=src[0],dest[1]=src[1],dest[2]=src[2]; } break; + default: STBI_ASSERT(0); + } + #undef STBI__CASE + } + + STBI_FREE(data); + return good; +} + +static stbi__uint16 stbi__compute_y_16(int r, int g, int b) +{ + return (stbi__uint16) (((r*77) + (g*150) + (29*b)) >> 8); +} + +static stbi__uint16 *stbi__convert_format16(stbi__uint16 *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i,j; + stbi__uint16 *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (stbi__uint16 *) stbi__malloc(req_comp * x * y * 2); + if (good == NULL) { + STBI_FREE(data); + return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); + } + + for (j=0; j < (int) y; ++j) { + stbi__uint16 *src = data + j * x * img_n ; + stbi__uint16 *dest = good + j * x * req_comp; + + #define STBI__COMBO(a,b) ((a)*8+(b)) + #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) { + STBI__CASE(1,2) { dest[0]=src[0], dest[1]=0xffff; } break; + STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0], dest[3]=0xffff; } break; + STBI__CASE(2,1) { dest[0]=src[0]; } break; + STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0], 
dest[3]=src[1]; } break; + STBI__CASE(3,4) { dest[0]=src[0],dest[1]=src[1],dest[2]=src[2],dest[3]=0xffff; } break; + STBI__CASE(3,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; + STBI__CASE(3,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]), dest[1] = 0xffff; } break; + STBI__CASE(4,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; + STBI__CASE(4,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]), dest[1] = src[3]; } break; + STBI__CASE(4,3) { dest[0]=src[0],dest[1]=src[1],dest[2]=src[2]; } break; + default: STBI_ASSERT(0); + } + #undef STBI__CASE + } + + STBI_FREE(data); + return good; +} + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp) +{ + int i,k,n; + float *output; + if (!data) return NULL; + output = (float *) stbi__malloc_mad4(x, y, comp, sizeof(float), 0); + if (output == NULL) { STBI_FREE(data); return stbi__errpf("outofmem", "Out of memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp-1; + for (i=0; i < x*y; ++i) { + for (k=0; k < n; ++k) { + output[i*comp + k] = (float) (pow(data[i*comp+k]/255.0f, stbi__l2h_gamma) * stbi__l2h_scale); + } + if (k < comp) output[i*comp + k] = data[i*comp+k]/255.0f; + } + STBI_FREE(data); + return output; +} +#endif + +#ifndef STBI_NO_HDR +#define stbi__float2int(x) ((int) (x)) +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp) +{ + int i,k,n; + stbi_uc *output; + if (!data) return NULL; + output = (stbi_uc *) stbi__malloc_mad3(x, y, comp, 0); + if (output == NULL) { STBI_FREE(data); return stbi__errpuc("outofmem", "Out of memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp-1; + for (i=0; i < x*y; ++i) { + for (k=0; k < n; ++k) { + float z = (float) pow(data[i*comp+k]*stbi__h2l_scale_i, stbi__h2l_gamma_i) * 255 + 0.5f; + if (z < 0) z = 0; + if (z > 255) z = 255; + output[i*comp + k] = (stbi_uc) stbi__float2int(z); + } + if (k < comp) { + float z = data[i*comp+k] * 255 + 0.5f; + if (z < 0) z = 0; + if (z > 255) z = 255; + output[i*comp + k] = (stbi_uc) stbi__float2int(z); + } + } + STBI_FREE(data); + return output; +} +#endif + +////////////////////////////////////////////////////////////////////////////// +// +// "baseline" JPEG/JFIF decoder +// +// simple implementation +// - doesn't support delayed output of y-dimension +// - simple interface (only one output format: 8-bit interleaved RGB) +// - doesn't try to recover corrupt jpegs +// - doesn't allow partial loading, loading multiple at once +// - still fast on x86 (copying globals into locals doesn't help x86) +// - allocates lots of intermediate memory (full size of all components) +// - non-interleaved case requires this anyway +// - allows good upsampling (see next) +// high-quality +// - upsampled channels are bilinearly interpolated, even across blocks +// - quality integer IDCT derived from IJG's 'slow' +// performance +// - fast huffman; reasonable integer IDCT +// - some SIMD kernels for common paths on targets with SSE2/NEON +// - uses a lot of intermediate memory, could cache poorly + +#ifndef STBI_NO_JPEG + +// huffman decoding acceleration +#define FAST_BITS 9 // larger handles more cases; smaller stomps less cache + +typedef struct +{ + stbi_uc fast[1 << FAST_BITS]; + // weirdly, repacking this into AoS is a 10% speed loss, instead of a win + stbi__uint16 code[256]; + stbi_uc values[256]; + stbi_uc size[257]; + unsigned int maxcode[18]; + int delta[17]; // old 'firstsymbol' - old 'firstcode' 
+} stbi__huffman; + +typedef struct +{ + stbi__context *s; + stbi__huffman huff_dc[4]; + stbi__huffman huff_ac[4]; + stbi__uint16 dequant[4][64]; + stbi__int16 fast_ac[4][1 << FAST_BITS]; + +// sizes for components, interleaved MCUs + int img_h_max, img_v_max; + int img_mcu_x, img_mcu_y; + int img_mcu_w, img_mcu_h; + +// definition of jpeg image component + struct + { + int id; + int h,v; + int tq; + int hd,ha; + int dc_pred; + + int x,y,w2,h2; + stbi_uc *data; + void *raw_data, *raw_coeff; + stbi_uc *linebuf; + short *coeff; // progressive only + int coeff_w, coeff_h; // number of 8x8 coefficient blocks + } img_comp[4]; + + stbi__uint32 code_buffer; // jpeg entropy-coded buffer + int code_bits; // number of valid bits + unsigned char marker; // marker seen while filling entropy buffer + int nomore; // flag if we saw a marker so must stop + + int progressive; + int spec_start; + int spec_end; + int succ_high; + int succ_low; + int eob_run; + int rgb; + + int scan_n, order[4]; + int restart_interval, todo; + +// kernels + void (*idct_block_kernel)(stbi_uc *out, int out_stride, short data[64]); + void (*YCbCr_to_RGB_kernel)(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step); + stbi_uc *(*resample_row_hv_2_kernel)(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs); +} stbi__jpeg; + +static int stbi__build_huffman(stbi__huffman *h, int *count) +{ + int i,j,k=0,code; + // build size list for each symbol (from JPEG spec) + for (i=0; i < 16; ++i) + for (j=0; j < count[i]; ++j) + h->size[k++] = (stbi_uc) (i+1); + h->size[k] = 0; + + // compute actual symbols (from jpeg spec) + code = 0; + k = 0; + for(j=1; j <= 16; ++j) { + // compute delta to add to code to compute symbol id + h->delta[j] = k - code; + if (h->size[k] == j) { + while (h->size[k] == j) + h->code[k++] = (stbi__uint16) (code++); + if (code-1 >= (1 << j)) return stbi__err("bad code lengths","Corrupt JPEG"); + } + // compute largest code + 1 for this size, preshifted as needed later + h->maxcode[j] = code << (16-j); + code <<= 1; + } + h->maxcode[j] = 0xffffffff; + + // build non-spec acceleration table; 255 is flag for not-accelerated + memset(h->fast, 255, 1 << FAST_BITS); + for (i=0; i < k; ++i) { + int s = h->size[i]; + if (s <= FAST_BITS) { + int c = h->code[i] << (FAST_BITS-s); + int m = 1 << (FAST_BITS-s); + for (j=0; j < m; ++j) { + h->fast[c+j] = (stbi_uc) i; + } + } + } + return 1; +} + +// build a table that decodes both magnitude and value of small ACs in +// one go. +static void stbi__build_fast_ac(stbi__int16 *fast_ac, stbi__huffman *h) +{ + int i; + for (i=0; i < (1 << FAST_BITS); ++i) { + stbi_uc fast = h->fast[i]; + fast_ac[i] = 0; + if (fast < 255) { + int rs = h->values[fast]; + int run = (rs >> 4) & 15; + int magbits = rs & 15; + int len = h->size[fast]; + + if (magbits && len + magbits <= FAST_BITS) { + // magnitude code followed by receive_extend code + int k = ((i << len) & ((1 << FAST_BITS) - 1)) >> (FAST_BITS - magbits); + int m = 1 << (magbits - 1); + if (k < m) k += (-1 << magbits) + 1; + // if the result is small enough, we can fit it in fast_ac table + if (k >= -128 && k <= 127) + fast_ac[i] = (stbi__int16) ((k << 8) + (run << 4) + (len + magbits)); + } + } + } +} + +static void stbi__grow_buffer_unsafe(stbi__jpeg *j) +{ + do { + int b = j->nomore ? 
0 : stbi__get8(j->s);
+      if (b == 0xff) {
+         int c = stbi__get8(j->s);
+         while (c == 0xff) c = stbi__get8(j->s); // consume fill bytes
+         if (c != 0) {
+            j->marker = (unsigned char) c;
+            j->nomore = 1;
+            return;
+         }
+      }
+      j->code_buffer |= b << (24 - j->code_bits);
+      j->code_bits += 8;
+   } while (j->code_bits <= 24);
+}
+
+// (1 << n) - 1
+static stbi__uint32 stbi__bmask[17]={0,1,3,7,15,31,63,127,255,511,1023,2047,4095,8191,16383,32767,65535};
+
+// decode a jpeg huffman value from the bitstream
+stbi_inline static int stbi__jpeg_huff_decode(stbi__jpeg *j, stbi__huffman *h)
+{
+   unsigned int temp;
+   int c,k;
+
+   if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);
+
+   // look at the top FAST_BITS and determine what symbol ID it is,
+   // if the code is <= FAST_BITS
+   c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1);
+   k = h->fast[c];
+   if (k < 255) {
+      int s = h->size[k];
+      if (s > j->code_bits)
+         return -1;
+      j->code_buffer <<= s;
+      j->code_bits -= s;
+      return h->values[k];
+   }
+
+   // naive test is to shift the code_buffer down so k bits are
+   // valid, then test against maxcode. To speed this up, we've
+   // preshifted maxcode left so that it has (16-k) 0s at the
+   // end; in other words, regardless of the number of bits, it
+   // wants to be compared against something shifted to have 16;
+   // that way we don't need to shift inside the loop.
+   temp = j->code_buffer >> 16;
+   for (k=FAST_BITS+1 ; ; ++k)
+      if (temp < h->maxcode[k])
+         break;
+   if (k == 17) {
+      // error! code not found
+      j->code_bits -= 16;
+      return -1;
+   }
+
+   if (k > j->code_bits)
+      return -1;
+
+   // convert the huffman code to the symbol id
+   c = ((j->code_buffer >> (32 - k)) & stbi__bmask[k]) + h->delta[k];
+   STBI_ASSERT((((j->code_buffer) >> (32 - h->size[c])) & stbi__bmask[h->size[c]]) == h->code[c]);
+
+   // convert the id to a symbol
+   j->code_bits -= k;
+   j->code_buffer <<= k;
+   return h->values[c];
+}
+
+// bias[n] = (-1<<n) + 1
+static int const stbi__jbias[16] = {0,-1,-3,-7,-15,-31,-63,-127,-255,-511,-1023,-2047,-4095,-8191,-16383,-32767};
+
+// combined JPEG 'receive' and JPEG 'extend', since baseline
+// always extends everything it receives.
+stbi_inline static int stbi__extend_receive(stbi__jpeg *j, int n)
+{
+   unsigned int k;
+   int sgn;
+   if (j->code_bits < n) stbi__grow_buffer_unsafe(j);
+
+   sgn = (stbi__int32)j->code_buffer >> 31; // sign bit is always in MSB
+   k = stbi_lrot(j->code_buffer, n);
+   STBI_ASSERT(n >= 0 && n < (int) (sizeof(stbi__bmask)/sizeof(*stbi__bmask)));
+   j->code_buffer = k & ~stbi__bmask[n];
+   k &= stbi__bmask[n];
+   j->code_bits -= n;
+   return k + (stbi__jbias[n] & ~sgn);
+}
+
+// get some unsigned bits
+stbi_inline static int stbi__jpeg_get_bits(stbi__jpeg *j, int n)
+{
+   unsigned int k;
+   if (j->code_bits < n) stbi__grow_buffer_unsafe(j);
+   k = stbi_lrot(j->code_buffer, n);
+   j->code_buffer = k & ~stbi__bmask[n];
+   k &= stbi__bmask[n];
+   j->code_bits -= n;
+   return k;
+}
+
+stbi_inline static int stbi__jpeg_get_bit(stbi__jpeg *j)
+{
+   unsigned int k;
+   if (j->code_bits < 1) stbi__grow_buffer_unsafe(j);
+   k = j->code_buffer;
+   j->code_buffer <<= 1;
+   --j->code_bits;
+   return k & 0x80000000;
+}
+
+// given a value that's at position X in the zigzag stream,
+// where does it appear in the 8x8 matrix coded as row-major?
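+// (worked example: zigzag position 2 holds the coefficient for row 1,
+// column 0 of the block, i.e. row-major index 8 -- the table below reads
+// 0, 1, 8, 16, ... so stbi__jpeg_dezigzag[2] == 8.)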
+static stbi_uc stbi__jpeg_dezigzag[64+15] = +{ + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 32, 25, 18, 11, 4, 5, + 12, 19, 26, 33, 40, 48, 41, 34, + 27, 20, 13, 6, 7, 14, 21, 28, + 35, 42, 49, 56, 57, 50, 43, 36, + 29, 22, 15, 23, 30, 37, 44, 51, + 58, 59, 52, 45, 38, 31, 39, 46, + 53, 60, 61, 54, 47, 55, 62, 63, + // let corrupt input sample past end + 63, 63, 63, 63, 63, 63, 63, 63, + 63, 63, 63, 63, 63, 63, 63 +}; + +// decode one 64-entry block-- +static int stbi__jpeg_decode_block(stbi__jpeg *j, short data[64], stbi__huffman *hdc, stbi__huffman *hac, stbi__int16 *fac, int b, stbi__uint16 *dequant) +{ + int diff,dc,k; + int t; + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + t = stbi__jpeg_huff_decode(j, hdc); + if (t < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + + // 0 all the ac values now so we can do it 32-bits at a time + memset(data,0,64*sizeof(data[0])); + + diff = t ? stbi__extend_receive(j, t) : 0; + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + data[0] = (short) (dc * dequant[0]); + + // decode AC components, see JPEG spec + k = 1; + do { + unsigned int zig; + int c,r,s; + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + r = fac[c]; + if (r) { // fast-AC path + k += (r >> 4) & 15; // run + s = r & 15; // combined length + j->code_buffer <<= s; + j->code_bits -= s; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) ((r >> 8) * dequant[zig]); + } else { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (rs != 0xf0) break; // end block + k += 16; + } else { + k += r; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) (stbi__extend_receive(j,s) * dequant[zig]); + } + } + } while (k < 64); + return 1; +} + +static int stbi__jpeg_decode_block_prog_dc(stbi__jpeg *j, short data[64], stbi__huffman *hdc, int b) +{ + int diff,dc; + int t; + if (j->spec_end != 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + + if (j->succ_high == 0) { + // first scan for DC coefficient, must be first + memset(data,0,64*sizeof(data[0])); // 0 all the ac values now + t = stbi__jpeg_huff_decode(j, hdc); + diff = t ? 
stbi__extend_receive(j, t) : 0; + + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + data[0] = (short) (dc << j->succ_low); + } else { + // refinement scan for DC coefficient + if (stbi__jpeg_get_bit(j)) + data[0] += (short) (1 << j->succ_low); + } + return 1; +} + +// @OPTIMIZE: store non-zigzagged during the decode passes, +// and only de-zigzag when dequantizing +static int stbi__jpeg_decode_block_prog_ac(stbi__jpeg *j, short data[64], stbi__huffman *hac, stbi__int16 *fac) +{ + int k; + if (j->spec_start == 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + + if (j->succ_high == 0) { + int shift = j->succ_low; + + if (j->eob_run) { + --j->eob_run; + return 1; + } + + k = j->spec_start; + do { + unsigned int zig; + int c,r,s; + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + r = fac[c]; + if (r) { // fast-AC path + k += (r >> 4) & 15; // run + s = r & 15; // combined length + j->code_buffer <<= s; + j->code_bits -= s; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) ((r >> 8) << shift); + } else { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (r < 15) { + j->eob_run = (1 << r); + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + --j->eob_run; + break; + } + k += 16; + } else { + k += r; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) (stbi__extend_receive(j,s) << shift); + } + } + } while (k <= j->spec_end); + } else { + // refinement scan for these AC coefficients + + short bit = (short) (1 << j->succ_low); + + if (j->eob_run) { + --j->eob_run; + for (k = j->spec_start; k <= j->spec_end; ++k) { + short *p = &data[stbi__jpeg_dezigzag[k]]; + if (*p != 0) + if (stbi__jpeg_get_bit(j)) + if ((*p & bit)==0) { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } + } else { + k = j->spec_start; + do { + int r,s; + int rs = stbi__jpeg_huff_decode(j, hac); // @OPTIMIZE see if we can use the fast path here, advance-by-r is so slow, eh + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (r < 15) { + j->eob_run = (1 << r) - 1; + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + r = 64; // force end of block + } else { + // r=15 s=0 should write 16 0s, so we just do + // a run of 15 0s and then write s (which is 0), + // so we don't have to do anything special here + } + } else { + if (s != 1) return stbi__err("bad huffman code", "Corrupt JPEG"); + // sign bit + if (stbi__jpeg_get_bit(j)) + s = bit; + else + s = -bit; + } + + // advance by r + while (k <= j->spec_end) { + short *p = &data[stbi__jpeg_dezigzag[k++]]; + if (*p != 0) { + if (stbi__jpeg_get_bit(j)) + if ((*p & bit)==0) { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } else { + if (r == 0) { + *p = (short) s; + break; + } + --r; + } + } + } while (k <= j->spec_end); + } + } + return 1; +} + +// take a -128..127 value and stbi__clamp it and convert to 0..255 +stbi_inline static stbi_uc stbi__clamp(int x) +{ + // trick to use a single test to catch both cases + if ((unsigned int) x > 255) { + if (x < 0) return 0; + if (x > 255) return 255; + } + return (stbi_uc) x; +} + +#define stbi__f2f(x) ((int) (((x) * 4096 + 0.5))) +#define stbi__fsh(x) ((x) << 12) + +// derived from jidctint -- DCT_ISLOW +#define STBI__IDCT_1D(s0,s1,s2,s3,s4,s5,s6,s7) \ + int t0,t1,t2,t3,p1,p2,p3,p4,p5,x0,x1,x2,x3; \ + p2 = s2; \ + p3 = s6; \ + p1 = (p2+p3) * 
stbi__f2f(0.5411961f); \ + t2 = p1 + p3*stbi__f2f(-1.847759065f); \ + t3 = p1 + p2*stbi__f2f( 0.765366865f); \ + p2 = s0; \ + p3 = s4; \ + t0 = stbi__fsh(p2+p3); \ + t1 = stbi__fsh(p2-p3); \ + x0 = t0+t3; \ + x3 = t0-t3; \ + x1 = t1+t2; \ + x2 = t1-t2; \ + t0 = s7; \ + t1 = s5; \ + t2 = s3; \ + t3 = s1; \ + p3 = t0+t2; \ + p4 = t1+t3; \ + p1 = t0+t3; \ + p2 = t1+t2; \ + p5 = (p3+p4)*stbi__f2f( 1.175875602f); \ + t0 = t0*stbi__f2f( 0.298631336f); \ + t1 = t1*stbi__f2f( 2.053119869f); \ + t2 = t2*stbi__f2f( 3.072711026f); \ + t3 = t3*stbi__f2f( 1.501321110f); \ + p1 = p5 + p1*stbi__f2f(-0.899976223f); \ + p2 = p5 + p2*stbi__f2f(-2.562915447f); \ + p3 = p3*stbi__f2f(-1.961570560f); \ + p4 = p4*stbi__f2f(-0.390180644f); \ + t3 += p1+p4; \ + t2 += p2+p3; \ + t1 += p2+p4; \ + t0 += p1+p3; + +static void stbi__idct_block(stbi_uc *out, int out_stride, short data[64]) +{ + int i,val[64],*v=val; + stbi_uc *o; + short *d = data; + + // columns + for (i=0; i < 8; ++i,++d, ++v) { + // if all zeroes, shortcut -- this avoids dequantizing 0s and IDCTing + if (d[ 8]==0 && d[16]==0 && d[24]==0 && d[32]==0 + && d[40]==0 && d[48]==0 && d[56]==0) { + // no shortcut 0 seconds + // (1|2|3|4|5|6|7)==0 0 seconds + // all separate -0.047 seconds + // 1 && 2|3 && 4|5 && 6|7: -0.047 seconds + int dcterm = d[0] << 2; + v[0] = v[8] = v[16] = v[24] = v[32] = v[40] = v[48] = v[56] = dcterm; + } else { + STBI__IDCT_1D(d[ 0],d[ 8],d[16],d[24],d[32],d[40],d[48],d[56]) + // constants scaled things up by 1<<12; let's bring them back + // down, but keep 2 extra bits of precision + x0 += 512; x1 += 512; x2 += 512; x3 += 512; + v[ 0] = (x0+t3) >> 10; + v[56] = (x0-t3) >> 10; + v[ 8] = (x1+t2) >> 10; + v[48] = (x1-t2) >> 10; + v[16] = (x2+t1) >> 10; + v[40] = (x2-t1) >> 10; + v[24] = (x3+t0) >> 10; + v[32] = (x3-t0) >> 10; + } + } + + for (i=0, v=val, o=out; i < 8; ++i,v+=8,o+=out_stride) { + // no fast case since the first 1D IDCT spread components out + STBI__IDCT_1D(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7]) + // constants scaled things up by 1<<12, plus we had 1<<2 from first + // loop, plus horizontal and vertical each scale by sqrt(8) so together + // we've got an extra 1<<3, so 1<<17 total we need to remove. + // so we want to round that, which means adding 0.5 * 1<<17, + // aka 65536. Also, we'll end up with -128 to 127 that we want + // to encode as 0..255 by adding 128, so we'll add that before the shift + x0 += 65536 + (128<<17); + x1 += 65536 + (128<<17); + x2 += 65536 + (128<<17); + x3 += 65536 + (128<<17); + // tried computing the shifts into temps, or'ing the temps to see + // if any were out of range, but that was slower + o[0] = stbi__clamp((x0+t3) >> 17); + o[7] = stbi__clamp((x0-t3) >> 17); + o[1] = stbi__clamp((x1+t2) >> 17); + o[6] = stbi__clamp((x1-t2) >> 17); + o[2] = stbi__clamp((x2+t1) >> 17); + o[5] = stbi__clamp((x2-t1) >> 17); + o[3] = stbi__clamp((x3+t0) >> 17); + o[4] = stbi__clamp((x3-t0) >> 17); + } +} + +#ifdef STBI_SSE2 +// sse2 integer IDCT. not the fastest possible implementation but it +// produces bit-identical results to the generic C version so it's +// fully "transparent". +static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) +{ + // This is constructed to match our regular (generic) integer IDCT exactly. 
+ __m128i row0, row1, row2, row3, row4, row5, row6, row7; + __m128i tmp; + + // dot product constant: even elems=x, odd elems=y + #define dct_const(x,y) _mm_setr_epi16((x),(y),(x),(y),(x),(y),(x),(y)) + + // out(0) = c0[even]*x + c0[odd]*y (c0, x, y 16-bit, out 32-bit) + // out(1) = c1[even]*x + c1[odd]*y + #define dct_rot(out0,out1, x,y,c0,c1) \ + __m128i c0##lo = _mm_unpacklo_epi16((x),(y)); \ + __m128i c0##hi = _mm_unpackhi_epi16((x),(y)); \ + __m128i out0##_l = _mm_madd_epi16(c0##lo, c0); \ + __m128i out0##_h = _mm_madd_epi16(c0##hi, c0); \ + __m128i out1##_l = _mm_madd_epi16(c0##lo, c1); \ + __m128i out1##_h = _mm_madd_epi16(c0##hi, c1) + + // out = in << 12 (in 16-bit, out 32-bit) + #define dct_widen(out, in) \ + __m128i out##_l = _mm_srai_epi32(_mm_unpacklo_epi16(_mm_setzero_si128(), (in)), 4); \ + __m128i out##_h = _mm_srai_epi32(_mm_unpackhi_epi16(_mm_setzero_si128(), (in)), 4) + + // wide add + #define dct_wadd(out, a, b) \ + __m128i out##_l = _mm_add_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_add_epi32(a##_h, b##_h) + + // wide sub + #define dct_wsub(out, a, b) \ + __m128i out##_l = _mm_sub_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_sub_epi32(a##_h, b##_h) + + // butterfly a/b, add bias, then shift by "s" and pack + #define dct_bfly32o(out0, out1, a,b,bias,s) \ + { \ + __m128i abiased_l = _mm_add_epi32(a##_l, bias); \ + __m128i abiased_h = _mm_add_epi32(a##_h, bias); \ + dct_wadd(sum, abiased, b); \ + dct_wsub(dif, abiased, b); \ + out0 = _mm_packs_epi32(_mm_srai_epi32(sum_l, s), _mm_srai_epi32(sum_h, s)); \ + out1 = _mm_packs_epi32(_mm_srai_epi32(dif_l, s), _mm_srai_epi32(dif_h, s)); \ + } + + // 8-bit interleave step (for transposes) + #define dct_interleave8(a, b) \ + tmp = a; \ + a = _mm_unpacklo_epi8(a, b); \ + b = _mm_unpackhi_epi8(tmp, b) + + // 16-bit interleave step (for transposes) + #define dct_interleave16(a, b) \ + tmp = a; \ + a = _mm_unpacklo_epi16(a, b); \ + b = _mm_unpackhi_epi16(tmp, b) + + #define dct_pass(bias,shift) \ + { \ + /* even part */ \ + dct_rot(t2e,t3e, row2,row6, rot0_0,rot0_1); \ + __m128i sum04 = _mm_add_epi16(row0, row4); \ + __m128i dif04 = _mm_sub_epi16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + dct_rot(y0o,y2o, row7,row3, rot2_0,rot2_1); \ + dct_rot(y1o,y3o, row5,row1, rot3_0,rot3_1); \ + __m128i sum17 = _mm_add_epi16(row1, row7); \ + __m128i sum35 = _mm_add_epi16(row3, row5); \ + dct_rot(y4o,y5o, sum17,sum35, rot1_0,rot1_1); \ + dct_wadd(x4, y0o, y4o); \ + dct_wadd(x5, y1o, y5o); \ + dct_wadd(x6, y2o, y5o); \ + dct_wadd(x7, y3o, y4o); \ + dct_bfly32o(row0,row7, x0,x7,bias,shift); \ + dct_bfly32o(row1,row6, x1,x6,bias,shift); \ + dct_bfly32o(row2,row5, x2,x5,bias,shift); \ + dct_bfly32o(row3,row4, x3,x4,bias,shift); \ + } + + __m128i rot0_0 = dct_const(stbi__f2f(0.5411961f), stbi__f2f(0.5411961f) + stbi__f2f(-1.847759065f)); + __m128i rot0_1 = dct_const(stbi__f2f(0.5411961f) + stbi__f2f( 0.765366865f), stbi__f2f(0.5411961f)); + __m128i rot1_0 = dct_const(stbi__f2f(1.175875602f) + stbi__f2f(-0.899976223f), stbi__f2f(1.175875602f)); + __m128i rot1_1 = dct_const(stbi__f2f(1.175875602f), stbi__f2f(1.175875602f) + stbi__f2f(-2.562915447f)); + __m128i rot2_0 = dct_const(stbi__f2f(-1.961570560f) + stbi__f2f( 0.298631336f), stbi__f2f(-1.961570560f)); + __m128i rot2_1 = dct_const(stbi__f2f(-1.961570560f), stbi__f2f(-1.961570560f) + stbi__f2f( 3.072711026f)); + __m128i rot3_0 = 
dct_const(stbi__f2f(-0.390180644f) + stbi__f2f( 2.053119869f), stbi__f2f(-0.390180644f)); + __m128i rot3_1 = dct_const(stbi__f2f(-0.390180644f), stbi__f2f(-0.390180644f) + stbi__f2f( 1.501321110f)); + + // rounding biases in column/row passes, see stbi__idct_block for explanation. + __m128i bias_0 = _mm_set1_epi32(512); + __m128i bias_1 = _mm_set1_epi32(65536 + (128<<17)); + + // load + row0 = _mm_load_si128((const __m128i *) (data + 0*8)); + row1 = _mm_load_si128((const __m128i *) (data + 1*8)); + row2 = _mm_load_si128((const __m128i *) (data + 2*8)); + row3 = _mm_load_si128((const __m128i *) (data + 3*8)); + row4 = _mm_load_si128((const __m128i *) (data + 4*8)); + row5 = _mm_load_si128((const __m128i *) (data + 5*8)); + row6 = _mm_load_si128((const __m128i *) (data + 6*8)); + row7 = _mm_load_si128((const __m128i *) (data + 7*8)); + + // column pass + dct_pass(bias_0, 10); + + { + // 16bit 8x8 transpose pass 1 + dct_interleave16(row0, row4); + dct_interleave16(row1, row5); + dct_interleave16(row2, row6); + dct_interleave16(row3, row7); + + // transpose pass 2 + dct_interleave16(row0, row2); + dct_interleave16(row1, row3); + dct_interleave16(row4, row6); + dct_interleave16(row5, row7); + + // transpose pass 3 + dct_interleave16(row0, row1); + dct_interleave16(row2, row3); + dct_interleave16(row4, row5); + dct_interleave16(row6, row7); + } + + // row pass + dct_pass(bias_1, 17); + + { + // pack + __m128i p0 = _mm_packus_epi16(row0, row1); // a0a1a2a3...a7b0b1b2b3...b7 + __m128i p1 = _mm_packus_epi16(row2, row3); + __m128i p2 = _mm_packus_epi16(row4, row5); + __m128i p3 = _mm_packus_epi16(row6, row7); + + // 8bit 8x8 transpose pass 1 + dct_interleave8(p0, p2); // a0e0a1e1... + dct_interleave8(p1, p3); // c0g0c1g1... + + // transpose pass 2 + dct_interleave8(p0, p1); // a0c0e0g0... + dct_interleave8(p2, p3); // b0d0f0h0... + + // transpose pass 3 + dct_interleave8(p0, p2); // a0b0c0d0... + dct_interleave8(p1, p3); // a4b4c4d4... + + // store + _mm_storel_epi64((__m128i *) out, p0); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p0, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p2); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p2, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p1); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p1, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p3); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p3, 0x4e)); + } + +#undef dct_const +#undef dct_rot +#undef dct_widen +#undef dct_wadd +#undef dct_wsub +#undef dct_bfly32o +#undef dct_interleave8 +#undef dct_interleave16 +#undef dct_pass +} + +#endif // STBI_SSE2 + +#ifdef STBI_NEON + +// NEON integer IDCT. should produce bit-identical +// results to the generic C version. 
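+// unlike the SSE2 version, the +128 level shift is applied up front here:
+// adding 1024 to the DC coefficient below comes out as +128 on every
+// output pixel once the scaling of both passes is applied.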
+static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) +{ + int16x8_t row0, row1, row2, row3, row4, row5, row6, row7; + + int16x4_t rot0_0 = vdup_n_s16(stbi__f2f(0.5411961f)); + int16x4_t rot0_1 = vdup_n_s16(stbi__f2f(-1.847759065f)); + int16x4_t rot0_2 = vdup_n_s16(stbi__f2f( 0.765366865f)); + int16x4_t rot1_0 = vdup_n_s16(stbi__f2f( 1.175875602f)); + int16x4_t rot1_1 = vdup_n_s16(stbi__f2f(-0.899976223f)); + int16x4_t rot1_2 = vdup_n_s16(stbi__f2f(-2.562915447f)); + int16x4_t rot2_0 = vdup_n_s16(stbi__f2f(-1.961570560f)); + int16x4_t rot2_1 = vdup_n_s16(stbi__f2f(-0.390180644f)); + int16x4_t rot3_0 = vdup_n_s16(stbi__f2f( 0.298631336f)); + int16x4_t rot3_1 = vdup_n_s16(stbi__f2f( 2.053119869f)); + int16x4_t rot3_2 = vdup_n_s16(stbi__f2f( 3.072711026f)); + int16x4_t rot3_3 = vdup_n_s16(stbi__f2f( 1.501321110f)); + +#define dct_long_mul(out, inq, coeff) \ + int32x4_t out##_l = vmull_s16(vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmull_s16(vget_high_s16(inq), coeff) + +#define dct_long_mac(out, acc, inq, coeff) \ + int32x4_t out##_l = vmlal_s16(acc##_l, vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmlal_s16(acc##_h, vget_high_s16(inq), coeff) + +#define dct_widen(out, inq) \ + int32x4_t out##_l = vshll_n_s16(vget_low_s16(inq), 12); \ + int32x4_t out##_h = vshll_n_s16(vget_high_s16(inq), 12) + +// wide add +#define dct_wadd(out, a, b) \ + int32x4_t out##_l = vaddq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vaddq_s32(a##_h, b##_h) + +// wide sub +#define dct_wsub(out, a, b) \ + int32x4_t out##_l = vsubq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vsubq_s32(a##_h, b##_h) + +// butterfly a/b, then shift using "shiftop" by "s" and pack +#define dct_bfly32o(out0,out1, a,b,shiftop,s) \ + { \ + dct_wadd(sum, a, b); \ + dct_wsub(dif, a, b); \ + out0 = vcombine_s16(shiftop(sum_l, s), shiftop(sum_h, s)); \ + out1 = vcombine_s16(shiftop(dif_l, s), shiftop(dif_h, s)); \ + } + +#define dct_pass(shiftop, shift) \ + { \ + /* even part */ \ + int16x8_t sum26 = vaddq_s16(row2, row6); \ + dct_long_mul(p1e, sum26, rot0_0); \ + dct_long_mac(t2e, p1e, row6, rot0_1); \ + dct_long_mac(t3e, p1e, row2, rot0_2); \ + int16x8_t sum04 = vaddq_s16(row0, row4); \ + int16x8_t dif04 = vsubq_s16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + int16x8_t sum15 = vaddq_s16(row1, row5); \ + int16x8_t sum17 = vaddq_s16(row1, row7); \ + int16x8_t sum35 = vaddq_s16(row3, row5); \ + int16x8_t sum37 = vaddq_s16(row3, row7); \ + int16x8_t sumodd = vaddq_s16(sum17, sum35); \ + dct_long_mul(p5o, sumodd, rot1_0); \ + dct_long_mac(p1o, p5o, sum17, rot1_1); \ + dct_long_mac(p2o, p5o, sum35, rot1_2); \ + dct_long_mul(p3o, sum37, rot2_0); \ + dct_long_mul(p4o, sum15, rot2_1); \ + dct_wadd(sump13o, p1o, p3o); \ + dct_wadd(sump24o, p2o, p4o); \ + dct_wadd(sump23o, p2o, p3o); \ + dct_wadd(sump14o, p1o, p4o); \ + dct_long_mac(x4, sump13o, row7, rot3_0); \ + dct_long_mac(x5, sump24o, row5, rot3_1); \ + dct_long_mac(x6, sump23o, row3, rot3_2); \ + dct_long_mac(x7, sump14o, row1, rot3_3); \ + dct_bfly32o(row0,row7, x0,x7,shiftop,shift); \ + dct_bfly32o(row1,row6, x1,x6,shiftop,shift); \ + dct_bfly32o(row2,row5, x2,x5,shiftop,shift); \ + dct_bfly32o(row3,row4, x3,x4,shiftop,shift); \ + } + + // load + row0 = vld1q_s16(data + 0*8); + row1 = vld1q_s16(data + 1*8); + row2 = vld1q_s16(data + 2*8); + row3 = vld1q_s16(data + 3*8); + row4 = vld1q_s16(data + 4*8); + row5 = 
vld1q_s16(data + 5*8); + row6 = vld1q_s16(data + 6*8); + row7 = vld1q_s16(data + 7*8); + + // add DC bias + row0 = vaddq_s16(row0, vsetq_lane_s16(1024, vdupq_n_s16(0), 0)); + + // column pass + dct_pass(vrshrn_n_s32, 10); + + // 16bit 8x8 transpose + { +// these three map to a single VTRN.16, VTRN.32, and VSWP, respectively. +// whether compilers actually get this is another story, sadly. +#define dct_trn16(x, y) { int16x8x2_t t = vtrnq_s16(x, y); x = t.val[0]; y = t.val[1]; } +#define dct_trn32(x, y) { int32x4x2_t t = vtrnq_s32(vreinterpretq_s32_s16(x), vreinterpretq_s32_s16(y)); x = vreinterpretq_s16_s32(t.val[0]); y = vreinterpretq_s16_s32(t.val[1]); } +#define dct_trn64(x, y) { int16x8_t x0 = x; int16x8_t y0 = y; x = vcombine_s16(vget_low_s16(x0), vget_low_s16(y0)); y = vcombine_s16(vget_high_s16(x0), vget_high_s16(y0)); } + + // pass 1 + dct_trn16(row0, row1); // a0b0a2b2a4b4a6b6 + dct_trn16(row2, row3); + dct_trn16(row4, row5); + dct_trn16(row6, row7); + + // pass 2 + dct_trn32(row0, row2); // a0b0c0d0a4b4c4d4 + dct_trn32(row1, row3); + dct_trn32(row4, row6); + dct_trn32(row5, row7); + + // pass 3 + dct_trn64(row0, row4); // a0b0c0d0e0f0g0h0 + dct_trn64(row1, row5); + dct_trn64(row2, row6); + dct_trn64(row3, row7); + +#undef dct_trn16 +#undef dct_trn32 +#undef dct_trn64 + } + + // row pass + // vrshrn_n_s32 only supports shifts up to 16, we need + // 17. so do a non-rounding shift of 16 first then follow + // up with a rounding shift by 1. + dct_pass(vshrn_n_s32, 16); + + { + // pack and round + uint8x8_t p0 = vqrshrun_n_s16(row0, 1); + uint8x8_t p1 = vqrshrun_n_s16(row1, 1); + uint8x8_t p2 = vqrshrun_n_s16(row2, 1); + uint8x8_t p3 = vqrshrun_n_s16(row3, 1); + uint8x8_t p4 = vqrshrun_n_s16(row4, 1); + uint8x8_t p5 = vqrshrun_n_s16(row5, 1); + uint8x8_t p6 = vqrshrun_n_s16(row6, 1); + uint8x8_t p7 = vqrshrun_n_s16(row7, 1); + + // again, these can translate into one instruction, but often don't. +#define dct_trn8_8(x, y) { uint8x8x2_t t = vtrn_u8(x, y); x = t.val[0]; y = t.val[1]; } +#define dct_trn8_16(x, y) { uint16x4x2_t t = vtrn_u16(vreinterpret_u16_u8(x), vreinterpret_u16_u8(y)); x = vreinterpret_u8_u16(t.val[0]); y = vreinterpret_u8_u16(t.val[1]); } +#define dct_trn8_32(x, y) { uint32x2x2_t t = vtrn_u32(vreinterpret_u32_u8(x), vreinterpret_u32_u8(y)); x = vreinterpret_u8_u32(t.val[0]); y = vreinterpret_u8_u32(t.val[1]); } + + // sadly can't use interleaved stores here since we only write + // 8 bytes to each scan line! + + // 8x8 8-bit transpose pass 1 + dct_trn8_8(p0, p1); + dct_trn8_8(p2, p3); + dct_trn8_8(p4, p5); + dct_trn8_8(p6, p7); + + // pass 2 + dct_trn8_16(p0, p2); + dct_trn8_16(p1, p3); + dct_trn8_16(p4, p6); + dct_trn8_16(p5, p7); + + // pass 3 + dct_trn8_32(p0, p4); + dct_trn8_32(p1, p5); + dct_trn8_32(p2, p6); + dct_trn8_32(p3, p7); + + // store + vst1_u8(out, p0); out += out_stride; + vst1_u8(out, p1); out += out_stride; + vst1_u8(out, p2); out += out_stride; + vst1_u8(out, p3); out += out_stride; + vst1_u8(out, p4); out += out_stride; + vst1_u8(out, p5); out += out_stride; + vst1_u8(out, p6); out += out_stride; + vst1_u8(out, p7); + +#undef dct_trn8_8 +#undef dct_trn8_16 +#undef dct_trn8_32 + } + +#undef dct_long_mul +#undef dct_long_mac +#undef dct_widen +#undef dct_wadd +#undef dct_wsub +#undef dct_bfly32o +#undef dct_pass +} + +#endif // STBI_NEON + +#define STBI__MARKER_none 0xff +// if there's a pending marker from the entropy stream, return that +// otherwise, fetch from the stream and get a marker. 
if there's no +// marker, return 0xff, which is never a valid marker value +static stbi_uc stbi__get_marker(stbi__jpeg *j) +{ + stbi_uc x; + if (j->marker != STBI__MARKER_none) { x = j->marker; j->marker = STBI__MARKER_none; return x; } + x = stbi__get8(j->s); + if (x != 0xff) return STBI__MARKER_none; + while (x == 0xff) + x = stbi__get8(j->s); // consume repeated 0xff fill bytes + return x; +} + +// in each scan, we'll have scan_n components, and the order +// of the components is specified by order[] +#define STBI__RESTART(x) ((x) >= 0xd0 && (x) <= 0xd7) + +// after a restart interval, stbi__jpeg_reset the entropy decoder and +// the dc prediction +static void stbi__jpeg_reset(stbi__jpeg *j) +{ + j->code_bits = 0; + j->code_buffer = 0; + j->nomore = 0; + j->img_comp[0].dc_pred = j->img_comp[1].dc_pred = j->img_comp[2].dc_pred = 0; + j->marker = STBI__MARKER_none; + j->todo = j->restart_interval ? j->restart_interval : 0x7fffffff; + j->eob_run = 0; + // no more than 1<<31 MCUs if no restart_interal? that's plenty safe, + // since we don't even allow 1<<30 pixels +} + +static int stbi__parse_entropy_coded_data(stbi__jpeg *z) +{ + stbi__jpeg_reset(z); + if (!z->progressive) { + if (z->scan_n == 1) { + int i,j; + STBI_SIMD_ALIGN(short, data[64]); + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + // if it's NOT a restart, then just bail, so we get corrupt data + // rather than no data + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } else { // interleaved + int i,j,k,x,y; + STBI_SIMD_ALIGN(short, data[64]); + for (j=0; j < z->img_mcu_y; ++j) { + for (i=0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... 
process scan_n components in order + for (k=0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y=0; y < z->img_comp[n].v; ++y) { + for (x=0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x)*8; + int y2 = (j*z->img_comp[n].v + y)*8; + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*y2+x2, z->img_comp[n].w2, data); + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } else { + if (z->scan_n == 1) { + int i,j; + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + if (z->spec_start == 0) { + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } else { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block_prog_ac(z, data, &z->huff_ac[ha], z->fast_ac[ha])) + return 0; + } + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } else { // interleaved + int i,j,k,x,y; + for (j=0; j < z->img_mcu_y; ++j) { + for (i=0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... 
process scan_n components in order + for (k=0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y=0; y < z->img_comp[n].v; ++y) { + for (x=0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x); + int y2 = (j*z->img_comp[n].v + y); + short *data = z->img_comp[n].coeff + 64 * (x2 + y2 * z->img_comp[n].coeff_w); + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } +} + +static void stbi__jpeg_dequantize(short *data, stbi__uint16 *dequant) +{ + int i; + for (i=0; i < 64; ++i) + data[i] *= dequant[i]; +} + +static void stbi__jpeg_finish(stbi__jpeg *z) +{ + if (z->progressive) { + // dequantize and idct the data + int i,j,n; + for (n=0; n < z->s->img_n; ++n) { + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + stbi__jpeg_dequantize(data, z->dequant[z->img_comp[n].tq]); + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); + } + } + } + } +} + +static int stbi__process_marker(stbi__jpeg *z, int m) +{ + int L; + switch (m) { + case STBI__MARKER_none: // no marker found + return stbi__err("expected marker","Corrupt JPEG"); + + case 0xDD: // DRI - specify restart interval + if (stbi__get16be(z->s) != 4) return stbi__err("bad DRI len","Corrupt JPEG"); + z->restart_interval = stbi__get16be(z->s); + return 1; + + case 0xDB: // DQT - define quantization table + L = stbi__get16be(z->s)-2; + while (L > 0) { + int q = stbi__get8(z->s); + int p = q >> 4, sixteen = (p != 0); + int t = q & 15,i; + if (p != 0 && p != 1) return stbi__err("bad DQT type","Corrupt JPEG"); + if (t > 3) return stbi__err("bad DQT table","Corrupt JPEG"); + + for (i=0; i < 64; ++i) + z->dequant[t][stbi__jpeg_dezigzag[i]] = sixteen ? stbi__get16be(z->s) : stbi__get8(z->s); + L -= (sixteen ? 
129 : 65); + } + return L==0; + + case 0xC4: // DHT - define huffman table + L = stbi__get16be(z->s)-2; + while (L > 0) { + stbi_uc *v; + int sizes[16],i,n=0; + int q = stbi__get8(z->s); + int tc = q >> 4; + int th = q & 15; + if (tc > 1 || th > 3) return stbi__err("bad DHT header","Corrupt JPEG"); + for (i=0; i < 16; ++i) { + sizes[i] = stbi__get8(z->s); + n += sizes[i]; + } + L -= 17; + if (tc == 0) { + if (!stbi__build_huffman(z->huff_dc+th, sizes)) return 0; + v = z->huff_dc[th].values; + } else { + if (!stbi__build_huffman(z->huff_ac+th, sizes)) return 0; + v = z->huff_ac[th].values; + } + for (i=0; i < n; ++i) + v[i] = stbi__get8(z->s); + if (tc != 0) + stbi__build_fast_ac(z->fast_ac[th], z->huff_ac + th); + L -= n; + } + return L==0; + } + // check for comment block or APP blocks + if ((m >= 0xE0 && m <= 0xEF) || m == 0xFE) { + stbi__skip(z->s, stbi__get16be(z->s)-2); + return 1; + } + return stbi__err("unknown marker","Corrupt JPEG"); +} + +// after we see SOS +static int stbi__process_scan_header(stbi__jpeg *z) +{ + int i; + int Ls = stbi__get16be(z->s); + z->scan_n = stbi__get8(z->s); + if (z->scan_n < 1 || z->scan_n > 4 || z->scan_n > (int) z->s->img_n) return stbi__err("bad SOS component count","Corrupt JPEG"); + if (Ls != 6+2*z->scan_n) return stbi__err("bad SOS len","Corrupt JPEG"); + for (i=0; i < z->scan_n; ++i) { + int id = stbi__get8(z->s), which; + int q = stbi__get8(z->s); + for (which = 0; which < z->s->img_n; ++which) + if (z->img_comp[which].id == id) + break; + if (which == z->s->img_n) return 0; // no match + z->img_comp[which].hd = q >> 4; if (z->img_comp[which].hd > 3) return stbi__err("bad DC huff","Corrupt JPEG"); + z->img_comp[which].ha = q & 15; if (z->img_comp[which].ha > 3) return stbi__err("bad AC huff","Corrupt JPEG"); + z->order[i] = which; + } + + { + int aa; + z->spec_start = stbi__get8(z->s); + z->spec_end = stbi__get8(z->s); // should be 63, but might be 0 + aa = stbi__get8(z->s); + z->succ_high = (aa >> 4); + z->succ_low = (aa & 15); + if (z->progressive) { + if (z->spec_start > 63 || z->spec_end > 63 || z->spec_start > z->spec_end || z->succ_high > 13 || z->succ_low > 13) + return stbi__err("bad SOS", "Corrupt JPEG"); + } else { + if (z->spec_start != 0) return stbi__err("bad SOS","Corrupt JPEG"); + if (z->succ_high != 0 || z->succ_low != 0) return stbi__err("bad SOS","Corrupt JPEG"); + z->spec_end = 63; + } + } + + return 1; +} + +static int stbi__free_jpeg_components(stbi__jpeg *z, int ncomp, int why) +{ + int i; + for (i=0; i < ncomp; ++i) { + if (z->img_comp[i].raw_data) { + STBI_FREE(z->img_comp[i].raw_data); + z->img_comp[i].raw_data = NULL; + z->img_comp[i].data = NULL; + } + if (z->img_comp[i].raw_coeff) { + STBI_FREE(z->img_comp[i].raw_coeff); + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].coeff = 0; + } + if (z->img_comp[i].linebuf) { + STBI_FREE(z->img_comp[i].linebuf); + z->img_comp[i].linebuf = NULL; + } + } + return why; +} + +static int stbi__process_frame_header(stbi__jpeg *z, int scan) +{ + stbi__context *s = z->s; + int Lf,p,i,q, h_max=1,v_max=1,c; + Lf = stbi__get16be(s); if (Lf < 11) return stbi__err("bad SOF len","Corrupt JPEG"); // JPEG + p = stbi__get8(s); if (p != 8) return stbi__err("only 8-bit","JPEG format not supported: 8-bit only"); // JPEG baseline + s->img_y = stbi__get16be(s); if (s->img_y == 0) return stbi__err("no header height", "JPEG format not supported: delayed height"); // Legal, but we don't handle it--but neither does IJG + s->img_x = stbi__get16be(s); if (s->img_x == 0) return stbi__err("0 width","Corrupt 
JPEG"); // JPEG requires + c = stbi__get8(s); + if (c != 3 && c != 1) return stbi__err("bad component count","Corrupt JPEG"); // JFIF requires + s->img_n = c; + for (i=0; i < c; ++i) { + z->img_comp[i].data = NULL; + z->img_comp[i].linebuf = NULL; + } + + if (Lf != 8+3*s->img_n) return stbi__err("bad SOF len","Corrupt JPEG"); + + z->rgb = 0; + for (i=0; i < s->img_n; ++i) { + static unsigned char rgb[3] = { 'R', 'G', 'B' }; + z->img_comp[i].id = stbi__get8(s); + if (z->img_comp[i].id == rgb[i]) + ++z->rgb; + q = stbi__get8(s); + z->img_comp[i].h = (q >> 4); if (!z->img_comp[i].h || z->img_comp[i].h > 4) return stbi__err("bad H","Corrupt JPEG"); + z->img_comp[i].v = q & 15; if (!z->img_comp[i].v || z->img_comp[i].v > 4) return stbi__err("bad V","Corrupt JPEG"); + z->img_comp[i].tq = stbi__get8(s); if (z->img_comp[i].tq > 3) return stbi__err("bad TQ","Corrupt JPEG"); + } + + if (scan != STBI__SCAN_load) return 1; + + if (!stbi__mad3sizes_valid(s->img_x, s->img_y, s->img_n, 0)) return stbi__err("too large", "Image too large to decode"); + + for (i=0; i < s->img_n; ++i) { + if (z->img_comp[i].h > h_max) h_max = z->img_comp[i].h; + if (z->img_comp[i].v > v_max) v_max = z->img_comp[i].v; + } + + // compute interleaved mcu info + z->img_h_max = h_max; + z->img_v_max = v_max; + z->img_mcu_w = h_max * 8; + z->img_mcu_h = v_max * 8; + // these sizes can't be more than 17 bits + z->img_mcu_x = (s->img_x + z->img_mcu_w-1) / z->img_mcu_w; + z->img_mcu_y = (s->img_y + z->img_mcu_h-1) / z->img_mcu_h; + + for (i=0; i < s->img_n; ++i) { + // number of effective pixels (e.g. for non-interleaved MCU) + z->img_comp[i].x = (s->img_x * z->img_comp[i].h + h_max-1) / h_max; + z->img_comp[i].y = (s->img_y * z->img_comp[i].v + v_max-1) / v_max; + // to simplify generation, we'll allocate enough memory to decode + // the bogus oversized data from using interleaved MCUs and their + // big blocks (e.g. a 16x16 iMCU on an image of width 33); we won't + // discard the extra data until colorspace conversion + // + // img_mcu_x, img_mcu_y: <=17 bits; comp[i].h and .v are <=4 (checked earlier) + // so these muls can't overflow with 32-bit ints (which we require) + z->img_comp[i].w2 = z->img_mcu_x * z->img_comp[i].h * 8; + z->img_comp[i].h2 = z->img_mcu_y * z->img_comp[i].v * 8; + z->img_comp[i].coeff = 0; + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].linebuf = NULL; + z->img_comp[i].raw_data = stbi__malloc_mad2(z->img_comp[i].w2, z->img_comp[i].h2, 15); + if (z->img_comp[i].raw_data == NULL) + return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory")); + // align blocks for idct using mmx/sse + z->img_comp[i].data = (stbi_uc*) (((size_t) z->img_comp[i].raw_data + 15) & ~15); + if (z->progressive) { + // w2, h2 are multiples of 8 (see above) + z->img_comp[i].coeff_w = z->img_comp[i].w2 / 8; + z->img_comp[i].coeff_h = z->img_comp[i].h2 / 8; + z->img_comp[i].raw_coeff = stbi__malloc_mad3(z->img_comp[i].w2, z->img_comp[i].h2, sizeof(short), 15); + if (z->img_comp[i].raw_coeff == NULL) + return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory")); + z->img_comp[i].coeff = (short*) (((size_t) z->img_comp[i].raw_coeff + 15) & ~15); + } + } + + return 1; +} + +// use comparisons since in some cases we handle more than one case (e.g. 
SOF) +#define stbi__DNL(x) ((x) == 0xdc) +#define stbi__SOI(x) ((x) == 0xd8) +#define stbi__EOI(x) ((x) == 0xd9) +#define stbi__SOF(x) ((x) == 0xc0 || (x) == 0xc1 || (x) == 0xc2) +#define stbi__SOS(x) ((x) == 0xda) + +#define stbi__SOF_progressive(x) ((x) == 0xc2) + +static int stbi__decode_jpeg_header(stbi__jpeg *z, int scan) +{ + int m; + z->marker = STBI__MARKER_none; // initialize cached marker to empty + m = stbi__get_marker(z); + if (!stbi__SOI(m)) return stbi__err("no SOI","Corrupt JPEG"); + if (scan == STBI__SCAN_type) return 1; + m = stbi__get_marker(z); + while (!stbi__SOF(m)) { + if (!stbi__process_marker(z,m)) return 0; + m = stbi__get_marker(z); + while (m == STBI__MARKER_none) { + // some files have extra padding after their blocks, so ok, we'll scan + if (stbi__at_eof(z->s)) return stbi__err("no SOF", "Corrupt JPEG"); + m = stbi__get_marker(z); + } + } + z->progressive = stbi__SOF_progressive(m); + if (!stbi__process_frame_header(z, scan)) return 0; + return 1; +} + +// decode image to YCbCr format +static int stbi__decode_jpeg_image(stbi__jpeg *j) +{ + int m; + for (m = 0; m < 4; m++) { + j->img_comp[m].raw_data = NULL; + j->img_comp[m].raw_coeff = NULL; + } + j->restart_interval = 0; + if (!stbi__decode_jpeg_header(j, STBI__SCAN_load)) return 0; + m = stbi__get_marker(j); + while (!stbi__EOI(m)) { + if (stbi__SOS(m)) { + if (!stbi__process_scan_header(j)) return 0; + if (!stbi__parse_entropy_coded_data(j)) return 0; + if (j->marker == STBI__MARKER_none ) { + // handle 0s at the end of image data from IP Kamera 9060 + while (!stbi__at_eof(j->s)) { + int x = stbi__get8(j->s); + if (x == 255) { + j->marker = stbi__get8(j->s); + break; + } + } + // if we reach eof without hitting a marker, stbi__get_marker() below will fail and we'll eventually return 0 + } + } else if (stbi__DNL(m)) { + int Ld = stbi__get16be(j->s); + stbi__uint32 NL = stbi__get16be(j->s); + if (Ld != 4) stbi__err("bad DNL len", "Corrupt JPEG"); + if (NL != j->s->img_y) stbi__err("bad DNL height", "Corrupt JPEG"); + } else { + if (!stbi__process_marker(j, m)) return 0; + } + m = stbi__get_marker(j); + } + if (j->progressive) + stbi__jpeg_finish(j); + return 1; +} + +// static jfif-centered resampling (across block boundaries) + +typedef stbi_uc *(*resample_row_func)(stbi_uc *out, stbi_uc *in0, stbi_uc *in1, + int w, int hs); + +#define stbi__div4(x) ((stbi_uc) ((x) >> 2)) + +static stbi_uc *resample_row_1(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + STBI_NOTUSED(out); + STBI_NOTUSED(in_far); + STBI_NOTUSED(w); + STBI_NOTUSED(hs); + return in_near; +} + +static stbi_uc* stbi__resample_row_v_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples vertically for every one in input + int i; + STBI_NOTUSED(hs); + for (i=0; i < w; ++i) + out[i] = stbi__div4(3*in_near[i] + in_far[i] + 2); + return out; +} + +static stbi_uc* stbi__resample_row_h_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples horizontally for every one in input + int i; + stbi_uc *input = in_near; + + if (w == 1) { + // if only one sample, can't do any interpolation + out[0] = out[1] = input[0]; + return out; + } + + out[0] = input[0]; + out[1] = stbi__div4(input[0]*3 + input[1] + 2); + for (i=1; i < w-1; ++i) { + int n = 3*input[i]+2; + out[i*2+0] = stbi__div4(n+input[i-1]); + out[i*2+1] = stbi__div4(n+input[i+1]); + } + out[i*2+0] = stbi__div4(input[w-2]*3 + input[w-1] + 2); + out[i*2+1] = input[w-1]; + + 
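// interior output pairs above use the jfif "centered" weighting: each
+   // upsampled pixel mixes its nearest input sample at weight 3 with the
+   // adjacent sample at weight 1, plus 2 for rounding before the >>2,
+   // e.g. out[2i] = (3*in[i] + in[i-1] + 2) >> 2
+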
STBI_NOTUSED(in_far); + STBI_NOTUSED(hs); + + return out; +} + +#define stbi__div16(x) ((stbi_uc) ((x) >> 4)) + +static stbi_uc *stbi__resample_row_hv_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i,t0,t1; + if (w == 1) { + out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3*in_near[0] + in_far[0]; + out[0] = stbi__div4(t1+2); + for (i=1; i < w; ++i) { + t0 = t1; + t1 = 3*in_near[i]+in_far[i]; + out[i*2-1] = stbi__div16(3*t0 + t1 + 8); + out[i*2 ] = stbi__div16(3*t1 + t0 + 8); + } + out[w*2-1] = stbi__div4(t1+2); + + STBI_NOTUSED(hs); + + return out; +} + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static stbi_uc *stbi__resample_row_hv_2_simd(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i=0,t0,t1; + + if (w == 1) { + out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3*in_near[0] + in_far[0]; + // process groups of 8 pixels for as long as we can. + // note we can't handle the last pixel in a row in this loop + // because we need to handle the filter boundary conditions. + for (; i < ((w-1) & ~7); i += 8) { +#if defined(STBI_SSE2) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + __m128i zero = _mm_setzero_si128(); + __m128i farb = _mm_loadl_epi64((__m128i *) (in_far + i)); + __m128i nearb = _mm_loadl_epi64((__m128i *) (in_near + i)); + __m128i farw = _mm_unpacklo_epi8(farb, zero); + __m128i nearw = _mm_unpacklo_epi8(nearb, zero); + __m128i diff = _mm_sub_epi16(farw, nearw); + __m128i nears = _mm_slli_epi16(nearw, 2); + __m128i curr = _mm_add_epi16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. + __m128i prv0 = _mm_slli_si128(curr, 2); + __m128i nxt0 = _mm_srli_si128(curr, 2); + __m128i prev = _mm_insert_epi16(prv0, t1, 0); + __m128i next = _mm_insert_epi16(nxt0, 3*in_near[i+8] + in_far[i+8], 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. + __m128i bias = _mm_set1_epi16(8); + __m128i curs = _mm_slli_epi16(curr, 2); + __m128i prvd = _mm_sub_epi16(prev, curr); + __m128i nxtd = _mm_sub_epi16(next, curr); + __m128i curb = _mm_add_epi16(curs, bias); + __m128i even = _mm_add_epi16(prvd, curb); + __m128i odd = _mm_add_epi16(nxtd, curb); + + // interleave even and odd pixels, then undo scaling. 
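+      // (the unpacklo/unpackhi pair below restores pixel order, placing even
+      // results at 2i and odd results at 2i+1; the +8 bias added above plus
+      // the >>4 here round-to-nearest for the combined /16 of the two 4x
+      // filter passes, matching the scalar stbi__div16 path)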
+ __m128i int0 = _mm_unpacklo_epi16(even, odd); + __m128i int1 = _mm_unpackhi_epi16(even, odd); + __m128i de0 = _mm_srli_epi16(int0, 4); + __m128i de1 = _mm_srli_epi16(int1, 4); + + // pack and write output + __m128i outv = _mm_packus_epi16(de0, de1); + _mm_storeu_si128((__m128i *) (out + i*2), outv); +#elif defined(STBI_NEON) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + uint8x8_t farb = vld1_u8(in_far + i); + uint8x8_t nearb = vld1_u8(in_near + i); + int16x8_t diff = vreinterpretq_s16_u16(vsubl_u8(farb, nearb)); + int16x8_t nears = vreinterpretq_s16_u16(vshll_n_u8(nearb, 2)); + int16x8_t curr = vaddq_s16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. + int16x8_t prv0 = vextq_s16(curr, curr, 7); + int16x8_t nxt0 = vextq_s16(curr, curr, 1); + int16x8_t prev = vsetq_lane_s16(t1, prv0, 0); + int16x8_t next = vsetq_lane_s16(3*in_near[i+8] + in_far[i+8], nxt0, 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. + int16x8_t curs = vshlq_n_s16(curr, 2); + int16x8_t prvd = vsubq_s16(prev, curr); + int16x8_t nxtd = vsubq_s16(next, curr); + int16x8_t even = vaddq_s16(curs, prvd); + int16x8_t odd = vaddq_s16(curs, nxtd); + + // undo scaling and round, then store with even/odd phases interleaved + uint8x8x2_t o; + o.val[0] = vqrshrun_n_s16(even, 4); + o.val[1] = vqrshrun_n_s16(odd, 4); + vst2_u8(out + i*2, o); +#endif + + // "previous" value for next iter + t1 = 3*in_near[i+7] + in_far[i+7]; + } + + t0 = t1; + t1 = 3*in_near[i] + in_far[i]; + out[i*2] = stbi__div16(3*t1 + t0 + 8); + + for (++i; i < w; ++i) { + t0 = t1; + t1 = 3*in_near[i]+in_far[i]; + out[i*2-1] = stbi__div16(3*t0 + t1 + 8); + out[i*2 ] = stbi__div16(3*t1 + t0 + 8); + } + out[w*2-1] = stbi__div4(t1+2); + + STBI_NOTUSED(hs); + + return out; +} +#endif + +static stbi_uc *stbi__resample_row_generic(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // resample with nearest-neighbor + int i,j; + STBI_NOTUSED(in_far); + for (i=0; i < w; ++i) + for (j=0; j < hs; ++j) + out[i*hs+j] = in_near[i]; + return out; +} + +// this is a reduced-precision calculation of YCbCr-to-RGB introduced +// to make sure the code produces the same results in both SIMD and scalar +#define stbi__float2fixed(x) (((int) ((x) * 4096.0f + 0.5f)) << 8) +static void stbi__YCbCr_to_RGB_row(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step) +{ + int i; + for (i=0; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1<<19); // rounding + int r,g,b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr* stbi__float2fixed(1.40200f); + g = y_fixed + (cr*-stbi__float2fixed(0.71414f)) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000); + b = y_fixed + cb* stbi__float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} + +#if 
defined(STBI_SSE2) || defined(STBI_NEON) +static void stbi__YCbCr_to_RGB_simd(stbi_uc *out, stbi_uc const *y, stbi_uc const *pcb, stbi_uc const *pcr, int count, int step) +{ + int i = 0; + +#ifdef STBI_SSE2 + // step == 3 is pretty ugly on the final interleave, and i'm not convinced + // it's useful in practice (you wouldn't use it for textures, for example). + // so just accelerate step == 4 case. + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. + __m128i signflip = _mm_set1_epi8(-0x80); + __m128i cr_const0 = _mm_set1_epi16( (short) ( 1.40200f*4096.0f+0.5f)); + __m128i cr_const1 = _mm_set1_epi16( - (short) ( 0.71414f*4096.0f+0.5f)); + __m128i cb_const0 = _mm_set1_epi16( - (short) ( 0.34414f*4096.0f+0.5f)); + __m128i cb_const1 = _mm_set1_epi16( (short) ( 1.77200f*4096.0f+0.5f)); + __m128i y_bias = _mm_set1_epi8((char) (unsigned char) 128); + __m128i xw = _mm_set1_epi16(255); // alpha channel + + for (; i+7 < count; i += 8) { + // load + __m128i y_bytes = _mm_loadl_epi64((__m128i *) (y+i)); + __m128i cr_bytes = _mm_loadl_epi64((__m128i *) (pcr+i)); + __m128i cb_bytes = _mm_loadl_epi64((__m128i *) (pcb+i)); + __m128i cr_biased = _mm_xor_si128(cr_bytes, signflip); // -128 + __m128i cb_biased = _mm_xor_si128(cb_bytes, signflip); // -128 + + // unpack to short (and left-shift cr, cb by 8) + __m128i yw = _mm_unpacklo_epi8(y_bias, y_bytes); + __m128i crw = _mm_unpacklo_epi8(_mm_setzero_si128(), cr_biased); + __m128i cbw = _mm_unpacklo_epi8(_mm_setzero_si128(), cb_biased); + + // color transform + __m128i yws = _mm_srli_epi16(yw, 4); + __m128i cr0 = _mm_mulhi_epi16(cr_const0, crw); + __m128i cb0 = _mm_mulhi_epi16(cb_const0, cbw); + __m128i cb1 = _mm_mulhi_epi16(cbw, cb_const1); + __m128i cr1 = _mm_mulhi_epi16(crw, cr_const1); + __m128i rws = _mm_add_epi16(cr0, yws); + __m128i gwt = _mm_add_epi16(cb0, yws); + __m128i bws = _mm_add_epi16(yws, cb1); + __m128i gws = _mm_add_epi16(gwt, cr1); + + // descale + __m128i rw = _mm_srai_epi16(rws, 4); + __m128i bw = _mm_srai_epi16(bws, 4); + __m128i gw = _mm_srai_epi16(gws, 4); + + // back to byte, set up for transpose + __m128i brb = _mm_packus_epi16(rw, bw); + __m128i gxb = _mm_packus_epi16(gw, xw); + + // transpose to interleave channels + __m128i t0 = _mm_unpacklo_epi8(brb, gxb); + __m128i t1 = _mm_unpackhi_epi8(brb, gxb); + __m128i o0 = _mm_unpacklo_epi16(t0, t1); + __m128i o1 = _mm_unpackhi_epi16(t0, t1); + + // store + _mm_storeu_si128((__m128i *) (out + 0), o0); + _mm_storeu_si128((__m128i *) (out + 16), o1); + out += 32; + } + } +#endif + +#ifdef STBI_NEON + // in this version, step=3 support would be easy to add. but is there demand? + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. 
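+      // the constants mirror the scalar fixed-point path: coefficients are
+      // rounded to multiples of 1/4096. y is expanded by <<4 and cr/cb by <<7,
+      // and vqdmulhq_s16 computes (2*a*b)>>16, so e.g. the red term becomes
+      // ((cr<<7)*5743*2)>>16 == cr*5743/256, roughly cr*1.402*16 -- the same
+      // 4-bit fixed-point scale as yws. vqrshrun_n_s16(...,4) below then
+      // rounds, rescales and saturates to bytes.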
+ uint8x8_t signflip = vdup_n_u8(0x80); + int16x8_t cr_const0 = vdupq_n_s16( (short) ( 1.40200f*4096.0f+0.5f)); + int16x8_t cr_const1 = vdupq_n_s16( - (short) ( 0.71414f*4096.0f+0.5f)); + int16x8_t cb_const0 = vdupq_n_s16( - (short) ( 0.34414f*4096.0f+0.5f)); + int16x8_t cb_const1 = vdupq_n_s16( (short) ( 1.77200f*4096.0f+0.5f)); + + for (; i+7 < count; i += 8) { + // load + uint8x8_t y_bytes = vld1_u8(y + i); + uint8x8_t cr_bytes = vld1_u8(pcr + i); + uint8x8_t cb_bytes = vld1_u8(pcb + i); + int8x8_t cr_biased = vreinterpret_s8_u8(vsub_u8(cr_bytes, signflip)); + int8x8_t cb_biased = vreinterpret_s8_u8(vsub_u8(cb_bytes, signflip)); + + // expand to s16 + int16x8_t yws = vreinterpretq_s16_u16(vshll_n_u8(y_bytes, 4)); + int16x8_t crw = vshll_n_s8(cr_biased, 7); + int16x8_t cbw = vshll_n_s8(cb_biased, 7); + + // color transform + int16x8_t cr0 = vqdmulhq_s16(crw, cr_const0); + int16x8_t cb0 = vqdmulhq_s16(cbw, cb_const0); + int16x8_t cr1 = vqdmulhq_s16(crw, cr_const1); + int16x8_t cb1 = vqdmulhq_s16(cbw, cb_const1); + int16x8_t rws = vaddq_s16(yws, cr0); + int16x8_t gws = vaddq_s16(vaddq_s16(yws, cb0), cr1); + int16x8_t bws = vaddq_s16(yws, cb1); + + // undo scaling, round, convert to byte + uint8x8x4_t o; + o.val[0] = vqrshrun_n_s16(rws, 4); + o.val[1] = vqrshrun_n_s16(gws, 4); + o.val[2] = vqrshrun_n_s16(bws, 4); + o.val[3] = vdup_n_u8(255); + + // store, interleaving r/g/b/a + vst4_u8(out, o); + out += 8*4; + } + } +#endif + + for (; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1<<19); // rounding + int r,g,b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr* stbi__float2fixed(1.40200f); + g = y_fixed + cr*-stbi__float2fixed(0.71414f) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000); + b = y_fixed + cb* stbi__float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} +#endif + +// set up the kernels +static void stbi__setup_jpeg(stbi__jpeg *j) +{ + j->idct_block_kernel = stbi__idct_block; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_row; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2; + +#ifdef STBI_SSE2 + if (stbi__sse2_available()) { + j->idct_block_kernel = stbi__idct_simd; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; + } +#endif + +#ifdef STBI_NEON + j->idct_block_kernel = stbi__idct_simd; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; +#endif +} + +// clean up the temporary component buffers +static void stbi__cleanup_jpeg(stbi__jpeg *j) +{ + stbi__free_jpeg_components(j, j->s->img_n, 0); +} + +typedef struct +{ + resample_row_func resample; + stbi_uc *line0,*line1; + int hs,vs; // expansion factor in each axis + int w_lores; // horizontal pixels pre-expansion + int ystep; // how far through vertical expansion we are + int ypos; // which pre-expansion row we're on +} stbi__resample; + +static stbi_uc *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp, int req_comp) +{ + int n, decode_n; + z->s->img_n = 0; // make stbi__cleanup_jpeg safe + + // validate req_comp + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); + + // load a jpeg image from whichever source, but leave in 
YCbCr format + if (!stbi__decode_jpeg_image(z)) { stbi__cleanup_jpeg(z); return NULL; } + + // determine actual number of components to generate + n = req_comp ? req_comp : z->s->img_n; + + if (z->s->img_n == 3 && n < 3 && z->rgb != 3) + decode_n = 1; + else + decode_n = z->s->img_n; + + // resample and color-convert + { + int k; + unsigned int i,j; + stbi_uc *output; + stbi_uc *coutput[4]; + + stbi__resample res_comp[4]; + + for (k=0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + + // allocate line buffer big enough for upsampling off the edges + // with upsample factor of 4 + z->img_comp[k].linebuf = (stbi_uc *) stbi__malloc(z->s->img_x + 3); + if (!z->img_comp[k].linebuf) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + r->hs = z->img_h_max / z->img_comp[k].h; + r->vs = z->img_v_max / z->img_comp[k].v; + r->ystep = r->vs >> 1; + r->w_lores = (z->s->img_x + r->hs-1) / r->hs; + r->ypos = 0; + r->line0 = r->line1 = z->img_comp[k].data; + + if (r->hs == 1 && r->vs == 1) r->resample = resample_row_1; + else if (r->hs == 1 && r->vs == 2) r->resample = stbi__resample_row_v_2; + else if (r->hs == 2 && r->vs == 1) r->resample = stbi__resample_row_h_2; + else if (r->hs == 2 && r->vs == 2) r->resample = z->resample_row_hv_2_kernel; + else r->resample = stbi__resample_row_generic; + } + + // can't error after this so, this is safe + output = (stbi_uc *) stbi__malloc_mad3(n, z->s->img_x, z->s->img_y, 1); + if (!output) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + // now go ahead and resample + for (j=0; j < z->s->img_y; ++j) { + stbi_uc *out = output + n * z->s->img_x * j; + for (k=0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + int y_bot = r->ystep >= (r->vs >> 1); + coutput[k] = r->resample(z->img_comp[k].linebuf, + y_bot ? r->line1 : r->line0, + y_bot ? 
r->line0 : r->line1, + r->w_lores, r->hs); + if (++r->ystep >= r->vs) { + r->ystep = 0; + r->line0 = r->line1; + if (++r->ypos < z->img_comp[k].y) + r->line1 += z->img_comp[k].w2; + } + } + if (n >= 3) { + stbi_uc *y = coutput[0]; + if (z->s->img_n == 3) { + if (z->rgb == 3) { + for (i=0; i < z->s->img_x; ++i) { + out[0] = y[i]; + out[1] = coutput[1][i]; + out[2] = coutput[2][i]; + out[3] = 255; + out += n; + } + } else { + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } + } else + for (i=0; i < z->s->img_x; ++i) { + out[0] = out[1] = out[2] = y[i]; + out[3] = 255; // not used if n==3 + out += n; + } + } else { + if (z->rgb == 3) { + if (n == 1) + for (i=0; i < z->s->img_x; ++i) + *out++ = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); + else { + for (i=0; i < z->s->img_x; ++i, out += 2) { + out[0] = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); + out[1] = 255; + } + } + } else { + stbi_uc *y = coutput[0]; + if (n == 1) + for (i=0; i < z->s->img_x; ++i) out[i] = y[i]; + else + for (i=0; i < z->s->img_x; ++i) *out++ = y[i], *out++ = 255; + } + } + } + stbi__cleanup_jpeg(z); + *out_x = z->s->img_x; + *out_y = z->s->img_y; + if (comp) *comp = z->s->img_n; // report original components, not output + return output; + } +} + +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + unsigned char* result; + stbi__jpeg* j = (stbi__jpeg*) stbi__malloc(sizeof(stbi__jpeg)); + j->s = s; + stbi__setup_jpeg(j); + result = load_jpeg_image(j, x,y,comp,req_comp); + STBI_FREE(j); + return result; +} + +static int stbi__jpeg_test(stbi__context *s) +{ + int r; + stbi__jpeg* j = (stbi__jpeg*)stbi__malloc(sizeof(stbi__jpeg)); + j->s = s; + stbi__setup_jpeg(j); + r = stbi__decode_jpeg_header(j, STBI__SCAN_type); + stbi__rewind(s); + STBI_FREE(j); + return r; +} + +static int stbi__jpeg_info_raw(stbi__jpeg *j, int *x, int *y, int *comp) +{ + if (!stbi__decode_jpeg_header(j, STBI__SCAN_header)) { + stbi__rewind( j->s ); + return 0; + } + if (x) *x = j->s->img_x; + if (y) *y = j->s->img_y; + if (comp) *comp = j->s->img_n; + return 1; +} + +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp) +{ + int result; + stbi__jpeg* j = (stbi__jpeg*) (stbi__malloc(sizeof(stbi__jpeg))); + j->s = s; + result = stbi__jpeg_info_raw(j, x, y, comp); + STBI_FREE(j); + return result; +} +#endif + +// public domain zlib decode v0.2 Sean Barrett 2006-11-18 +// simple implementation +// - all input must be provided in an upfront buffer +// - all output is written to a single output buffer (can malloc/realloc) +// performance +// - fast huffman + +#ifndef STBI_NO_ZLIB + +// fast-way is faster to check than jpeg huffman, but slow way is slower +#define STBI__ZFAST_BITS 9 // accelerate all cases in default tables +#define STBI__ZFAST_MASK ((1 << STBI__ZFAST_BITS) - 1) + +// zlib-style huffman encoding +// (jpegs packs from left, zlib from right, so can't share code) +typedef struct +{ + stbi__uint16 fast[1 << STBI__ZFAST_BITS]; + stbi__uint16 firstcode[16]; + int maxcode[17]; + stbi__uint16 firstsymbol[16]; + stbi_uc size[288]; + stbi__uint16 value[288]; +} stbi__zhuffman; + +stbi_inline static int stbi__bitreverse16(int n) +{ + n = ((n & 0xAAAA) >> 1) | ((n & 0x5555) << 1); + n = ((n & 0xCCCC) >> 2) | ((n & 0x3333) << 2); + n = ((n & 0xF0F0) >> 4) | ((n & 0x0F0F) << 4); + n = ((n & 0xFF00) >> 8) | ((n & 0x00FF) << 8); + return n; +} + +stbi_inline static int stbi__bit_reverse(int v, int bits) +{ + 
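// deflate packs huffman codes into the stream lsb-first, while the
+   // canonical-code tables below compare msb-first, so callers reverse the
+   // low 'bits' bits; e.g. stbi__bit_reverse(6,3) == 3 (110 -> 011)
+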
STBI_ASSERT(bits <= 16); + // to bit reverse n bits, reverse 16 and shift + // e.g. 11 bits, bit reverse and shift away 5 + return stbi__bitreverse16(v) >> (16-bits); +} + +static int stbi__zbuild_huffman(stbi__zhuffman *z, stbi_uc *sizelist, int num) +{ + int i,k=0; + int code, next_code[16], sizes[17]; + + // DEFLATE spec for generating codes + memset(sizes, 0, sizeof(sizes)); + memset(z->fast, 0, sizeof(z->fast)); + for (i=0; i < num; ++i) + ++sizes[sizelist[i]]; + sizes[0] = 0; + for (i=1; i < 16; ++i) + if (sizes[i] > (1 << i)) + return stbi__err("bad sizes", "Corrupt PNG"); + code = 0; + for (i=1; i < 16; ++i) { + next_code[i] = code; + z->firstcode[i] = (stbi__uint16) code; + z->firstsymbol[i] = (stbi__uint16) k; + code = (code + sizes[i]); + if (sizes[i]) + if (code-1 >= (1 << i)) return stbi__err("bad codelengths","Corrupt PNG"); + z->maxcode[i] = code << (16-i); // preshift for inner loop + code <<= 1; + k += sizes[i]; + } + z->maxcode[16] = 0x10000; // sentinel + for (i=0; i < num; ++i) { + int s = sizelist[i]; + if (s) { + int c = next_code[s] - z->firstcode[s] + z->firstsymbol[s]; + stbi__uint16 fastv = (stbi__uint16) ((s << 9) | i); + z->size [c] = (stbi_uc ) s; + z->value[c] = (stbi__uint16) i; + if (s <= STBI__ZFAST_BITS) { + int j = stbi__bit_reverse(next_code[s],s); + while (j < (1 << STBI__ZFAST_BITS)) { + z->fast[j] = fastv; + j += (1 << s); + } + } + ++next_code[s]; + } + } + return 1; +} + +// zlib-from-memory implementation for PNG reading +// because PNG allows splitting the zlib stream arbitrarily, +// and it's annoying structurally to have PNG call ZLIB call PNG, +// we require PNG read all the IDATs and combine them into a single +// memory buffer + +typedef struct +{ + stbi_uc *zbuffer, *zbuffer_end; + int num_bits; + stbi__uint32 code_buffer; + + char *zout; + char *zout_start; + char *zout_end; + int z_expandable; + + stbi__zhuffman z_length, z_distance; +} stbi__zbuf; + +stbi_inline static stbi_uc stbi__zget8(stbi__zbuf *z) +{ + if (z->zbuffer >= z->zbuffer_end) return 0; + return *z->zbuffer++; +} + +static void stbi__fill_bits(stbi__zbuf *z) +{ + do { + STBI_ASSERT(z->code_buffer < (1U << z->num_bits)); + z->code_buffer |= (unsigned int) stbi__zget8(z) << z->num_bits; + z->num_bits += 8; + } while (z->num_bits <= 24); +} + +stbi_inline static unsigned int stbi__zreceive(stbi__zbuf *z, int n) +{ + unsigned int k; + if (z->num_bits < n) stbi__fill_bits(z); + k = z->code_buffer & ((1 << n) - 1); + z->code_buffer >>= n; + z->num_bits -= n; + return k; +} + +static int stbi__zhuffman_decode_slowpath(stbi__zbuf *a, stbi__zhuffman *z) +{ + int b,s,k; + // not resolved by fast table, so compute it the slow way + // use jpeg approach, which requires MSbits at top + k = stbi__bit_reverse(a->code_buffer, 16); + for (s=STBI__ZFAST_BITS+1; ; ++s) + if (k < z->maxcode[s]) + break; + if (s == 16) return -1; // invalid code! 
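+   // (maxcode[s] was preshifted left by 16-s at table-build time, so one
+   // compare against the bit-reversed buffer finds the code length whose
+   // range contains this prefix; maxcode[16] == 0x10000 is the sentinel
+   // that triggers the s == 16 failure above)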
+ // code size is s, so: + b = (k >> (16-s)) - z->firstcode[s] + z->firstsymbol[s]; + STBI_ASSERT(z->size[b] == s); + a->code_buffer >>= s; + a->num_bits -= s; + return z->value[b]; +} + +stbi_inline static int stbi__zhuffman_decode(stbi__zbuf *a, stbi__zhuffman *z) +{ + int b,s; + if (a->num_bits < 16) stbi__fill_bits(a); + b = z->fast[a->code_buffer & STBI__ZFAST_MASK]; + if (b) { + s = b >> 9; + a->code_buffer >>= s; + a->num_bits -= s; + return b & 511; + } + return stbi__zhuffman_decode_slowpath(a, z); +} + +static int stbi__zexpand(stbi__zbuf *z, char *zout, int n) // need to make room for n bytes +{ + char *q; + int cur, limit, old_limit; + z->zout = zout; + if (!z->z_expandable) return stbi__err("output buffer limit","Corrupt PNG"); + cur = (int) (z->zout - z->zout_start); + limit = old_limit = (int) (z->zout_end - z->zout_start); + while (cur + n > limit) + limit *= 2; + q = (char *) STBI_REALLOC_SIZED(z->zout_start, old_limit, limit); + STBI_NOTUSED(old_limit); + if (q == NULL) return stbi__err("outofmem", "Out of memory"); + z->zout_start = q; + z->zout = q + cur; + z->zout_end = q + limit; + return 1; +} + +static int stbi__zlength_base[31] = { + 3,4,5,6,7,8,9,10,11,13, + 15,17,19,23,27,31,35,43,51,59, + 67,83,99,115,131,163,195,227,258,0,0 }; + +static int stbi__zlength_extra[31]= +{ 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0 }; + +static int stbi__zdist_base[32] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193, +257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0}; + +static int stbi__zdist_extra[32] = +{ 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; + +static int stbi__parse_huffman_block(stbi__zbuf *a) +{ + char *zout = a->zout; + for(;;) { + int z = stbi__zhuffman_decode(a, &a->z_length); + if (z < 256) { + if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); // error in huffman codes + if (zout >= a->zout_end) { + if (!stbi__zexpand(a, zout, 1)) return 0; + zout = a->zout; + } + *zout++ = (char) z; + } else { + stbi_uc *p; + int len,dist; + if (z == 256) { + a->zout = zout; + return 1; + } + z -= 257; + len = stbi__zlength_base[z]; + if (stbi__zlength_extra[z]) len += stbi__zreceive(a, stbi__zlength_extra[z]); + z = stbi__zhuffman_decode(a, &a->z_distance); + if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); + dist = stbi__zdist_base[z]; + if (stbi__zdist_extra[z]) dist += stbi__zreceive(a, stbi__zdist_extra[z]); + if (zout - a->zout_start < dist) return stbi__err("bad dist","Corrupt PNG"); + if (zout + len > a->zout_end) { + if (!stbi__zexpand(a, zout, len)) return 0; + zout = a->zout; + } + p = (stbi_uc *) (zout - dist); + if (dist == 1) { // run of one byte; common in images. 
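+         // (deflate allows dist < len, i.e. the match may overlap its own
+         // output; dist == 1 degenerates to a run fill of the last byte
+         // written, and the general path copies byte-by-byte for the same
+         // reason)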
+ stbi_uc v = *p; + if (len) { do *zout++ = v; while (--len); } + } else { + if (len) { do *zout++ = *p++; while (--len); } + } + } + } +} + +static int stbi__compute_huffman_codes(stbi__zbuf *a) +{ + static stbi_uc length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 }; + stbi__zhuffman z_codelength; + stbi_uc lencodes[286+32+137];//padding for maximum single op + stbi_uc codelength_sizes[19]; + int i,n; + + int hlit = stbi__zreceive(a,5) + 257; + int hdist = stbi__zreceive(a,5) + 1; + int hclen = stbi__zreceive(a,4) + 4; + int ntot = hlit + hdist; + + memset(codelength_sizes, 0, sizeof(codelength_sizes)); + for (i=0; i < hclen; ++i) { + int s = stbi__zreceive(a,3); + codelength_sizes[length_dezigzag[i]] = (stbi_uc) s; + } + if (!stbi__zbuild_huffman(&z_codelength, codelength_sizes, 19)) return 0; + + n = 0; + while (n < ntot) { + int c = stbi__zhuffman_decode(a, &z_codelength); + if (c < 0 || c >= 19) return stbi__err("bad codelengths", "Corrupt PNG"); + if (c < 16) + lencodes[n++] = (stbi_uc) c; + else { + stbi_uc fill = 0; + if (c == 16) { + c = stbi__zreceive(a,2)+3; + if (n == 0) return stbi__err("bad codelengths", "Corrupt PNG"); + fill = lencodes[n-1]; + } else if (c == 17) + c = stbi__zreceive(a,3)+3; + else { + STBI_ASSERT(c == 18); + c = stbi__zreceive(a,7)+11; + } + if (ntot - n < c) return stbi__err("bad codelengths", "Corrupt PNG"); + memset(lencodes+n, fill, c); + n += c; + } + } + if (n != ntot) return stbi__err("bad codelengths","Corrupt PNG"); + if (!stbi__zbuild_huffman(&a->z_length, lencodes, hlit)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, lencodes+hlit, hdist)) return 0; + return 1; +} + +static int stbi__parse_uncompressed_block(stbi__zbuf *a) +{ + stbi_uc header[4]; + int len,nlen,k; + if (a->num_bits & 7) + stbi__zreceive(a, a->num_bits & 7); // discard + // drain the bit-packed data into header + k = 0; + while (a->num_bits > 0) { + header[k++] = (stbi_uc) (a->code_buffer & 255); // suppress MSVC run-time check + a->code_buffer >>= 8; + a->num_bits -= 8; + } + STBI_ASSERT(a->num_bits == 0); + // now fill header the normal way + while (k < 4) + header[k++] = stbi__zget8(a); + len = header[1] * 256 + header[0]; + nlen = header[3] * 256 + header[2]; + if (nlen != (len ^ 0xffff)) return stbi__err("zlib corrupt","Corrupt PNG"); + if (a->zbuffer + len > a->zbuffer_end) return stbi__err("read past buffer","Corrupt PNG"); + if (a->zout + len > a->zout_end) + if (!stbi__zexpand(a, a->zout, len)) return 0; + memcpy(a->zout, a->zbuffer, len); + a->zbuffer += len; + a->zout += len; + return 1; +} + +static int stbi__parse_zlib_header(stbi__zbuf *a) +{ + int cmf = stbi__zget8(a); + int cm = cmf & 15; + /* int cinfo = cmf >> 4; */ + int flg = stbi__zget8(a); + if ((cmf*256+flg) % 31 != 0) return stbi__err("bad zlib header","Corrupt PNG"); // zlib spec + if (flg & 32) return stbi__err("no preset dict","Corrupt PNG"); // preset dictionary not allowed in png + if (cm != 8) return stbi__err("bad compression","Corrupt PNG"); // DEFLATE required for png + // window = 1 << (8 + cinfo)... 
but who cares, we fully buffer output + return 1; +} + +// @TODO: should statically initialize these for optimal thread safety +static stbi_uc stbi__zdefault_length[288], stbi__zdefault_distance[32]; +static void stbi__init_zdefaults(void) +{ + int i; // use <= to match clearly with spec + for (i=0; i <= 143; ++i) stbi__zdefault_length[i] = 8; + for ( ; i <= 255; ++i) stbi__zdefault_length[i] = 9; + for ( ; i <= 279; ++i) stbi__zdefault_length[i] = 7; + for ( ; i <= 287; ++i) stbi__zdefault_length[i] = 8; + + for (i=0; i <= 31; ++i) stbi__zdefault_distance[i] = 5; +} + +static int stbi__parse_zlib(stbi__zbuf *a, int parse_header) +{ + int final, type; + if (parse_header) + if (!stbi__parse_zlib_header(a)) return 0; + a->num_bits = 0; + a->code_buffer = 0; + do { + final = stbi__zreceive(a,1); + type = stbi__zreceive(a,2); + if (type == 0) { + if (!stbi__parse_uncompressed_block(a)) return 0; + } else if (type == 3) { + return 0; + } else { + if (type == 1) { + // use fixed code lengths + if (!stbi__zdefault_distance[31]) stbi__init_zdefaults(); + if (!stbi__zbuild_huffman(&a->z_length , stbi__zdefault_length , 288)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, stbi__zdefault_distance, 32)) return 0; + } else { + if (!stbi__compute_huffman_codes(a)) return 0; + } + if (!stbi__parse_huffman_block(a)) return 0; + } + } while (!final); + return 1; +} + +static int stbi__do_zlib(stbi__zbuf *a, char *obuf, int olen, int exp, int parse_header) +{ + a->zout_start = obuf; + a->zout = obuf; + a->zout_end = obuf + olen; + a->z_expandable = exp; + + return stbi__parse_zlib(a, parse_header); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, 1)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF char *stbi_zlib_decode_malloc(char const *buffer, int len, int *outlen) +{ + return stbi_zlib_decode_malloc_guesssize(buffer, len, 16384, outlen); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, parse_header)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, char const *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *) ibuffer; + a.zbuffer_end = (stbi_uc *) ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 1)) + return (int) (a.zout - a.zout_start); + else + return -1; +} + +STBIDEF char *stbi_zlib_decode_noheader_malloc(char const *buffer, int len, int *outlen) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(16384); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer+len; + if (stbi__do_zlib(&a, p, 16384, 1, 0)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int 
stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *) ibuffer; + a.zbuffer_end = (stbi_uc *) ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 0)) + return (int) (a.zout - a.zout_start); + else + return -1; +} +#endif + +// public domain "baseline" PNG decoder v0.10 Sean Barrett 2006-11-18 +// simple implementation +// - only 8-bit samples +// - no CRC checking +// - allocates lots of intermediate memory +// - avoids problem of streaming data between subsystems +// - avoids explicit window management +// performance +// - uses stb_zlib, a PD zlib implementation with fast huffman decoding + +#ifndef STBI_NO_PNG +typedef struct +{ + stbi__uint32 length; + stbi__uint32 type; +} stbi__pngchunk; + +static stbi__pngchunk stbi__get_chunk_header(stbi__context *s) +{ + stbi__pngchunk c; + c.length = stbi__get32be(s); + c.type = stbi__get32be(s); + return c; +} + +static int stbi__check_png_header(stbi__context *s) +{ + static stbi_uc png_sig[8] = { 137,80,78,71,13,10,26,10 }; + int i; + for (i=0; i < 8; ++i) + if (stbi__get8(s) != png_sig[i]) return stbi__err("bad png sig","Not a PNG"); + return 1; +} + +typedef struct +{ + stbi__context *s; + stbi_uc *idata, *expanded, *out; + int depth; +} stbi__png; + + +enum { + STBI__F_none=0, + STBI__F_sub=1, + STBI__F_up=2, + STBI__F_avg=3, + STBI__F_paeth=4, + // synthetic filters used for first scanline to avoid needing a dummy row of 0s + STBI__F_avg_first, + STBI__F_paeth_first +}; + +static stbi_uc first_row_filter[5] = +{ + STBI__F_none, + STBI__F_sub, + STBI__F_none, + STBI__F_avg_first, + STBI__F_paeth_first +}; + +static int stbi__paeth(int a, int b, int c) +{ + int p = a + b - c; + int pa = abs(p-a); + int pb = abs(p-b); + int pc = abs(p-c); + if (pa <= pb && pa <= pc) return a; + if (pb <= pc) return b; + return c; +} + +static stbi_uc stbi__depth_scale_table[9] = { 0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01 }; + +// create the png data from post-deflated data +static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 raw_len, int out_n, stbi__uint32 x, stbi__uint32 y, int depth, int color) +{ + int bytes = (depth == 16? 
2 : 1); + stbi__context *s = a->s; + stbi__uint32 i,j,stride = x*out_n*bytes; + stbi__uint32 img_len, img_width_bytes; + int k; + int img_n = s->img_n; // copy it into a local for later + + int output_bytes = out_n*bytes; + int filter_bytes = img_n*bytes; + int width = x; + + STBI_ASSERT(out_n == s->img_n || out_n == s->img_n+1); + a->out = (stbi_uc *) stbi__malloc_mad3(x, y, output_bytes, 0); // extra bytes to write off the end into + if (!a->out) return stbi__err("outofmem", "Out of memory"); + + img_width_bytes = (((img_n * x * depth) + 7) >> 3); + img_len = (img_width_bytes + 1) * y; + if (s->img_x == x && s->img_y == y) { + if (raw_len != img_len) return stbi__err("not enough pixels","Corrupt PNG"); + } else { // interlaced: + if (raw_len < img_len) return stbi__err("not enough pixels","Corrupt PNG"); + } + + for (j=0; j < y; ++j) { + stbi_uc *cur = a->out + stride*j; + stbi_uc *prior = cur - stride; + int filter = *raw++; + + if (filter > 4) + return stbi__err("invalid filter","Corrupt PNG"); + + if (depth < 8) { + STBI_ASSERT(img_width_bytes <= x); + cur += x*out_n - img_width_bytes; // store output to the rightmost img_len bytes, so we can decode in place + filter_bytes = 1; + width = img_width_bytes; + } + + // if first row, use special filter that doesn't sample previous row + if (j == 0) filter = first_row_filter[filter]; + + // handle first byte explicitly + for (k=0; k < filter_bytes; ++k) { + switch (filter) { + case STBI__F_none : cur[k] = raw[k]; break; + case STBI__F_sub : cur[k] = raw[k]; break; + case STBI__F_up : cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break; + case STBI__F_avg : cur[k] = STBI__BYTECAST(raw[k] + (prior[k]>>1)); break; + case STBI__F_paeth : cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(0,prior[k],0)); break; + case STBI__F_avg_first : cur[k] = raw[k]; break; + case STBI__F_paeth_first: cur[k] = raw[k]; break; + } + } + + if (depth == 8) { + if (img_n != out_n) + cur[img_n] = 255; // first pixel + raw += img_n; + cur += out_n; + prior += out_n; + } else if (depth == 16) { + if (img_n != out_n) { + cur[filter_bytes] = 255; // first pixel top byte + cur[filter_bytes+1] = 255; // first pixel bottom byte + } + raw += filter_bytes; + cur += output_bytes; + prior += output_bytes; + } else { + raw += 1; + cur += 1; + prior += 1; + } + + // this is a little gross, so that we don't switch per-pixel or per-component + if (depth < 8 || img_n == out_n) { + int nk = (width - 1)*filter_bytes; + #define STBI__CASE(f) \ + case f: \ + for (k=0; k < nk; ++k) + switch (filter) { + // "none" filter turns into a memcpy here; make that explicit. 
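+         // (STBI__CASE pastes the per-byte loop head, so each filter below is
+         // one tight loop over the remaining nk bytes; 'prior' already holds
+         // decoded bytes from the previous scanline, as the png filter
+         // definitions require)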
+            case STBI__F_none:         memcpy(cur, raw, nk); break;
+            STBI__CASE(STBI__F_sub)          { cur[k] = STBI__BYTECAST(raw[k] + cur[k-filter_bytes]); } break;
+            STBI__CASE(STBI__F_up)           { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break;
+            STBI__CASE(STBI__F_avg)          { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k-filter_bytes])>>1)); } break;
+            STBI__CASE(STBI__F_paeth)        { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],prior[k],prior[k-filter_bytes])); } break;
+            STBI__CASE(STBI__F_avg_first)    { cur[k] = STBI__BYTECAST(raw[k] + (cur[k-filter_bytes] >> 1)); } break;
+            STBI__CASE(STBI__F_paeth_first)  { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],0,0)); } break;
+         }
+         #undef STBI__CASE
+         raw += nk;
+      } else {
+         STBI_ASSERT(img_n+1 == out_n);
+         #define STBI__CASE(f) \
+             case f: \
+                for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes) \
+                   for (k=0; k < filter_bytes; ++k)
+         switch (filter) {
+            STBI__CASE(STBI__F_none)         { cur[k] = raw[k]; } break;
+            STBI__CASE(STBI__F_sub)          { cur[k] = STBI__BYTECAST(raw[k] + cur[k- output_bytes]); } break;
+            STBI__CASE(STBI__F_up)           { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break;
+            STBI__CASE(STBI__F_avg)          { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k- output_bytes])>>1)); } break;
+            STBI__CASE(STBI__F_paeth)        { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],prior[k],prior[k- output_bytes])); } break;
+            STBI__CASE(STBI__F_avg_first)    { cur[k] = STBI__BYTECAST(raw[k] + (cur[k- output_bytes] >> 1)); } break;
+            STBI__CASE(STBI__F_paeth_first)  { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],0,0)); } break;
+         }
+         #undef STBI__CASE
+
+         // the loop above sets the high byte of the pixels' alpha, but for
+         // 16 bit png files we also need the low byte set. we'll do that here.
+         if (depth == 16) {
+            cur = a->out + stride*j; // start at the beginning of the row again
+            for (i=0; i < x; ++i,cur+=output_bytes) {
+               cur[filter_bytes+1] = 255;
+            }
+         }
+      }
+   }
+
+   // we make a separate pass to expand bits to pixels; for performance,
+   // this could run two scanlines behind the above code, so it won't
+   // interfere with filtering but will still be in the cache.
+   if (depth < 8) {
+      for (j=0; j < y; ++j) {
+         stbi_uc *cur = a->out + stride*j;
+         stbi_uc *in  = a->out + stride*j + x*out_n - img_width_bytes;
+         // unpack 1/2/4-bit into an 8-bit buffer. allows us to keep the common 8-bit path optimal at minimal cost for 1/2/4-bit
+         // png guarantees byte alignment; if width is not a multiple of 8/4/2 we'll decode dummy trailing data that will be skipped in the later loop
+         stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range
+
+         // note that the final byte might overshoot and write more data than desired.
+         // we can allocate enough data that this never writes out of memory, but it
+         // could also overwrite the next scanline. can it overwrite non-empty data
+         // on the next scanline? yes, consider 1-pixel-wide scanlines with 1-bit-per-pixel.
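+         // (concretely: with x=1, img_n=1, depth=1 the single input byte holds
+         // 8 samples but the row has only one output pixel; emitting all 8
+         // would spill 7 bytes into the next scanline)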
+ // so we need to explicitly clamp the final ones + + if (depth == 4) { + for (k=x*img_n; k >= 2; k-=2, ++in) { + *cur++ = scale * ((*in >> 4) ); + *cur++ = scale * ((*in ) & 0x0f); + } + if (k > 0) *cur++ = scale * ((*in >> 4) ); + } else if (depth == 2) { + for (k=x*img_n; k >= 4; k-=4, ++in) { + *cur++ = scale * ((*in >> 6) ); + *cur++ = scale * ((*in >> 4) & 0x03); + *cur++ = scale * ((*in >> 2) & 0x03); + *cur++ = scale * ((*in ) & 0x03); + } + if (k > 0) *cur++ = scale * ((*in >> 6) ); + if (k > 1) *cur++ = scale * ((*in >> 4) & 0x03); + if (k > 2) *cur++ = scale * ((*in >> 2) & 0x03); + } else if (depth == 1) { + for (k=x*img_n; k >= 8; k-=8, ++in) { + *cur++ = scale * ((*in >> 7) ); + *cur++ = scale * ((*in >> 6) & 0x01); + *cur++ = scale * ((*in >> 5) & 0x01); + *cur++ = scale * ((*in >> 4) & 0x01); + *cur++ = scale * ((*in >> 3) & 0x01); + *cur++ = scale * ((*in >> 2) & 0x01); + *cur++ = scale * ((*in >> 1) & 0x01); + *cur++ = scale * ((*in ) & 0x01); + } + if (k > 0) *cur++ = scale * ((*in >> 7) ); + if (k > 1) *cur++ = scale * ((*in >> 6) & 0x01); + if (k > 2) *cur++ = scale * ((*in >> 5) & 0x01); + if (k > 3) *cur++ = scale * ((*in >> 4) & 0x01); + if (k > 4) *cur++ = scale * ((*in >> 3) & 0x01); + if (k > 5) *cur++ = scale * ((*in >> 2) & 0x01); + if (k > 6) *cur++ = scale * ((*in >> 1) & 0x01); + } + if (img_n != out_n) { + int q; + // insert alpha = 255 + cur = a->out + stride*j; + if (img_n == 1) { + for (q=x-1; q >= 0; --q) { + cur[q*2+1] = 255; + cur[q*2+0] = cur[q]; + } + } else { + STBI_ASSERT(img_n == 3); + for (q=x-1; q >= 0; --q) { + cur[q*4+3] = 255; + cur[q*4+2] = cur[q*3+2]; + cur[q*4+1] = cur[q*3+1]; + cur[q*4+0] = cur[q*3+0]; + } + } + } + } + } else if (depth == 16) { + // force the image data from big-endian to platform-native. + // this is done in a separate pass due to the decoding relying + // on the data being untouched, but could probably be done + // per-line during decode if care is taken. + stbi_uc *cur = a->out; + stbi__uint16 *cur16 = (stbi__uint16*)cur; + + for(i=0; i < x*y*out_n; ++i,cur16++,cur+=2) { + *cur16 = (cur[0] << 8) | cur[1]; + } + } + + return 1; +} + +static int stbi__create_png_image(stbi__png *a, stbi_uc *image_data, stbi__uint32 image_data_len, int out_n, int depth, int color, int interlaced) +{ + int bytes = (depth == 16 ? 
2 : 1); + int out_bytes = out_n * bytes; + stbi_uc *final; + int p; + if (!interlaced) + return stbi__create_png_image_raw(a, image_data, image_data_len, out_n, a->s->img_x, a->s->img_y, depth, color); + + // de-interlacing + final = (stbi_uc *) stbi__malloc_mad3(a->s->img_x, a->s->img_y, out_bytes, 0); + for (p=0; p < 7; ++p) { + int xorig[] = { 0,4,0,2,0,1,0 }; + int yorig[] = { 0,0,4,0,2,0,1 }; + int xspc[] = { 8,8,4,4,2,2,1 }; + int yspc[] = { 8,8,8,4,4,2,2 }; + int i,j,x,y; + // pass1_x[4] = 0, pass1_x[5] = 1, pass1_x[12] = 1 + x = (a->s->img_x - xorig[p] + xspc[p]-1) / xspc[p]; + y = (a->s->img_y - yorig[p] + yspc[p]-1) / yspc[p]; + if (x && y) { + stbi__uint32 img_len = ((((a->s->img_n * x * depth) + 7) >> 3) + 1) * y; + if (!stbi__create_png_image_raw(a, image_data, image_data_len, out_n, x, y, depth, color)) { + STBI_FREE(final); + return 0; + } + for (j=0; j < y; ++j) { + for (i=0; i < x; ++i) { + int out_y = j*yspc[p]+yorig[p]; + int out_x = i*xspc[p]+xorig[p]; + memcpy(final + out_y*a->s->img_x*out_bytes + out_x*out_bytes, + a->out + (j*x+i)*out_bytes, out_bytes); + } + } + STBI_FREE(a->out); + image_data += img_len; + image_data_len -= img_len; + } + } + a->out = final; + + return 1; +} + +static int stbi__compute_transparency(stbi__png *z, stbi_uc tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + // compute color-based transparency, assuming we've + // already got 255 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) { + for (i=0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] ? 0 : 255); + p += 2; + } + } else { + for (i=0; i < pixel_count; ++i) { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; +} + +static int stbi__compute_transparency16(stbi__png *z, stbi__uint16 tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi__uint16 *p = (stbi__uint16*) z->out; + + // compute color-based transparency, assuming we've + // already got 65535 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) { + for (i = 0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] ? 
0 : 65535); + p += 2; + } + } else { + for (i = 0; i < pixel_count; ++i) { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; +} + +static int stbi__expand_png_palette(stbi__png *a, stbi_uc *palette, int len, int pal_img_n) +{ + stbi__uint32 i, pixel_count = a->s->img_x * a->s->img_y; + stbi_uc *p, *temp_out, *orig = a->out; + + p = (stbi_uc *) stbi__malloc_mad2(pixel_count, pal_img_n, 0); + if (p == NULL) return stbi__err("outofmem", "Out of memory"); + + // between here and free(out) below, exitting would leak + temp_out = p; + + if (pal_img_n == 3) { + for (i=0; i < pixel_count; ++i) { + int n = orig[i]*4; + p[0] = palette[n ]; + p[1] = palette[n+1]; + p[2] = palette[n+2]; + p += 3; + } + } else { + for (i=0; i < pixel_count; ++i) { + int n = orig[i]*4; + p[0] = palette[n ]; + p[1] = palette[n+1]; + p[2] = palette[n+2]; + p[3] = palette[n+3]; + p += 4; + } + } + STBI_FREE(a->out); + a->out = temp_out; + + STBI_NOTUSED(len); + + return 1; +} + +static int stbi__unpremultiply_on_load = 0; +static int stbi__de_iphone_flag = 0; + +STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply) +{ + stbi__unpremultiply_on_load = flag_true_if_should_unpremultiply; +} + +STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert) +{ + stbi__de_iphone_flag = flag_true_if_should_convert; +} + +static void stbi__de_iphone(stbi__png *z) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + if (s->img_out_n == 3) { // convert bgr to rgb + for (i=0; i < pixel_count; ++i) { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 3; + } + } else { + STBI_ASSERT(s->img_out_n == 4); + if (stbi__unpremultiply_on_load) { + // convert bgr to rgb and unpremultiply + for (i=0; i < pixel_count; ++i) { + stbi_uc a = p[3]; + stbi_uc t = p[0]; + if (a) { + p[0] = p[2] * 255 / a; + p[1] = p[1] * 255 / a; + p[2] = t * 255 / a; + } else { + p[0] = p[2]; + p[2] = t; + } + p += 4; + } + } else { + // convert bgr to rgb + for (i=0; i < pixel_count; ++i) { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 4; + } + } + } +} + +#define STBI__PNG_TYPE(a,b,c,d) (((a) << 24) + ((b) << 16) + ((c) << 8) + (d)) + +static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) +{ + stbi_uc palette[1024], pal_img_n=0; + stbi_uc has_trans=0, tc[3]; + stbi__uint16 tc16[3]; + stbi__uint32 ioff=0, idata_limit=0, i, pal_len=0; + int first=1,k,interlace=0, color=0, is_iphone=0; + stbi__context *s = z->s; + + z->expanded = NULL; + z->idata = NULL; + z->out = NULL; + + if (!stbi__check_png_header(s)) return 0; + + if (scan == STBI__SCAN_type) return 1; + + for (;;) { + stbi__pngchunk c = stbi__get_chunk_header(s); + switch (c.type) { + case STBI__PNG_TYPE('C','g','B','I'): + is_iphone = 1; + stbi__skip(s, c.length); + break; + case STBI__PNG_TYPE('I','H','D','R'): { + int comp,filter; + if (!first) return stbi__err("multiple IHDR","Corrupt PNG"); + first = 0; + if (c.length != 13) return stbi__err("bad IHDR len","Corrupt PNG"); + s->img_x = stbi__get32be(s); if (s->img_x > (1 << 24)) return stbi__err("too large","Very large image (corrupt?)"); + s->img_y = stbi__get32be(s); if (s->img_y > (1 << 24)) return stbi__err("too large","Very large image (corrupt?)"); + z->depth = stbi__get8(s); if (z->depth != 1 && z->depth != 2 && z->depth != 4 && z->depth != 8 && z->depth != 16) return stbi__err("1/2/4/8/16-bit only","PNG not supported: 1/2/4/8/16-bit only"); + color = stbi__get8(s); if (color > 6) 
return stbi__err("bad ctype","Corrupt PNG"); + if (color == 3 && z->depth == 16) return stbi__err("bad ctype","Corrupt PNG"); + if (color == 3) pal_img_n = 3; else if (color & 1) return stbi__err("bad ctype","Corrupt PNG"); + comp = stbi__get8(s); if (comp) return stbi__err("bad comp method","Corrupt PNG"); + filter= stbi__get8(s); if (filter) return stbi__err("bad filter method","Corrupt PNG"); + interlace = stbi__get8(s); if (interlace>1) return stbi__err("bad interlace method","Corrupt PNG"); + if (!s->img_x || !s->img_y) return stbi__err("0-pixel image","Corrupt PNG"); + if (!pal_img_n) { + s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0); + if ((1 << 30) / s->img_x / s->img_n < s->img_y) return stbi__err("too large", "Image too large to decode"); + if (scan == STBI__SCAN_header) return 1; + } else { + // if paletted, then pal_n is our final components, and + // img_n is # components to decompress/filter. + s->img_n = 1; + if ((1 << 30) / s->img_x / 4 < s->img_y) return stbi__err("too large","Corrupt PNG"); + // if SCAN_header, have to scan to see if we have a tRNS + } + break; + } + + case STBI__PNG_TYPE('P','L','T','E'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (c.length > 256*3) return stbi__err("invalid PLTE","Corrupt PNG"); + pal_len = c.length / 3; + if (pal_len * 3 != c.length) return stbi__err("invalid PLTE","Corrupt PNG"); + for (i=0; i < pal_len; ++i) { + palette[i*4+0] = stbi__get8(s); + palette[i*4+1] = stbi__get8(s); + palette[i*4+2] = stbi__get8(s); + palette[i*4+3] = 255; + } + break; + } + + case STBI__PNG_TYPE('t','R','N','S'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (z->idata) return stbi__err("tRNS after IDAT","Corrupt PNG"); + if (pal_img_n) { + if (scan == STBI__SCAN_header) { s->img_n = 4; return 1; } + if (pal_len == 0) return stbi__err("tRNS before PLTE","Corrupt PNG"); + if (c.length > pal_len) return stbi__err("bad tRNS len","Corrupt PNG"); + pal_img_n = 4; + for (i=0; i < c.length; ++i) + palette[i*4+3] = stbi__get8(s); + } else { + if (!(s->img_n & 1)) return stbi__err("tRNS with alpha","Corrupt PNG"); + if (c.length != (stbi__uint32) s->img_n*2) return stbi__err("bad tRNS len","Corrupt PNG"); + has_trans = 1; + if (z->depth == 16) { + for (k = 0; k < s->img_n; ++k) tc16[k] = (stbi__uint16)stbi__get16be(s); // copy the values as-is + } else { + for (k = 0; k < s->img_n; ++k) tc[k] = (stbi_uc)(stbi__get16be(s) & 255) * stbi__depth_scale_table[z->depth]; // non 8-bit images will be larger + } + } + break; + } + + case STBI__PNG_TYPE('I','D','A','T'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (pal_img_n && !pal_len) return stbi__err("no PLTE","Corrupt PNG"); + if (scan == STBI__SCAN_header) { s->img_n = pal_img_n; return 1; } + if ((int)(ioff + c.length) < (int)ioff) return 0; + if (ioff + c.length > idata_limit) { + stbi__uint32 idata_limit_old = idata_limit; + stbi_uc *p; + if (idata_limit == 0) idata_limit = c.length > 4096 ? 
c.length : 4096; + while (ioff + c.length > idata_limit) + idata_limit *= 2; + STBI_NOTUSED(idata_limit_old); + p = (stbi_uc *) STBI_REALLOC_SIZED(z->idata, idata_limit_old, idata_limit); if (p == NULL) return stbi__err("outofmem", "Out of memory"); + z->idata = p; + } + if (!stbi__getn(s, z->idata+ioff,c.length)) return stbi__err("outofdata","Corrupt PNG"); + ioff += c.length; + break; + } + + case STBI__PNG_TYPE('I','E','N','D'): { + stbi__uint32 raw_len, bpl; + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (scan != STBI__SCAN_load) return 1; + if (z->idata == NULL) return stbi__err("no IDAT","Corrupt PNG"); + // initial guess for decoded data size to avoid unnecessary reallocs + bpl = (s->img_x * z->depth + 7) / 8; // bytes per line, per component + raw_len = bpl * s->img_y * s->img_n /* pixels */ + s->img_y /* filter mode per row */; + z->expanded = (stbi_uc *) stbi_zlib_decode_malloc_guesssize_headerflag((char *) z->idata, ioff, raw_len, (int *) &raw_len, !is_iphone); + if (z->expanded == NULL) return 0; // zlib should set error + STBI_FREE(z->idata); z->idata = NULL; + if ((req_comp == s->img_n+1 && req_comp != 3 && !pal_img_n) || has_trans) + s->img_out_n = s->img_n+1; + else + s->img_out_n = s->img_n; + if (!stbi__create_png_image(z, z->expanded, raw_len, s->img_out_n, z->depth, color, interlace)) return 0; + if (has_trans) { + if (z->depth == 16) { + if (!stbi__compute_transparency16(z, tc16, s->img_out_n)) return 0; + } else { + if (!stbi__compute_transparency(z, tc, s->img_out_n)) return 0; + } + } + if (is_iphone && stbi__de_iphone_flag && s->img_out_n > 2) + stbi__de_iphone(z); + if (pal_img_n) { + // pal_img_n == 3 or 4 + s->img_n = pal_img_n; // record the actual colors we had + s->img_out_n = pal_img_n; + if (req_comp >= 3) s->img_out_n = req_comp; + if (!stbi__expand_png_palette(z, palette, pal_len, s->img_out_n)) + return 0; + } + STBI_FREE(z->expanded); z->expanded = NULL; + return 1; + } + + default: + // if critical, fail + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if ((c.type & (1 << 29)) == 0) { + #ifndef STBI_NO_FAILURE_STRINGS + // not threadsafe + static char invalid_chunk[] = "XXXX PNG chunk not known"; + invalid_chunk[0] = STBI__BYTECAST(c.type >> 24); + invalid_chunk[1] = STBI__BYTECAST(c.type >> 16); + invalid_chunk[2] = STBI__BYTECAST(c.type >> 8); + invalid_chunk[3] = STBI__BYTECAST(c.type >> 0); + #endif + return stbi__err(invalid_chunk, "PNG not supported: unknown PNG chunk type"); + } + stbi__skip(s, c.length); + break; + } + // end of PNG chunk, read and skip CRC + stbi__get32be(s); + } +} + +static void *stbi__do_png(stbi__png *p, int *x, int *y, int *n, int req_comp, stbi__result_info *ri) +{ + void *result=NULL; + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); + if (stbi__parse_png_file(p, STBI__SCAN_load, req_comp)) { + if (p->depth < 8) + ri->bits_per_channel = 8; + else + ri->bits_per_channel = p->depth; + result = p->out; + p->out = NULL; + if (req_comp && req_comp != p->s->img_out_n) { + if (ri->bits_per_channel == 8) + result = stbi__convert_format((unsigned char *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + else + result = stbi__convert_format16((stbi__uint16 *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + p->s->img_out_n = req_comp; + if (result == NULL) return result; + } + *x = p->s->img_x; + *y = p->s->img_y; + if (n) *n = p->s->img_n; + } + STBI_FREE(p->out); p->out = NULL; + STBI_FREE(p->expanded); p->expanded = 
NULL; + STBI_FREE(p->idata); p->idata = NULL; + + return result; +} + +static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi__png p; + p.s = s; + return stbi__do_png(&p, x,y,comp,req_comp, ri); +} + +static int stbi__png_test(stbi__context *s) +{ + int r; + r = stbi__check_png_header(s); + stbi__rewind(s); + return r; +} + +static int stbi__png_info_raw(stbi__png *p, int *x, int *y, int *comp) +{ + if (!stbi__parse_png_file(p, STBI__SCAN_header, 0)) { + stbi__rewind( p->s ); + return 0; + } + if (x) *x = p->s->img_x; + if (y) *y = p->s->img_y; + if (comp) *comp = p->s->img_n; + return 1; +} + +static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__png p; + p.s = s; + return stbi__png_info_raw(&p, x, y, comp); +} +#endif + +// Microsoft/Windows BMP image + +#ifndef STBI_NO_BMP +static int stbi__bmp_test_raw(stbi__context *s) +{ + int r; + int sz; + if (stbi__get8(s) != 'B') return 0; + if (stbi__get8(s) != 'M') return 0; + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + stbi__get32le(s); // discard data offset + sz = stbi__get32le(s); + r = (sz == 12 || sz == 40 || sz == 56 || sz == 108 || sz == 124); + return r; +} + +static int stbi__bmp_test(stbi__context *s) +{ + int r = stbi__bmp_test_raw(s); + stbi__rewind(s); + return r; +} + + +// returns 0..31 for the highest set bit +static int stbi__high_bit(unsigned int z) +{ + int n=0; + if (z == 0) return -1; + if (z >= 0x10000) n += 16, z >>= 16; + if (z >= 0x00100) n += 8, z >>= 8; + if (z >= 0x00010) n += 4, z >>= 4; + if (z >= 0x00004) n += 2, z >>= 2; + if (z >= 0x00002) n += 1, z >>= 1; + return n; +} + +static int stbi__bitcount(unsigned int a) +{ + a = (a & 0x55555555) + ((a >> 1) & 0x55555555); // max 2 + a = (a & 0x33333333) + ((a >> 2) & 0x33333333); // max 4 + a = (a + (a >> 4)) & 0x0f0f0f0f; // max 8 per 4, now 8 bits + a = (a + (a >> 8)); // max 16 per 8 bits + a = (a + (a >> 16)); // max 32 per 8 bits + return a & 0xff; +} + +static int stbi__shiftsigned(int v, int shift, int bits) +{ + int result; + int z=0; + + if (shift < 0) v <<= -shift; + else v >>= shift; + result = v; + + z = bits; + while (z < 8) { + result += v >> z; + z += bits; + } + return result; +} + +typedef struct +{ + int bpp, offset, hsz; + unsigned int mr,mg,mb,ma, all_a; +} stbi__bmp_data; + +static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info) +{ + int hsz; + if (stbi__get8(s) != 'B' || stbi__get8(s) != 'M') return stbi__errpuc("not BMP", "Corrupt BMP"); + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + info->offset = stbi__get32le(s); + info->hsz = hsz = stbi__get32le(s); + info->mr = info->mg = info->mb = info->ma = 0; + + if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108 && hsz != 124) return stbi__errpuc("unknown BMP", "BMP type not supported: unknown"); + if (hsz == 12) { + s->img_x = stbi__get16le(s); + s->img_y = stbi__get16le(s); + } else { + s->img_x = stbi__get32le(s); + s->img_y = stbi__get32le(s); + } + if (stbi__get16le(s) != 1) return stbi__errpuc("bad BMP", "bad BMP"); + info->bpp = stbi__get16le(s); + if (info->bpp == 1) return stbi__errpuc("monochrome", "BMP type not supported: 1-bit"); + if (hsz != 12) { + int compress = stbi__get32le(s); + if (compress == 1 || compress == 2) return stbi__errpuc("BMP RLE", "BMP type not supported: RLE"); + stbi__get32le(s); // discard sizeof + 
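// (the next four header fields -- resolution and palette-usage counts --
+      // don't affect pixel decoding, so they're read and ignored)
+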
stbi__get32le(s); // discard hres + stbi__get32le(s); // discard vres + stbi__get32le(s); // discard colorsused + stbi__get32le(s); // discard max important + if (hsz == 40 || hsz == 56) { + if (hsz == 56) { + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + } + if (info->bpp == 16 || info->bpp == 32) { + if (compress == 0) { + if (info->bpp == 32) { + info->mr = 0xffu << 16; + info->mg = 0xffu << 8; + info->mb = 0xffu << 0; + info->ma = 0xffu << 24; + info->all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but it was all 0 + } else { + info->mr = 31u << 10; + info->mg = 31u << 5; + info->mb = 31u << 0; + } + } else if (compress == 3) { + info->mr = stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + // not documented, but generated by photoshop and handled by mspaint + if (info->mr == info->mg && info->mg == info->mb) { + // ?!?!? + return stbi__errpuc("bad BMP", "bad BMP"); + } + } else + return stbi__errpuc("bad BMP", "bad BMP"); + } + } else { + int i; + if (hsz != 108 && hsz != 124) + return stbi__errpuc("bad BMP", "bad BMP"); + info->mr = stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + info->ma = stbi__get32le(s); + stbi__get32le(s); // discard color space + for (i=0; i < 12; ++i) + stbi__get32le(s); // discard color space parameters + if (hsz == 124) { + stbi__get32le(s); // discard rendering intent + stbi__get32le(s); // discard offset of profile data + stbi__get32le(s); // discard size of profile data + stbi__get32le(s); // discard reserved + } + } + } + return (void *) 1; +} + + +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *out; + unsigned int mr=0,mg=0,mb=0,ma=0, all_a; + stbi_uc pal[256][4]; + int psize=0,i,j,width; + int flip_vertically, pad, target; + stbi__bmp_data info; + STBI_NOTUSED(ri); + + info.all_a = 255; + if (stbi__bmp_parse_header(s, &info) == NULL) + return NULL; // error code already set + + flip_vertically = ((int) s->img_y) > 0; + s->img_y = abs((int) s->img_y); + + mr = info.mr; + mg = info.mg; + mb = info.mb; + ma = info.ma; + all_a = info.all_a; + + if (info.hsz == 12) { + if (info.bpp < 24) + psize = (info.offset - 14 - 24) / 3; + } else { + if (info.bpp < 16) + psize = (info.offset - 14 - info.hsz) >> 2; + } + + s->img_n = ma ? 4 : 3; + if (req_comp && req_comp >= 3) // we can directly decode 3 or 4 + target = req_comp; + else + target = s->img_n; // if they want monochrome, we'll post-convert + + // sanity-check size + if (!stbi__mad3sizes_valid(target, s->img_x, s->img_y, 0)) + return stbi__errpuc("too large", "Corrupt BMP"); + + out = (stbi_uc *) stbi__malloc_mad3(target, s->img_x, s->img_y, 0); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + if (info.bpp < 16) { + int z=0; + if (psize == 0 || psize > 256) { STBI_FREE(out); return stbi__errpuc("invalid", "Corrupt BMP"); } + for (i=0; i < psize; ++i) { + pal[i][2] = stbi__get8(s); + pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + if (info.hsz != 12) stbi__get8(s); + pal[i][3] = 255; + } + stbi__skip(s, info.offset - 14 - info.hsz - psize * (info.hsz == 12 ? 
3 : 4)); + if (info.bpp == 4) width = (s->img_x + 1) >> 1; + else if (info.bpp == 8) width = s->img_x; + else { STBI_FREE(out); return stbi__errpuc("bad bpp", "Corrupt BMP"); } + pad = (-width)&3; + for (j=0; j < (int) s->img_y; ++j) { + for (i=0; i < (int) s->img_x; i += 2) { + int v=stbi__get8(s),v2=0; + if (info.bpp == 4) { + v2 = v & 15; + v >>= 4; + } + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + if (i+1 == (int) s->img_x) break; + v = (info.bpp == 8) ? stbi__get8(s) : v2; + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + } + stbi__skip(s, pad); + } + } else { + int rshift=0,gshift=0,bshift=0,ashift=0,rcount=0,gcount=0,bcount=0,acount=0; + int z = 0; + int easy=0; + stbi__skip(s, info.offset - 14 - info.hsz); + if (info.bpp == 24) width = 3 * s->img_x; + else if (info.bpp == 16) width = 2*s->img_x; + else /* bpp = 32 and pad = 0 */ width=0; + pad = (-width) & 3; + if (info.bpp == 24) { + easy = 1; + } else if (info.bpp == 32) { + if (mb == 0xff && mg == 0xff00 && mr == 0x00ff0000 && ma == 0xff000000) + easy = 2; + } + if (!easy) { + if (!mr || !mg || !mb) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); } + // right shift amt to put high bit in position #7 + rshift = stbi__high_bit(mr)-7; rcount = stbi__bitcount(mr); + gshift = stbi__high_bit(mg)-7; gcount = stbi__bitcount(mg); + bshift = stbi__high_bit(mb)-7; bcount = stbi__bitcount(mb); + ashift = stbi__high_bit(ma)-7; acount = stbi__bitcount(ma); + } + for (j=0; j < (int) s->img_y; ++j) { + if (easy) { + for (i=0; i < (int) s->img_x; ++i) { + unsigned char a; + out[z+2] = stbi__get8(s); + out[z+1] = stbi__get8(s); + out[z+0] = stbi__get8(s); + z += 3; + a = (easy == 2 ? stbi__get8(s) : 255); + all_a |= a; + if (target == 4) out[z++] = a; + } + } else { + int bpp = info.bpp; + for (i=0; i < (int) s->img_x; ++i) { + stbi__uint32 v = (bpp == 16 ? (stbi__uint32) stbi__get16le(s) : stbi__get32le(s)); + int a; + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mr, rshift, rcount)); + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mg, gshift, gcount)); + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mb, bshift, bcount)); + a = (ma ? stbi__shiftsigned(v & ma, ashift, acount) : 255); + all_a |= a; + if (target == 4) out[z++] = STBI__BYTECAST(a); + } + } + stbi__skip(s, pad); + } + } + + // if alpha channel is all 0s, replace with all 255s + if (target == 4 && all_a == 0) + for (i=4*s->img_x*s->img_y-1; i >= 0; i -= 4) + out[i] = 255; + + if (flip_vertically) { + stbi_uc t; + for (j=0; j < (int) s->img_y>>1; ++j) { + stbi_uc *p1 = out + j *s->img_x*target; + stbi_uc *p2 = out + (s->img_y-1-j)*s->img_x*target; + for (i=0; i < (int) s->img_x*target; ++i) { + t = p1[i], p1[i] = p2[i], p2[i] = t; + } + } + } + + if (req_comp && req_comp != target) { + out = stbi__convert_format(out, target, req_comp, s->img_x, s->img_y); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + *x = s->img_x; + *y = s->img_y; + if (comp) *comp = s->img_n; + return out; +} +#endif + +// Targa Truevision - TGA +// by Jonathan Dummer +#ifndef STBI_NO_TGA +// returns STBI_rgb or whatever, 0 on error +static int stbi__tga_get_comp(int bits_per_pixel, int is_grey, int* is_rgb16) +{ + // only RGB or RGBA (incl. 
16bit) or grey allowed + if(is_rgb16) *is_rgb16 = 0; + switch(bits_per_pixel) { + case 8: return STBI_grey; + case 16: if(is_grey) return STBI_grey_alpha; + // else: fall-through + case 15: if(is_rgb16) *is_rgb16 = 1; + return STBI_rgb; + case 24: // fall-through + case 32: return bits_per_pixel/8; + default: return 0; + } +} + +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp) +{ + int tga_w, tga_h, tga_comp, tga_image_type, tga_bits_per_pixel, tga_colormap_bpp; + int sz, tga_colormap_type; + stbi__get8(s); // discard Offset + tga_colormap_type = stbi__get8(s); // colormap type + if( tga_colormap_type > 1 ) { + stbi__rewind(s); + return 0; // only RGB or indexed allowed + } + tga_image_type = stbi__get8(s); // image type + if ( tga_colormap_type == 1 ) { // colormapped (paletted) image + if (tga_image_type != 1 && tga_image_type != 9) { + stbi__rewind(s); + return 0; + } + stbi__skip(s,4); // skip index of first colormap entry and number of entries + sz = stbi__get8(s); // check bits per palette color entry + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) { + stbi__rewind(s); + return 0; + } + stbi__skip(s,4); // skip image x and y origin + tga_colormap_bpp = sz; + } else { // "normal" image w/o colormap - only RGB or grey allowed, +/- RLE + if ( (tga_image_type != 2) && (tga_image_type != 3) && (tga_image_type != 10) && (tga_image_type != 11) ) { + stbi__rewind(s); + return 0; // only RGB or grey allowed, +/- RLE + } + stbi__skip(s,9); // skip colormap specification and image x/y origin + tga_colormap_bpp = 0; + } + tga_w = stbi__get16le(s); + if( tga_w < 1 ) { + stbi__rewind(s); + return 0; // test width + } + tga_h = stbi__get16le(s); + if( tga_h < 1 ) { + stbi__rewind(s); + return 0; // test height + } + tga_bits_per_pixel = stbi__get8(s); // bits per pixel + stbi__get8(s); // ignore alpha bits + if (tga_colormap_bpp != 0) { + if((tga_bits_per_pixel != 8) && (tga_bits_per_pixel != 16)) { + // when using a colormap, tga_bits_per_pixel is the size of the indexes + // I don't think anything but 8 or 16bit indexes makes sense + stbi__rewind(s); + return 0; + } + tga_comp = stbi__tga_get_comp(tga_colormap_bpp, 0, NULL); + } else { + tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3) || (tga_image_type == 11), NULL); + } + if(!tga_comp) { + stbi__rewind(s); + return 0; + } + if (x) *x = tga_w; + if (y) *y = tga_h; + if (comp) *comp = tga_comp; + return 1; // seems to have passed everything +} + +static int stbi__tga_test(stbi__context *s) +{ + int res = 0; + int sz, tga_color_type; + stbi__get8(s); // discard Offset + tga_color_type = stbi__get8(s); // color type + if ( tga_color_type > 1 ) goto errorEnd; // only RGB or indexed allowed + sz = stbi__get8(s); // image type + if ( tga_color_type == 1 ) { // colormapped (paletted) image + if (sz != 1 && sz != 9) goto errorEnd; // colortype 1 demands image type 1 or 9 + stbi__skip(s,4); // skip index of first colormap entry and number of entries + sz = stbi__get8(s); // check bits per palette color entry + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd; + stbi__skip(s,4); // skip image x and y origin + } else { // "normal" image w/o colormap + if ( (sz != 2) && (sz != 3) && (sz != 10) && (sz != 11) ) goto errorEnd; // only RGB or grey allowed, +/- RLE + stbi__skip(s,9); // skip colormap specification and image x/y origin + } + if ( stbi__get16le(s) < 1 ) goto errorEnd; // test width + if ( stbi__get16le(s) < 1 ) goto errorEnd; // test 
height + sz = stbi__get8(s); // bits per pixel + if ( (tga_color_type == 1) && (sz != 8) && (sz != 16) ) goto errorEnd; // for colormapped images, bpp is size of an index + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd; + + res = 1; // if we got this far, everything's good and we can return 1 instead of 0 + +errorEnd: + stbi__rewind(s); + return res; +} + +// read 16bit value and convert to 24bit RGB +static void stbi__tga_read_rgb16(stbi__context *s, stbi_uc* out) +{ + stbi__uint16 px = (stbi__uint16)stbi__get16le(s); + stbi__uint16 fiveBitMask = 31; + // we have 3 channels with 5bits each + int r = (px >> 10) & fiveBitMask; + int g = (px >> 5) & fiveBitMask; + int b = px & fiveBitMask; + // Note that this saves the data in RGB(A) order, so it doesn't need to be swapped later + out[0] = (stbi_uc)((r * 255)/31); + out[1] = (stbi_uc)((g * 255)/31); + out[2] = (stbi_uc)((b * 255)/31); + + // some people claim that the most significant bit might be used for alpha + // (possibly if an alpha-bit is set in the "image descriptor byte") + // but that only made 16bit test images completely translucent.. + // so let's treat all 15 and 16bit TGAs as RGB with no alpha. +} + +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + // read in the TGA header stuff + int tga_offset = stbi__get8(s); + int tga_indexed = stbi__get8(s); + int tga_image_type = stbi__get8(s); + int tga_is_RLE = 0; + int tga_palette_start = stbi__get16le(s); + int tga_palette_len = stbi__get16le(s); + int tga_palette_bits = stbi__get8(s); + int tga_x_origin = stbi__get16le(s); + int tga_y_origin = stbi__get16le(s); + int tga_width = stbi__get16le(s); + int tga_height = stbi__get16le(s); + int tga_bits_per_pixel = stbi__get8(s); + int tga_comp, tga_rgb16=0; + int tga_inverted = stbi__get8(s); + // int tga_alpha_bits = tga_inverted & 15; // the 4 lowest bits - unused (useless?) + // image data + unsigned char *tga_data; + unsigned char *tga_palette = NULL; + int i, j; + unsigned char raw_data[4] = {0}; + int RLE_count = 0; + int RLE_repeating = 0; + int read_next_pixel = 1; + STBI_NOTUSED(ri); + + // do a tiny bit of processing + if ( tga_image_type >= 8 ) + { + tga_image_type -= 8; + tga_is_RLE = 1; + } + tga_inverted = 1 - ((tga_inverted >> 5) & 1); + + // If I'm paletted, then I'll use the number of bits from the palette + if ( tga_indexed ) tga_comp = stbi__tga_get_comp(tga_palette_bits, 0, &tga_rgb16); + else tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3), &tga_rgb16); + + if(!tga_comp) // shouldn't really happen, stbi__tga_test() should have ensured basic consistency + return stbi__errpuc("bad format", "Can't find out TGA pixelformat"); + + // tga info + *x = tga_width; + *y = tga_height; + if (comp) *comp = tga_comp; + + if (!stbi__mad3sizes_valid(tga_width, tga_height, tga_comp, 0)) + return stbi__errpuc("too large", "Corrupt TGA"); + + tga_data = (unsigned char*)stbi__malloc_mad3(tga_width, tga_height, tga_comp, 0); + if (!tga_data) return stbi__errpuc("outofmem", "Out of memory"); + + // skip to the data's starting position (offset usually = 0) + stbi__skip(s, tga_offset ); + + if ( !tga_indexed && !tga_is_RLE && !tga_rgb16 ) { + for (i=0; i < tga_height; ++i) { + int row = tga_inverted ? tga_height -i - 1 : i; + stbi_uc *tga_row = tga_data + row*tga_width*tga_comp; + stbi__getn(s, tga_row, tga_width * tga_comp); + } + } else { + // do I need to load a palette?
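+ // Illustrative note on the RLE packets consumed in the loop below: bit 7 of each packet byte + // selects a run-length (1) or raw (0) packet and the low 7 bits hold count-1, so 0x83 starts a + // run of 4 copies of the next pixel while 0x02 announces 3 raw pixels -- matching + // RLE_count = 1 + (RLE_cmd & 127) and RLE_repeating = RLE_cmd >> 7.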
+ if ( tga_indexed) + { + // any data to skip? (offset usually = 0) + stbi__skip(s, tga_palette_start ); + // load the palette + tga_palette = (unsigned char*)stbi__malloc_mad2(tga_palette_len, tga_comp, 0); + if (!tga_palette) { + STBI_FREE(tga_data); + return stbi__errpuc("outofmem", "Out of memory"); + } + if (tga_rgb16) { + stbi_uc *pal_entry = tga_palette; + STBI_ASSERT(tga_comp == STBI_rgb); + for (i=0; i < tga_palette_len; ++i) { + stbi__tga_read_rgb16(s, pal_entry); + pal_entry += tga_comp; + } + } else if (!stbi__getn(s, tga_palette, tga_palette_len * tga_comp)) { + STBI_FREE(tga_data); + STBI_FREE(tga_palette); + return stbi__errpuc("bad palette", "Corrupt TGA"); + } + } + // load the data + for (i=0; i < tga_width * tga_height; ++i) + { + // if I'm in RLE mode, do I need to get a RLE packet? + if ( tga_is_RLE ) + { + if ( RLE_count == 0 ) + { + // yep, get the next byte as a RLE command + int RLE_cmd = stbi__get8(s); + RLE_count = 1 + (RLE_cmd & 127); + RLE_repeating = RLE_cmd >> 7; + read_next_pixel = 1; + } else if ( !RLE_repeating ) + { + read_next_pixel = 1; + } + } else + { + read_next_pixel = 1; + } + // OK, if I need to read a pixel, do it now + if ( read_next_pixel ) + { + // load however much data we did have + if ( tga_indexed ) + { + // read in index, then perform the lookup + int pal_idx = (tga_bits_per_pixel == 8) ? stbi__get8(s) : stbi__get16le(s); + if ( pal_idx >= tga_palette_len ) { + // invalid index + pal_idx = 0; + } + pal_idx *= tga_comp; + for (j = 0; j < tga_comp; ++j) { + raw_data[j] = tga_palette[pal_idx+j]; + } + } else if(tga_rgb16) { + STBI_ASSERT(tga_comp == STBI_rgb); + stbi__tga_read_rgb16(s, raw_data); + } else { + // read in the data raw + for (j = 0; j < tga_comp; ++j) { + raw_data[j] = stbi__get8(s); + } + } + // clear the reading flag for the next pixel + read_next_pixel = 0; + } // end of reading a pixel + + // copy data + for (j = 0; j < tga_comp; ++j) + tga_data[i*tga_comp+j] = raw_data[j]; + + // in case we're in RLE mode, keep counting down + --RLE_count; + } + // do I need to invert the image? + if ( tga_inverted ) + { + for (j = 0; j*2 < tga_height; ++j) + { + int index1 = j * tga_width * tga_comp; + int index2 = (tga_height - 1 - j) * tga_width * tga_comp; + for (i = tga_width * tga_comp; i > 0; --i) + { + unsigned char temp = tga_data[index1]; + tga_data[index1] = tga_data[index2]; + tga_data[index2] = temp; + ++index1; + ++index2; + } + } + } + // clear my palette, if I had one + if ( tga_palette != NULL ) + { + STBI_FREE( tga_palette ); + } + } + + // swap RGB - if the source data was RGB16, it already is in the right order + if (tga_comp >= 3 && !tga_rgb16) + { + unsigned char* tga_pixel = tga_data; + for (i=0; i < tga_width * tga_height; ++i) + { + unsigned char temp = tga_pixel[0]; + tga_pixel[0] = tga_pixel[2]; + tga_pixel[2] = temp; + tga_pixel += tga_comp; + } + } + + // convert to target component count + if (req_comp && req_comp != tga_comp) + tga_data = stbi__convert_format(tga_data, tga_comp, req_comp, tga_width, tga_height); + + // the things I do to get rid of an error message, and yet keep + // Microsoft's C compilers happy...
[8^( + tga_palette_start = tga_palette_len = tga_palette_bits = + tga_x_origin = tga_y_origin = 0; + // OK, done + return tga_data; +} +#endif + +// ************************************************************************************************* +// Photoshop PSD loader -- PD by Thatcher Ulrich, integration by Nicolas Schulz, tweaked by STB + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s) +{ + int r = (stbi__get32be(s) == 0x38425053); + stbi__rewind(s); + return r; +} + +static int stbi__psd_decode_rle(stbi__context *s, stbi_uc *p, int pixelCount) +{ + int count, nleft, len; + + count = 0; + while ((nleft = pixelCount - count) > 0) { + len = stbi__get8(s); + if (len == 128) { + // No-op. + } else if (len < 128) { + // Copy next len+1 bytes literally. + len++; + if (len > nleft) return 0; // corrupt data + count += len; + while (len) { + *p = stbi__get8(s); + p += 4; + len--; + } + } else if (len > 128) { + stbi_uc val; + // Next -len+1 bytes in the dest are replicated from next source byte. + // (Interpret len as a negative 8-bit int.) + len = 257 - len; + if (len > nleft) return 0; // corrupt data + val = stbi__get8(s); + count += len; + while (len) { + *p = val; + p += 4; + len--; + } + } + } + + return 1; +} + +static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) +{ + int pixelCount; + int channelCount, compression; + int channel, i; + int bitdepth; + int w,h; + stbi_uc *out; + STBI_NOTUSED(ri); + + // Check identifier + if (stbi__get32be(s) != 0x38425053) // "8BPS" + return stbi__errpuc("not PSD", "Corrupt PSD image"); + + // Check file type version. + if (stbi__get16be(s) != 1) + return stbi__errpuc("wrong version", "Unsupported version of PSD image"); + + // Skip 6 reserved bytes. + stbi__skip(s, 6 ); + + // Read the number of channels (R, G, B, A, etc). + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) + return stbi__errpuc("wrong channel count", "Unsupported number of channels in PSD image"); + + // Read the rows and columns of the image. + h = stbi__get32be(s); + w = stbi__get32be(s); + + // Make sure the depth is 8 bits. + bitdepth = stbi__get16be(s); + if (bitdepth != 8 && bitdepth != 16) + return stbi__errpuc("unsupported bit depth", "PSD bit depth is not 8 or 16 bit"); + + // Make sure the color mode is RGB. + // Valid options are: + // 0: Bitmap + // 1: Grayscale + // 2: Indexed color + // 3: RGB color + // 4: CMYK color + // 7: Multichannel + // 8: Duotone + // 9: Lab color + if (stbi__get16be(s) != 3) + return stbi__errpuc("wrong color format", "PSD is not in RGB color format"); + + // Skip the Mode Data. (It's the palette for indexed color; other info for other modes.) + stbi__skip(s,stbi__get32be(s) ); + + // Skip the image resources. (resolution, pen tool paths, etc) + stbi__skip(s, stbi__get32be(s) ); + + // Skip the reserved data. + stbi__skip(s, stbi__get32be(s) ); + + // Find out if the data is compressed. + // Known values: + // 0: no compression + // 1: RLE compressed + compression = stbi__get16be(s); + if (compression > 1) + return stbi__errpuc("bad compression", "PSD has an unknown compression format"); + + // Check size + if (!stbi__mad3sizes_valid(4, w, h, 0)) + return stbi__errpuc("too large", "Corrupt PSD"); + + // Create the destination image. 
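+ // Worked PackBits example for stbi__psd_decode_rle above (illustrative): a header byte of 0x02 + // copies the next 3 bytes literally, 0xFD (257 - 253 = 4) replicates the next byte 4 times, and + // 0x80 is a no-op; the p += 4 stride interleaves each decoded channel into the RGBA output.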
+ + if (!compression && bitdepth == 16 && bpc == 16) { + out = (stbi_uc *) stbi__malloc_mad3(8, w, h, 0); + ri->bits_per_channel = 16; + } else + out = (stbi_uc *) stbi__malloc(4 * w*h); + + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + pixelCount = w*h; + + // Initialize the data to zero. + //memset( out, 0, pixelCount * 4 ); + + // Finally, the image data. + if (compression) { + // RLE as used by .PSD and .TIFF + // Loop until you get the number of unpacked bytes you are expecting: + // Read the next source byte into n. + // If n is between 0 and 127 inclusive, copy the next n+1 bytes literally. + // Else if n is between -127 and -1 inclusive, copy the next byte -n+1 times. + // Else if n is 128, noop. + // Endloop + + // The RLE-compressed data is preceded by a 2-byte data count for each row in the data, + // which we're going to just skip. + stbi__skip(s, h * channelCount * 2 ); + + // Read the RLE data by channel. + for (channel = 0; channel < 4; channel++) { + stbi_uc *p; + + p = out+channel; + if (channel >= channelCount) { + // Fill this channel with default data. + for (i = 0; i < pixelCount; i++, p += 4) + *p = (channel == 3 ? 255 : 0); + } else { + // Read the RLE data. + if (!stbi__psd_decode_rle(s, p, pixelCount)) { + STBI_FREE(out); + return stbi__errpuc("corrupt", "bad RLE data"); + } + } + } + + } else { + // We're at the raw image data. It's each channel in order (Red, Green, Blue, Alpha, ...) + // where each channel consists of an 8-bit (or 16-bit) value for each pixel in the image. + + // Read the data by channel. + for (channel = 0; channel < 4; channel++) { + if (channel >= channelCount) { + // Fill this channel with default data. + if (bitdepth == 16 && bpc == 16) { + stbi__uint16 *q = ((stbi__uint16 *) out) + channel; + stbi__uint16 val = channel == 3 ? 65535 : 0; + for (i = 0; i < pixelCount; i++, q += 4) + *q = val; + } else { + stbi_uc *p = out+channel; + stbi_uc val = channel == 3 ?
255 : 0; + for (i = 0; i < pixelCount; i++, p += 4) + *p = val; + } + } else { + if (ri->bits_per_channel == 16) { // output bpc + stbi__uint16 *q = ((stbi__uint16 *) out) + channel; + for (i = 0; i < pixelCount; i++, q += 4) + *q = (stbi__uint16) stbi__get16be(s); + } else { + stbi_uc *p = out+channel; + if (bitdepth == 16) { // input bpc + for (i = 0; i < pixelCount; i++, p += 4) + *p = (stbi_uc) (stbi__get16be(s) >> 8); + } else { + for (i = 0; i < pixelCount; i++, p += 4) + *p = stbi__get8(s); + } + } + } + } + } + + // remove weird white matte from PSD + if (channelCount >= 4) { + if (ri->bits_per_channel == 16) { + for (i=0; i < w*h; ++i) { + stbi__uint16 *pixel = (stbi__uint16 *) out + 4*i; + if (pixel[3] != 0 && pixel[3] != 65535) { + float a = pixel[3] / 65535.0f; + float ra = 1.0f / a; + float inv_a = 65535.0f * (1 - ra); + pixel[0] = (stbi__uint16) (pixel[0]*ra + inv_a); + pixel[1] = (stbi__uint16) (pixel[1]*ra + inv_a); + pixel[2] = (stbi__uint16) (pixel[2]*ra + inv_a); + } + } + } else { + for (i=0; i < w*h; ++i) { + unsigned char *pixel = out + 4*i; + if (pixel[3] != 0 && pixel[3] != 255) { + float a = pixel[3] / 255.0f; + float ra = 1.0f / a; + float inv_a = 255.0f * (1 - ra); + pixel[0] = (unsigned char) (pixel[0]*ra + inv_a); + pixel[1] = (unsigned char) (pixel[1]*ra + inv_a); + pixel[2] = (unsigned char) (pixel[2]*ra + inv_a); + } + } + } + } + + // convert to desired output format + if (req_comp && req_comp != 4) { + if (ri->bits_per_channel == 16) + out = (stbi_uc *) stbi__convert_format16((stbi__uint16 *) out, 4, req_comp, w, h); + else + out = stbi__convert_format(out, 4, req_comp, w, h); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + if (comp) *comp = 4; + *y = h; + *x = w; + + return out; +} +#endif + +// ************************************************************************************************* +// Softimage PIC loader +// by Tom Seddon +// +// See http://softimage.wiki.softimage.com/index.php/INFO:_PIC_file_format +// See http://ozviz.wasp.uwa.edu.au/~pbourke/dataformats/softimagepic/ + +#ifndef STBI_NO_PIC +static int stbi__pic_is4(stbi__context *s,const char *str) +{ + int i; + for (i=0; i<4; ++i) + if (stbi__get8(s) != (stbi_uc)str[i]) + return 0; + + return 1; +} + +static int stbi__pic_test_core(stbi__context *s) +{ + int i; + + if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) + return 0; + + for(i=0;i<84;++i) + stbi__get8(s); + + if (!stbi__pic_is4(s,"PICT")) + return 0; + + return 1; +} + +typedef struct +{ + stbi_uc size,type,channel; +} stbi__pic_packet; + +static stbi_uc *stbi__readval(stbi__context *s, int channel, stbi_uc *dest) +{ + int mask=0x80, i; + + for (i=0; i<4; ++i, mask>>=1) { + if (channel & mask) { + if (stbi__at_eof(s)) return stbi__errpuc("bad file","PIC file too short"); + dest[i]=stbi__get8(s); + } + } + + return dest; +} + +static void stbi__copyval(int channel,stbi_uc *dest,const stbi_uc *src) +{ + int mask=0x80,i; + + for (i=0;i<4; ++i, mask>>=1) + if (channel&mask) + dest[i]=src[i]; +} + +static stbi_uc *stbi__pic_load_core(stbi__context *s,int width,int height,int *comp, stbi_uc *result) +{ + int act_comp=0,num_packets=0,y,chained; + stbi__pic_packet packets[10]; + + // this will (should...) cater for even some bizarre stuff like having data + // for the same channel in multiple packets. 
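+ // Packet header summary for the loop below (illustrative): each 4-byte header holds a chained + // flag, the field size (must be 8 bits), the compression type (0 = uncompressed, 1 = pure RLE, + // 2 = mixed RLE), and a channel mask whose bits 0x80/0x40/0x20/0x10 select R/G/B/A -- hence the + // (act_comp & 0x10) alpha test after the do/while.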
+ do { + stbi__pic_packet *packet; + + if (num_packets==sizeof(packets)/sizeof(packets[0])) + return stbi__errpuc("bad format","too many packets"); + + packet = &packets[num_packets++]; + + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + + act_comp |= packet->channel; + + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (reading packets)"); + if (packet->size != 8) return stbi__errpuc("bad format","packet isn't 8bpp"); + } while (chained); + + *comp = (act_comp & 0x10 ? 4 : 3); // has alpha channel? + + for(y=0; y<height; ++y) { + int packet_idx; + + for(packet_idx=0; packet_idx < num_packets; ++packet_idx) { + stbi__pic_packet *packet = &packets[packet_idx]; + stbi_uc *dest = result+y*width*4; + + switch (packet->type) { + default: + return stbi__errpuc("bad format","packet has bad compression type"); + + case 0: {//uncompressed + int x; + + for(x=0;x<width;++x, dest+=4) + if (!stbi__readval(s,packet->channel,dest)) + return 0; + break; + } + + case 1://Pure RLE + { + int left=width, i; + + while (left>0) { + stbi_uc count,value[4]; + + count=stbi__get8(s); + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pure read count)"); + + if (count > left) + count = (stbi_uc) left; + + if (!stbi__readval(s,packet->channel,value)) return 0; + + for(i=0; i<count; ++i, dest+=4) + stbi__copyval(packet->channel,dest,value); + left -= count; + } + } + break; + + case 2: {//Mixed RLE + int left=width; + while (left>0) { + int count = stbi__get8(s), i; + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (mixed read count)"); + + if (count >= 128) { // Repeated + stbi_uc value[4]; + + if (count==128) + count = stbi__get16be(s); + else + count -= 127; + if (count > left) + return stbi__errpuc("bad file","scanline overrun"); + + if (!stbi__readval(s,packet->channel,value)) + return 0; + + for(i=0;i<count;++i, dest += 4) + stbi__copyval(packet->channel,dest,value); + } else { // Raw + ++count; + if (count>left) return stbi__errpuc("bad file","scanline overrun"); + + for(i=0;i<count;++i, dest+=4) + if (!stbi__readval(s,packet->channel,dest)) + return 0; + } + left-=count; + } + break; + } + } + } + } + + return result; +} + +static void *stbi__pic_load(stbi__context *s,int *px,int *py,int *comp,int req_comp, stbi__result_info *ri) +{ + stbi_uc *result; + int i, x,y; + STBI_NOTUSED(ri); + + for (i=0; i<92; ++i) + stbi__get8(s); + + x = stbi__get16be(s); + y = stbi__get16be(s); + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pic header)"); + if (!stbi__mad3sizes_valid(x, y, 4, 0)) return stbi__errpuc("too large", "PIC image too large to decode"); + + stbi__get32be(s); //skip `ratio' + stbi__get16be(s); //skip `fields' + stbi__get16be(s); //skip `pad' + + // intermediate buffer is RGBA + result = (stbi_uc *) stbi__malloc_mad3(x, y, 4, 0); + memset(result, 0xff, x*y*4); + + if (!stbi__pic_load_core(s,x,y,comp, result)) { + STBI_FREE(result); + result=0; + } + *px = x; + *py = y; + if (req_comp == 0) req_comp = *comp; + result=stbi__convert_format(result,4,req_comp,x,y); + + return result; +} + +static int stbi__pic_test(stbi__context *s) +{ + int r = stbi__pic_test_core(s); + stbi__rewind(s); + return r; +} +#endif + +// ************************************************************************************************* +// GIF loader -- public domain by Jean-Marc Lienher -- simplified/shrunk by stb + +#ifndef STBI_NO_GIF +typedef struct +{ + stbi__int16 prefix; + stbi_uc first; + stbi_uc suffix; +} stbi__gif_lzw; + +typedef struct +{ + int w,h; + stbi_uc *out, *old_out; // output buffer (always 4 components) + int flags, bgindex, ratio, transparent, eflags, delay; + stbi_uc pal[256][4]; + stbi_uc lpal[256][4]; + stbi__gif_lzw codes[4096]; + stbi_uc *color_table; + int parse, step; + int lflags; + int start_x, start_y; + int max_x, max_y; + int cur_x,
cur_y; + int line_size; +} stbi__gif; + +static int stbi__gif_test_raw(stbi__context *s) +{ + int sz; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') return 0; + sz = stbi__get8(s); + if (sz != '9' && sz != '7') return 0; + if (stbi__get8(s) != 'a') return 0; + return 1; +} + +static int stbi__gif_test(stbi__context *s) +{ + int r = stbi__gif_test_raw(s); + stbi__rewind(s); + return r; +} + +static void stbi__gif_parse_colortable(stbi__context *s, stbi_uc pal[256][4], int num_entries, int transp) +{ + int i; + for (i=0; i < num_entries; ++i) { + pal[i][2] = stbi__get8(s); + pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + pal[i][3] = transp == i ? 0 : 255; + } +} + +static int stbi__gif_header(stbi__context *s, stbi__gif *g, int *comp, int is_info) +{ + stbi_uc version; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') + return stbi__err("not GIF", "Corrupt GIF"); + + version = stbi__get8(s); + if (version != '7' && version != '9') return stbi__err("not GIF", "Corrupt GIF"); + if (stbi__get8(s) != 'a') return stbi__err("not GIF", "Corrupt GIF"); + + stbi__g_failure_reason = ""; + g->w = stbi__get16le(s); + g->h = stbi__get16le(s); + g->flags = stbi__get8(s); + g->bgindex = stbi__get8(s); + g->ratio = stbi__get8(s); + g->transparent = -1; + + if (comp != 0) *comp = 4; // can't actually tell whether it's 3 or 4 until we parse the comments + + if (is_info) return 1; + + if (g->flags & 0x80) + stbi__gif_parse_colortable(s,g->pal, 2 << (g->flags & 7), -1); + + return 1; +} + +static int stbi__gif_info_raw(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__gif* g = (stbi__gif*) stbi__malloc(sizeof(stbi__gif)); + if (!stbi__gif_header(s, g, comp, 1)) { + STBI_FREE(g); + stbi__rewind( s ); + return 0; + } + if (x) *x = g->w; + if (y) *y = g->h; + STBI_FREE(g); + return 1; +} + +static void stbi__out_gif_code(stbi__gif *g, stbi__uint16 code) +{ + stbi_uc *p, *c; + + // recurse to decode the prefixes, since the linked-list is backwards, + // and working backwards through an interleaved image would be nasty + if (g->codes[code].prefix >= 0) + stbi__out_gif_code(g, g->codes[code].prefix); + + if (g->cur_y >= g->max_y) return; + + p = &g->out[g->cur_x + g->cur_y]; + c = &g->color_table[g->codes[code].suffix * 4]; + + if (c[3] >= 128) { + p[0] = c[2]; + p[1] = c[1]; + p[2] = c[0]; + p[3] = c[3]; + } + g->cur_x += 4; + + if (g->cur_x >= g->max_x) { + g->cur_x = g->start_x; + g->cur_y += g->step; + + while (g->cur_y >= g->max_y && g->parse > 0) { + g->step = (1 << g->parse) * g->line_size; + g->cur_y = g->start_y + (g->step >> 1); + --g->parse; + } + } +} + +static stbi_uc *stbi__process_gif_raster(stbi__context *s, stbi__gif *g) +{ + stbi_uc lzw_cs; + stbi__int32 len, init_code; + stbi__uint32 first; + stbi__int32 codesize, codemask, avail, oldcode, bits, valid_bits, clear; + stbi__gif_lzw *p; + + lzw_cs = stbi__get8(s); + if (lzw_cs > 12) return NULL; + clear = 1 << lzw_cs; + first = 1; + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + bits = 0; + valid_bits = 0; + for (init_code = 0; init_code < clear; init_code++) { + g->codes[init_code].prefix = -1; + g->codes[init_code].first = (stbi_uc) init_code; + g->codes[init_code].suffix = (stbi_uc) init_code; + } + + // support no starting clear code + avail = clear+2; + oldcode = -1; + + len = 0; + for(;;) { + if (valid_bits < codesize) { + if (len == 0) { + len = stbi__get8(s); // start new block + if (len == 0) + return g->out; + } 
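+ // Bit-refill illustration: LZW codes are packed LSB-first, so each new byte is OR'd in above the + // bits already held; with codesize == 3, a data byte of 0x4C (01001100b) yields code 4 (100b), + // then code 1 (001b), leaving two bits for the next code.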
+ --len; + bits |= (stbi__int32) stbi__get8(s) << valid_bits; + valid_bits += 8; + } else { + stbi__int32 code = bits & codemask; + bits >>= codesize; + valid_bits -= codesize; + // @OPTIMIZE: is there some way we can accelerate the non-clear path? + if (code == clear) { // clear code + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + avail = clear + 2; + oldcode = -1; + first = 0; + } else if (code == clear + 1) { // end of stream code + stbi__skip(s, len); + while ((len = stbi__get8(s)) > 0) + stbi__skip(s,len); + return g->out; + } else if (code <= avail) { + if (first) return stbi__errpuc("no clear code", "Corrupt GIF"); + + if (oldcode >= 0) { + p = &g->codes[avail++]; + if (avail > 4096) return stbi__errpuc("too many codes", "Corrupt GIF"); + p->prefix = (stbi__int16) oldcode; + p->first = g->codes[oldcode].first; + p->suffix = (code == avail) ? p->first : g->codes[code].first; + } else if (code == avail) + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + + stbi__out_gif_code(g, (stbi__uint16) code); + + if ((avail & codemask) == 0 && avail <= 0x0FFF) { + codesize++; + codemask = (1 << codesize) - 1; + } + + oldcode = code; + } else { + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + } + } + } +} + +static void stbi__fill_gif_background(stbi__gif *g, int x0, int y0, int x1, int y1) +{ + int x, y; + stbi_uc *c = g->pal[g->bgindex]; + for (y = y0; y < y1; y += 4 * g->w) { + for (x = x0; x < x1; x += 4) { + stbi_uc *p = &g->out[y + x]; + p[0] = c[2]; + p[1] = c[1]; + p[2] = c[0]; + p[3] = 0; + } + } +} + +// this function is designed to support animated gifs, although stb_image doesn't support it +static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, int req_comp) +{ + int i; + stbi_uc *prev_out = 0; + + if (g->out == 0 && !stbi__gif_header(s, g, comp,0)) + return 0; // stbi__g_failure_reason set by stbi__gif_header + + if (!stbi__mad3sizes_valid(g->w, g->h, 4, 0)) + return stbi__errpuc("too large", "GIF too large"); + + prev_out = g->out; + g->out = (stbi_uc *) stbi__malloc_mad3(4, g->w, g->h, 0); + if (g->out == 0) return stbi__errpuc("outofmem", "Out of memory"); + + switch ((g->eflags & 0x1C) >> 2) { + case 0: // unspecified (also always used on 1st frame) + stbi__fill_gif_background(g, 0, 0, 4 * g->w, 4 * g->w * g->h); + break; + case 1: // do not dispose + if (prev_out) memcpy(g->out, prev_out, 4 * g->w * g->h); + g->old_out = prev_out; + break; + case 2: // dispose to background + if (prev_out) memcpy(g->out, prev_out, 4 * g->w * g->h); + stbi__fill_gif_background(g, g->start_x, g->start_y, g->max_x, g->max_y); + break; + case 3: // dispose to previous + if (g->old_out) { + for (i = g->start_y; i < g->max_y; i += 4 * g->w) + memcpy(&g->out[i + g->start_x], &g->old_out[i + g->start_x], g->max_x - g->start_x); + } + break; + } + + for (;;) { + switch (stbi__get8(s)) { + case 0x2C: /* Image Descriptor */ + { + int prev_trans = -1; + stbi__int32 x, y, w, h; + stbi_uc *o; + + x = stbi__get16le(s); + y = stbi__get16le(s); + w = stbi__get16le(s); + h = stbi__get16le(s); + if (((x + w) > (g->w)) || ((y + h) > (g->h))) + return stbi__errpuc("bad Image Descriptor", "Corrupt GIF"); + + g->line_size = g->w * 4; + g->start_x = x * 4; + g->start_y = y * g->line_size; + g->max_x = g->start_x + w * 4; + g->max_y = g->start_y + h * g->line_size; + g->cur_x = g->start_x; + g->cur_y = g->start_y; + + g->lflags = stbi__get8(s); + + if (g->lflags & 0x40) { + g->step = 8 * g->line_size; // first interlaced spacing + g->parse = 3; + } 
else { + g->step = g->line_size; + g->parse = 0; + } + + if (g->lflags & 0x80) { + stbi__gif_parse_colortable(s,g->lpal, 2 << (g->lflags & 7), g->eflags & 0x01 ? g->transparent : -1); + g->color_table = (stbi_uc *) g->lpal; + } else if (g->flags & 0x80) { + if (g->transparent >= 0 && (g->eflags & 0x01)) { + prev_trans = g->pal[g->transparent][3]; + g->pal[g->transparent][3] = 0; + } + g->color_table = (stbi_uc *) g->pal; + } else + return stbi__errpuc("missing color table", "Corrupt GIF"); + + o = stbi__process_gif_raster(s, g); + if (o == NULL) return NULL; + + if (prev_trans != -1) + g->pal[g->transparent][3] = (stbi_uc) prev_trans; + + return o; + } + + case 0x21: // Comment Extension. + { + int len; + if (stbi__get8(s) == 0xF9) { // Graphic Control Extension. + len = stbi__get8(s); + if (len == 4) { + g->eflags = stbi__get8(s); + g->delay = stbi__get16le(s); + g->transparent = stbi__get8(s); + } else { + stbi__skip(s, len); + break; + } + } + while ((len = stbi__get8(s)) != 0) + stbi__skip(s, len); + break; + } + + case 0x3B: // gif stream termination code + return (stbi_uc *) s; // using '1' causes warning on some compilers + + default: + return stbi__errpuc("unknown code", "Corrupt GIF"); + } + } + + STBI_NOTUSED(req_comp); +} + +static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *u = 0; + stbi__gif* g = (stbi__gif*) stbi__malloc(sizeof(stbi__gif)); + memset(g, 0, sizeof(*g)); + STBI_NOTUSED(ri); + + u = stbi__gif_load_next(s, g, comp, req_comp); + if (u == (stbi_uc *) s) u = 0; // end of animated gif marker + if (u) { + *x = g->w; + *y = g->h; + if (req_comp && req_comp != 4) + u = stbi__convert_format(u, 4, req_comp, g->w, g->h); + } + else if (g->out) + STBI_FREE(g->out); + STBI_FREE(g); + return u; +} + +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp) +{ + return stbi__gif_info_raw(s,x,y,comp); +} +#endif + +// ************************************************************************************************* +// Radiance RGBE HDR loader +// originally by Nicolas Schulz +#ifndef STBI_NO_HDR +static int stbi__hdr_test_core(stbi__context *s, const char *signature) +{ + int i; + for (i=0; signature[i]; ++i) + if (stbi__get8(s) != signature[i]) + return 0; + stbi__rewind(s); + return 1; +} + +static int stbi__hdr_test(stbi__context* s) +{ + int r = stbi__hdr_test_core(s, "#?RADIANCE\n"); + stbi__rewind(s); + if(!r) { + r = stbi__hdr_test_core(s, "#?RGBE\n"); + stbi__rewind(s); + } + return r; +} + +#define STBI__HDR_BUFLEN 1024 +static char *stbi__hdr_gettoken(stbi__context *z, char *buffer) +{ + int len=0; + char c = '\0'; + + c = (char) stbi__get8(z); + + while (!stbi__at_eof(z) && c != '\n') { + buffer[len++] = c; + if (len == STBI__HDR_BUFLEN-1) { + // flush to end of line + while (!stbi__at_eof(z) && stbi__get8(z) != '\n') + ; + break; + } + c = (char) stbi__get8(z); + } + + buffer[len] = 0; + return buffer; +} + +static void stbi__hdr_convert(float *output, stbi_uc *input, int req_comp) +{ + if ( input[3] != 0 ) { + float f1; + // Exponent + f1 = (float) ldexp(1.0f, input[3] - (int)(128 + 8)); + if (req_comp <= 2) + output[0] = (input[0] + input[1] + input[2]) * f1 / 3; + else { + output[0] = input[0] * f1; + output[1] = input[1] * f1; + output[2] = input[2] * f1; + } + if (req_comp == 2) output[1] = 1; + if (req_comp == 4) output[3] = 1; + } else { + switch (req_comp) { + case 4: output[3] = 1; /* fallthrough */ + case 3: output[0] = output[1] = output[2] = 0; + break; + case 2: 
output[1] = 1; /* fallthrough */ + case 1: output[0] = 0; + break; + } + } +} + +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int width, height; + stbi_uc *scanline; + float *hdr_data; + int len; + unsigned char count, value; + int i, j, k, c1,c2, z; + const char *headerToken; + STBI_NOTUSED(ri); + + // Check identifier + headerToken = stbi__hdr_gettoken(s,buffer); + if (strcmp(headerToken, "#?RADIANCE") != 0 && strcmp(headerToken, "#?RGBE") != 0) + return stbi__errpf("not HDR", "Corrupt HDR image"); + + // Parse header + for(;;) { + token = stbi__hdr_gettoken(s,buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) return stbi__errpf("unsupported format", "Unsupported HDR format"); + + // Parse width and height + // can't use sscanf() if we're not using stdio! + token = stbi__hdr_gettoken(s,buffer); + if (strncmp(token, "-Y ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + height = (int) strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + width = (int) strtol(token, NULL, 10); + + *x = width; + *y = height; + + if (comp) *comp = 3; + if (req_comp == 0) req_comp = 3; + + if (!stbi__mad4sizes_valid(width, height, req_comp, sizeof(float), 0)) + return stbi__errpf("too large", "HDR image is too large"); + + // Read data + hdr_data = (float *) stbi__malloc_mad4(width, height, req_comp, sizeof(float), 0); + if (!hdr_data) + return stbi__errpf("outofmem", "Out of memory"); + + // Load image data + // image data is stored as some number of scan lines + if ( width < 8 || width >= 32768) { + // Read flat data + for (j=0; j < height; ++j) { + for (i=0; i < width; ++i) { + stbi_uc rgbe[4]; + main_decode_loop: + stbi__getn(s, rgbe, 4); + stbi__hdr_convert(hdr_data + j * width * req_comp + i * req_comp, rgbe, req_comp); + } + } + } else { + // Read RLE-encoded data + scanline = NULL; + + for (j = 0; j < height; ++j) { + c1 = stbi__get8(s); + c2 = stbi__get8(s); + len = stbi__get8(s); + if (c1 != 2 || c2 != 2 || (len & 0x80)) { + // not run-length encoded, so we have to actually use THIS data as a decoded + // pixel (note this can't be a valid pixel--one of RGB must be >= 128) + stbi_uc rgbe[4]; + rgbe[0] = (stbi_uc) c1; + rgbe[1] = (stbi_uc) c2; + rgbe[2] = (stbi_uc) len; + rgbe[3] = (stbi_uc) stbi__get8(s); + stbi__hdr_convert(hdr_data, rgbe, req_comp); + i = 1; + j = 0; + STBI_FREE(scanline); + goto main_decode_loop; // yes, this makes no sense + } + len <<= 8; + len |= stbi__get8(s); + if (len != width) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("invalid decoded scanline length", "corrupt HDR"); } + if (scanline == NULL) { + scanline = (stbi_uc *) stbi__malloc_mad2(width, 4, 0); + if (!scanline) { + STBI_FREE(hdr_data); + return stbi__errpf("outofmem", "Out of memory"); + } + } + + for (k = 0; k < 4; ++k) { + int nleft; + i = 0; + while ((nleft = width - i) > 0) { + count = stbi__get8(s); + if (count > 128) { + // Run + value = stbi__get8(s); + count -= 128; + if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = value; + } else { + // Dump + if (count > nleft) { STBI_FREE(hdr_data);
STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = stbi__get8(s); + } + } + } + for (i=0; i < width; ++i) + stbi__hdr_convert(hdr_data+(j*width + i)*req_comp, scanline + i*4, req_comp); + } + if (scanline) + STBI_FREE(scanline); + } + + return hdr_data; +} + +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + + if (stbi__hdr_test(s) == 0) { + stbi__rewind( s ); + return 0; + } + + for(;;) { + token = stbi__hdr_gettoken(s,buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) { + stbi__rewind( s ); + return 0; + } + token = stbi__hdr_gettoken(s,buffer); + if (strncmp(token, "-Y ", 3)) { + stbi__rewind( s ); + return 0; + } + token += 3; + *y = (int) strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) { + stbi__rewind( s ); + return 0; + } + token += 3; + *x = (int) strtol(token, NULL, 10); + *comp = 3; + return 1; +} +#endif // STBI_NO_HDR + +#ifndef STBI_NO_BMP +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp) +{ + void *p; + stbi__bmp_data info; + + info.all_a = 255; + p = stbi__bmp_parse_header(s, &info); + stbi__rewind( s ); + if (p == NULL) + return 0; + *x = s->img_x; + *y = s->img_y; + *comp = info.ma ? 4 : 3; + return 1; +} +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp) +{ + int channelCount; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind( s ); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind( s ); + return 0; + } + *y = stbi__get32be(s); + *x = stbi__get32be(s); + if (stbi__get16be(s) != 8) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 3) { + stbi__rewind( s ); + return 0; + } + *comp = 4; + return 1; +} +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp) +{ + int act_comp=0,num_packets=0,chained; + stbi__pic_packet packets[10]; + + if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) { + stbi__rewind(s); + return 0; + } + + stbi__skip(s, 88); + + *x = stbi__get16be(s); + *y = stbi__get16be(s); + if (stbi__at_eof(s)) { + stbi__rewind( s); + return 0; + } + if ( (*x) != 0 && (1 << 28) / (*x) < (*y)) { + stbi__rewind( s ); + return 0; + } + + stbi__skip(s, 8); + + do { + stbi__pic_packet *packet; + + if (num_packets==sizeof(packets)/sizeof(packets[0])) + return 0; + + packet = &packets[num_packets++]; + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + act_comp |= packet->channel; + + if (stbi__at_eof(s)) { + stbi__rewind( s ); + return 0; + } + if (packet->size != 8) { + stbi__rewind( s ); + return 0; + } + } while (chained); + + *comp = (act_comp & 0x10 ? 
4 : 3); + + return 1; +} +#endif + +// ************************************************************************************************* +// Portable Gray Map and Portable Pixel Map loader +// by Ken Miller +// +// PGM: http://netpbm.sourceforge.net/doc/pgm.html +// PPM: http://netpbm.sourceforge.net/doc/ppm.html +// +// Known limitations: +// Does not support comments in the header section +// Does not support ASCII image data (formats P2 and P3) +// Does not support 16-bit-per-channel + +#ifndef STBI_NO_PNM + +static int stbi__pnm_test(stbi__context *s) +{ + char p, t; + p = (char) stbi__get8(s); + t = (char) stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind( s ); + return 0; + } + return 1; +} + +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *out; + STBI_NOTUSED(ri); + + if (!stbi__pnm_info(s, (int *)&s->img_x, (int *)&s->img_y, (int *)&s->img_n)) + return 0; + + *x = s->img_x; + *y = s->img_y; + *comp = s->img_n; + + if (!stbi__mad3sizes_valid(s->img_n, s->img_x, s->img_y, 0)) + return stbi__errpuc("too large", "PNM too large"); + + out = (stbi_uc *) stbi__malloc_mad3(s->img_n, s->img_x, s->img_y, 0); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + stbi__getn(s, out, s->img_n * s->img_x * s->img_y); + + if (req_comp && req_comp != s->img_n) { + out = stbi__convert_format(out, s->img_n, req_comp, s->img_x, s->img_y); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + return out; +} + +static int stbi__pnm_isspace(char c) +{ + return c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' || c == '\r'; +} + +static void stbi__pnm_skip_whitespace(stbi__context *s, char *c) +{ + for (;;) { + while (!stbi__at_eof(s) && stbi__pnm_isspace(*c)) + *c = (char) stbi__get8(s); + + if (stbi__at_eof(s) || *c != '#') + break; + + while (!stbi__at_eof(s) && *c != '\n' && *c != '\r' ) + *c = (char) stbi__get8(s); + } +} + +static int stbi__pnm_isdigit(char c) +{ + return c >= '0' && c <= '9'; +} + +static int stbi__pnm_getinteger(stbi__context *s, char *c) +{ + int value = 0; + + while (!stbi__at_eof(s) && stbi__pnm_isdigit(*c)) { + value = value*10 + (*c - '0'); + *c = (char) stbi__get8(s); + } + + return value; +} + +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp) +{ + int maxv; + char c, p, t; + + stbi__rewind( s ); + + // Get identifier + p = (char) stbi__get8(s); + t = (char) stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind( s ); + return 0; + } + + *comp = (t == '6') ? 
3 : 1; // '5' is 1-component .pgm; '6' is 3-component .ppm + + c = (char) stbi__get8(s); + stbi__pnm_skip_whitespace(s, &c); + + *x = stbi__pnm_getinteger(s, &c); // read width + stbi__pnm_skip_whitespace(s, &c); + + *y = stbi__pnm_getinteger(s, &c); // read height + stbi__pnm_skip_whitespace(s, &c); + + maxv = stbi__pnm_getinteger(s, &c); // read max value + + if (maxv > 255) + return stbi__err("max value > 255", "PPM image not 8-bit"); + else + return 1; +} +#endif + +static int stbi__info_main(stbi__context *s, int *x, int *y, int *comp) +{ + #ifndef STBI_NO_JPEG + if (stbi__jpeg_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PNG + if (stbi__png_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_GIF + if (stbi__gif_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_BMP + if (stbi__bmp_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PSD + if (stbi__psd_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PIC + if (stbi__pic_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PNM + if (stbi__pnm_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_HDR + if (stbi__hdr_info(s, x, y, comp)) return 1; + #endif + + // test tga last because it's a crappy test! + #ifndef STBI_NO_TGA + if (stbi__tga_info(s, x, y, comp)) + return 1; + #endif + return stbi__err("unknown image type", "Image not of any known type, or corrupt"); +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info(char const *filename, int *x, int *y, int *comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_info_from_file(f, x, y, comp); + fclose(f); + return result; +} + +STBIDEF int stbi_info_from_file(FILE *f, int *x, int *y, int *comp) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__info_main(&s,x,y,comp); + fseek(f,pos,SEEK_SET); + return r; +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__info_main(&s,x,y,comp); +} + +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *c, void *user, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); + return stbi__info_main(&s,x,y,comp); +} + +#endif // STB_IMAGE_IMPLEMENTATION + +/* + revision history: + 2.13 (2016-11-29) add 16-bit API, only supported for PNG right now + 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes + 2.11 (2016-04-02) allocate large structures on the stack + remove white matting for transparent PSD + fix reported channel count for PNG & BMP + re-enable SSE2 in non-gcc 64-bit + support RGB-formatted JPEG + read 16-bit PNGs (only as 8-bit) + 2.10 (2016-01-22) avoid warning introduced in 2.09 by STBI_REALLOC_SIZED + 2.09 (2016-01-16) allow comments in PNM files + 16-bit-per-pixel TGA (not bit-per-component) + info() for TGA could break due to .hdr handling + info() for BMP now shares code instead of sloppy parse + can use STBI_REALLOC_SIZED if allocator doesn't support realloc + code cleanup + 2.08 (2015-09-13) fix to 2.07 cleanup, reading RGB PSD as RGBA + 2.07 (2015-09-13) fix compiler warnings + partial animated GIF support + limited 16-bpc PSD support + #ifdef unused functions + bug with < 92 byte PIC,PNM,HDR,TGA + 2.06 (2015-04-19) fix bug where PSD returns wrong '*comp' value + 2.05 (2015-04-19) fix bug in progressive JPEG handling, fix warning +
2.04 (2015-04-15) try to re-enable SIMD on MinGW 64-bit + 2.03 (2015-04-12) extra corruption checking (mmozeiko) + stbi_set_flip_vertically_on_load (nguillemot) + fix NEON support; fix mingw support + 2.02 (2015-01-19) fix incorrect assert, fix warning + 2.01 (2015-01-17) fix various warnings; suppress SIMD on gcc 32-bit without -msse2 + 2.00b (2014-12-25) fix STBI_MALLOC in progressive JPEG + 2.00 (2014-12-25) optimize JPG, including x86 SSE2 & NEON SIMD (ryg) + progressive JPEG (stb) + PGM/PPM support (Ken Miller) + STBI_MALLOC,STBI_REALLOC,STBI_FREE + GIF bugfix -- seemingly never worked + STBI_NO_*, STBI_ONLY_* + 1.48 (2014-12-14) fix incorrectly-named assert() + 1.47 (2014-12-14) 1/2/4-bit PNG support, both direct and paletted (Omar Cornut & stb) + optimize PNG (ryg) + fix bug in interlaced PNG with user-specified channel count (stb) + 1.46 (2014-08-26) + fix broken tRNS chunk (colorkey-style transparency) in non-paletted PNG + 1.45 (2014-08-16) + fix MSVC-ARM internal compiler error by wrapping malloc + 1.44 (2014-08-07) + various warning fixes from Ronny Chevalier + 1.43 (2014-07-15) + fix MSVC-only compiler problem in code changed in 1.42 + 1.42 (2014-07-09) + don't define _CRT_SECURE_NO_WARNINGS (affects user code) + fixes to stbi__cleanup_jpeg path + added STBI_ASSERT to avoid requiring assert.h + 1.41 (2014-06-25) + fix search&replace from 1.36 that messed up comments/error messages + 1.40 (2014-06-22) + fix gcc struct-initialization warning + 1.39 (2014-06-15) + fix to TGA optimization when req_comp != number of components in TGA; + fix to GIF loading because BMP wasn't rewinding (whoops, no GIFs in my test suite) + add support for BMP version 5 (more ignored fields) + 1.38 (2014-06-06) + suppress MSVC warnings on integer casts truncating values + fix accidental rename of 'skip' field of I/O + 1.37 (2014-06-04) + remove duplicate typedef + 1.36 (2014-06-03) + convert to header file single-file library + if de-iphone isn't set, load iphone images color-swapped instead of returning NULL + 1.35 (2014-05-27) + various warnings + fix broken STBI_SIMD path + fix bug where stbi_load_from_file no longer left file pointer in correct place + fix broken non-easy path for 32-bit BMP (possibly never used) + TGA optimization by Arseny Kapoulkine + 1.34 (unknown) + use STBI_NOTUSED in stbi__resample_row_generic(), fix one more leak in tga failure case + 1.33 (2011-07-14) + make stbi_is_hdr work in STBI_NO_HDR (as specified), minor compiler-friendly improvements + 1.32 (2011-07-13) + support for "info" function for all supported filetypes (SpartanJ) + 1.31 (2011-06-20) + a few more leak fixes, bug in PNG handling (SpartanJ) + 1.30 (2011-06-11) + added ability to load files via callbacks to accommodate custom input streams (Ben Wenger) + removed deprecated format-specific test/load functions + removed support for installable file formats (stbi_loader) -- would have been broken for IO callbacks anyway + error cases in bmp and tga give messages and don't leak (Raymond Barbiero, grisha) + fix inefficiency in decoding 32-bit BMP (David Woo) + 1.29 (2010-08-16) + various warning fixes from Aurelien Pocheville + 1.28 (2010-08-01) + fix bug in GIF palette transparency (SpartanJ) + 1.27 (2010-08-01) + cast-to-stbi_uc to fix warnings + 1.26 (2010-07-24) + fix bug in file buffering for PNG reported by SpartanJ + 1.25 (2010-07-17) + refix trans_data warning (Won Chun) + 1.24 (2010-07-12) + perf improvements reading from files on platforms with lock-heavy fgetc() + minor perf improvements for jpeg +
deprecated type-specific functions so we'll get feedback if they're needed + attempt to fix trans_data warning (Won Chun) + 1.23 fixed bug in iPhone support + 1.22 (2010-07-10) + removed image *writing* support + stbi_info support from Jetro Lauha + GIF support from Jean-Marc Lienher + iPhone PNG-extensions from James Brown + warning-fixes from Nicolas Schulz and Janez Zemva (i.e. Janez Žemva) + 1.21 fix use of 'stbi_uc' in header (reported by jon blow) + 1.20 added support for Softimage PIC, by Tom Seddon + 1.19 bug in interlaced PNG corruption check (found by ryg) + 1.18 (2008-08-02) + fix a threading bug (local mutable static) + 1.17 support interlaced PNG + 1.16 major bugfix - stbi__convert_format converted one too many pixels + 1.15 initialize some fields for thread safety + 1.14 fix threadsafe conversion bug + header-file-only version (#define STBI_HEADER_FILE_ONLY before including) + 1.13 threadsafe + 1.12 const qualifiers in the API + 1.11 Support installable IDCT, colorspace conversion routines + 1.10 Fixes for 64-bit (don't use "unsigned long") + optimized upsampling by Fabian "ryg" Giesen + 1.09 Fix format-conversion for PSD code (bad global variables!) + 1.08 Thatcher Ulrich's PSD code integrated by Nicolas Schulz + 1.07 attempt to fix C++ warning/errors again + 1.06 attempt to fix C++ warning/errors again + 1.05 fix TGA loading to return correct *comp and use good luminance calc + 1.04 default float alpha is 1, not 255; use 'void *' for stbi_image_free + 1.03 bugfixes to STBI_NO_STDIO, STBI_NO_HDR + 1.02 support for (subset of) HDR files, float interface for preferred access to them + 1.01 fix bug: possible bug in handling right-side up bmps... not sure + fix bug: the stbi__bmp_load() and stbi__tga_load() functions didn't work at all + 1.00 interface to zlib that skips zlib header + 0.99 correct handling of alpha in palette + 0.98 TGA loader by lonesock; dynamically add loaders (untested) + 0.97 jpeg errors on too large a file; also catch another malloc failure + 0.96 fix detection of invalid v value - particleman@mollyrocket forum + 0.95 during header scan, seek to markers in case of padding + 0.94 STBI_NO_STDIO to disable stdio usage; rename all #defines the same + 0.93 handle jpegtran output; verbose errors + 0.92 read 4,8,16,24,32-bit BMP files of several formats + 0.91 output 24-bit Windows 3.0 BMP files + 0.90 fix a few more warnings; bump version number to approach 1.0 + 0.61 bugfixes due to Marc LeBlanc, Christopher Lloyd + 0.60 fix compiling as c++ + 0.59 fix warnings: merge Dave Moore's -Wall fixes + 0.58 fix bug: zlib uncompressed mode len/nlen was wrong endian + 0.57 fix bug: jpg last huffman symbol before marker was >9 bits but less than 16 available + 0.56 fix bug: zlib uncompressed mode len vs. nlen + 0.55 fix bug: restart_interval not initialized to 0 + 0.54 allow NULL for 'int *comp' + 0.53 fix bug in png 3->4; speedup png decoding + 0.52 png handles req_comp=3,4 directly; minor cleanup; jpeg comments + 0.51 obey req_comp requests, 1-component jpegs return as 1-component, + on 'test' only check type, not whether we support this variant + 0.50 (2006-11-19) + first released version +*/ + + +/* +------------------------------------------------------------------------------ +This software is available under 2 licenses -- choose whichever you prefer.
+------------------------------------------------------------------------------ +ALTERNATIVE A - MIT License +Copyright (c) 2017 Sean Barrett +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+------------------------------------------------------------------------------ +*/ diff --git a/code/renderer_vulkan/tr_Cull.c b/code/renderer_vulkan/tr_Cull.c new file mode 100644 index 0000000000..752e573b87 --- /dev/null +++ b/code/renderer_vulkan/tr_Cull.c @@ -0,0 +1,121 @@ +#include "tr_local.h" +#include "tr_cvar.h" +#include "tr_globals.h" +/* +================= + +Returns CULL_IN, CULL_CLIP, or CULL_OUT +================= +*/ +int R_CullLocalBox (vec3_t bounds[2]) +{ + int i, j; + vec3_t transformed[8]; + float dists[8]; + vec3_t v; + cplane_t *frust; + int anyBack; + int front, back; + + if ( r_nocull->integer ) { + return CULL_CLIP; + } + + // transform into world space + for (i = 0 ; i < 8 ; i++) { + v[0] = bounds[i&1][0]; + v[1] = bounds[(i>>1)&1][1]; + v[2] = bounds[(i>>2)&1][2]; + + VectorCopy( tr.or.origin, transformed[i] ); + VectorMA( transformed[i], v[0], tr.or.axis[0], transformed[i] ); + VectorMA( transformed[i], v[1], tr.or.axis[1], transformed[i] ); + VectorMA( transformed[i], v[2], tr.or.axis[2], transformed[i] ); + } + + // check against frustum planes + anyBack = 0; + for (i = 0 ; i < 4 ; i++) { + frust = &tr.viewParms.frustum[i]; + + front = back = 0; + for (j = 0 ; j < 8 ; j++) { + dists[j] = DotProduct(transformed[j], frust->normal); + if ( dists[j] > frust->dist ) { + front = 1; + if ( back ) { + break; // a point is in front + } + } else { + back = 1; + } + } + if ( !front ) { + // all points were behind one of the planes + return CULL_OUT; + } + anyBack |= back; + } + + if ( !anyBack ) { + return CULL_IN; // completely inside frustum + } + + return CULL_CLIP; // partially clipped +} + + +static void R_LocalPointToWorld (vec3_t local, const orientationr_t * const pRT, vec3_t world) +{ + world[0] = local[0] * pRT->axis[0][0] + local[1] * pRT->axis[1][0] + local[2] * pRT->axis[2][0] + pRT->origin[0]; + world[1] = local[0] * pRT->axis[0][1] + local[1] * pRT->axis[1][1] + local[2] * pRT->axis[2][1] + pRT->origin[1]; + world[2] = local[0] * pRT->axis[0][2] + local[1] * pRT->axis[1][2] + local[2] * pRT->axis[2][2] + pRT->origin[2]; +} + + +int R_CullLocalPointAndRadius( vec3_t pt, float radius ) +{ + vec3_t transformed; + + R_LocalPointToWorld( pt, &tr.or, transformed ); + + return R_CullPointAndRadius( transformed, radius ); +} + +/* +** R_CullPointAndRadius +*/ +int R_CullPointAndRadius( vec3_t pt, float radius ) +{ + int i; + float dist; + cplane_t *frust; + qboolean mightBeClipped = qfalse; + + if ( r_nocull->integer ) { + return CULL_CLIP; + } + + // check against frustum planes + for (i = 0 ; i < 4 ; i++) + { + frust = &tr.viewParms.frustum[i]; + + dist = DotProduct( pt, frust->normal) - frust->dist; + if ( dist < -radius ) + { + return CULL_OUT; + } + else if ( dist <= radius ) + { + mightBeClipped = qtrue; + } + } + + if ( mightBeClipped ) + { + return CULL_CLIP; + } + + return CULL_IN; // completely inside frustum +} diff --git a/code/renderer_vulkan/tr_animation.c b/code/renderer_vulkan/tr_animation.c new file mode 100644 index 0000000000..5edb765c00 --- /dev/null +++ b/code/renderer_vulkan/tr_animation.c @@ -0,0 +1,286 @@ +/* +=========================================================================== +Copyright (C) 1999-2005 Id Software, Inc. + +This file is part of Quake III Arena source code. + +Quake III Arena source code is free software; you can redistribute it +and/or modify it under the terms of the GNU General Public License as +published by the Free Software Foundation; either version 2 of the License, +or (at your option) any later version. 
+ +Quake III Arena source code is distributed in the hope that it will be +useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with Quake III Arena source code; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +=========================================================================== +*/ + +#include "tr_local.h" +#include "tr_globals.h" +#include "tr_cvar.h" +#include "vk_shade_geometry.h" +#include "ref_import.h" +#include "tr_light.h" +#include "tr_shader.h" +/* + +All bones should be an identity orientation to display the mesh exactly +as it is specified. + +For all other frames, the bones represent the transformation from the +orientation of the bone in the base frame to the orientation in this +frame. + +*/ + + +extern int R_ComputeLOD( trRefEntity_t *ent ); + +static int R_MDRCullModel( mdrHeader_t *header, trRefEntity_t *ent ) +{ + vec3_t bounds[2]; + int i; + + int frameSize = (size_t)( &((mdrFrame_t *)0)->bones[ header->numBones ] ); + + // compute frame pointers + mdrFrame_t* newFrame = ( mdrFrame_t * ) ( ( byte * ) header + header->ofsFrames + frameSize * ent->e.frame); + mdrFrame_t* oldFrame = ( mdrFrame_t * ) ( ( byte * ) header + header->ofsFrames + frameSize * ent->e.oldframe); + + // cull bounding sphere ONLY if this is not an upscaled entity + if ( !ent->e.nonNormalizedAxes ) + { + if ( ent->e.frame == ent->e.oldframe ) + { + switch ( R_CullLocalPointAndRadius( newFrame->localOrigin, newFrame->radius ) ) + { + // Ummm... yeah yeah I know we don't really have an md3 here.. but we pretend + // we do. After all, the purpose of mdrs are not that different, are they? + + case CULL_OUT: + tr.pc.c_sphere_cull_md3_out++; + return CULL_OUT; + + case CULL_IN: + tr.pc.c_sphere_cull_md3_in++; + return CULL_IN; + + case CULL_CLIP: + tr.pc.c_sphere_cull_md3_clip++; + break; + } + } + else + { + int sphereCullB; + + int sphereCull = R_CullLocalPointAndRadius( newFrame->localOrigin, newFrame->radius ); + if ( newFrame == oldFrame ) + { + sphereCullB = sphereCull; + } + else + { + sphereCullB = R_CullLocalPointAndRadius( oldFrame->localOrigin, oldFrame->radius ); + } + + if ( sphereCull == sphereCullB ) + { + if ( sphereCull == CULL_OUT ) + { + tr.pc.c_sphere_cull_md3_out++; + return CULL_OUT; + } + else if ( sphereCull == CULL_IN ) + { + tr.pc.c_sphere_cull_md3_in++; + return CULL_IN; + } + else + { + tr.pc.c_sphere_cull_md3_clip++; + } + } + } + } + + // calculate a bounding box in the current coordinate system + for (i = 0 ; i < 3 ; i++) + { + bounds[0][i] = oldFrame->bounds[0][i] < newFrame->bounds[0][i] ? oldFrame->bounds[0][i] : newFrame->bounds[0][i]; + bounds[1][i] = oldFrame->bounds[1][i] > newFrame->bounds[1][i] ? 
oldFrame->bounds[1][i] : newFrame->bounds[1][i]; + } + + switch ( R_CullLocalBox( bounds ) ) + { + case CULL_IN: + tr.pc.c_box_cull_md3_in++; + return CULL_IN; + case CULL_CLIP: + tr.pc.c_box_cull_md3_clip++; + return CULL_CLIP; + case CULL_OUT: + default: + tr.pc.c_box_cull_md3_out++; + return CULL_OUT; + } +} + +static int R_MDRComputeFogNum( mdrHeader_t *header, trRefEntity_t *ent ) +{ + vec3_t localOrigin; + + if ( tr.refdef.rd.rdflags & RDF_NOWORLDMODEL ) { + return 0; + } + + int frameSize = (size_t)( &((mdrFrame_t *)0)->bones[ header->numBones ] ); + + // FIXME: non-normalized axis issues + mdrFrame_t* mdrFrame = ( mdrFrame_t * ) ( ( byte * ) header + header->ofsFrames + frameSize * ent->e.frame); + VectorAdd( ent->e.origin, mdrFrame->localOrigin, localOrigin ); + + int i, j; + for ( i = 1 ; i < tr.world->numfogs ; i++ ) + { + fog_t* fog = &tr.world->fogs[i]; + for ( j = 0 ; j < 3 ; j++ ) + { + if ( localOrigin[j] - mdrFrame->radius >= fog->bounds[1][j] ) + break; + + if ( localOrigin[j] + mdrFrame->radius <= fog->bounds[0][j] ) + break; + + } + + if ( j == 3 ) + return i; + } + + return 0; +} + +// much stuff in there is just copied from R_AddMd3Surfaces in tr_mesh.c + +void R_MDRAddAnimSurfaces( trRefEntity_t *ent ) +{ + shader_t* shader; + int i, j; + + mdrHeader_t* header = (mdrHeader_t *) tr.currentModel->modelData; + qboolean personalModel = (ent->e.renderfx & RF_THIRD_PERSON) && !tr.viewParms.isPortal; + + if( ent->e.renderfx & RF_WRAP_FRAMES ) + { + ent->e.frame %= header->numFrames; + ent->e.oldframe %= header->numFrames; + } + + + // Validate the frames so there is no chance of a crash. + // This will write directly into the entity structure, + // so when the surfaces are rendered, + // they don't need to be range checked again. + + if ((ent->e.frame >= header->numFrames) || (ent->e.frame < 0) || + (ent->e.oldframe >= header->numFrames) || (ent->e.oldframe < 0) ) + { + ri.Printf( PRINT_ALL, "R_MDRAddAnimSurfaces: no such frame %d to %d for '%s'\n", ent->e.oldframe, ent->e.frame, tr.currentModel->name ); + ent->e.frame = 0; + ent->e.oldframe = 0; + } + + // + // cull the entire model if merged bounding box of both frames + // is outside the view frustum. + // + int cull = R_MDRCullModel(header, ent); + if ( cull == CULL_OUT ) { + return; + } + + // figure out the current LOD of the model we're rendering, and set the lod pointer respectively. + int lodnum = 0; + + if ( tr.currentModel->numLods > 1 ) + lodnum = R_ComputeLOD( ent ); + + // check whether this model has as that many LODs at all. If not, try the closest thing we got. + if(header->numLODs <= 0) + return; + if(header->numLODs <= lodnum) + lodnum = header->numLODs - 1; + + mdrLOD_t* lod = (mdrLOD_t *)( (unsigned char *)header + header->ofsLODs); + for(i = 0; i < lodnum; i++) + { + lod = (mdrLOD_t *) ((unsigned char *)lod + lod->ofsEnd); + } + + // set up lighting + if ( !personalModel || r_shadows->integer > 1 ) + { + R_SetupEntityLighting( &tr.refdef, ent ); + } + + // fogNum? 
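+    // R_MDRComputeFogNum() returns the index of the fog volume whose bounds
+    // contain the model's bounding sphere, or 0 if the model is in none;
+    // the result is handed to R_AddDrawSurf() below so the surface gets fogged.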
+ int fogNum = R_MDRComputeFogNum( header, ent ); + + mdrSurface_t* surface = (mdrSurface_t *)( (unsigned char *)lod + lod->ofsSurfaces ); + + for ( i = 0 ; i < lod->numSurfaces ; i++ ) + { + if(ent->e.customShader) + shader = R_GetShaderByHandle(ent->e.customShader); + else if((ent->e.customSkin > 0) && (ent->e.customSkin < tr.numSkins)) + { + skin_t* skin = tr.skins[ent->e.customSkin]; + shader = tr.defaultShader; + + for(j = 0; j < skin->numSurfaces; j++) + { + if (0 == strcmp(skin->pSurfaces[j].name, surface->name)) + { + shader = skin->pSurfaces[j].shader; + break; + } + } + } + else if(surface->shaderIndex > 0) + shader = R_GetShaderByHandle( surface->shaderIndex ); + else + shader = tr.defaultShader; + + // we will add shadows even if the main object isn't visible in the view + + // stencil shadows can't do personal models unless I polyhedron clip + if ( (personalModel == 0) && (r_shadows->integer == 2) && (fogNum == 0) + && !(ent->e.renderfx & ( RF_NOSHADOW | RF_DEPTHHACK ) ) + && (shader->sort == SS_OPAQUE) ) + { + R_AddDrawSurf( (void *)surface, tr.shadowShader, 0, qfalse ); + } + + // projection shadows work fine with personal models + if ( (r_shadows->integer == 3) && (fogNum == 0) + && (ent->e.renderfx & RF_SHADOW_PLANE ) && (shader->sort == SS_OPAQUE) ) + { + R_AddDrawSurf( (void *)surface, tr.projectionShadowShader, 0, qfalse ); + } + + if (!personalModel) + R_AddDrawSurf( (void *)surface, shader, fogNum, qfalse ); + + surface = (mdrSurface_t *)( (byte *)surface + surface->ofsEnd ); + } +} + + + + diff --git a/code/renderer_vulkan/tr_backend.c b/code/renderer_vulkan/tr_backend.c new file mode 100644 index 0000000000..c6271512d0 --- /dev/null +++ b/code/renderer_vulkan/tr_backend.c @@ -0,0 +1,30 @@ +#include "ref_import.h" +#include "tr_backend.h" +#include "R_PrintMat.h" +#include "glConfig.h" +backEndState_t backEnd; + + +void R_ClearBackendState(void) +{ + ri.Printf(PRINT_ALL, " backend state cleared. 
\n"); + // clear all our internal state + memset( &backEnd, 0, sizeof( backEnd ) ); + + int width, height; + R_GetWinResolution(&width, &height); + backEnd.viewParms.viewportWidth = width; + backEnd.viewParms.viewportHeight = height; +} + + +void R_PrintBackEnd_OR_f(void) +{ + // in world coordinates + printMat1x3f("backEnd.or.origin", backEnd.or.origin); + // orientation in world + printMat3x3f("backEnd.or.axis", backEnd.or.axis); + // viewParms->or.origin in local coordinates + printMat1x3f("backEnd.or.viewOrigin", backEnd.or.viewOrigin); + printMat4x4f("backEnd.or.modelMatrix", backEnd.or.modelMatrix); +} diff --git a/code/renderer_vulkan/tr_backend.h b/code/renderer_vulkan/tr_backend.h new file mode 100644 index 0000000000..c9c2a0f6d1 --- /dev/null +++ b/code/renderer_vulkan/tr_backend.h @@ -0,0 +1,39 @@ +#ifndef TR_BACKEND_H_ +#define TR_BACKEND_H_ + +#include "tr_local.h" + +typedef struct { + int c_surfaces; + int c_shaders; + int c_vertexes; + int c_indexes; + int c_totalIndexes; + int c_dlightVertexes; + int c_dlightIndexes; + int msec; // total msec for backend run +} backEndCounters_t; + + +// all state modified by the back end is seperated +// from the front end state +typedef struct { + trRefdef_t refdef; + viewParms_t viewParms; + orientationr_t or; + backEndCounters_t pc; + trRefEntity_t entity2D; // currentEntity will point at this when doing 2D rendering + trRefEntity_t* currentEntity; + + unsigned char Color2D[4]; + qboolean projection2D; // if qtrue, drawstretchpic doesn't need to change modes + qboolean isHyperspace; + +} backEndState_t; + +extern backEndState_t backEnd; + +void R_ClearBackendState(void); +void R_PrintBackEnd_OR_f(void); + +#endif diff --git a/code/renderer_vulkan/tr_bsp.c b/code/renderer_vulkan/tr_bsp.c new file mode 100644 index 0000000000..eef8eee7e7 --- /dev/null +++ b/code/renderer_vulkan/tr_bsp.c @@ -0,0 +1,1891 @@ +/* +=========================================================================== +Copyright (C) 1999-2005 Id Software, Inc. + +This file is part of Quake III Arena source code. + +Quake III Arena source code is free software; you can redistribute it +and/or modify it under the terms of the GNU General Public License as +published by the Free Software Foundation; either version 2 of the License, +or (at your option) any later version. + +Quake III Arena source code is distributed in the hope that it will be +useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with Foobar; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +=========================================================================== +*/ +// tr_map.c + +#include "tr_local.h" +#include "tr_globals.h" +#include "vk_image.h" +#include "tr_cvar.h" +#include "ref_import.h" +#include "R_Parser.h" +#include "tr_shader.h" +/* + +Loads and prepares a map file for scene rendering. 
+ +A single entry point: + +void RE_LoadWorldMap( const char *name ); + +*/ + +static world_t s_worldData ; +static unsigned char* fileBase = NULL; + + +//=============================================================================== + +static void HSVtoRGB( float h, float s, float v, float rgb[3] ) +{ + int i; + float f; + float p, q, t; + + h *= 5; + + i = floor( h ); + f = h - i; + + p = v * ( 1 - s ); + q = v * ( 1 - s * f ); + t = v * ( 1 - s * ( 1 - f ) ); + + switch ( i ) + { + case 0: + rgb[0] = v; + rgb[1] = t; + rgb[2] = p; + break; + case 1: + rgb[0] = q; + rgb[1] = v; + rgb[2] = p; + break; + case 2: + rgb[0] = p; + rgb[1] = v; + rgb[2] = t; + break; + case 3: + rgb[0] = p; + rgb[1] = q; + rgb[2] = v; + break; + case 4: + rgb[0] = t; + rgb[1] = p; + rgb[2] = v; + break; + case 5: + rgb[0] = v; + rgb[1] = p; + rgb[2] = q; + break; + } +} + +/* +=============== +R_ColorShiftLightingBytes + +=============== +*/ +static void R_ColorShiftLightingBytes( byte in[4], byte out[4] ) { + int r, g, b; + + // shift the color data based on overbright range + int shift = 2; + //r_mapOverBrightBits->integer; + + // shift the data based on overbright range + r = in[0] * shift; + g = in[1] * shift; + b = in[2] * shift; + + // normalize by color instead of saturating to white + if ( ( r | g | b ) > 255 ) { + int max; + + max = r > g ? r : g; + max = max > b ? max : b; + r = r * 255 / max; + g = g * 255 / max; + b = b * 255 / max; + } + + out[0] = r; + out[1] = g; + out[2] = b; + out[3] = in[3]; +} + +/* +=============== +R_LoadLightmaps + +=============== +*/ +#define LIGHTMAP_SIZE 128 +static void R_LoadLightmaps( lump_t *l ) { + byte *buf, *buf_p; + int len; + unsigned char image[LIGHTMAP_SIZE*LIGHTMAP_SIZE*4]; + int i, j; + float maxIntensity = 0; + double sumIntensity = 0; + (void)sumIntensity; // used for accumulation but never read + + len = l->filelen; + if ( !len ) { + return; + } + buf = fileBase + l->fileofs; + + // create all the lightmaps + tr.numLightmaps = len / (LIGHTMAP_SIZE * LIGHTMAP_SIZE * 3); + if ( tr.numLightmaps == 1 ) { + //FIXME: HACK: maps with only one lightmap turn up fullbright for some reason. + //this avoids this, but isn't the correct solution. 
+        tr.numLightmaps++;
+    }
+
+    // if we are in r_vertexLight mode, we don't need the lightmaps at all
+    if ( r_vertexLight->integer ) {
+        return;
+    }
+
+    for ( i = 0 ; i < tr.numLightmaps ; i++ ) {
+        // expand the 24 bit on-disk to 32 bit
+        buf_p = buf + i * LIGHTMAP_SIZE*LIGHTMAP_SIZE * 3;
+
+        if ( r_lightmap->integer == 2 )
+        {   // color code by intensity as development tool (FIXME: check range)
+            for ( j = 0; j < LIGHTMAP_SIZE * LIGHTMAP_SIZE; j++ )
+            {
+                float r = buf_p[j*3+0];
+                float g = buf_p[j*3+1];
+                float b = buf_p[j*3+2];
+                float intensity;
+                float out[3] = {0};
+
+                intensity = 0.33f * r + 0.685f * g + 0.063f * b;
+
+                if ( intensity > 255 )
+                    intensity = 1.0f;
+                else
+                    intensity /= 255.0f;
+
+                if ( intensity > maxIntensity )
+                    maxIntensity = intensity;
+
+                HSVtoRGB( intensity, 1.00, 0.50, out );
+
+                image[j*4+0] = out[0] * 255;
+                image[j*4+1] = out[1] * 255;
+                image[j*4+2] = out[2] * 255;
+                image[j*4+3] = 255;
+
+                sumIntensity += intensity;
+            }
+        } else {
+            for ( j = 0 ; j < LIGHTMAP_SIZE * LIGHTMAP_SIZE; j++ ) {
+                R_ColorShiftLightingBytes( &buf_p[j*3], &image[j*4] );
+                image[j*4+3] = 255;
+            }
+        }
+        tr.lightmaps[i] = R_CreateImage( va("*lightmap%d",i), image,
+            LIGHTMAP_SIZE, LIGHTMAP_SIZE, qfalse, qfalse, GL_CLAMP);
+    }
+
+    if ( r_lightmap->integer == 2 ) {
+        ri.Printf( PRINT_ALL, "Brightest lightmap value: %d\n", ( int ) ( maxIntensity * 255 ) );
+    }
+}
+
+
+/*
+=================
+RE_SetWorldVisData
+
+This is called by the clipmodel subsystem so we can share the 1.8 megs of
+space in big maps...
+=================
+*/
+void RE_SetWorldVisData( const byte *vis ) {
+    tr.externalVisData = vis;
+}
+
+
+/*
+=================
+R_LoadVisibility
+=================
+*/
+static void R_LoadVisibility( lump_t *l )
+{
+    ri.Printf (PRINT_ALL, "\n---R_LoadVisibility---\n");
+
+    unsigned char* buf;
+
+    int len = ( s_worldData.numClusters + 63 ) & ~63;
+    s_worldData.novis = (byte*) ri.Hunk_Alloc( len, h_low );
+    memset( s_worldData.novis, 0xff, len );
+
+    len = l->filelen;
+    if ( !len ) {
+        return;
+    }
+    buf = fileBase + l->fileofs;
+
+    s_worldData.numClusters = LittleLong( ((int *)buf)[0] );
+    s_worldData.clusterBytes = LittleLong( ((int *)buf)[1] );
+
+    // CM_Load should have given us the vis data to share, so
+    // we don't need to allocate another copy
+    if ( tr.externalVisData ) {
+        s_worldData.vis = tr.externalVisData;
+    } else {
+        byte *dest;
+
+        dest = (byte*) ri.Hunk_Alloc( len - 8, h_low );
+        memcpy( dest, buf + 8, len - 8 );
+        s_worldData.vis = dest;
+    }
+}
+
+//===============================================================================
+
+
+static shader_t *ShaderForShaderNum( int shaderNum, int lightmapNum )
+{
+    int _shaderNum = LittleLong( shaderNum );
+    if ( _shaderNum < 0 || _shaderNum >= s_worldData.numShaders ) {
+        ri.Error( ERR_DROP, "ShaderForShaderNum: bad num %i", _shaderNum );
+    }
+    dshader_t* dsh = &s_worldData.shaders[ _shaderNum ];
+
+    if ( r_vertexLight->integer ) {
+        lightmapNum = LIGHTMAP_BY_VERTEX;
+    }
+
+    if ( r_fullbright->integer ) {
+        lightmapNum = LIGHTMAP_WHITEIMAGE;
+    }
+
+    shader_t* shader = R_FindShader( dsh->shader, lightmapNum, qtrue );
+
+    // if the shader had errors, just use default shader
+    if ( shader->defaultShader ) {
+        return tr.defaultShader;
+    }
+
+    return shader;
+}
+
+
+static void setPlaneSignbits (cplane_t *out) {
+    int bits = 0;
+    int j;
+
+    // for fast box on planeside test
+    for (j=0 ; j<3 ; j++) {
+        if (out->normal[j] < 0) {
+            bits |= 1<<j;
+        }
+    }
+    out->signbits = bits;
+}
+
+/*
+===============
+ParseFace
+===============
+*/
+static void
ParseFace( dsurface_t *ds, drawVert_t *verts, msurface_t *surf, int *indexes ) { + int i, j; + srfSurfaceFace_t *cv; + int numPoints, numIndexes; + int lightmapNum; + int sfaceSize, ofsIndexes; + + lightmapNum = LittleLong( ds->lightmapNum ); + + // get fog volume + surf->fogIndex = LittleLong( ds->fogNum ) + 1; + + // get shader value + surf->shader = ShaderForShaderNum( ds->shaderNum, lightmapNum ); + if ( r_singleShader->integer && !surf->shader->isSky ) { + surf->shader = tr.defaultShader; + } + + numPoints = LittleLong( ds->numVerts ); + if (numPoints > MAX_FACE_POINTS) { + ri.Printf( PRINT_WARNING, "WARNING: MAX_FACE_POINTS exceeded: %i\n", numPoints); + numPoints = MAX_FACE_POINTS; + surf->shader = tr.defaultShader; + } + + numIndexes = LittleLong( ds->numIndexes ); + + // create the srfSurfaceFace_t + sfaceSize = (int)(intptr_t) &((srfSurfaceFace_t *)0)->points[numPoints]; + ofsIndexes = sfaceSize; + sfaceSize += sizeof( int ) * numIndexes; + + cv = (srfSurfaceFace_t*) ri.Hunk_Alloc( sfaceSize, h_low ); + cv->surfaceType = SF_FACE; + cv->numPoints = numPoints; + cv->numIndices = numIndexes; + cv->ofsIndices = ofsIndexes; + + verts += LittleLong( ds->firstVert ); + for ( i = 0 ; i < numPoints ; i++ ) { + for ( j = 0 ; j < 3 ; j++ ) { + cv->points[i][j] = LittleFloat( verts[i].xyz[j] ); + } + for ( j = 0 ; j < 2 ; j++ ) { + cv->points[i][3+j] = LittleFloat( verts[i].st[j] ); + cv->points[i][5+j] = LittleFloat( verts[i].lightmap[j] ); + } + R_ColorShiftLightingBytes( verts[i].color, (byte *)&cv->points[i][7] ); + } + + indexes += LittleLong( ds->firstIndex ); + for ( i = 0 ; i < numIndexes ; i++ ) { + ((int *)((byte *)cv + cv->ofsIndices ))[i] = LittleLong( indexes[ i ] ); + } + + // take the plane information from the lightmap vector + for ( i = 0 ; i < 3 ; i++ ) { + cv->plane.normal[i] = LittleFloat( ds->lightmapVecs[2][i] ); + } + cv->plane.dist = DotProduct( cv->points[0], cv->plane.normal ); + setPlaneSignbits( &cv->plane ); + cv->plane.type = PlaneTypeForNormal( cv->plane.normal ); + + surf->data = (surfaceType_t *)cv; +} + + +/* +=============== +ParseMesh +=============== +*/ +static void ParseMesh ( dsurface_t *ds, drawVert_t *verts, msurface_t *surf ) { + srfGridMesh_t *grid; + int i, j; + int width, height, numPoints; + drawVert_t points[MAX_PATCH_SIZE*MAX_PATCH_SIZE]; + int lightmapNum; + vec3_t bounds[2]; + vec3_t tmpVec; + static surfaceType_t skipData = SF_SKIP; + + lightmapNum = LittleLong( ds->lightmapNum ); + + // get fog volume + surf->fogIndex = LittleLong( ds->fogNum ) + 1; + + // get shader value + surf->shader = ShaderForShaderNum( ds->shaderNum, lightmapNum ); + if ( r_singleShader->integer && !surf->shader->isSky ) { + surf->shader = tr.defaultShader; + } + + // we may have a nodraw surface, because they might still need to + // be around for movement clipping + if ( s_worldData.shaders[ LittleLong( ds->shaderNum ) ].surfaceFlags & SURF_NODRAW ) { + surf->data = &skipData; + return; + } + + width = LittleLong( ds->patchWidth ); + height = LittleLong( ds->patchHeight ); + + verts += LittleLong( ds->firstVert ); + numPoints = width * height; + for ( i = 0 ; i < numPoints ; i++ ) { + for ( j = 0 ; j < 3 ; j++ ) { + points[i].xyz[j] = LittleFloat( verts[i].xyz[j] ); + points[i].normal[j] = LittleFloat( verts[i].normal[j] ); + } + for ( j = 0 ; j < 2 ; j++ ) { + points[i].st[j] = LittleFloat( verts[i].st[j] ); + points[i].lightmap[j] = LittleFloat( verts[i].lightmap[j] ); + } + R_ColorShiftLightingBytes( verts[i].color, points[i].color ); + } + + // pre-tesseleate 
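+    // R_SubdividePatchToGrid() expands the width*height control points into a
+    // renderable srfGridMesh_t; the per-row/per-column LOD error terms it
+    // records (widthLodError/heightLodError) are reconciled across adjacent
+    // patches later by R_FixSharedVertexLodError().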
+ grid = R_SubdividePatchToGrid( width, height, points ); + surf->data = (surfaceType_t *)grid; + + // copy the level of detail origin, which is the center + // of the group of all curves that must subdivide the same + // to avoid cracking + for ( i = 0 ; i < 3 ; i++ ) { + bounds[0][i] = LittleFloat( ds->lightmapVecs[0][i] ); + bounds[1][i] = LittleFloat( ds->lightmapVecs[1][i] ); + } + VectorAdd( bounds[0], bounds[1], bounds[1] ); + VectorScale( bounds[1], 0.5f, grid->lodOrigin ); + VectorSubtract( bounds[0], grid->lodOrigin, tmpVec ); + grid->lodRadius = VectorLength( tmpVec ); +} + +/* +=============== +ParseTriSurf +=============== +*/ +static void ParseTriSurf( dsurface_t *ds, drawVert_t *verts, msurface_t *surf, int *indexes ) { + srfTriangles_t *tri; + int i, j; + int numVerts, numIndexes; + + // get fog volume + surf->fogIndex = LittleLong( ds->fogNum ) + 1; + + // get shader + surf->shader = ShaderForShaderNum( ds->shaderNum, LIGHTMAP_BY_VERTEX ); + if ( r_singleShader->integer && !surf->shader->isSky ) { + surf->shader = tr.defaultShader; + } + + numVerts = LittleLong( ds->numVerts ); + numIndexes = LittleLong( ds->numIndexes ); + + tri = (srfTriangles_t*) ri.Hunk_Alloc( sizeof( *tri ) + numVerts * sizeof( tri->verts[0] ) + + numIndexes * sizeof( tri->indexes[0] ), h_low ); + tri->surfaceType = SF_TRIANGLES; + tri->numVerts = numVerts; + tri->numIndexes = numIndexes; + tri->verts = (drawVert_t *)(tri + 1); + tri->indexes = (int *)(tri->verts + tri->numVerts ); + + surf->data = (surfaceType_t *)tri; + + // copy vertexes + //ClearBounds( tri->bounds[0], tri->bounds[1] ); + + tri->bounds[0][0] = tri->bounds[0][1] = tri->bounds[0][2] = 99999; + tri->bounds[1][0] = tri->bounds[1][1] = tri->bounds[1][2] = -99999; + + verts += LittleLong( ds->firstVert ); + for ( i = 0 ; i < numVerts ; i++ ) { + for ( j = 0 ; j < 3 ; j++ ) { + tri->verts[i].xyz[j] = LittleFloat( verts[i].xyz[j] ); + tri->verts[i].normal[j] = LittleFloat( verts[i].normal[j] ); + } + AddPointToBounds( tri->verts[i].xyz, tri->bounds[0], tri->bounds[1] ); + for ( j = 0 ; j < 2 ; j++ ) { + tri->verts[i].st[j] = LittleFloat( verts[i].st[j] ); + tri->verts[i].lightmap[j] = LittleFloat( verts[i].lightmap[j] ); + } + + R_ColorShiftLightingBytes( verts[i].color, tri->verts[i].color ); + } + + // copy indexes + indexes += LittleLong( ds->firstIndex ); + for ( i = 0 ; i < numIndexes ; i++ ) { + tri->indexes[i] = LittleLong( indexes[i] ); + if ( tri->indexes[i] < 0 || tri->indexes[i] >= numVerts ) { + ri.Error( ERR_DROP, "Bad index in triangle surface" ); + } + } +} + +/* +=============== +ParseFlare +=============== +*/ +static void ParseFlare( dsurface_t *ds, drawVert_t *verts, msurface_t *surf, int *indexes ) { + srfFlare_t *flare; + int i; + + // get fog volume + surf->fogIndex = LittleLong( ds->fogNum ) + 1; + + // get shader + surf->shader = ShaderForShaderNum( ds->shaderNum, LIGHTMAP_BY_VERTEX ); + if ( r_singleShader->integer && !surf->shader->isSky ) { + surf->shader = tr.defaultShader; + } + + flare = (srfFlare_t*) ri.Hunk_Alloc( sizeof( *flare ), h_low ); + flare->surfaceType = SF_FLARE; + + surf->data = (surfaceType_t *)flare; + + for ( i = 0 ; i < 3 ; i++ ) { + flare->origin[i] = LittleFloat( ds->lightmapOrigin[i] ); + flare->color[i] = LittleFloat( ds->lightmapVecs[0][i] ); + flare->normal[i] = LittleFloat( ds->lightmapVecs[2][i] ); + } +} + + +/* +================= +R_MergedWidthPoints + +returns true if there are grid points merged on a width edge +================= +*/ +int R_MergedWidthPoints(srfGridMesh_t *grid, 
int offset) { + int i, j; + + for (i = 1; i < grid->width-1; i++) { + for (j = i + 1; j < grid->width-1; j++) { + if ( fabs(grid->verts[i + offset].xyz[0] - grid->verts[j + offset].xyz[0]) > .1) continue; + if ( fabs(grid->verts[i + offset].xyz[1] - grid->verts[j + offset].xyz[1]) > .1) continue; + if ( fabs(grid->verts[i + offset].xyz[2] - grid->verts[j + offset].xyz[2]) > .1) continue; + return qtrue; + } + } + return qfalse; +} + +/* +================= +R_MergedHeightPoints + +returns true if there are grid points merged on a height edge +================= +*/ +int R_MergedHeightPoints(srfGridMesh_t *grid, int offset) { + int i, j; + + for (i = 1; i < grid->height-1; i++) { + for (j = i + 1; j < grid->height-1; j++) { + if ( fabs(grid->verts[grid->width * i + offset].xyz[0] - grid->verts[grid->width * j + offset].xyz[0]) > .1) continue; + if ( fabs(grid->verts[grid->width * i + offset].xyz[1] - grid->verts[grid->width * j + offset].xyz[1]) > .1) continue; + if ( fabs(grid->verts[grid->width * i + offset].xyz[2] - grid->verts[grid->width * j + offset].xyz[2]) > .1) continue; + return qtrue; + } + } + return qfalse; +} + +/* +================= +R_FixSharedVertexLodError_r + +NOTE: never sync LoD through grid edges with merged points! + +FIXME: write generalized version that also avoids cracks between a patch and one that meets half way? +================= +*/ +void R_FixSharedVertexLodError_r( int start, srfGridMesh_t *grid1 ) { + int j, k, l, m, n, offset1, offset2, touch; + srfGridMesh_t *grid2; + + for ( j = start; j < s_worldData.numsurfaces; j++ ) { + // + grid2 = (srfGridMesh_t *) s_worldData.surfaces[j].data; + // if this surface is not a grid + if ( grid2->surfaceType != SF_GRID ) continue; + // if the LOD errors are already fixed for this patch + if ( grid2->lodFixed == 2 ) continue; + // grids in the same LOD group should have the exact same lod radius + if ( grid1->lodRadius != grid2->lodRadius ) continue; + // grids in the same LOD group should have the exact same lod origin + if ( grid1->lodOrigin[0] != grid2->lodOrigin[0] ) continue; + if ( grid1->lodOrigin[1] != grid2->lodOrigin[1] ) continue; + if ( grid1->lodOrigin[2] != grid2->lodOrigin[2] ) continue; + // + touch = qfalse; + for (n = 0; n < 2; n++) { + // + if (n) offset1 = (grid1->height-1) * grid1->width; + else offset1 = 0; + if (R_MergedWidthPoints(grid1, offset1)) continue; + for (k = 1; k < grid1->width-1; k++) { + for (m = 0; m < 2; m++) { + + if (m) offset2 = (grid2->height-1) * grid2->width; + else offset2 = 0; + if (R_MergedWidthPoints(grid2, offset2)) continue; + for ( l = 1; l < grid2->width-1; l++) { + // + if ( fabs(grid1->verts[k + offset1].xyz[0] - grid2->verts[l + offset2].xyz[0]) > .1) continue; + if ( fabs(grid1->verts[k + offset1].xyz[1] - grid2->verts[l + offset2].xyz[1]) > .1) continue; + if ( fabs(grid1->verts[k + offset1].xyz[2] - grid2->verts[l + offset2].xyz[2]) > .1) continue; + // ok the points are equal and should have the same lod error + grid2->widthLodError[l] = grid1->widthLodError[k]; + touch = qtrue; + } + } + for (m = 0; m < 2; m++) { + + if (m) offset2 = grid2->width-1; + else offset2 = 0; + if (R_MergedHeightPoints(grid2, offset2)) continue; + for ( l = 1; l < grid2->height-1; l++) { + // + if ( fabs(grid1->verts[k + offset1].xyz[0] - grid2->verts[grid2->width * l + offset2].xyz[0]) > .1) continue; + if ( fabs(grid1->verts[k + offset1].xyz[1] - grid2->verts[grid2->width * l + offset2].xyz[1]) > .1) continue; + if ( fabs(grid1->verts[k + offset1].xyz[2] - grid2->verts[grid2->width * 
l + offset2].xyz[2]) > .1) continue; + // ok the points are equal and should have the same lod error + grid2->heightLodError[l] = grid1->widthLodError[k]; + touch = qtrue; + } + } + } + } + for (n = 0; n < 2; n++) { + // + if (n) offset1 = grid1->width-1; + else offset1 = 0; + if (R_MergedHeightPoints(grid1, offset1)) continue; + for (k = 1; k < grid1->height-1; k++) { + for (m = 0; m < 2; m++) { + + if (m) offset2 = (grid2->height-1) * grid2->width; + else offset2 = 0; + if (R_MergedWidthPoints(grid2, offset2)) continue; + for ( l = 1; l < grid2->width-1; l++) { + // + if ( fabs(grid1->verts[grid1->width * k + offset1].xyz[0] - grid2->verts[l + offset2].xyz[0]) > .1) continue; + if ( fabs(grid1->verts[grid1->width * k + offset1].xyz[1] - grid2->verts[l + offset2].xyz[1]) > .1) continue; + if ( fabs(grid1->verts[grid1->width * k + offset1].xyz[2] - grid2->verts[l + offset2].xyz[2]) > .1) continue; + // ok the points are equal and should have the same lod error + grid2->widthLodError[l] = grid1->heightLodError[k]; + touch = qtrue; + } + } + for (m = 0; m < 2; m++) { + + if (m) offset2 = grid2->width-1; + else offset2 = 0; + if (R_MergedHeightPoints(grid2, offset2)) continue; + for ( l = 1; l < grid2->height-1; l++) { + // + if ( fabs(grid1->verts[grid1->width * k + offset1].xyz[0] - grid2->verts[grid2->width * l + offset2].xyz[0]) > .1) continue; + if ( fabs(grid1->verts[grid1->width * k + offset1].xyz[1] - grid2->verts[grid2->width * l + offset2].xyz[1]) > .1) continue; + if ( fabs(grid1->verts[grid1->width * k + offset1].xyz[2] - grid2->verts[grid2->width * l + offset2].xyz[2]) > .1) continue; + // ok the points are equal and should have the same lod error + grid2->heightLodError[l] = grid1->heightLodError[k]; + touch = qtrue; + } + } + } + } + if (touch) { + grid2->lodFixed = 2; + R_FixSharedVertexLodError_r ( start, grid2 ); + //NOTE: this would be correct but makes things really slow + //grid2->lodFixed = 1; + } + } +} + +/* +================= +R_FixSharedVertexLodError + +This function assumes that all patches in one group are nicely stitched together for the highest LoD. +If this is not the case this function will still do its job but won't fix the highest LoD cracks. 
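+
+Patches belong to the same LoD group when they share the same lodOrigin and
+lodRadius. For example, if one grid's edge carries the LoD errors {e0, e1, e2}
+and a neighbouring grid shares that edge (vertices matching within 0.1 units),
+the neighbour's corresponding rows or columns receive the same {e0, e1, e2},
+so both grids drop identical vertices at any LoD and no crack opens.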
+================= +*/ +void R_FixSharedVertexLodError( void ) { + int i; + srfGridMesh_t *grid1; + + for ( i = 0; i < s_worldData.numsurfaces; i++ ) { + // + grid1 = (srfGridMesh_t *) s_worldData.surfaces[i].data; + // if this surface is not a grid + if ( grid1->surfaceType != SF_GRID ) + continue; + // + if ( grid1->lodFixed ) + continue; + // + grid1->lodFixed = 2; + // recursively fix other patches in the same LOD group + R_FixSharedVertexLodError_r( i + 1, grid1); + } +} + + +/* +=============== +R_StitchPatches +=============== +*/ +int R_StitchPatches( int grid1num, int grid2num ) { + float *v1, *v2; + srfGridMesh_t *grid1, *grid2; + int k, l, m, n, offset1, offset2, row, column; + + grid1 = (srfGridMesh_t *) s_worldData.surfaces[grid1num].data; + grid2 = (srfGridMesh_t *) s_worldData.surfaces[grid2num].data; + for (n = 0; n < 2; n++) { + // + if (n) offset1 = (grid1->height-1) * grid1->width; + else offset1 = 0; + if (R_MergedWidthPoints(grid1, offset1)) + continue; + for (k = 0; k < grid1->width-2; k += 2) { + + for (m = 0; m < 2; m++) { + + if ( grid2->width >= MAX_GRID_SIZE ) + break; + if (m) offset2 = (grid2->height-1) * grid2->width; + else offset2 = 0; + for ( l = 0; l < grid2->width-1; l++) { + // + v1 = grid1->verts[k + offset1].xyz; + v2 = grid2->verts[l + offset2].xyz; + if ( fabs(v1[0] - v2[0]) > .1) + continue; + if ( fabs(v1[1] - v2[1]) > .1) + continue; + if ( fabs(v1[2] - v2[2]) > .1) + continue; + + v1 = grid1->verts[k + 2 + offset1].xyz; + v2 = grid2->verts[l + 1 + offset2].xyz; + if ( fabs(v1[0] - v2[0]) > .1) + continue; + if ( fabs(v1[1] - v2[1]) > .1) + continue; + if ( fabs(v1[2] - v2[2]) > .1) + continue; + // + v1 = grid2->verts[l + offset2].xyz; + v2 = grid2->verts[l + 1 + offset2].xyz; + if ( fabs(v1[0] - v2[0]) < .01 && + fabs(v1[1] - v2[1]) < .01 && + fabs(v1[2] - v2[2]) < .01) + continue; + // + //ri.Printf( PRINT_ALL, "found highest LoD crack between two patches\n" ); + // insert column into grid2 right after after column l + if (m) row = grid2->height-1; + else row = 0; + grid2 = R_GridInsertColumn( grid2, l+1, row, + grid1->verts[k + 1 + offset1].xyz, grid1->widthLodError[k+1]); + grid2->lodStitched = qfalse; + s_worldData.surfaces[grid2num].data = (surfaceType_t*) (void *) grid2; + return qtrue; + } + } + for (m = 0; m < 2; m++) { + + if (grid2->height >= MAX_GRID_SIZE) + break; + if (m) offset2 = grid2->width-1; + else offset2 = 0; + for ( l = 0; l < grid2->height-1; l++) { + // + v1 = grid1->verts[k + offset1].xyz; + v2 = grid2->verts[grid2->width * l + offset2].xyz; + if ( fabs(v1[0] - v2[0]) > .1) + continue; + if ( fabs(v1[1] - v2[1]) > .1) + continue; + if ( fabs(v1[2] - v2[2]) > .1) + continue; + + v1 = grid1->verts[k + 2 + offset1].xyz; + v2 = grid2->verts[grid2->width * (l + 1) + offset2].xyz; + if ( fabs(v1[0] - v2[0]) > .1) + continue; + if ( fabs(v1[1] - v2[1]) > .1) + continue; + if ( fabs(v1[2] - v2[2]) > .1) + continue; + // + v1 = grid2->verts[grid2->width * l + offset2].xyz; + v2 = grid2->verts[grid2->width * (l + 1) + offset2].xyz; + if ( fabs(v1[0] - v2[0]) < .01 && + fabs(v1[1] - v2[1]) < .01 && + fabs(v1[2] - v2[2]) < .01) + continue; + // + //ri.Printf( PRINT_ALL, "found highest LoD crack between two patches\n" ); + // insert row into grid2 right after after row l + if (m) column = grid2->width-1; + else column = 0; + grid2 = R_GridInsertRow( grid2, l+1, column, + grid1->verts[k + 1 + offset1].xyz, grid1->widthLodError[k+1]); + grid2->lodStitched = qfalse; + s_worldData.surfaces[grid2num].data = (surfaceType_t*) (void *) 
grid2; + return qtrue; + } + } + } + } + for (n = 0; n < 2; n++) { + // + if (n) offset1 = grid1->width-1; + else offset1 = 0; + if (R_MergedHeightPoints(grid1, offset1)) + continue; + for (k = 0; k < grid1->height-2; k += 2) { + for (m = 0; m < 2; m++) { + + if ( grid2->width >= MAX_GRID_SIZE ) + break; + if (m) offset2 = (grid2->height-1) * grid2->width; + else offset2 = 0; + for ( l = 0; l < grid2->width-1; l++) { + // + v1 = grid1->verts[grid1->width * k + offset1].xyz; + v2 = grid2->verts[l + offset2].xyz; + if ( fabs(v1[0] - v2[0]) > .1) + continue; + if ( fabs(v1[1] - v2[1]) > .1) + continue; + if ( fabs(v1[2] - v2[2]) > .1) + continue; + + v1 = grid1->verts[grid1->width * (k + 2) + offset1].xyz; + v2 = grid2->verts[l + 1 + offset2].xyz; + if ( fabs(v1[0] - v2[0]) > .1) + continue; + if ( fabs(v1[1] - v2[1]) > .1) + continue; + if ( fabs(v1[2] - v2[2]) > .1) + continue; + // + v1 = grid2->verts[l + offset2].xyz; + v2 = grid2->verts[(l + 1) + offset2].xyz; + if ( fabs(v1[0] - v2[0]) < .01 && + fabs(v1[1] - v2[1]) < .01 && + fabs(v1[2] - v2[2]) < .01) + continue; + // + //ri.Printf( PRINT_ALL, "found highest LoD crack between two patches\n" ); + // insert column into grid2 right after after column l + if (m) row = grid2->height-1; + else row = 0; + grid2 = R_GridInsertColumn( grid2, l+1, row, + grid1->verts[grid1->width * (k + 1) + offset1].xyz, grid1->heightLodError[k+1]); + grid2->lodStitched = qfalse; + s_worldData.surfaces[grid2num].data = (surfaceType_t*) (void *) grid2; + return qtrue; + } + } + for (m = 0; m < 2; m++) { + + if (grid2->height >= MAX_GRID_SIZE) + break; + if (m) offset2 = grid2->width-1; + else offset2 = 0; + for ( l = 0; l < grid2->height-1; l++) { + // + v1 = grid1->verts[grid1->width * k + offset1].xyz; + v2 = grid2->verts[grid2->width * l + offset2].xyz; + if ( fabs(v1[0] - v2[0]) > .1) + continue; + if ( fabs(v1[1] - v2[1]) > .1) + continue; + if ( fabs(v1[2] - v2[2]) > .1) + continue; + + v1 = grid1->verts[grid1->width * (k + 2) + offset1].xyz; + v2 = grid2->verts[grid2->width * (l + 1) + offset2].xyz; + if ( fabs(v1[0] - v2[0]) > .1) + continue; + if ( fabs(v1[1] - v2[1]) > .1) + continue; + if ( fabs(v1[2] - v2[2]) > .1) + continue; + // + v1 = grid2->verts[grid2->width * l + offset2].xyz; + v2 = grid2->verts[grid2->width * (l + 1) + offset2].xyz; + if ( fabs(v1[0] - v2[0]) < .01 && + fabs(v1[1] - v2[1]) < .01 && + fabs(v1[2] - v2[2]) < .01) + continue; + // + //ri.Printf( PRINT_ALL, "found highest LoD crack between two patches\n" ); + // insert row into grid2 right after after row l + if (m) column = grid2->width-1; + else column = 0; + grid2 = R_GridInsertRow( grid2, l+1, column, + grid1->verts[grid1->width * (k + 1) + offset1].xyz, grid1->heightLodError[k+1]); + grid2->lodStitched = qfalse; + s_worldData.surfaces[grid2num].data = (surfaceType_t*) (void *) grid2; + return qtrue; + } + } + } + } + for (n = 0; n < 2; n++) { + // + if (n) offset1 = (grid1->height-1) * grid1->width; + else offset1 = 0; + if (R_MergedWidthPoints(grid1, offset1)) + continue; + for (k = grid1->width-1; k > 1; k -= 2) { + + for (m = 0; m < 2; m++) { + + if ( grid2->width >= MAX_GRID_SIZE ) + break; + if (m) offset2 = (grid2->height-1) * grid2->width; + else offset2 = 0; + for ( l = 0; l < grid2->width-1; l++) { + // + v1 = grid1->verts[k + offset1].xyz; + v2 = grid2->verts[l + offset2].xyz; + if ( fabs(v1[0] - v2[0]) > .1) + continue; + if ( fabs(v1[1] - v2[1]) > .1) + continue; + if ( fabs(v1[2] - v2[2]) > .1) + continue; + + v1 = grid1->verts[k - 2 + offset1].xyz; + v2 = 
grid2->verts[l + 1 + offset2].xyz; + if ( fabs(v1[0] - v2[0]) > .1) + continue; + if ( fabs(v1[1] - v2[1]) > .1) + continue; + if ( fabs(v1[2] - v2[2]) > .1) + continue; + // + v1 = grid2->verts[l + offset2].xyz; + v2 = grid2->verts[(l + 1) + offset2].xyz; + if ( fabs(v1[0] - v2[0]) < .01 && + fabs(v1[1] - v2[1]) < .01 && + fabs(v1[2] - v2[2]) < .01) + continue; + // + //ri.Printf( PRINT_ALL, "found highest LoD crack between two patches\n" ); + // insert column into grid2 right after after column l + if (m) row = grid2->height-1; + else row = 0; + grid2 = R_GridInsertColumn( grid2, l+1, row, + grid1->verts[k - 1 + offset1].xyz, grid1->widthLodError[k+1]); + grid2->lodStitched = qfalse; + s_worldData.surfaces[grid2num].data = (surfaceType_t*) (void *) grid2; + return qtrue; + } + } + for (m = 0; m < 2; m++) { + + if (grid2->height >= MAX_GRID_SIZE) + break; + if (m) offset2 = grid2->width-1; + else offset2 = 0; + for ( l = 0; l < grid2->height-1; l++) { + // + v1 = grid1->verts[k + offset1].xyz; + v2 = grid2->verts[grid2->width * l + offset2].xyz; + if ( fabs(v1[0] - v2[0]) > .1) + continue; + if ( fabs(v1[1] - v2[1]) > .1) + continue; + if ( fabs(v1[2] - v2[2]) > .1) + continue; + + v1 = grid1->verts[k - 2 + offset1].xyz; + v2 = grid2->verts[grid2->width * (l + 1) + offset2].xyz; + if ( fabs(v1[0] - v2[0]) > .1) + continue; + if ( fabs(v1[1] - v2[1]) > .1) + continue; + if ( fabs(v1[2] - v2[2]) > .1) + continue; + // + v1 = grid2->verts[grid2->width * l + offset2].xyz; + v2 = grid2->verts[grid2->width * (l + 1) + offset2].xyz; + if ( fabs(v1[0] - v2[0]) < .01 && + fabs(v1[1] - v2[1]) < .01 && + fabs(v1[2] - v2[2]) < .01) + continue; + // + //ri.Printf( PRINT_ALL, "found highest LoD crack between two patches\n" ); + // insert row into grid2 right after after row l + if (m) column = grid2->width-1; + else column = 0; + grid2 = R_GridInsertRow( grid2, l+1, column, + grid1->verts[k - 1 + offset1].xyz, grid1->widthLodError[k+1]); + if (!grid2) + break; + grid2->lodStitched = qfalse; + s_worldData.surfaces[grid2num].data = (surfaceType_t*)(void *)grid2; + return qtrue; + } + } + } + } + for (n = 0; n < 2; n++) { + // + if (n) offset1 = grid1->width-1; + else offset1 = 0; + if (R_MergedHeightPoints(grid1, offset1)) + continue; + for (k = grid1->height-1; k > 1; k -= 2) { + for (m = 0; m < 2; m++) { + + if ( grid2->width >= MAX_GRID_SIZE ) + break; + if (m) offset2 = (grid2->height-1) * grid2->width; + else offset2 = 0; + for ( l = 0; l < grid2->width-1; l++) { + // + v1 = grid1->verts[grid1->width * k + offset1].xyz; + v2 = grid2->verts[l + offset2].xyz; + if ( fabs(v1[0] - v2[0]) > .1) + continue; + if ( fabs(v1[1] - v2[1]) > .1) + continue; + if ( fabs(v1[2] - v2[2]) > .1) + continue; + + v1 = grid1->verts[grid1->width * (k - 2) + offset1].xyz; + v2 = grid2->verts[l + 1 + offset2].xyz; + if ( fabs(v1[0] - v2[0]) > .1) + continue; + if ( fabs(v1[1] - v2[1]) > .1) + continue; + if ( fabs(v1[2] - v2[2]) > .1) + continue; + // + v1 = grid2->verts[l + offset2].xyz; + v2 = grid2->verts[(l + 1) + offset2].xyz; + if ( fabs(v1[0] - v2[0]) < .01 && + fabs(v1[1] - v2[1]) < .01 && + fabs(v1[2] - v2[2]) < .01) + continue; + // + //ri.Printf( PRINT_ALL, "found highest LoD crack between two patches\n" ); + // insert column into grid2 right after after column l + if (m) row = grid2->height-1; + else row = 0; + grid2 = R_GridInsertColumn( grid2, l+1, row, + grid1->verts[grid1->width * (k - 1) + offset1].xyz, grid1->heightLodError[k+1]); + grid2->lodStitched = qfalse; + s_worldData.surfaces[grid2num].data = 
(surfaceType_t*)(void *)grid2; + return qtrue; + } + } + for (m = 0; m < 2; m++) { + + if (grid2->height >= MAX_GRID_SIZE) + break; + if (m) offset2 = grid2->width-1; + else offset2 = 0; + for ( l = 0; l < grid2->height-1; l++) { + // + v1 = grid1->verts[grid1->width * k + offset1].xyz; + v2 = grid2->verts[grid2->width * l + offset2].xyz; + if ( fabs(v1[0] - v2[0]) > .1) + continue; + if ( fabs(v1[1] - v2[1]) > .1) + continue; + if ( fabs(v1[2] - v2[2]) > .1) + continue; + + v1 = grid1->verts[grid1->width * (k - 2) + offset1].xyz; + v2 = grid2->verts[grid2->width * (l + 1) + offset2].xyz; + if ( fabs(v1[0] - v2[0]) > .1) + continue; + if ( fabs(v1[1] - v2[1]) > .1) + continue; + if ( fabs(v1[2] - v2[2]) > .1) + continue; + // + v1 = grid2->verts[grid2->width * l + offset2].xyz; + v2 = grid2->verts[grid2->width * (l + 1) + offset2].xyz; + if ( fabs(v1[0] - v2[0]) < .01 && + fabs(v1[1] - v2[1]) < .01 && + fabs(v1[2] - v2[2]) < .01) + continue; + // + //ri.Printf( PRINT_ALL, "found highest LoD crack between two patches\n" ); + // insert row into grid2 right after after row l + if (m) column = grid2->width-1; + else column = 0; + grid2 = R_GridInsertRow( grid2, l+1, column, + grid1->verts[grid1->width * (k - 1) + offset1].xyz, grid1->heightLodError[k+1]); + grid2->lodStitched = qfalse; + s_worldData.surfaces[grid2num].data = (surfaceType_t*)(void *)grid2; + return qtrue; + } + } + } + } + return qfalse; +} + +/* +=============== +R_TryStitchPatch + +This function will try to stitch patches in the same LoD group together for the highest LoD. + +Only single missing vertice cracks will be fixed. + +Vertices will be joined at the patch side a crack is first found, at the other side +of the patch (on the same row or column) the vertices will not be joined and cracks +might still appear at that side. 
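+
+The stitch test walks an edge two vertices at a time: if grid1 has vertices at
+k, k+1 and k+2 along an edge while grid2 only matches k and k+2, the missing
+in-between vertex is inserted into grid2 via R_GridInsertColumn() or
+R_GridInsertRow(), inheriting grid1's LoD error for that vertex.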
+=============== +*/ +int R_TryStitchingPatch( int grid1num ) { + int j, numstitches; + srfGridMesh_t *grid1, *grid2; + + numstitches = 0; + grid1 = (srfGridMesh_t *) s_worldData.surfaces[grid1num].data; + for ( j = 0; j < s_worldData.numsurfaces; j++ ) { + // + grid2 = (srfGridMesh_t *) s_worldData.surfaces[j].data; + // if this surface is not a grid + if ( grid2->surfaceType != SF_GRID ) continue; + // grids in the same LOD group should have the exact same lod radius + if ( grid1->lodRadius != grid2->lodRadius ) continue; + // grids in the same LOD group should have the exact same lod origin + if ( grid1->lodOrigin[0] != grid2->lodOrigin[0] ) continue; + if ( grid1->lodOrigin[1] != grid2->lodOrigin[1] ) continue; + if ( grid1->lodOrigin[2] != grid2->lodOrigin[2] ) continue; + // + while (R_StitchPatches(grid1num, j)) + { + numstitches++; + } + } + return numstitches; +} + +/* +=============== +R_StitchAllPatches +=============== +*/ +void R_StitchAllPatches( void ) { + int i, stitched, numstitches; + srfGridMesh_t *grid1; + + numstitches = 0; + do + { + stitched = qfalse; + for ( i = 0; i < s_worldData.numsurfaces; i++ ) { + // + grid1 = (srfGridMesh_t *) s_worldData.surfaces[i].data; + // if this surface is not a grid + if ( grid1->surfaceType != SF_GRID ) + continue; + // + if ( grid1->lodStitched ) + continue; + // + grid1->lodStitched = qtrue; + stitched = qtrue; + // + numstitches += R_TryStitchingPatch( i ); + } + } + while (stitched); + ri.Printf( PRINT_ALL, "stitched %d LoD cracks\n", numstitches ); +} + +/* +=============== +R_MovePatchSurfacesToHunk +=============== +*/ +void R_MovePatchSurfacesToHunk(void) { + int i, size; + srfGridMesh_t *grid, *hunkgrid; + + for ( i = 0; i < s_worldData.numsurfaces; i++ ) { + // + grid = (srfGridMesh_t *) s_worldData.surfaces[i].data; + // if this surface is not a grid + if ( grid->surfaceType != SF_GRID ) + continue; + // + size = (grid->width * grid->height - 1) * sizeof( drawVert_t ) + sizeof( *grid ); + hunkgrid = (srfGridMesh_t*) ri.Hunk_Alloc( size, h_low ); + memcpy(hunkgrid, grid, size); + + hunkgrid->widthLodError = (float*) ri.Hunk_Alloc( grid->width * 4, h_low ); + memcpy( hunkgrid->widthLodError, grid->widthLodError, grid->width * 4 ); + + hunkgrid->heightLodError = (float*) ri.Hunk_Alloc( grid->height * 4, h_low ); + memcpy( hunkgrid->heightLodError, grid->heightLodError, grid->height * 4 ); + + R_FreeSurfaceGridMesh( grid ); + + s_worldData.surfaces[i].data = (surfaceType_t*) (void *) hunkgrid; + } +} + +/* +=============== +R_LoadSurfaces +=============== +*/ +static void R_LoadSurfaces( lump_t *surfs, lump_t *verts, lump_t *indexLump ) { + dsurface_t *in; + msurface_t *out; + drawVert_t *dv; + int *indexes; + int count; + int numFaces, numMeshes, numTriSurfs, numFlares; + int i; + + numFaces = 0; + numMeshes = 0; + numTriSurfs = 0; + numFlares = 0; + + in = (dsurface_t*) (void *)(fileBase + surfs->fileofs); + if (surfs->filelen % sizeof(*in)) + ri.Error (ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name); + count = surfs->filelen / sizeof(*in); + + dv = (drawVert_t*) (void *)(fileBase + verts->fileofs); + if (verts->filelen % sizeof(*dv)) + ri.Error (ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name); + + indexes = (int*) (void *)(fileBase + indexLump->fileofs); + if ( indexLump->filelen % sizeof(*indexes)) + ri.Error (ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name); + + out = (msurface_t*) ri.Hunk_Alloc ( count * sizeof(*out), h_low ); + + s_worldData.surfaces = out; + s_worldData.numsurfaces 
= count;
+
+    for ( i = 0 ; i < count ; i++, in++, out++ ) {
+        switch ( LittleLong( in->surfaceType ) ) {
+        case MST_PATCH:
+            ParseMesh ( in, dv, out );
+            numMeshes++;
+            break;
+        case MST_TRIANGLE_SOUP:
+            ParseTriSurf( in, dv, out, indexes );
+            numTriSurfs++;
+            break;
+        case MST_PLANAR:
+            ParseFace( in, dv, out, indexes );
+            numFaces++;
+            break;
+        case MST_FLARE:
+            ParseFlare( in, dv, out, indexes );
+            numFlares++;
+            break;
+        default:
+            ri.Error( ERR_DROP, "Bad surfaceType" );
+        }
+    }
+
+#ifdef PATCH_STITCHING
+    R_StitchAllPatches();
+#endif
+
+    R_FixSharedVertexLodError();
+
+#ifdef PATCH_STITCHING
+    R_MovePatchSurfacesToHunk();
+#endif
+
+    ri.Printf( PRINT_ALL, "...loaded %d faces, %i meshes, %i trisurfs, %i flares\n",
+        numFaces, numMeshes, numTriSurfs, numFlares );
+}
+
+
+
+/*
+=================
+R_LoadSubmodels
+=================
+*/
+static void R_LoadSubmodels( lump_t *l )
+{
+
+    ri.Printf (PRINT_ALL, "\n---R_LoadSubmodels---\n");
+
+
+    dmodel_t *in;
+    bmodel_t *out;
+    int i, j, count;
+
+    in = (dmodel_t*) (void *)(fileBase + l->fileofs);
+    if (l->filelen % sizeof(*in))
+        ri.Error (ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name);
+    count = l->filelen / sizeof(*in);
+
+    s_worldData.bmodels = out = (bmodel_t*) ri.Hunk_Alloc( count * sizeof(*out), h_low );
+
+    for ( i=0 ; i<count ; i++, in++, out++ )
+    {
+        model_t* model = (model_t*) ri.Hunk_Alloc( sizeof(*model), h_low );
+
+        model->index = tr.numModels;
+        model->type = MOD_BRUSH;
+        model->bmodel = out;
+        snprintf( model->name, sizeof( model->name ), "*%d", i );
+
+        tr.models[tr.numModels] = model;
+
+        if ( ++tr.numModels == MAX_MOD_KNOWN )
+        {
+            ri.Printf(PRINT_WARNING, "R_AllocModel: MAX_MOD_KNOWN.\n");
+        }
+        ri.Printf( PRINT_ALL, "Allocate Memory for %s model. \n", model->name);
+
+        for (j=0 ; j<3 ; j++) {
+            out->bounds[0][j] = LittleFloat (in->mins[j]);
+            out->bounds[1][j] = LittleFloat (in->maxs[j]);
+        }
+
+        out->firstSurface = s_worldData.surfaces + LittleLong( in->firstSurface );
+        out->numSurfaces = LittleLong( in->numSurfaces );
+    }
+}
+
+
+
+//==================================================================
+
+/*
+=================
+R_SetParent
+=================
+*/
+static void R_SetParent (mnode_t *node, mnode_t *parent)
+{
+    node->parent = parent;
+    if (node->contents != -1)
+        return;
+    R_SetParent (node->children[0], node);
+    R_SetParent (node->children[1], node);
+}
+
+/*
+=================
+R_LoadNodesAndLeafs
+=================
+*/
+static void R_LoadNodesAndLeafs (lump_t *nodeLump, lump_t *leafLump)
+{
+    ri.Printf (PRINT_ALL, "\n---R_LoadNodesAndLeafs---\n");
+
+    int i, j, p;
+    dnode_t *in;
+    dleaf_t *inLeaf;
+    mnode_t *out;
+    int numNodes, numLeafs;
+
+    in = (dnode_t*) (void *)(fileBase + nodeLump->fileofs);
+    if (nodeLump->filelen % sizeof(dnode_t) ||
+        leafLump->filelen % sizeof(dleaf_t) ) {
+        ri.Error (ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name);
+    }
+    numNodes = nodeLump->filelen / sizeof(dnode_t);
+    numLeafs = leafLump->filelen / sizeof(dleaf_t);
+
+    out = (mnode_t*) ri.Hunk_Alloc ( (numNodes + numLeafs) * sizeof(*out), h_low);
+
+    s_worldData.nodes = out;
+    s_worldData.numnodes = numNodes + numLeafs;
+    s_worldData.numDecisionNodes = numNodes;
+
+    // load nodes
+    for ( i=0 ; i<numNodes ; i++, in++, out++)
+    {
+        for (j=0 ; j<3 ; j++)
+        {
+            out->mins[j] = LittleLong (in->mins[j]);
+            out->maxs[j] = LittleLong (in->maxs[j]);
+        }
+
+        p = LittleLong(in->planeNum);
+        out->plane = s_worldData.planes + p;
+
+        out->contents = CONTENTS_NODE;  // differentiate from leafs
+
+        for (j=0 ; j<2 ; j++)
+        {
+            p = LittleLong (in->children[j]);
+            if (p >= 0)
+                out->children[j] = s_worldData.nodes + p;
+            else
+                out->children[j] = s_worldData.nodes + numNodes + (-1 - p);
+        }
+    }
+
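+    // note on the loop above: a negative child index p encodes a leaf and maps
+    // to leaf number (-1 - p), stored after the numNodes decision nodes in the
+    // combined nodes[] array (e.g. p = -1 -> leaf 0, p = -5 -> leaf 4).
+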
+    // load leafs
+    inLeaf = (dleaf_t*) (void *)(fileBase + leafLump->fileofs);
+    for ( i=0 ; i<numLeafs ; i++, inLeaf++, out++)
+    {
+        for (j=0 ; j<3 ; j++)
+        {
+            out->mins[j] = LittleLong (inLeaf->mins[j]);
+            out->maxs[j] = LittleLong (inLeaf->maxs[j]);
+        }
+
+        out->cluster = LittleLong(inLeaf->cluster);
+        out->area = LittleLong(inLeaf->area);
+
+        if ( out->cluster >= s_worldData.numClusters ) {
+            s_worldData.numClusters = out->cluster + 1;
+        }
+
+        out->firstmarksurface = s_worldData.marksurfaces +
+            LittleLong(inLeaf->firstLeafSurface);
+        out->nummarksurfaces = LittleLong(inLeaf->numLeafSurfaces);
+    }
+
+    // chain descendants
+    R_SetParent (s_worldData.nodes, NULL);
+}
+
+//=============================================================================
+
+/*
+=================
+R_LoadShaders
+=================
+*/
+static void R_LoadShaders( lump_t *l ) {
+    int i, count;
+    dshader_t *in, *out;
+
+    in = (dshader_t*) (void *)(fileBase + l->fileofs);
+    if (l->filelen % sizeof(*in))
+        ri.Error (ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name);
+    count = l->filelen / sizeof(*in);
+    out = (dshader_t*) ri.Hunk_Alloc ( count*sizeof(*out), h_low );
+
+    s_worldData.shaders = out;
+    s_worldData.numShaders = count;
+
+    memcpy( out, in, count*sizeof(*out) );
+
+    for ( i=0 ; i<count ; i++ ) {
+        out[i].surfaceFlags = LittleLong( out[i].surfaceFlags );
+        out[i].contentFlags = LittleLong( out[i].contentFlags );
+    }
+}
+
+/*
+=================
+R_LoadMarksurfaces
+=================
+*/
+static void R_LoadMarksurfaces (lump_t *l)
+{
+    int i, j, count;
+    int *in;
+    msurface_t **out;
+
+    in = (int*) (void *)(fileBase + l->fileofs);
+    if (l->filelen % sizeof(*in))
+        ri.Error (ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name);
+    count = l->filelen / sizeof(*in);
+    out = (msurface_t**) ri.Hunk_Alloc ( count*sizeof(*out), h_low);
+
+    s_worldData.marksurfaces = out;
+    s_worldData.nummarksurfaces = count;
+
+    for ( i=0 ; i<count ; i++)
+    {
+        j = LittleLong(in[i]);
+        out[i] = s_worldData.surfaces + j;
+    }
+}
+
+/*
+=================
+R_LoadPlanes
+=================
+*/
+static void R_LoadPlanes( lump_t *l )
+{
+    int i, j;
+    cplane_t *out;
+    dplane_t *in;
+    int count;
+
+    in = (dplane_t*) (void *)(fileBase + l->fileofs);
+    if (l->filelen % sizeof(*in))
+        ri.Error (ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name);
+    count = l->filelen / sizeof(*in);
+    out = (cplane_t*) ri.Hunk_Alloc ( count*2*sizeof(*out), h_low);
+
+    s_worldData.planes = out;
+    s_worldData.numplanes = count;
+
+    for ( i=0 ; i<count ; i++, in++, out++)
+    {
+        int bits = 0;
+        for (j=0 ; j<3 ; j++)
+        {
+            out->normal[j] = LittleFloat (in->normal[j]);
+            if (out->normal[j] < 0) {
+                bits |= 1<<j;
+            }
+        }
+
+        out->dist = LittleFloat (in->dist);
+        out->type = PlaneTypeForNormal( out->normal );
+        out->signbits = bits;
+    }
+}
+
+/*
+=================
+R_LoadFogs
+
+=================
+*/
+static void R_LoadFogs( lump_t *l, lump_t *brushesLump, lump_t *sidesLump ) {
+    int i;
+    fog_t *out;
+    dfog_t *fogs;
+    dbrush_t *brushes, *brush;
+    dbrushside_t *sides;
+    int count, brushesCount, sidesCount;
+    int sideNum;
+    int planeNum;
+    shader_t *shader;
+    float d;
+    int firstSide;
+
+    fogs = (dfog_t*) (void *)(fileBase + l->fileofs);
+    if (l->filelen % sizeof(*fogs)) {
+        ri.Error (ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name);
+    }
+    count = l->filelen / sizeof(*fogs);
+
+    // create fog structures for them
+    s_worldData.numfogs = count + 1;
+    s_worldData.fogs = (fog_t*) ri.Hunk_Alloc ( s_worldData.numfogs*sizeof(*out), h_low);
+    out = s_worldData.fogs + 1;
+
+    if ( !count ) {
+        return;
+    }
+
+    brushes = (dbrush_t*) (void *)(fileBase + brushesLump->fileofs);
+    if (brushesLump->filelen % sizeof(*brushes)) {
+        ri.Error (ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name);
+    }
+    brushesCount = brushesLump->filelen / sizeof(*brushes);
+
+    sides = (dbrushside_t*) (void *)(fileBase + sidesLump->fileofs);
+    if (sidesLump->filelen % sizeof(*sides)) {
+        ri.Error (ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name);
+    }
+    sidesCount = sidesLump->filelen / sizeof(*sides);
+
+    for ( i=0 ; i<count ; i++, fogs++)
+    {
+        out->originalBrushNumber = LittleLong( fogs->brushNum );
+
+        if ( (unsigned)out->originalBrushNumber >= brushesCount ) {
+            ri.Error( ERR_DROP, "fog brushNumber out of range" );
+        }
+        brush = brushes + out->originalBrushNumber;
+
+        firstSide =
LittleLong( brush->firstSide ); + + if ( (unsigned)firstSide > sidesCount - 6 ) { + ri.Error( ERR_DROP, "fog brush sideNumber out of range" ); + } + + // brushes are always sorted with the axial sides first + sideNum = firstSide + 0; + planeNum = LittleLong( sides[ sideNum ].planeNum ); + out->bounds[0][0] = -s_worldData.planes[ planeNum ].dist; + + sideNum = firstSide + 1; + planeNum = LittleLong( sides[ sideNum ].planeNum ); + out->bounds[1][0] = s_worldData.planes[ planeNum ].dist; + + sideNum = firstSide + 2; + planeNum = LittleLong( sides[ sideNum ].planeNum ); + out->bounds[0][1] = -s_worldData.planes[ planeNum ].dist; + + sideNum = firstSide + 3; + planeNum = LittleLong( sides[ sideNum ].planeNum ); + out->bounds[1][1] = s_worldData.planes[ planeNum ].dist; + + sideNum = firstSide + 4; + planeNum = LittleLong( sides[ sideNum ].planeNum ); + out->bounds[0][2] = -s_worldData.planes[ planeNum ].dist; + + sideNum = firstSide + 5; + planeNum = LittleLong( sides[ sideNum ].planeNum ); + out->bounds[1][2] = s_worldData.planes[ planeNum ].dist; + + // get information from the shader for fog parameters + shader = R_FindShader( fogs->shader, LIGHTMAP_NONE, qtrue ); + + out->parms = shader->fogParms; + + out->colorRGBA[0] = shader->fogParms.color[0] * tr.identityLight * 255; + out->colorRGBA[1] = shader->fogParms.color[1] * tr.identityLight * 255; + out->colorRGBA[2] = shader->fogParms.color[2] * tr.identityLight * 255; + out->colorRGBA[3] = 255; + + d = shader->fogParms.depthForOpaque < 1 ? 1 : shader->fogParms.depthForOpaque; + out->tcScale = 1.0f / ( d * 8 ); + + // set the gradient vector + sideNum = LittleLong( fogs->visibleSide ); + + if ( sideNum == -1 ) { + out->hasSurface = qfalse; + } else { + out->hasSurface = qtrue; + planeNum = LittleLong( sides[ firstSide + sideNum ].planeNum ); + VectorSubtract( vec3_origin, s_worldData.planes[ planeNum ].normal, out->surface ); + out->surface[3] = -s_worldData.planes[ planeNum ].dist; + } + + out++; + } + +} + + +void R_LoadLightGrid( lump_t *l ) +{ + ri.Printf (PRINT_ALL, "\n---R_LoadLightGrid---\n"); + + + int i; + vec3_t maxs; + int numGridPoints; + world_t *w; + float *wMins, *wMaxs; + + w = &s_worldData; + + w->lightGridInverseSize[0] = 1.0f / w->lightGridSize[0]; + w->lightGridInverseSize[1] = 1.0f / w->lightGridSize[1]; + w->lightGridInverseSize[2] = 1.0f / w->lightGridSize[2]; + + wMins = w->bmodels[0].bounds[0]; + wMaxs = w->bmodels[0].bounds[1]; + + for ( i = 0 ; i < 3 ; i++ ) { + w->lightGridOrigin[i] = w->lightGridSize[i] * ceil( wMins[i] / w->lightGridSize[i] ); + maxs[i] = w->lightGridSize[i] * floor( wMaxs[i] / w->lightGridSize[i] ); + w->lightGridBounds[i] = (maxs[i] - w->lightGridOrigin[i])/w->lightGridSize[i] + 1; + } + + numGridPoints = w->lightGridBounds[0] * w->lightGridBounds[1] * w->lightGridBounds[2]; + + if ( l->filelen != numGridPoints * 8 ) { + ri.Printf( PRINT_WARNING, "WARNING: light grid mismatch\n" ); + w->lightGridData = NULL; + return; + } + + w->lightGridData = (byte*) ri.Hunk_Alloc( l->filelen, h_low ); + memcpy( w->lightGridData, (void *)(fileBase + l->fileofs), l->filelen ); + + // deal with overbright bits + for ( i = 0 ; i < numGridPoints ; i++ ) { + R_ColorShiftLightingBytes( &w->lightGridData[i*8], &w->lightGridData[i*8] ); + R_ColorShiftLightingBytes( &w->lightGridData[i*8+3], &w->lightGridData[i*8+3] ); + } +} + +void RE_RemapShader(const char *oldShader, const char *newShader, const char *timeOffset); + + +void R_LoadEntities( lump_t *l ) +{ + + ri.Printf (PRINT_ALL, "\n---R_LoadEntities---\n"); + + + 
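// the entity lump is a plain text block of key/value pairs for the
+	// worldspawn entity; the renderer only consumes the light grid size and
+	// the shader remap directives, and stores the full string so the cgame
+	// can walk it later through RE_GetEntityToken()
+	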
char *p, *token, *s;
+	char keyname[MAX_TOKEN_CHARS];
+	char value[MAX_TOKEN_CHARS];
+	world_t *w;
+
+	w = &s_worldData;
+	w->lightGridSize[0] = 64;
+	w->lightGridSize[1] = 64;
+	w->lightGridSize[2] = 128;
+
+	p = (char *)(fileBase + l->fileofs);
+
+	// store for reference by the cgame
+	w->entityString = (char*) ri.Hunk_Alloc( l->filelen + 1, h_low );
+	strcpy( w->entityString, p );
+	w->entityParsePoint = w->entityString;
+
+	token = R_ParseExt( &p, qtrue );
+	if (!*token || *token != '{') {
+		return;
+	}
+
+	// only parse the world spawn
+	while ( 1 ) {
+		// parse key
+		token = R_ParseExt( &p, qtrue );
+
+		if ( !*token || *token == '}' ) {
+			break;
+		}
+		Q_strncpyz(keyname, token, sizeof(keyname));
+
+		// parse value
+		token = R_ParseExt( &p, qtrue );
+
+		if ( !*token || *token == '}' ) {
+			break;
+		}
+		Q_strncpyz(value, token, sizeof(value));
+
+		// check for remapping of shaders for vertex lighting
+		s = "vertexremapshader";
+		if (!Q_strncmp(keyname, s, strlen(s)) ) {
+			s = strchr(value, ';');
+			if (!s) {
+				ri.Printf( PRINT_WARNING, "WARNING: no semicolon in vertexshaderremap '%s'\n", value );
+				break;
+			}
+			*s++ = 0;
+			if (r_vertexLight->integer) {
+				RE_RemapShader(value, s, "0");
+			}
+			continue;
+		}
+		// check for remapping of shaders
+		s = "remapshader";
+		if (!Q_strncmp(keyname, s, (int)strlen(s)) ) {
+			s = strchr(value, ';');
+			if (!s) {
+				ri.Printf( PRINT_WARNING, "WARNING: no semicolon in shaderremap '%s'\n", value );
+				break;
+			}
+			*s++ = 0;
+			RE_RemapShader(value, s, "0");
+			continue;
+		}
+		// check for a different grid size
+		if (!Q_stricmp(keyname, "gridsize")) {
+			sscanf(value, "%f %f %f", &w->lightGridSize[0], &w->lightGridSize[1], &w->lightGridSize[2] );
+			continue;
+		}
+	}
+}
+
+
+qboolean RE_GetEntityToken( char *buffer, int size )
+{
+	const char* s = R_ParseExt( &s_worldData.entityParsePoint, qtrue);
+	Q_strncpyz( buffer, s, size );
+	if ( !s_worldData.entityParsePoint || !s[0] ) {
+		s_worldData.entityParsePoint = s_worldData.entityString;
+		return qfalse;
+	} else {
+		return qtrue;
+	}
+}
+
+/*
+=================
+RE_LoadWorldMap
+
+Called directly from cgame
+=================
+*/
+
+void RE_LoadWorldMap( const char *name )
+{
+	int i;
+	dheader_t *header;
+	char* buffer;
+	byte *startMarker;
+
+	if ( tr.worldMapLoaded ) {
+		ri.Error( ERR_DROP, "ERROR: attempted to redundantly load world map\n" );
+	}
+
+	// set default sun direction to be used if it isn't
+	// overridden by a shader
+	tr.sunDirection[0] = 0.45f;
+	tr.sunDirection[1] = 0.3f;
+	tr.sunDirection[2] = 0.9f;
+
+	VectorNormalize( tr.sunDirection );
+
+	// load it
+	ri.FS_ReadFile( name, (void**)&buffer );
+	if ( !buffer ) {
+		ri.Error (ERR_DROP, "RE_LoadWorldMap: %s not found", name);
+	}
+
+	// clear tr.world so if the level fails to load, the next
+	// try will not look at the partially loaded version
+	tr.world = NULL;
+
+	memset( &s_worldData, 0, sizeof( s_worldData ) );
+	Q_strncpyz( s_worldData.name, name, sizeof( s_worldData.name ) );
+
+	Q_strncpyz( s_worldData.baseName, R_SkipPath( s_worldData.name ), sizeof( s_worldData.baseName ) );
+	R_StripExtension( s_worldData.baseName, s_worldData.baseName, sizeof(s_worldData.baseName) );
+
+	startMarker = (byte*) ri.Hunk_Alloc(0, h_low);
+
+	header = (dheader_t *)buffer;
+	fileBase = (byte *)header;
+
+	i = LittleLong (header->version);
+	if ( i != BSP_VERSION ) {
+		ri.Error (ERR_DROP, "RE_LoadWorldMap: %s has wrong version number (%i should be %i)",
+			name, i, BSP_VERSION);
+	}
+
+	// swap all the lumps
+	for (i=0 ; i<sizeof(dheader_t)/4 ; i++) {
+		((int *)header)[i] = LittleLong ( ((int *)header)[i]);
+	}
+
+	// load into heap
+	R_LoadShaders( &header->lumps[LUMP_SHADERS] );
+	
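// ordering matters for the rest: planes must be in place before the fogs
+	// that reference them, surfaces and marksurfaces before the leafs that
+	// index into them, and submodels before the light grid, which needs
+	// bmodel 0's bounds
+	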
R_LoadLightmaps( &header->lumps[LUMP_LIGHTMAPS] );
+	R_LoadPlanes (&header->lumps[LUMP_PLANES]);
+	R_LoadFogs( &header->lumps[LUMP_FOGS], &header->lumps[LUMP_BRUSHES], &header->lumps[LUMP_BRUSHSIDES] );
+	R_LoadSurfaces( &header->lumps[LUMP_SURFACES], &header->lumps[LUMP_DRAWVERTS], &header->lumps[LUMP_DRAWINDEXES] );
+	R_LoadMarksurfaces (&header->lumps[LUMP_LEAFSURFACES]);
+	R_LoadNodesAndLeafs (&header->lumps[LUMP_NODES], &header->lumps[LUMP_LEAFS]);
+	R_LoadSubmodels (&header->lumps[LUMP_MODELS]);
+	R_LoadVisibility( &header->lumps[LUMP_VISIBILITY] );
+	R_LoadEntities( &header->lumps[LUMP_ENTITIES] );
+	R_LoadLightGrid( &header->lumps[LUMP_LIGHTGRID] );
+
+	s_worldData.dataSize = (unsigned char *)ri.Hunk_Alloc(0, h_low) - startMarker;
+
+	// only set tr.world now that we know the entire level has loaded properly
+	tr.world = &s_worldData;
+	tr.worldMapLoaded = qtrue;
+	ri.FS_FreeFile( buffer );
+}
diff --git a/code/renderer_vulkan/tr_cmds.c b/code/renderer_vulkan/tr_cmds.c
new file mode 100644
index 0000000000..5e5290f8fd
--- /dev/null
+++ b/code/renderer_vulkan/tr_cmds.c
@@ -0,0 +1,670 @@
+/*
+===========================================================================
+Copyright (C) 1999-2005 Id Software, Inc.
+
+This file is part of Quake III Arena source code.
+
+Quake III Arena source code is free software; you can redistribute it
+and/or modify it under the terms of the GNU General Public License as
+published by the Free Software Foundation; either version 2 of the License,
+or (at your option) any later version.
+
+Quake III Arena source code is distributed in the hope that it will be
+useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Quake III Arena source code; if not, write to the Free Software
+Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+===========================================================================
+*/
+#include "tr_local.h"
+#include "tr_globals.h"
+#include "tr_backend.h"
+#include "tr_cvar.h"
+#include "ref_import.h"
+
+#include "vk_instance.h"
+#include "vk_frame.h"
+#include "vk_screenshot.h"
+#include "vk_shade_geometry.h"
+#include "RB_ShowImages.h"
+#include "R_PrintMat.h"
+#include "tr_light.h"
+
+static renderCommandList_t BE_Commands;
+
+/*
+============
+R_GetCommandBuffer
+
+make sure there is enough command space; if the buffer is full the
+command is dropped and NULL is returned.
+============ +*/ +void* R_GetCommandBuffer( int bytes ) +{ + renderCommandList_t *cmdList = &BE_Commands; + + // always leave room for the end of list command + if ( cmdList->used + bytes + 4 > MAX_RENDER_COMMANDS ) + { + if ( bytes > MAX_RENDER_COMMANDS - 4 ) { + ri.Error( ERR_FATAL, "R_GetCommandBuffer: bad size %i", bytes ); + } + // if we run out of room, just start dropping commands + return NULL; + } + + cmdList->used += bytes; + + return cmdList->cmds + cmdList->used - bytes; +} + + +/* +============= +submits a single 'draw' command into the command queue +============= +*/ +void R_AddDrawSurfCmd( drawSurf_t *drawSurfs, int numDrawSurfs ) +{ + drawSurfsCommand_t* cmd = (drawSurfsCommand_t*) R_GetCommandBuffer( sizeof(drawSurfsCommand_t) ); + if ( !cmd ) { + return; + } + cmd->commandId = RC_DRAW_SURFS; + + cmd->drawSurfs = drawSurfs; + cmd->numDrawSurfs = numDrawSurfs; + + cmd->refdef = tr.refdef; + cmd->viewParms = tr.viewParms; + +} + + +/* +============= +RE_SetColor + +Passing NULL will set the color to white +============= +*/ +void RE_SetColor( const float *rgba ) +{ + if ( !tr.registered ) { + return; + } + + setColorCommand_t* cmd = (setColorCommand_t*) R_GetCommandBuffer( sizeof(setColorCommand_t) ); + if ( !cmd ) { + return; + } + cmd->commandId = RC_SET_COLOR; + + if(rgba) + { + cmd->color[0] = rgba[0]; + cmd->color[1] = rgba[1]; + cmd->color[2] = rgba[2]; + cmd->color[3] = rgba[3]; + } + else + { + // color white + cmd->color[0] = 1.0f; + cmd->color[1] = 1.0f; + cmd->color[2] = 1.0f; + cmd->color[3] = 1.0f; + } +} + + +void RE_StretchPic ( float x, float y, float w, float h, + float s1, float t1, float s2, float t2, qhandle_t hShader ) +{ + if (!tr.registered) { + return; + } + stretchPicCommand_t* cmd = (stretchPicCommand_t*) R_GetCommandBuffer(sizeof(stretchPicCommand_t)); + if ( !cmd ) { + return; + } + cmd->commandId = RC_STRETCH_PIC; + cmd->shader = R_GetShaderByHandle( hShader ); + cmd->x = x; + cmd->y = y; + cmd->w = w; + cmd->h = h; + cmd->s1 = s1; + cmd->t1 = t1; + cmd->s2 = s2; + cmd->t2 = t2; +} + + +void RE_BeginFrame( stereoFrame_t stereoFrame ) +{ + + if ( !tr.registered ) { + return; + } + + // use the other buffers next frame, because another CPU + // may still be rendering into the current ones + // draw buffer stuff + drawBufferCommand_t* cmd = (drawBufferCommand_t*) R_GetCommandBuffer(sizeof(drawBufferCommand_t)); + if ( !cmd ) { + return; + } + cmd->commandId = RC_DRAW_BUFFER; +} + + +/* +============= +RE_EndFrame + +Returns the number of msec spent in the back end +============= +*/ +void RE_EndFrame( int *frontEndMsec, int *backEndMsec ) +{ + if ( !tr.registered ) { + return; + } + swapBuffersCommand_t* cmd = (swapBuffersCommand_t*) R_GetCommandBuffer(sizeof(swapBuffersCommand_t)); + if ( !cmd ) { + return; + } + cmd->commandId = RC_SWAP_BUFFERS; + + R_IssueRenderCommands( qtrue ); + + R_InitNextFrame(); + + if ( frontEndMsec ) { + *frontEndMsec = tr.frontEndMsec; + } + tr.frontEndMsec = 0; + if ( backEndMsec ) { + *backEndMsec = backEnd.pc.msec; + } + backEnd.pc.msec = 0; +} + + + + +/* +================== +RB_RenderDrawSurfList +================== +*/ +static void RB_RenderDrawSurfList( drawSurf_t* drawSurfs, int numDrawSurfs ) +{ + shader_t *shader, *oldShader; + int fogNum, oldFogNum; + int dlighted, oldDlighted; + // save original time for entity shader offsets + float originalTime = backEnd.refdef.floatTime; + + // Any mirrored or portaled views have already been drawn, + // so prepare to actually render the visible surfaces for this view + 
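// drawSurfs arrive sorted by the shader/entity/fog keys packed into
+	// drawSurf->sort (see R_DecomposeSort), so the loop below can batch
+	// consecutive surfaces with identical state into a single tess run
+	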
// clear the z buffer, set the modelview, etc
+	// RB_BeginDrawingView ();
+
+	// we will need to change the projection matrix before drawing
+	// 2D images again
+	backEnd.projection2D = qfalse;
+
+
+	// ensures that depth writes are enabled for the depth clear
+
+
+	// VULKAN
+	vk_clearDepthStencilAttachments();
+
+	if ( backEnd.refdef.rd.rdflags & RDF_HYPERSPACE )
+	{
+		//RB_Hyperspace();
+		// A player has predicted a teleport, but hasn't arrived yet
+		const float c = ( backEnd.refdef.rd.time & 255 ) / 255.0f;
+		const float color[4] = { c, c, c, 1 };
+
+		// so short, do we really need this?
+		vk_clearColorAttachments(color);
+
+		backEnd.isHyperspace = qtrue;
+	}
+	else
+	{
+		backEnd.isHyperspace = qfalse;
+	}
+
+
+	// draw everything
+	int entityNum;
+	int oldEntityNum = -1;
+	backEnd.currentEntity = &tr.worldEntity;
+	oldShader = NULL;
+	oldFogNum = -1;
+	oldDlighted = qfalse;
+	int oldSort = -1;
+
+	backEnd.pc.c_surfaces += numDrawSurfs;
+
+	drawSurf_t* drawSurf;
+
+	int i;
+
+	for (i = 0, drawSurf = drawSurfs ; i < numDrawSurfs ; i++, drawSurf++)
+	{
+		if ( (int)drawSurf->sort == oldSort ) {
+			// fast path, same as previous sort
+			rb_surfaceTable[ *drawSurf->surface ]( drawSurf->surface );
+			continue;
+		}
+		oldSort = drawSurf->sort;
+		R_DecomposeSort( drawSurf->sort, &entityNum, &shader, &fogNum, &dlighted );
+
+		//
+		// change the tess parameters if needed
+		// an "entityMergable" shader is a shader that can have surfaces from separate
+		// entities merged into a single batch, like smoke and blood puff sprites
+		if (shader != oldShader || fogNum != oldFogNum || dlighted != oldDlighted
+			|| ( entityNum != oldEntityNum && !shader->entityMergable ) ) {
+			if (oldShader != NULL) {
+				RB_EndSurface();
+			}
+			RB_BeginSurface( shader, fogNum );
+			oldShader = shader;
+			oldFogNum = fogNum;
+			oldDlighted = dlighted;
+		}
+
+		//
+		// change the modelview matrix if needed
+		//
+		if ( entityNum != oldEntityNum )
+		{
+			if ( entityNum != REFENTITYNUM_WORLD )
+			{
+				backEnd.currentEntity = &backEnd.refdef.entities[entityNum];
+				backEnd.refdef.floatTime = originalTime - backEnd.currentEntity->e.shaderTime;
+				// we have to reset the shaderTime as well otherwise image animations start
+				// from the wrong frame
+				tess.shaderTime = backEnd.refdef.floatTime - tess.shader->timeOffset;
+
+				// set up the transformation matrix
+				R_RotateForEntity( backEnd.currentEntity, &backEnd.viewParms, &backEnd.or );
+
+
+				// set up the dynamic lighting if needed
+				if ( backEnd.currentEntity->needDlights ) {
+					R_TransformDlights( backEnd.refdef.num_dlights, backEnd.refdef.dlights, &backEnd.or );
+				}
+
+				if ( backEnd.currentEntity->e.renderfx & RF_DEPTHHACK ) {
+					// hack the depth range to prevent view model from poking into walls
+				}
+			}
+			else
+			{
+				backEnd.currentEntity = &tr.worldEntity;
+				backEnd.refdef.floatTime = originalTime;
+				backEnd.or = backEnd.viewParms.world;
+				// we have to reset the shaderTime as well otherwise image animations on
+				// the world (like water) continue with the wrong frame
+				tess.shaderTime = backEnd.refdef.floatTime - tess.shader->timeOffset;
+				R_TransformDlights( backEnd.refdef.num_dlights, backEnd.refdef.dlights, &backEnd.or );
+			}
+
+
+			// VULKAN
+			set_modelview_matrix(backEnd.or.modelMatrix);
+			oldEntityNum = entityNum;
+		}
+
+		// add the triangles for this surface
+		rb_surfaceTable[ *drawSurf->surface ]( drawSurf->surface );
+	}
+
+	backEnd.refdef.floatTime = originalTime;
+
+	// draw the contents of the last shader batch
+	if (oldShader != NULL) {
+		RB_EndSurface();
+	}
+
+	// go back to 
the world modelview matrix
+	set_modelview_matrix(backEnd.viewParms.world.modelMatrix);
+
+
+	// darken down any stencil shadows
+	RB_ShadowFinish();
+}
+
+
+
+void RB_StretchPic( const stretchPicCommand_t * const cmd )
+{
+
+	if ( qfalse == backEnd.projection2D )
+	{
+
+		backEnd.projection2D = qtrue;
+
+		// set 2D virtual screen size
+		// set time for 2D shaders
+		int t = ri.Milliseconds();
+
+		backEnd.refdef.rd.time = t;
+		backEnd.refdef.floatTime = t * 0.001f;
+	}
+
+
+	if ( cmd->shader != tess.shader )
+	{
+		if ( tess.numIndexes ) {
+			RB_EndSurface();
+		}
+		backEnd.currentEntity = &backEnd.entity2D;
+		RB_BeginSurface(cmd->shader, 0 );
+	}
+
+	RB_CHECKOVERFLOW( 4, 6 );
+
+
+	const unsigned int n0 = tess.numVertexes;
+	const unsigned int n1 = n0 + 1;
+	const unsigned int n2 = n0 + 2;
+	const unsigned int n3 = n0 + 3;
+
+
+	uint32_t numIndexes = tess.numIndexes;
+
+	tess.indexes[ numIndexes ] = n3;
+	tess.indexes[ numIndexes + 1 ] = n0;
+	tess.indexes[ numIndexes + 2 ] = n2;
+	tess.indexes[ numIndexes + 3 ] = n2;
+	tess.indexes[ numIndexes + 4 ] = n0;
+	tess.indexes[ numIndexes + 5 ] = n1;
+
+
+	// TODO: verify whether writing the colors this way is faster in release
+	// builds; in debug builds the four extra memcpy calls add function-call
+	// overhead.
+	memcpy(tess.vertexColors[ n0 ], backEnd.Color2D, 4);
+	memcpy(tess.vertexColors[ n1 ], backEnd.Color2D, 4);
+	memcpy(tess.vertexColors[ n2 ], backEnd.Color2D, 4);
+	memcpy(tess.vertexColors[ n3 ], backEnd.Color2D, 4);
+
+
+	tess.xyz[ n0 ][0] = cmd->x;
+	tess.xyz[ n0 ][1] = cmd->y;
+	tess.xyz[ n0 ][2] = 0;
+	tess.xyz[ n1 ][0] = cmd->x + cmd->w;
+	tess.xyz[ n1 ][1] = cmd->y;
+	tess.xyz[ n1 ][2] = 0;
+	tess.xyz[ n2 ][0] = cmd->x + cmd->w;
+	tess.xyz[ n2 ][1] = cmd->y + cmd->h;
+	tess.xyz[ n2 ][2] = 0;
+	tess.xyz[ n3 ][0] = cmd->x;
+	tess.xyz[ n3 ][1] = cmd->y + cmd->h;
+	tess.xyz[ n3 ][2] = 0;
+
+
+	tess.texCoords[ n0 ][0][0] = cmd->s1;
+	tess.texCoords[ n0 ][0][1] = cmd->t1;
+
+	tess.texCoords[ n1 ][0][0] = cmd->s2;
+	tess.texCoords[ n1 ][0][1] = cmd->t1;
+
+	tess.texCoords[ n2 ][0][0] = cmd->s2;
+	tess.texCoords[ n2 ][0][1] = cmd->t2;
+
+	tess.texCoords[ n3 ][0][0] = cmd->s1;
+	tess.texCoords[ n3 ][0][1] = cmd->t2;
+
+	tess.numVertexes += 4;
+	tess.numIndexes += 6;
+
+}
+
+
+
+static void R_PerformanceCounters( void )
+{
+
+	if (r_speeds->integer == 1) {
+		ri.Printf (PRINT_ALL, "%i/%i shaders/surfs %i leafs %i verts %i/%i tris\n",
+			backEnd.pc.c_shaders, backEnd.pc.c_surfaces, tr.pc.c_leafs, backEnd.pc.c_vertexes,
+			backEnd.pc.c_indexes/3, backEnd.pc.c_totalIndexes/3);
+	} else if (r_speeds->integer == 2) {
+		ri.Printf (PRINT_ALL, "(patch) %i sin %i sclip %i sout %i bin %i bclip %i bout\n",
+			tr.pc.c_sphere_cull_patch_in, tr.pc.c_sphere_cull_patch_clip, tr.pc.c_sphere_cull_patch_out,
+			tr.pc.c_box_cull_patch_in, tr.pc.c_box_cull_patch_clip, tr.pc.c_box_cull_patch_out );
+		ri.Printf (PRINT_ALL, "(md3) %i sin %i sclip %i sout %i bin %i bclip %i bout\n",
+			tr.pc.c_sphere_cull_md3_in, tr.pc.c_sphere_cull_md3_clip, tr.pc.c_sphere_cull_md3_out,
+			tr.pc.c_box_cull_md3_in, tr.pc.c_box_cull_md3_clip, tr.pc.c_box_cull_md3_out );
+	} else if (r_speeds->integer == 3) {
+		ri.Printf (PRINT_ALL, "viewcluster: %i\n", tr.viewCluster );
+	} else if (r_speeds->integer == 4) {
+		if ( backEnd.pc.c_dlightVertexes ) {
+			ri.Printf (PRINT_ALL, "dlight srf:%i culled:%i verts:%i tris:%i\n",
+				tr.pc.c_dlightSurfaces, tr.pc.c_dlightSurfacesCulled,
+				backEnd.pc.c_dlightVertexes, backEnd.pc.c_dlightIndexes / 3 );
+		}
+	}
+
+	memset( &tr.pc, 0, sizeof( tr.pc ) );
+	
memset( &backEnd.pc, 0, sizeof( backEnd.pc ) ); +} + +/* +==================== +This function will be called synchronously if running without +smp extensions, or asynchronously by another thread. +==================== +*/ +void R_IssueRenderCommands( qboolean runPerformanceCounters ) +{ + + if(runPerformanceCounters) + { + R_PerformanceCounters(); + } + + // actually start the commands going + // let it start on the new batch + // RB_ExecuteRenderCommands( cmdList->cmds ); + int t1 = ri.Milliseconds (); + + // add an end-of-list command + *(int *)(BE_Commands.cmds + BE_Commands.used) = RC_END_OF_LIST; + + + const void * data = BE_Commands.cmds; + + + while(1) + { + const int T = *(const int *)data; + switch ( T ) + { + case RC_SET_COLOR: + { + const setColorCommand_t * const cmd = data; + + backEnd.Color2D[0] = cmd->color[0] * 255; + backEnd.Color2D[1] = cmd->color[1] * 255; + backEnd.Color2D[2] = cmd->color[2] * 255; + backEnd.Color2D[3] = cmd->color[3] * 255; + + data += sizeof(setColorCommand_t); + } break; + + case RC_STRETCH_PIC: + { + const stretchPicCommand_t * const cmd = data; + + RB_StretchPic( cmd ); + + data += sizeof(stretchPicCommand_t); + } break; + + case RC_DRAW_SURFS: + { + const drawSurfsCommand_t * const cmd = (const drawSurfsCommand_t *)data; + + // RB_DrawSurfs( cmd ); + // finish any 2D drawing if needed + if ( tess.numIndexes ) { + RB_EndSurface(); + } + + backEnd.refdef = cmd->refdef; + backEnd.viewParms = cmd->viewParms; + + RB_RenderDrawSurfList( cmd->drawSurfs, cmd->numDrawSurfs ); + + data += sizeof(drawSurfsCommand_t); + } break; + + case RC_DRAW_BUFFER: + { + // data = RB_DrawBuffer( data ); + // const drawBufferCommand_t * const cmd = (const drawBufferCommand_t *)data; + vk_resetGeometryBuffer(); + + // VULKAN + vk_begin_frame(); + + data += sizeof(drawBufferCommand_t); + + // begin_frame_called = qtrue; + } break; + + case RC_SWAP_BUFFERS: + { + // data = RB_SwapBuffers( data ); + // finish any 2D drawing if needed + RB_EndSurface(); + + // texture swapping test + if ( r_showImages->integer ) { + RB_ShowImages(tr.images, tr.numImages); + } + + // VULKAN + vk_end_frame(); + + data += sizeof(swapBuffersCommand_t); + } break; + + case RC_SCREENSHOT: + { + const screenshotCommand_t * const cmd = data; + + RB_TakeScreenshot( cmd->width, cmd->height, cmd->fileName, cmd->jpeg); + + data += sizeof(screenshotCommand_t); + } break; + + + case RC_VIDEOFRAME: + { + const videoFrameCommand_t * const cmd = data; + + RB_TakeVideoFrameCmd( cmd ); + + data += sizeof(videoFrameCommand_t); + } break; + + case RC_END_OF_LIST: + // stop rendering on this thread + backEnd.pc.msec = ri.Milliseconds () - t1; + + BE_Commands.used = 0; + return; + } + } +} + + +/* +============= + +FixRenderCommandList +https://zerowing.idsoftware.com/bugzilla/show_bug.cgi?id=493 +Arnout: this is a nasty issue. Shaders can be registered after drawsurfaces are generated +but before the frame is rendered. This will, for the duration of one frame, cause drawsurfaces +to be rendered with bad shaders. To fix this, need to go through all render commands and fix +sortedIndex. 
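+Only RC_DRAW_SURFS commands need patching: every sorted shader index at or
+above the newly registered one is shifted up by one inside drawSurf->sort.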
+==============
+*/
+void FixRenderCommandList( int newShader )
+{
+	renderCommandList_t *cmdList = &BE_Commands;
+
+	if( cmdList ) {
+		const void *curCmd = cmdList->cmds;
+
+		while ( 1 ) {
+			switch ( *(const int *)curCmd ) {
+			case RC_SET_COLOR:
+			{
+				const setColorCommand_t *sc_cmd = (const setColorCommand_t *)curCmd;
+				curCmd = (const void *)(sc_cmd + 1);
+				break;
+			}
+			case RC_STRETCH_PIC:
+			{
+				const stretchPicCommand_t *sp_cmd = (const stretchPicCommand_t *)curCmd;
+				curCmd = (const void *)(sp_cmd + 1);
+				break;
+			}
+			case RC_DRAW_SURFS:
+			{
+				int i;
+				drawSurf_t *drawSurf;
+				shader_t *shader;
+				int fogNum;
+				int entityNum;
+				int dlightMap;
+				int sortedIndex;
+				const drawSurfsCommand_t *ds_cmd = (const drawSurfsCommand_t *)curCmd;
+
+				for( i = 0, drawSurf = ds_cmd->drawSurfs; i < ds_cmd->numDrawSurfs; i++, drawSurf++ ) {
+					R_DecomposeSort( drawSurf->sort, &entityNum, &shader, &fogNum, &dlightMap );
+					sortedIndex = (( drawSurf->sort >> QSORT_SHADERNUM_SHIFT ) & (MAX_SHADERS-1));
+					if( sortedIndex >= newShader ) {
+						sortedIndex++;
+						drawSurf->sort = (sortedIndex << QSORT_SHADERNUM_SHIFT) | entityNum | ( fogNum << QSORT_FOGNUM_SHIFT ) | (int)dlightMap;
+					}
+				}
+				curCmd = (const void *)(ds_cmd + 1);
+				break;
+			}
+			case RC_DRAW_BUFFER:
+			{
+				const drawBufferCommand_t *db_cmd = (const drawBufferCommand_t *)curCmd;
+				curCmd = (const void *)(db_cmd + 1);
+				break;
+			}
+			case RC_SWAP_BUFFERS:
+			{
+				const swapBuffersCommand_t *sb_cmd = (const swapBuffersCommand_t *)curCmd;
+				curCmd = (const void *)(sb_cmd + 1);
+				break;
+			}
+			case RC_END_OF_LIST:
+			default:
+				return;
+			}
+		}
+	}
+}
diff --git a/code/renderer_vulkan/tr_curve.c b/code/renderer_vulkan/tr_curve.c
new file mode 100644
index 0000000000..444d7106d2
--- /dev/null
+++ b/code/renderer_vulkan/tr_curve.c
@@ -0,0 +1,627 @@
+/*
+===========================================================================
+Copyright (C) 1999-2005 Id Software, Inc.
+
+This file is part of Quake III Arena source code.
+
+Quake III Arena source code is free software; you can redistribute it
+and/or modify it under the terms of the GNU General Public License as
+published by the Free Software Foundation; either version 2 of the License,
+or (at your option) any later version.
+
+Quake III Arena source code is distributed in the hope that it will be
+useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Quake III Arena source code; if not, write to the Free Software
+Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+===========================================================================
+*/
+
+#include "tr_local.h"
+#include "tr_cvar.h"
+#include "ref_import.h"
+
+
+/*
+
+This file does all of the processing necessary to turn a raw grid of points
+read from the map file into a srfGridMesh_t ready for rendering.
+
+The level of detail solution is direction independent, based only on subdivided
+distance from the true curve.
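+
+For a quadratic span with control points a, b and c, the point on the curve at
+the midpoint parameter is (a + 2b + c) / 4; R_SubdividePatchToGrid measures how
+far that point lies from the chord between a and c, and keeps inserting midpoint
+rows and columns (via LerpDrawVert) until the error drops below the
+r_subdivisions cvar.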
+ +Only a single entry point: + +srfGridMesh_t *R_SubdividePatchToGrid( int width, int height, + drawVert_t points[MAX_PATCH_SIZE*MAX_PATCH_SIZE] ) { + +*/ + + +/* +============ +LerpDrawVert +============ +*/ +static void LerpDrawVert( drawVert_t *a, drawVert_t *b, drawVert_t *out ) { + out->xyz[0] = 0.5f * (a->xyz[0] + b->xyz[0]); + out->xyz[1] = 0.5f * (a->xyz[1] + b->xyz[1]); + out->xyz[2] = 0.5f * (a->xyz[2] + b->xyz[2]); + + out->st[0] = 0.5f * (a->st[0] + b->st[0]); + out->st[1] = 0.5f * (a->st[1] + b->st[1]); + + out->lightmap[0] = 0.5f * (a->lightmap[0] + b->lightmap[0]); + out->lightmap[1] = 0.5f * (a->lightmap[1] + b->lightmap[1]); + + out->color[0] = (a->color[0] + b->color[0]) >> 1; + out->color[1] = (a->color[1] + b->color[1]) >> 1; + out->color[2] = (a->color[2] + b->color[2]) >> 1; + out->color[3] = (a->color[3] + b->color[3]) >> 1; +} + +/* +============ +Transpose +============ +*/ +static void Transpose( int width, int height, drawVert_t ctrl[MAX_GRID_SIZE][MAX_GRID_SIZE] ) { + int i, j; + drawVert_t temp; + + if ( width > height ) { + for ( i = 0 ; i < height ; i++ ) { + for ( j = i + 1 ; j < width ; j++ ) { + if ( j < height ) { + // swap the value + temp = ctrl[j][i]; + ctrl[j][i] = ctrl[i][j]; + ctrl[i][j] = temp; + } else { + // just copy + ctrl[j][i] = ctrl[i][j]; + } + } + } + } else { + for ( i = 0 ; i < width ; i++ ) { + for ( j = i + 1 ; j < height ; j++ ) { + if ( j < width ) { + // swap the value + temp = ctrl[i][j]; + ctrl[i][j] = ctrl[j][i]; + ctrl[j][i] = temp; + } else { + // just copy + ctrl[i][j] = ctrl[j][i]; + } + } + } + } + +} + + +/* +================= +MakeMeshNormals + +Handles all the complicated wrapping and degenerate cases +================= +*/ +static void MakeMeshNormals( int width, int height, drawVert_t ctrl[MAX_GRID_SIZE][MAX_GRID_SIZE] ) { + int i, j, k, dist; + vec3_t normal; + vec3_t sum; + int count; + vec3_t base; + vec3_t delta; + int x, y; + drawVert_t *dv; + vec3_t around[8], temp; + qboolean good[8]; + qboolean wrapWidth, wrapHeight; + float len; +static int neighbors[8][2] = { + {0,1}, {1,1}, {1,0}, {1,-1}, {0,-1}, {-1,-1}, {-1,0}, {-1,1} + }; + + wrapWidth = qfalse; + for ( i = 0 ; i < height ; i++ ) { + VectorSubtract( ctrl[i][0].xyz, ctrl[i][width-1].xyz, delta ); + len = VectorLengthSquared( delta ); + if ( len > 1.0 ) { + break; + } + } + if ( i == height ) { + wrapWidth = qtrue; + } + + wrapHeight = qfalse; + for ( i = 0 ; i < width ; i++ ) { + VectorSubtract( ctrl[0][i].xyz, ctrl[height-1][i].xyz, delta ); + len = VectorLengthSquared( delta ); + if ( len > 1.0 ) { + break; + } + } + if ( i == width) { + wrapHeight = qtrue; + } + + + for ( i = 0 ; i < width ; i++ ) { + for ( j = 0 ; j < height ; j++ ) { + count = 0; + dv = &ctrl[j][i]; + VectorCopy( dv->xyz, base ); + for ( k = 0 ; k < 8 ; k++ ) { + VectorClear( around[k] ); + good[k] = qfalse; + + for ( dist = 1 ; dist <= 3 ; dist++ ) { + x = i + neighbors[k][0] * dist; + y = j + neighbors[k][1] * dist; + if ( wrapWidth ) { + if ( x < 0 ) { + x = width - 1 + x; + } else if ( x >= width ) { + x = 1 + x - width; + } + } + if ( wrapHeight ) { + if ( y < 0 ) { + y = height - 1 + y; + } else if ( y >= height ) { + y = 1 + y - height; + } + } + + if ( x < 0 || x >= width || y < 0 || y >= height ) { + break; // edge of patch + } + VectorSubtract( ctrl[y][x].xyz, base, temp ); + if ( VectorNormalize2( temp, temp ) == 0 ) { + continue; // degenerate edge, get more dist + } else { + good[k] = qtrue; + VectorCopy( temp, around[k] ); + break; // good edge + } + } + } + + 
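// average the normals of the (up to 8) triangles formed by adjacent good
+			// 'around' vectors; the CrossProduct argument order keeps the
+			// results on a consistent side of the surface
+			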
VectorClear( sum ); + for ( k = 0 ; k < 8 ; k++ ) { + if ( !good[k] || !good[(k+1)&7] ) { + continue; // didn't get two points + } + CrossProduct( around[(k+1)&7], around[k], normal ); + if ( VectorNormalize2( normal, normal ) == 0 ) { + continue; + } + VectorAdd( normal, sum, sum ); + count++; + } + if ( count == 0 ) { +//printf("bad normal\n"); + count = 1; + } + VectorNormalize2( sum, dv->normal ); + } + } +} + + +/* +============ +InvertCtrl +============ +*/ +static void InvertCtrl( int width, int height, drawVert_t ctrl[MAX_GRID_SIZE][MAX_GRID_SIZE] ) { + int i, j; + drawVert_t temp; + + for ( i = 0 ; i < height ; i++ ) { + for ( j = 0 ; j < width/2 ; j++ ) { + temp = ctrl[i][j]; + ctrl[i][j] = ctrl[i][width-1-j]; + ctrl[i][width-1-j] = temp; + } + } +} + + +/* +================= +InvertErrorTable +================= +*/ +static void InvertErrorTable( float errorTable[2][MAX_GRID_SIZE], int width, int height ) { + int i; + float copy[2][MAX_GRID_SIZE]; + + memcpy( copy, errorTable, sizeof( copy ) ); + + for ( i = 0 ; i < width ; i++ ) { + errorTable[1][i] = copy[0][i]; //[width-1-i]; + } + + for ( i = 0 ; i < height ; i++ ) { + errorTable[0][i] = copy[1][height-1-i]; + } + +} + +/* +================== +PutPointsOnCurve +================== +*/ +static void PutPointsOnCurve( drawVert_t ctrl[MAX_GRID_SIZE][MAX_GRID_SIZE], + int width, int height ) { + int i, j; + drawVert_t prev, next; + + for ( i = 0 ; i < width ; i++ ) { + for ( j = 1 ; j < height ; j += 2 ) { + LerpDrawVert( &ctrl[j][i], &ctrl[j+1][i], &prev ); + LerpDrawVert( &ctrl[j][i], &ctrl[j-1][i], &next ); + LerpDrawVert( &prev, &next, &ctrl[j][i] ); + } + } + + + for ( j = 0 ; j < height ; j++ ) { + for ( i = 1 ; i < width ; i += 2 ) { + LerpDrawVert( &ctrl[j][i], &ctrl[j][i+1], &prev ); + LerpDrawVert( &ctrl[j][i], &ctrl[j][i-1], &next ); + LerpDrawVert( &prev, &next, &ctrl[j][i] ); + } + } +} + +/* +================= +R_CreateSurfaceGridMesh +================= +*/ +srfGridMesh_t *R_CreateSurfaceGridMesh(int width, int height, + drawVert_t ctrl[MAX_GRID_SIZE][MAX_GRID_SIZE], float errorTable[2][MAX_GRID_SIZE] ) { + int i, j, size; + drawVert_t *vert; + vec3_t tmpVec; + srfGridMesh_t *grid; + + // copy the results out to a grid + size = (width * height - 1) * sizeof( drawVert_t ) + sizeof( *grid ); + +#ifdef PATCH_STITCHING + grid = /*ri.Hunk_Alloc*/ (srfGridMesh_t*) ri.Malloc( size ); + memset(grid, 0, size); + + grid->widthLodError = /*ri.Hunk_Alloc*/ (float*) ri.Malloc( width * 4 ); + memcpy( grid->widthLodError, errorTable[0], width * 4 ); + + grid->heightLodError = /*ri.Hunk_Alloc*/ (float*) ri.Malloc( height * 4 ); + memcpy( grid->heightLodError, errorTable[1], height * 4 ); +#else + grid = ri.Hunk_Alloc( size ); + memset(grid, 0, size); + + grid->widthLodError = ri.Hunk_Alloc( width * 4 ); + memcpy( grid->widthLodError, errorTable[0], width * 4 ); + + grid->heightLodError = ri.Hunk_Alloc( height * 4 ); + memcpy( grid->heightLodError, errorTable[1], height * 4 ); +#endif + + grid->width = width; + grid->height = height; + grid->surfaceType = SF_GRID; + ClearBounds( grid->meshBounds[0], grid->meshBounds[1] ); + for ( i = 0 ; i < width ; i++ ) { + for ( j = 0 ; j < height ; j++ ) { + vert = &grid->verts[j*width+i]; + *vert = ctrl[j][i]; + AddPointToBounds( vert->xyz, grid->meshBounds[0], grid->meshBounds[1] ); + } + } + + // compute local origin and bounds + VectorAdd( grid->meshBounds[0], grid->meshBounds[1], grid->localOrigin ); + VectorScale( grid->localOrigin, 0.5f, grid->localOrigin ); + VectorSubtract( 
grid->meshBounds[0], grid->localOrigin, tmpVec ); + grid->meshRadius = VectorLength( tmpVec ); + + VectorCopy( grid->localOrigin, grid->lodOrigin ); + grid->lodRadius = grid->meshRadius; + // + return grid; +} + +/* +================= +R_FreeSurfaceGridMesh +================= +*/ +void R_FreeSurfaceGridMesh( srfGridMesh_t *grid ) { + ri.Free(grid->widthLodError); + ri.Free(grid->heightLodError); + ri.Free(grid); +} + +/* +================= +R_SubdividePatchToGrid +================= +*/ +srfGridMesh_t *R_SubdividePatchToGrid( int width, int height, + drawVert_t points[MAX_PATCH_SIZE*MAX_PATCH_SIZE] ) { + int i, j, k, l; + drawVert_t prev, next, mid; + float len, maxLen; + int dir; + int t; + drawVert_t ctrl[MAX_GRID_SIZE][MAX_GRID_SIZE]; + float errorTable[2][MAX_GRID_SIZE]; + + for ( i = 0 ; i < width ; i++ ) { + for ( j = 0 ; j < height ; j++ ) { + ctrl[j][i] = points[j*width+i]; + } + } + + for ( dir = 0 ; dir < 2 ; dir++ ) { + + for ( j = 0 ; j < MAX_GRID_SIZE ; j++ ) { + errorTable[dir][j] = 0; + } + + // horizontal subdivisions + for ( j = 0 ; j + 2 < width ; j += 2 ) { + // check subdivided midpoints against control points + + // FIXME: also check midpoints of adjacent patches against the control points + // this would basically stitch all patches in the same LOD group together. + + maxLen = 0; + for ( i = 0 ; i < height ; i++ ) { + vec3_t midxyz; + vec3_t midxyz2; + vec3_t lineDir; + vec3_t projected; + float d; + + // calculate the point on the curve + for ( l = 0 ; l < 3 ; l++ ) { + midxyz[l] = (ctrl[i][j].xyz[l] + ctrl[i][j+1].xyz[l] * 2 + + ctrl[i][j+2].xyz[l] ) * 0.25f; + } + + // see how far off the line it is + // using dist-from-line will not account for internal + // texture warping, but it gives a lot less polygons than + // dist-from-midpoint + VectorSubtract( midxyz, ctrl[i][j].xyz, midxyz ); + VectorSubtract( ctrl[i][j+2].xyz, ctrl[i][j].xyz, lineDir ); + VectorNormalize( lineDir ); + + d = DotProduct( midxyz, lineDir ); + VectorScale( lineDir, d, projected ); + VectorSubtract( midxyz, projected, midxyz2); + len = VectorLengthSquared( midxyz2 ); // we will do the sqrt later + if ( len > maxLen ) { + maxLen = len; + } + } + + maxLen = sqrt(maxLen); + + // if all the points are on the lines, remove the entire columns + if ( maxLen < 0.1f ) { + errorTable[dir][j+1] = 999; + continue; + } + + // see if we want to insert subdivided columns + if ( width + 2 > MAX_GRID_SIZE ) { + errorTable[dir][j+1] = 1.0f/maxLen; + continue; // can't subdivide any more + } + + if ( maxLen <= r_subdivisions->value ) { + errorTable[dir][j+1] = 1.0f/maxLen; + continue; // didn't need subdivision + } + + errorTable[dir][j+2] = 1.0f/maxLen; + + // insert two columns and replace the peak + width += 2; + for ( i = 0 ; i < height ; i++ ) { + LerpDrawVert( &ctrl[i][j], &ctrl[i][j+1], &prev ); + LerpDrawVert( &ctrl[i][j+1], &ctrl[i][j+2], &next ); + LerpDrawVert( &prev, &next, &mid ); + + for ( k = width - 1 ; k > j + 3 ; k-- ) { + ctrl[i][k] = ctrl[i][k-2]; + } + ctrl[i][j + 1] = prev; + ctrl[i][j + 2] = mid; + ctrl[i][j + 3] = next; + } + + // back up and recheck this set again, it may need more subdivision + j -= 2; + + } + + Transpose( width, height, ctrl ); + t = width; + width = height; + height = t; + } + + + // put all the aproximating points on the curve + PutPointsOnCurve( ctrl, width, height ); + + // cull out any rows or columns that are colinear + for ( i = 1 ; i < width-1 ; i++ ) { + if ( errorTable[0][i] != 999 ) { + continue; + } + for ( j = i+1 ; j < width ; j++ ) { + for ( k = 0 ; k 
< height ; k++ ) { + ctrl[k][j-1] = ctrl[k][j]; + } + errorTable[0][j-1] = errorTable[0][j]; + } + width--; + } + + for ( i = 1 ; i < height-1 ; i++ ) { + if ( errorTable[1][i] != 999 ) { + continue; + } + for ( j = i+1 ; j < height ; j++ ) { + for ( k = 0 ; k < width ; k++ ) { + ctrl[j-1][k] = ctrl[j][k]; + } + errorTable[1][j-1] = errorTable[1][j]; + } + height--; + } + +#if 1 + // flip for longest tristrips as an optimization + // the results should be visually identical with or + // without this step + if ( height > width ) { + Transpose( width, height, ctrl ); + InvertErrorTable( errorTable, width, height ); + t = width; + width = height; + height = t; + InvertCtrl( width, height, ctrl ); + } +#endif + + // calculate normals + MakeMeshNormals( width, height, ctrl ); + + return R_CreateSurfaceGridMesh( width, height, ctrl, errorTable ); +} + +/* +=============== +R_GridInsertColumn +=============== +*/ +srfGridMesh_t *R_GridInsertColumn( srfGridMesh_t *grid, int column, int row, vec3_t point, float loderror ) { + int i, j; + int width, height, oldwidth; + drawVert_t ctrl[MAX_GRID_SIZE][MAX_GRID_SIZE]; + float errorTable[2][MAX_GRID_SIZE]; + float lodRadius; + vec3_t lodOrigin; + + oldwidth = 0; + width = grid->width + 1; + if (width > MAX_GRID_SIZE) + return NULL; + height = grid->height; + for (i = 0; i < width; i++) { + if (i == column) { + //insert new column + for (j = 0; j < grid->height; j++) { + LerpDrawVert( &grid->verts[j * grid->width + i-1], &grid->verts[j * grid->width + i], &ctrl[j][i] ); + if (j == row) + VectorCopy(point, ctrl[j][i].xyz); + } + errorTable[0][i] = loderror; + continue; + } + errorTable[0][i] = grid->widthLodError[oldwidth]; + for (j = 0; j < grid->height; j++) { + ctrl[j][i] = grid->verts[j * grid->width + oldwidth]; + } + oldwidth++; + } + for (j = 0; j < grid->height; j++) { + errorTable[1][j] = grid->heightLodError[j]; + } + // put all the aproximating points on the curve + //PutPointsOnCurve( ctrl, width, height ); + // calculate normals + MakeMeshNormals( width, height, ctrl ); + + VectorCopy(grid->lodOrigin, lodOrigin); + lodRadius = grid->lodRadius; + // free the old grid + R_FreeSurfaceGridMesh(grid); + // create a new grid + grid = R_CreateSurfaceGridMesh( width, height, ctrl, errorTable ); + grid->lodRadius = lodRadius; + VectorCopy(lodOrigin, grid->lodOrigin); + return grid; +} + +/* +=============== +R_GridInsertRow +=============== +*/ +srfGridMesh_t *R_GridInsertRow( srfGridMesh_t *grid, int row, int column, vec3_t point, float loderror ) { + int i, j; + int width, height, oldheight; + drawVert_t ctrl[MAX_GRID_SIZE][MAX_GRID_SIZE]; + float errorTable[2][MAX_GRID_SIZE]; + float lodRadius; + vec3_t lodOrigin; + + oldheight = 0; + width = grid->width; + height = grid->height + 1; + if (height > MAX_GRID_SIZE) + return NULL; + for (i = 0; i < height; i++) { + if (i == row) { + //insert new row + for (j = 0; j < grid->width; j++) { + LerpDrawVert( &grid->verts[(i-1) * grid->width + j], &grid->verts[i * grid->width + j], &ctrl[i][j] ); + if (j == column) + VectorCopy(point, ctrl[i][j].xyz); + } + errorTable[1][i] = loderror; + continue; + } + errorTable[1][i] = grid->heightLodError[oldheight]; + for (j = 0; j < grid->width; j++) { + ctrl[i][j] = grid->verts[oldheight * grid->width + j]; + } + oldheight++; + } + for (j = 0; j < grid->width; j++) { + errorTable[0][j] = grid->widthLodError[j]; + } + // put all the aproximating points on the curve + //PutPointsOnCurve( ctrl, width, height ); + // calculate normals + MakeMeshNormals( width, height, 
ctrl ); + + VectorCopy(grid->lodOrigin, lodOrigin); + lodRadius = grid->lodRadius; + // free the old grid + R_FreeSurfaceGridMesh(grid); + // create a new grid + grid = R_CreateSurfaceGridMesh( width, height, ctrl, errorTable ); + grid->lodRadius = lodRadius; + VectorCopy(lodOrigin, grid->lodOrigin); + return grid; +} diff --git a/code/renderer_vulkan/tr_cvar.c b/code/renderer_vulkan/tr_cvar.c new file mode 100644 index 0000000000..98a5c4a132 --- /dev/null +++ b/code/renderer_vulkan/tr_cvar.c @@ -0,0 +1,173 @@ + +#include "tr_cvar.h" +#include "ref_import.h" + +cvar_t *r_railWidth; +cvar_t *r_railCoreWidth; +cvar_t *r_railSegmentLength; + +cvar_t *r_verbose; + +cvar_t *r_znear; + + +cvar_t *r_inGameVideo; +cvar_t *r_dynamiclight; + +cvar_t *r_norefresh; +cvar_t *r_drawentities; +cvar_t *r_drawworld; +cvar_t *r_speeds; +cvar_t *r_fullbright; +cvar_t *r_novis; +cvar_t *r_nocull; +cvar_t *r_facePlaneCull; +cvar_t *r_showcluster; +cvar_t *r_nocurves; + + + +cvar_t* r_fullscreen; +// display refresh rate +cvar_t* r_displayRefresh; + +cvar_t *r_lightmap; +cvar_t *r_vertexLight; +cvar_t *r_uiFullScreen; +cvar_t *r_shadows; +cvar_t *r_flares; +cvar_t *r_singleShader; +cvar_t *r_colorMipLevels; +cvar_t *r_picmip; +cvar_t *r_showtris; +cvar_t *r_showsky; +cvar_t *r_shownormals; +cvar_t *r_offsetFactor; +cvar_t *r_offsetUnits; +cvar_t *r_gamma; +cvar_t *r_intensity; +cvar_t *r_lockpvs; +cvar_t *r_noportals; +cvar_t *r_portalOnly; + +cvar_t *r_subdivisions; +cvar_t *r_lodCurveError; + +// r_overbrightBits->integer, but set to 0 if no hw gamma +// cvar_t *r_overBrightBits; +cvar_t *r_mapOverBrightBits; + +cvar_t *r_debugSurface; +cvar_t *r_simpleMipMaps; + +cvar_t *r_showImages; + +cvar_t *r_ambientScale; +cvar_t *r_directedScale; +cvar_t *r_debugLight; +cvar_t *r_debugSort; +cvar_t *r_printShaders; +cvar_t *r_saveFontData; + +cvar_t *r_maxpolys; +cvar_t *r_maxpolyverts; + +cvar_t* r_allowResize; // make window resizable +cvar_t* r_mode; + +cvar_t* r_loadImgAPI; + +cvar_t* r_swapInterval; + +void R_Register( void ) +{ + // + // latched and archived variables + // + r_picmip = ri.Cvar_Get ("r_picmip", "1", CVAR_ARCHIVE | CVAR_LATCH ); + ri.Cvar_CheckRange( r_picmip, 0, 8, qtrue ); + + r_simpleMipMaps = ri.Cvar_Get( "r_simpleMipMaps", "1", CVAR_ARCHIVE | CVAR_LATCH ); + r_colorMipLevels = ri.Cvar_Get ("r_colorMipLevels", "0", CVAR_LATCH ); + + // r_overBrightBits = ri.Cvar_Get ("r_overBrightBits", "0", CVAR_ARCHIVE | CVAR_LATCH ); + r_vertexLight = ri.Cvar_Get( "r_vertexLight", "0", CVAR_ARCHIVE | CVAR_LATCH ); + r_uiFullScreen = ri.Cvar_Get( "r_uifullscreen", "0", 0); + r_subdivisions = ri.Cvar_Get ("r_subdivisions", "4", CVAR_ARCHIVE | CVAR_LATCH); + + // + // temporary latched variables that can only change over a restart + // + r_fullbright = ri.Cvar_Get ("r_fullbright", "0", CVAR_LATCH|CVAR_CHEAT ); + r_mapOverBrightBits = ri.Cvar_Get ("r_mapOverBrightBits", "1", CVAR_LATCH ); + r_intensity = ri.Cvar_Get ("r_intensity", "1.5", CVAR_LATCH | CVAR_ARCHIVE ); + r_singleShader = ri.Cvar_Get ("r_singleShader", "0", CVAR_CHEAT | CVAR_LATCH ); + + // + // archived variables that can change at any time + // + r_lodCurveError = ri.Cvar_Get( "r_lodCurveError", "250", CVAR_ARCHIVE|CVAR_CHEAT ); + r_flares = ri.Cvar_Get ("r_flares", "0", CVAR_ARCHIVE ); + r_znear = ri.Cvar_Get( "r_znear", "4", CVAR_CHEAT ); + ri.Cvar_CheckRange( r_znear, 0.001f, 200, qtrue ); + + r_inGameVideo = ri.Cvar_Get( "r_inGameVideo", "1", CVAR_ARCHIVE ); + r_dynamiclight = ri.Cvar_Get( "r_dynamiclight", "1", CVAR_ARCHIVE ); + r_gamma = 
ri.Cvar_Get( "r_gamma", "1", CVAR_ARCHIVE | CVAR_LATCH ); + r_facePlaneCull = ri.Cvar_Get ("r_facePlaneCull", "1", CVAR_ARCHIVE ); + + r_railWidth = ri.Cvar_Get( "r_railWidth", "16", CVAR_ARCHIVE ); + r_railCoreWidth = ri.Cvar_Get( "r_railCoreWidth", "6", CVAR_ARCHIVE ); + r_railSegmentLength = ri.Cvar_Get( "r_railSegmentLength", "32", CVAR_ARCHIVE ); + + r_ambientScale = ri.Cvar_Get( "r_ambientScale", "0.6", CVAR_CHEAT ); + r_directedScale = ri.Cvar_Get( "r_directedScale", "1", CVAR_CHEAT ); + + // + // temporary variables that can change at any time + // + r_showImages = ri.Cvar_Get( "r_showImages", "0", CVAR_TEMP ); + + r_debugLight = ri.Cvar_Get( "r_debuglight", "0", CVAR_TEMP ); + r_debugSort = ri.Cvar_Get( "r_debugSort", "0", CVAR_CHEAT ); + r_printShaders = ri.Cvar_Get( "r_printShaders", "0", 0 ); + r_saveFontData = ri.Cvar_Get( "r_saveFontData", "0", 0 ); + + r_nocurves = ri.Cvar_Get ("r_nocurves", "0", CVAR_CHEAT ); + r_drawworld = ri.Cvar_Get ("r_drawworld", "1", CVAR_CHEAT ); + r_lightmap = ri.Cvar_Get ("r_lightmap", "0", 0 ); + r_portalOnly = ri.Cvar_Get ("r_portalOnly", "0", CVAR_TEMP ); + + + r_norefresh = ri.Cvar_Get ("r_norefresh", "0", CVAR_CHEAT); + r_drawentities = ri.Cvar_Get ("r_drawentities", "1", CVAR_CHEAT ); + r_nocull = ri.Cvar_Get ("r_nocull", "0", CVAR_CHEAT); + r_novis = ri.Cvar_Get ("r_novis", "0", CVAR_CHEAT); + r_showcluster = ri.Cvar_Get ("r_showcluster", "0", CVAR_CHEAT); + r_speeds = ri.Cvar_Get ("r_speeds", "0", CVAR_CHEAT); + r_verbose = ri.Cvar_Get( "r_verbose", "0", CVAR_CHEAT ); + r_debugSurface = ri.Cvar_Get ("r_debugSurface", "0", CVAR_TEMP); + r_showtris = ri.Cvar_Get ("r_showtris", "0", CVAR_TEMP); + r_showsky = ri.Cvar_Get ("r_showsky", "0", CVAR_TEMP); + r_shownormals = ri.Cvar_Get ("r_shownormals", "0", CVAR_TEMP); + r_offsetFactor = ri.Cvar_Get( "r_offsetfactor", "-1", CVAR_CHEAT ); + r_offsetUnits = ri.Cvar_Get( "r_offsetunits", "-2", CVAR_CHEAT ); + r_lockpvs = ri.Cvar_Get ("r_lockpvs", "0", CVAR_CHEAT); + r_noportals = ri.Cvar_Get ("r_noportals", "0", CVAR_CHEAT); + r_shadows = ri.Cvar_Get( "cg_shadows", "1", 0 ); + + r_maxpolys = ri.Cvar_Get( "r_maxpolys", va("%d", 600), 0); + r_maxpolyverts = ri.Cvar_Get( "r_maxpolyverts", va("%d", 3000), 0); + + r_fullscreen = ri.Cvar_Get( "r_fullscreen", "1", CVAR_ARCHIVE | CVAR_LATCH ); + r_displayRefresh = ri.Cvar_Get( "r_displayRefresh", "60", CVAR_LATCH ); + ri.Cvar_CheckRange( r_displayRefresh, 0, 200, qtrue ); + + r_allowResize = ri.Cvar_Get( "r_allowResize", "0", CVAR_ARCHIVE | CVAR_LATCH ); + + r_mode = ri.Cvar_Get( "r_mode", "-2", CVAR_ARCHIVE | CVAR_LATCH ); + + r_loadImgAPI = ri.Cvar_Get( "r_loadImgAPI", "0", CVAR_ARCHIVE | CVAR_LATCH ); + + r_swapInterval = ri.Cvar_Get( "r_swapInterval", "0", CVAR_ARCHIVE | CVAR_LATCH ); +} diff --git a/code/renderer_vulkan/tr_cvar.h b/code/renderer_vulkan/tr_cvar.h new file mode 100644 index 0000000000..f5117c9dc8 --- /dev/null +++ b/code/renderer_vulkan/tr_cvar.h @@ -0,0 +1,99 @@ +#ifndef TR_CVAR_H_ +#define TR_CVAR_H_ + +#include "../qcommon/q_shared.h" + + +extern cvar_t *r_railWidth; +extern cvar_t *r_railCoreWidth; +extern cvar_t *r_railSegmentLength; + +extern cvar_t *r_verbose; // used for verbose debug spew + +extern cvar_t *r_znear; // near Z clip plane + + +extern cvar_t *r_depthbits; // number of desired depth bits + + + + +extern cvar_t *r_inGameVideo; // controls whether in game video should be draw +extern cvar_t *r_dynamiclight; // dynamic lights enabled/disabled + +extern cvar_t *r_norefresh; // bypasses the ref rendering +extern cvar_t 
*r_drawentities; // disable/enable entity rendering +extern cvar_t *r_drawworld; // disable/enable world rendering +extern cvar_t *r_speeds; // various levels of information display + +extern cvar_t *r_novis; // disable/enable usage of PVS +extern cvar_t *r_nocull; +extern cvar_t *r_facePlaneCull; // enables culling of planar surfaces with back side test +extern cvar_t *r_nocurves; +extern cvar_t *r_showcluster; + +extern cvar_t *r_mode; // video mode +extern cvar_t *r_fullscreen; +extern cvar_t *r_gamma; + + +extern cvar_t *r_singleShader; // make most world faces use default shader +extern cvar_t *r_colorMipLevels; // development aid to see texture mip usage +extern cvar_t *r_picmip; // controls picmip values +extern cvar_t *r_offsetFactor; +extern cvar_t *r_offsetUnits; + +extern cvar_t *r_fullbright; // avoid lightmap pass +extern cvar_t *r_lightmap; // render lightmaps only +extern cvar_t *r_vertexLight; // vertex lighting mode for better performance +extern cvar_t *r_uiFullScreen; // ui is running fullscreen + +extern cvar_t *r_showtris; // enables wireframe rendering of the world +extern cvar_t *r_showsky; // forces sky in front of all surfaces +extern cvar_t *r_shownormals; // draws wireframe normals +extern cvar_t *r_clear; // force screen clear every frame + +extern cvar_t *r_shadows; // controls shadows: 0 = none, 1 = blur, 2 = stencil, 3 = black planar projection + +extern cvar_t *r_intensity; + +extern cvar_t *r_lockpvs; +extern cvar_t *r_noportals; +extern cvar_t *r_portalOnly; + +extern cvar_t *r_subdivisions; +extern cvar_t *r_lodCurveError; + +//extern cvar_t *r_overBrightBits; +extern cvar_t *r_mapOverBrightBits; + +extern cvar_t *r_debugSurface; +extern cvar_t *r_simpleMipMaps; + +extern cvar_t *r_showImages; +extern cvar_t *r_debugSort; + +extern cvar_t *r_printShaders; +extern cvar_t *r_saveFontData; + + +extern cvar_t *r_maxpolys; +extern cvar_t *r_maxpolyverts; + + +extern cvar_t *r_ambientScale; +extern cvar_t *r_directedScale; +extern cvar_t *r_debugLight; + +extern cvar_t* r_allowResize; // make window resizable +extern cvar_t* r_mode; +extern cvar_t* r_fullscreen; +extern cvar_t* r_displayRefresh; +extern cvar_t* r_loadImgAPI; +extern cvar_t* r_swapInterval; + +void R_Register( void ); + + + +#endif diff --git a/code/renderer_vulkan/tr_flares.c b/code/renderer_vulkan/tr_flares.c new file mode 100644 index 0000000000..54994a4d33 --- /dev/null +++ b/code/renderer_vulkan/tr_flares.c @@ -0,0 +1,546 @@ +/* +=========================================================================== +Copyright (C) 1999-2005 Id Software, Inc. + +This file is part of Quake III Arena source code. + +Quake III Arena source code is free software; you can redistribute it +and/or modify it under the terms of the GNU General Public License as +published by the Free Software Foundation; either version 2 of the License, +or (at your option) any later version. + +Quake III Arena source code is distributed in the hope that it will be +useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with Quake III Arena source code; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +=========================================================================== +*/ +// tr_flares.c +#include "ref_import.h" + +#include "tr_local.h" + +extern cvar_t* r_flares; + +void RB_SurfaceFlare(srfFlare_t *surf) +{ + if (r_flares->integer) + { + ri.Printf(PRINT_DEVELOPER, "I'm so weak, don't know how to implement this.\n"); + // RB_AddFlare(surf, tess.fogNum, surf->origin, surf->color, surf->normal); + } +} + +/* +============================================================================= + +LIGHT FLARES + +A light flare is an effect that takes place inside the eye when bright light +sources are visible. The size of the flare relative to the screen is nearly +constant, irrespective of distance, but the intensity should be proportional to the +projected area of the light source. + +A surface that has been flagged as having a light flare will calculate the depth +buffer value that its midpoint should have when the surface is added. + +After all opaque surfaces have been rendered, the depth buffer is read back for +each flare in view. If the point has not been obscured by a closer surface, the +flare should be drawn. + +Surfaces that have a repeated texture should never be flagged as flaring, because +there will only be a single flare added at the midpoint of the polygon. + +To prevent abrupt popping, the intensity of the flare is interpolated up and +down as it changes visibility. This involves scene to scene state, unlike almost +all other aspects of the renderer, and is complicated by the fact that a single +frame may have multiple scenes. + +RB_RenderFlares() will be called once per view (twice in a mirrored scene, potentially +up to five or more times in a frame with 3D status bar icons). 
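+
+(In this Vulkan port the flare back end below is kept for reference but is
+currently commented out; RB_SurfaceFlare above only prints a developer notice
+when r_flares is enabled.)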
+ +============================================================================= +*/ + + +// flare states maintain visibility over multiple frames for fading +// layers: view, mirror, menu +/* + +typedef struct flare_s { + struct flare_s *next; // for active chain + + int addedFrame; + + qboolean inPortal; // true if in a portal view of the scene + int frameSceneNum; + void *surface; + int fogNum; + + int fadeTime; + + qboolean visible; // state of last test + float drawIntensity; // may be non 0 even if !visible due to fading + + int windowX, windowY; + float eyeZ; + + vec3_t origin; + vec3_t color; +} flare_t; + +#define MAX_FLARES 256 + +flare_t r_flareStructs[MAX_FLARES]; +flare_t *r_activeFlares, *r_inactiveFlares; + +int flareCoeff; + + +================== +R_SetFlareCoeff +================== + +static void R_SetFlareCoeff( void ) { + + if(r_flareCoeff->value == 0.0f) + flareCoeff = atof(FLARE_STDCOEFF); + else + flareCoeff = r_flareCoeff->value; +} +*/ +/* +================== +R_ClearFlares +================== + +void R_ClearFlares( void ) { + int i; + + memset( r_flareStructs, 0, sizeof( r_flareStructs ) ); + r_activeFlares = NULL; + r_inactiveFlares = NULL; + + for ( i = 0 ; i < MAX_FLARES ; i++ ) { + r_flareStructs[i].next = r_inactiveFlares; + r_inactiveFlares = &r_flareStructs[i]; + } + + R_SetFlareCoeff(); +} +*/ + +/* +================== +RB_AddFlare + +This is called at surface tesselation time +================== + +void RB_AddFlare( void *surface, int fogNum, vec3_t point, vec3_t color, vec3_t normal ) { + int i; + flare_t *f; + vec3_t local; + float d = 1; + vec4_t eye, clip, normalized, window; + + backEnd.pc.c_flareAdds++; + + if(normal && (normal[0] || normal[1] || normal[2])) + { + VectorSubtract( backEnd.viewParms.or.origin, point, local ); + FastNormalize1f(local); + d = DotProduct(local, normal); + + // If the viewer is behind the flare don't add it. 
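+		// (d is the cosine of the angle between the surface normal and the
+		// direction to the viewer; it later scales the flare color so the
+		// flare fades as the surface turns edge-on)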
+		if(d < 0)
+			return;
+	}
+
+	// if the point is off the screen, don't bother adding it
+	// calculate screen coordinates and depth
+	R_TransformModelToClip( point, backEnd.or.modelMatrix,
+		backEnd.viewParms.projectionMatrix, eye, clip );
+
+	// check to see if the point is completely off screen
+	for ( i = 0 ; i < 3 ; i++ ) {
+		if ( clip[i] >= clip[3] || clip[i] <= -clip[3] ) {
+			return;
+		}
+	}
+
+	R_TransformClipToWindow( clip, &backEnd.viewParms, normalized, window );
+
+	if ( window[0] < 0 || window[0] >= backEnd.viewParms.viewportWidth
+		|| window[1] < 0 || window[1] >= backEnd.viewParms.viewportHeight ) {
+		return;	// shouldn't happen, since we check the clip[] above, except for FP rounding
+	}
+
+	// see if a flare with a matching surface, scene, and view exists
+	for ( f = r_activeFlares ; f ; f = f->next ) {
+		if ( f->surface == surface && f->frameSceneNum == backEnd.viewParms.frameSceneNum
+			&& f->inPortal == backEnd.viewParms.isPortal ) {
+			break;
+		}
+	}
+
+	// allocate a new one
+	if (!f ) {
+		if ( !r_inactiveFlares ) {
+			// the list is completely full
+			return;
+		}
+		f = r_inactiveFlares;
+		r_inactiveFlares = r_inactiveFlares->next;
+		f->next = r_activeFlares;
+		r_activeFlares = f;
+
+		f->surface = surface;
+		f->frameSceneNum = backEnd.viewParms.frameSceneNum;
+		f->inPortal = backEnd.viewParms.isPortal;
+		f->addedFrame = -1;
+	}
+
+	if ( f->addedFrame != backEnd.viewParms.frameCount - 1 ) {
+		f->visible = qfalse;
+		f->fadeTime = backEnd.refdef.time - 2000;
+	}
+
+	f->addedFrame = backEnd.viewParms.frameCount;
+	f->fogNum = fogNum;
+
+	VectorCopy(point, f->origin);
+	VectorCopy( color, f->color );
+
+	// fade the intensity of the flare down as the
+	// light surface turns away from the viewer
+	VectorScale( f->color, d, f->color );
+
+	// save info needed to test
+	f->windowX = backEnd.viewParms.viewportX + window[0];
+	f->windowY = backEnd.viewParms.viewportY + window[1];
+
+	f->eyeZ = eye[2];
+}
+*/
+/*
+==================
+RB_AddDlightFlares
+==================
+
+void RB_AddDlightFlares( void ) {
+	dlight_t *l;
+	int i, j, k;
+	fog_t *fog = NULL;
+
+	if ( !r_flares->integer ) {
+		return;
+	}
+
+	l = backEnd.refdef.dlights;
+
+	if(tr.world)
+		fog = tr.world->fogs;
+
+	for (i=0 ; i<backEnd.refdef.num_dlights ; i++, l++) {
+
+		if(fog)
+		{
+			// find which fog volume the light is in
+			for ( j = 1 ; j < tr.world->numfogs ; j++ ) {
+				fog = &tr.world->fogs[j];
+				for ( k = 0 ; k < 3 ; k++ ) {
+					if ( l->origin[k] < fog->bounds[0][k] || l->origin[k] > fog->bounds[1][k] ) {
+						break;
+					}
+				}
+				if ( k == 3 ) {
+					break;
+				}
+			}
+			if ( j == tr.world->numfogs ) {
+				j = 0;
+			}
+		}
+		else
+			j = 0;
+
+		RB_AddFlare( (void *)l, j, l->origin, l->color, NULL );
+	}
+}
+*/
+/*
+===============================================================================
+
+FLARE BACK END
+
+===============================================================================
+*/
+
+/*
+==================
+RB_TestFlare
+==================
+
+void RB_TestFlare( flare_t *f ) {
+	float depth;
+	qboolean visible;
+	float fade;
+	float screenZ;
+
+	backEnd.pc.c_flareTests++;
+
+	// doing a readpixels is as good as doing a glFinish(), so
+	// don't bother with another sync
+	glState.finishCalled = qfalse;
+
+	// read back the z buffer contents
+	qglReadPixels( f->windowX, f->windowY, 1, 1, GL_DEPTH_COMPONENT, GL_FLOAT, &depth );
+
+	screenZ = backEnd.viewParms.projectionMatrix[14] /
+		( ( 2*depth - 1 ) * backEnd.viewParms.projectionMatrix[11] - backEnd.viewParms.projectionMatrix[10] );
+
+	visible = ( -f->eyeZ - -screenZ ) < 24;
+
+	if ( visible ) {
+		if ( !f->visible ) {
+			f->visible = qtrue;
+			f->fadeTime = backEnd.refdef.time - 1;
+		}
+		fade = ( ( 
+    } else {
+        if ( f->visible ) {
+            f->visible = qfalse;
+            f->fadeTime = backEnd.refdef.time - 1;
+        }
+        fade = 1.0f - ( ( backEnd.refdef.time - f->fadeTime ) / 1000.0f ) * r_flareFade->value;
+    }
+
+    if ( fade < 0 ) {
+        fade = 0;
+    }
+    if ( fade > 1 ) {
+        fade = 1;
+    }
+
+    f->drawIntensity = fade;
+}
+*/
+
+/*
+ * This is an alternative to intensity scaling. It changes the size of the flare on screen instead
+ * with growing distance. See in the description at the top why this is not the way to go.
+    // size will change ~ 1/r.
+    size = backEnd.viewParms.viewportWidth * (r_flareSize->value / (distance * -2.0f));
+*/
+
+/*
+ * As flare sizes stay nearly constant with increasing distance we must decrease the intensity
+ * to achieve a reasonable visual result. The intensity is ~ (size^2 / distance^2) which can be
+ * obtained by considering the ratio of
+ * (flaresurface on screen) : (Surface of sphere defined by flare origin and distance from flare)
+ * An important requirement is:
+ * intensity <= 1 for all distances.
+ *
+ * The formula used here to compute the intensity is as follows:
+ * intensity = flareCoeff * size^2 / (distance + size*sqrt(flareCoeff))^2
+ * As you can see, the intensity will have a max. of 1 when the distance is 0.
+ * The coefficient flareCoeff will determine the falloff speed with increasing distance.
+
+void RB_RenderFlare( flare_t *f ) {
+    float size;
+    vec3_t color;
+    int iColor[3];
+    float distance, intensity, factor;
+    byte fogFactors[3] = {255, 255, 255};
+
+    backEnd.pc.c_flareRenders++;
+
+    // We don't want too big values anyway when dividing by distance.
+    if(f->eyeZ > -1.0f)
+        distance = 1.0f;
+    else
+        distance = -f->eyeZ;
+
+    // calculate the flare size.
+    size = backEnd.viewParms.viewportWidth * ( r_flareSize->value/640.0f + 8 / distance );
+
+    factor = distance + size * sqrt(flareCoeff);
+
+    intensity = flareCoeff * size * size / (factor * factor);
+
+    VectorScale(f->color, f->drawIntensity * intensity, color);
+
+    // Calculations for fogging
+    if(tr.world && f->fogNum > 0 && f->fogNum < tr.world->numfogs)
+    {
+        tess.numVertexes = 1;
+        VectorCopy(f->origin, tess.xyz[0]);
+        tess.fogNum = f->fogNum;
+
+        RB_CalcModulateColorsByFog(fogFactors);
+
+        // We don't need to render the flare if colors are 0 anyway.
+        if(!(fogFactors[0] || fogFactors[1] || fogFactors[2]))
+            return;
+    }
+
+    iColor[0] = color[0] * fogFactors[0];
+    iColor[1] = color[1] * fogFactors[1];
+    iColor[2] = color[2] * fogFactors[2];
+
+    RB_BeginSurface( tr.flareShader, f->fogNum );
+
+    // FIXME: use quadstamp?
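+    // The four vertices below lay out a screen-space quad centered on the
+    // flare's window position, corners at (windowX +/- size, windowY +/- size)
+    // with 0/1 texture coordinates, drawn as the two triangles (0,1,2) and (0,2,3).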
+    tess.xyz[tess.numVertexes][0] = f->windowX - size;
+    tess.xyz[tess.numVertexes][1] = f->windowY - size;
+    tess.texCoords[tess.numVertexes][0][0] = 0;
+    tess.texCoords[tess.numVertexes][0][1] = 0;
+    tess.vertexColors[tess.numVertexes][0] = iColor[0];
+    tess.vertexColors[tess.numVertexes][1] = iColor[1];
+    tess.vertexColors[tess.numVertexes][2] = iColor[2];
+    tess.vertexColors[tess.numVertexes][3] = 255;
+    tess.numVertexes++;
+
+    tess.xyz[tess.numVertexes][0] = f->windowX - size;
+    tess.xyz[tess.numVertexes][1] = f->windowY + size;
+    tess.texCoords[tess.numVertexes][0][0] = 0;
+    tess.texCoords[tess.numVertexes][0][1] = 1;
+    tess.vertexColors[tess.numVertexes][0] = iColor[0];
+    tess.vertexColors[tess.numVertexes][1] = iColor[1];
+    tess.vertexColors[tess.numVertexes][2] = iColor[2];
+    tess.vertexColors[tess.numVertexes][3] = 255;
+    tess.numVertexes++;
+
+    tess.xyz[tess.numVertexes][0] = f->windowX + size;
+    tess.xyz[tess.numVertexes][1] = f->windowY + size;
+    tess.texCoords[tess.numVertexes][0][0] = 1;
+    tess.texCoords[tess.numVertexes][0][1] = 1;
+    tess.vertexColors[tess.numVertexes][0] = iColor[0];
+    tess.vertexColors[tess.numVertexes][1] = iColor[1];
+    tess.vertexColors[tess.numVertexes][2] = iColor[2];
+    tess.vertexColors[tess.numVertexes][3] = 255;
+    tess.numVertexes++;
+
+    tess.xyz[tess.numVertexes][0] = f->windowX + size;
+    tess.xyz[tess.numVertexes][1] = f->windowY - size;
+    tess.texCoords[tess.numVertexes][0][0] = 1;
+    tess.texCoords[tess.numVertexes][0][1] = 0;
+    tess.vertexColors[tess.numVertexes][0] = iColor[0];
+    tess.vertexColors[tess.numVertexes][1] = iColor[1];
+    tess.vertexColors[tess.numVertexes][2] = iColor[2];
+    tess.vertexColors[tess.numVertexes][3] = 255;
+    tess.numVertexes++;
+
+    tess.indexes[tess.numIndexes++] = 0;
+    tess.indexes[tess.numIndexes++] = 1;
+    tess.indexes[tess.numIndexes++] = 2;
+    tess.indexes[tess.numIndexes++] = 0;
+    tess.indexes[tess.numIndexes++] = 2;
+    tess.indexes[tess.numIndexes++] = 3;
+
+    RB_EndSurface();
+}
+*/
+/*
+==================
+RB_RenderFlares
+
+Because flares are simulating an ocular effect, they should be drawn after
+everything (all views) in the entire frame has been drawn.
+
+Because of the way portals use the depth buffer to mark off areas, the
+needed information would be lost after each view, so we are forced to draw
+flares after each view.
+
+The resulting artifact is that flares in mirrors or portals don't dim properly
+when occluded by something in the main view, and portal flares that should
+extend past the portal edge will be overwritten.
+==================
+
+void RB_RenderFlares (void)
+{
+    flare_t *f;
+    flare_t **prev;
+    qboolean draw;
+
+    if(r_flareCoeff->modified)
+    {
+        R_SetFlareCoeff();
+        r_flareCoeff->modified = qfalse;
+    }
+
+    // Reset currentEntity to world so that any previously referenced entities
+    // don't have influence on the rendering of these flares (i.e. RF_ renderer flags).
+    backEnd.currentEntity = &tr.worldEntity;
+    backEnd.or = backEnd.viewParms.world;
+
+//  RB_AddDlightFlares();
+
+    // perform z buffer readback on each flare in this view
+    draw = qfalse;
+    prev = &r_activeFlares;
+    while ( ( f = *prev ) != NULL ) {
+        // throw out any flares that weren't added last frame
+        if ( f->addedFrame < backEnd.viewParms.frameCount - 1 ) {
+            *prev = f->next;
+            f->next = r_inactiveFlares;
+            r_inactiveFlares = f;
+            continue;
+        }
+
+        // don't draw any here that aren't from this scene / portal
+        f->drawIntensity = 0;
+        if ( f->frameSceneNum == backEnd.viewParms.frameSceneNum
+            && f->inPortal == backEnd.viewParms.isPortal ) {
+            RB_TestFlare( f );
+            if ( f->drawIntensity ) {
+                draw = qtrue;
+            } else {
+                // this flare has completely faded out, so remove it from the chain
+                *prev = f->next;
+                f->next = r_inactiveFlares;
+                r_inactiveFlares = f;
+                continue;
+            }
+        }
+
+        prev = &f->next;
+    }
+
+    if ( !draw ) {
+        return; // none visible
+    }
+
+    if ( backEnd.viewParms.isPortal ) {
+        qglDisable (GL_CLIP_PLANE0);
+    }
+
+    qglPushMatrix();
+    qglLoadIdentity();
+    qglMatrixMode( GL_PROJECTION );
+    qglPushMatrix();
+    qglLoadIdentity();
+    qglOrtho( backEnd.viewParms.viewportX, backEnd.viewParms.viewportX + backEnd.viewParms.viewportWidth,
+        backEnd.viewParms.viewportY, backEnd.viewParms.viewportY + backEnd.viewParms.viewportHeight,
+        -99999, 99999 );
+
+    for ( f = r_activeFlares ; f ; f = f->next ) {
+        if ( f->frameSceneNum == backEnd.viewParms.frameSceneNum
+            && f->inPortal == backEnd.viewParms.isPortal
+            && f->drawIntensity ) {
+            RB_RenderFlare( f );
+        }
+    }
+
+    qglPopMatrix();
+    qglMatrixMode( GL_MODELVIEW );
+    qglPopMatrix();
+}
+*/
diff --git a/code/renderer_vulkan/tr_flares.h b/code/renderer_vulkan/tr_flares.h
new file mode 100644
index 0000000000..56feced97e
--- /dev/null
+++ b/code/renderer_vulkan/tr_flares.h
@@ -0,0 +1,8 @@
+#ifndef TR_FLARE_H_
+#define TR_FLARE_H_
+
+
+void RB_SurfaceFlare(srfFlare_t *surf);
+
+
+#endif
diff --git a/code/renderer_vulkan/tr_fog.c b/code/renderer_vulkan/tr_fog.c
new file mode 100644
index 0000000000..4a6d53179f
--- /dev/null
+++ b/code/renderer_vulkan/tr_fog.c
@@ -0,0 +1,53 @@
+#include "tr_fog.h"
+#include <math.h>
+
+#define FOG_TABLE_SIZE 256
+
+static float FogTable[FOG_TABLE_SIZE];
+
+
+void R_InitFogTable( void )
+{
+    float exp = 0.5;
+
+    unsigned int i;
+
+    for ( i = 0 ; i < FOG_TABLE_SIZE ; i++ )
+    {
+        FogTable[i] = pow ( (float)i/(FOG_TABLE_SIZE-1), exp );
+    }
+}
+
+
+/*
+================
+Returns a 0.0 to 1.0 fog density value
+This is called for each texel of the fog texture on startup
+and for each vertex of transparent shaders in fog dynamically
+================
+*/
+float R_FogFactor( float s, float t )
+{
+    s -= 1.0/512;
+    if ( s < 0 ) {
+        return 0;
+    }
+    if ( t < 1.0/32 ) {
+        return 0;
+    }
+    if ( t < 31.0/32 ) {
+        s *= (t - 1.0f/32.0f) / (30.0f/32.0f);
+    }
+
+    // we need to leave a lot of clamp range
+    s *= 8;
+
+    if ( s > 1.0 ) {
+        s = 1.0;
+    }
+
+    float d = FogTable[ (int)(s * (FOG_TABLE_SIZE-1)) ];
+
+    return d;
+}
diff --git a/code/renderer_vulkan/tr_fog.h b/code/renderer_vulkan/tr_fog.h
new file mode 100644
index 0000000000..ae8a3f6abe
--- /dev/null
+++ b/code/renderer_vulkan/tr_fog.h
@@ -0,0 +1,9 @@
+#ifndef TR_FOG_H_
+#define TR_FOG_H_
+
+
+void R_InitFogTable( void );
+float R_FogFactor( float s, float t );
+
+
+#endif
diff --git a/code/renderer_vulkan/tr_fonts.c b/code/renderer_vulkan/tr_fonts.c
new file mode 100644
index 0000000000..82e3f153a1
--- /dev/null
+++ b/code/renderer_vulkan/tr_fonts.c
@@ -0,0 +1,577 @@
+/*
+===========================================================================
+Copyright (C) 1999-2005 Id Software, Inc.
+
+This file is part of Quake III Arena source code.
+
+Quake III Arena source code is free software; you can redistribute it
+and/or modify it under the terms of the GNU General Public License as
+published by the Free Software Foundation; either version 2 of the License,
+or (at your option) any later version.
+
+Quake III Arena source code is distributed in the hope that it will be
+useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Quake III Arena source code; if not, write to the Free Software
+Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+===========================================================================
+*/
+// tr_font.c
+//
+//
+// The font system uses FreeType 2.x to render TrueType fonts for use within the game.
+// As of this writing ( Nov, 2000 ) Team Arena uses these fonts for all of the ui and
+// about 90% of the cgame presentation. A few areas of the CGAME were left using the old
+// fonts since the code is shared with standard Q3A.
+//
+// If you include this font rendering code in a commercial product you MUST include the
+// following somewhere with your product, see www.freetype.org for specifics or changes.
+// The FreeType code also uses some hinting techniques that MIGHT infringe on patents
+// held by Apple so be aware of that also.
+//
+// As of Q3A 1.25+ and Team Arena, we are shipping the game with the font rendering code
+// disabled. This removes any potential patent issues and it keeps us from having to
+// distribute an actual TrueType font, which is 1. expensive to do and 2. seems to require
+// an act of god to accomplish.
+//
+// What we did was pre-render the fonts using FreeType ( which is why we leave the FreeType
+// credit in the credits ) and then saved off the glyph data and then hand touched up the
+// font bitmaps so they scale a bit better in GL.
+//
+// There are limitations in the way fonts are saved and reloaded in that it is based on
+// point size and not name. So if you pre-render Helvetica in 18 point and Impact in 18 point
+// you will end up with a single 18 point data file and image set. Typically you will want to
+// choose 3 sizes to best approximate the scaling you will be doing in the ui scripting system
+//
+// In the UI Scripting code, a scale of 1.0 is equal to a 48 point font. In Team Arena, we
+// use three or four scales, most of them exactly equaling the specific rendered size. We
+// rendered three sizes in Team Arena, 12, 16, and 20.
+//
+// To generate new font data you need to go through the following steps.
+// 1. delete the fontImage_x_xx.tga files and fontImage_xx.dat files from the fonts path.
+// 2. in a ui script, specify a font, smallFont, and bigFont keyword with font name and
+//    point size. the original TrueType fonts must exist in fonts at this point.
+// 3. run the game, you should see things normally.
+// 4. Exit the game and there will be three dat files and at least three tga files. The
+//    tga's are in 256x256 pages so if it takes three images to render a 24 point font you
+//    will end up with fontImage_0_24.tga through fontImage_2_24.tga
+// 5. In future runs of the game, the system looks for these images and data files when a
+//    specific point sized font is rendered and loads them for use.
+// 6. Because of the original beta nature of the FreeType code you will probably want to hand
+//    touch the font bitmaps.
+//
+// Currently a define in the project turns on or off the FreeType code which is currently
+// defined out. To pre-render new fonts you need to enable the define ( BUILD_FREETYPE ) and
+// uncheck the exclude from build check box in the FreeType2 area of the Renderer project.
+
+
+#include "tr_local.h"
+#include "ref_import.h"
+#include "tr_cvar.h"
+
+
+
+#ifdef BUILD_FREETYPE
+#include <ft2build.h>
+#include FT_FREETYPE_H
+#include FT_ERRORS_H
+#include FT_SYSTEM_H
+#include FT_IMAGE_H
+#include FT_OUTLINE_H
+
+#define _FLOOR(x) ((x) & -64)
+#define _CEIL(x) (((x)+63) & -64)
+#define _TRUNC(x) ((x) >> 6)
+
+FT_Library ftLibrary = NULL;
+#endif
+
+
+qhandle_t R_RegisterShaderFromImage(const char *name, int lightmapIndex, image_t *image, qboolean mipRawImage);
+qhandle_t RE_RegisterShaderNoMip( const char *name );
+
+#define MAX_FONTS 6
+static int registeredFontCount = 0;
+static fontInfo_t registeredFont[MAX_FONTS];
+
+
+#ifdef BUILD_FREETYPE
+void R_GetGlyphInfo(FT_GlyphSlot glyph, int *left, int *right, int *width, int *top, int *bottom, int *height, int *pitch) {
+    *left = _FLOOR( glyph->metrics.horiBearingX );
+    *right = _CEIL( glyph->metrics.horiBearingX + glyph->metrics.width );
+    *width = _TRUNC(*right - *left);
+
+    *top = _CEIL( glyph->metrics.horiBearingY );
+    *bottom = _FLOOR( glyph->metrics.horiBearingY - glyph->metrics.height );
+    *height = _TRUNC( *top - *bottom );
+    *pitch = ( qtrue ? (*width+3) & -4 : (*width+7) >> 3 );
+}
+
+
+FT_Bitmap *R_RenderGlyph(FT_GlyphSlot glyph, glyphInfo_t* glyphOut) {
+    FT_Bitmap *bit2;
+    int left, right, width, top, bottom, height, pitch, size;
+
+    R_GetGlyphInfo(glyph, &left, &right, &width, &top, &bottom, &height, &pitch);
+
+    if ( glyph->format == ft_glyph_format_outline ) {
+        size = pitch*height;
+
+        bit2 = ri.Malloc(sizeof(FT_Bitmap));
+
+        bit2->width = width;
+        bit2->rows = height;
+        bit2->pitch = pitch;
+        bit2->pixel_mode = ft_pixel_mode_grays;
+        //bit2->pixel_mode = ft_pixel_mode_mono;
+        bit2->buffer = ri.Malloc(pitch*height);
+        bit2->num_grays = 256;
+
+        memset( bit2->buffer, 0, size );
+
+        FT_Outline_Translate( &glyph->outline, -left, -bottom );
+
+        FT_Outline_Get_Bitmap( ftLibrary, &glyph->outline, bit2 );
+
+        glyphOut->height = height;
+        glyphOut->pitch = pitch;
+        glyphOut->top = (glyph->metrics.horiBearingY >> 6) + 1;
+        glyphOut->bottom = bottom;
+
+        return bit2;
+    } else {
+        ri.Printf(PRINT_ALL, "Non-outline fonts are not supported\n");
+    }
+    return NULL;
+}
+
+void WriteTGA (char *filename, byte *data, int width, int height) {
+    byte *buffer;
+    int i, c;
+    int row;
+    unsigned char *flip;
+    unsigned char *src, *dst;
+
+    buffer = ri.Malloc(width*height*4 + 18);
+    memset (buffer, 0, 18);
+    buffer[2] = 2; // uncompressed type
+    buffer[12] = width&255;
+    buffer[13] = width>>8;
+    buffer[14] = height&255;
+    buffer[15] = height>>8;
+    buffer[16] = 32; // pixel size
+
+    // swap rgb to bgr
+    c = 18 + width * height * 4;
+    for (i=18 ; i<c ; i+=4)
+    {
+        buffer[i] = data[i-18+2];   // blue
+        buffer[i+1] = data[i-18+1]; // green
+        buffer[i+2] = data[i-18+0]; // red
+        buffer[i+3] = data[i-18+3]; // alpha
+    }
+
+    // flip upside down
+    flip = (unsigned char *)ri.Malloc(width*4);
+    for (row = 0; row < height/2; row++)
+    {
+        src = buffer + 18 + row * 4 * width;
+        dst = buffer + 18 + (height - row - 1) * 4 * width;
+
+        memcpy(flip, src, width*4);
+        memcpy(src, dst, width*4);
+        memcpy(dst, flip, width*4);
+    }
+    ri.Free(flip);
+
+    ri.FS_WriteFile(filename, buffer, c);
+
+    ri.Free (buffer);
+}
+
+static glyphInfo_t *RE_ConstructGlyphInfo(unsigned char *imageOut, int *xOut, int *yOut, int *maxHeight, FT_Face face, const unsigned char c, qboolean calcHeight) {
+    int i;
+    static glyphInfo_t glyph;
+    unsigned char *src, *dst;
+    float scaled_width, scaled_height;
+    FT_Bitmap *bitmap = NULL;
+
+    memset(&glyph, 0, sizeof(glyphInfo_t));
+
+    // make sure everything is here
+    if (face != NULL) {
+        FT_Load_Glyph(face, FT_Get_Char_Index( face, c), FT_LOAD_DEFAULT );
+        bitmap = R_RenderGlyph(face->glyph, &glyph);
+        if (bitmap) {
+            glyph.xSkip = (face->glyph->metrics.horiAdvance >> 6) + 1;
+        } else {
+            return &glyph;
+        }
+
+        if (glyph.height > *maxHeight) {
+            *maxHeight = glyph.height;
+        }
+
+        if (calcHeight) {
+            ri.Free(bitmap->buffer);
+            ri.Free(bitmap);
+            return &glyph;
+        }
+
+/*
+        // need to convert to power of 2 sizes so we do not get
+        // any scaling from the gl upload
+        for (scaled_width = 1 ; scaled_width < glyph.pitch ; scaled_width<<=1)
+            ;
+        for (scaled_height = 1 ; scaled_height < glyph.height ; scaled_height<<=1)
+            ;
+*/
+
+        scaled_width = glyph.pitch;
+        scaled_height = glyph.height;
+
+        // we need to make sure we fit
+        if (*xOut + scaled_width + 1 >= 255) {
+            *xOut = 0;
+            *yOut += *maxHeight + 1;
+        }
+
+        if (*yOut + *maxHeight + 1 >= 255) {
+            *yOut = -1;
+            *xOut = -1;
+            ri.Free(bitmap->buffer);
+            ri.Free(bitmap);
+            return &glyph;
+        }
+
+
+        src = bitmap->buffer;
+        dst = imageOut + (*yOut * 256) + *xOut;
+
+        if (bitmap->pixel_mode == ft_pixel_mode_mono) {
+            for (i = 0; i < glyph.height; i++) {
+                int j;
+                unsigned char *_src = src;
+                unsigned char *_dst = dst;
+                unsigned char mask = 0x80;
+                unsigned char val = *_src;
+                for (j = 0; j < glyph.pitch; j++) {
+                    if (mask == 0x80) {
+                        val = *_src++;
+                    }
+                    if (val & mask) {
+                        *_dst = 0xff;
+                    }
+                    mask >>= 1;
+
+                    if ( mask == 0 ) {
+                        mask = 0x80;
+                    }
+                    _dst++;
+                }
+
+                src += glyph.pitch;
+                dst += 256;
+            }
+        } else {
+            for (i = 0; i < glyph.height; i++) {
+                memcpy(dst, src, glyph.pitch);
+                src += glyph.pitch;
+                dst += 256;
+            }
+        }
+
+        // we now have an 8 bit per pixel grey scale bitmap
+        // that is width wide and pf->ftSize->metrics.y_ppem tall
+
+        glyph.imageHeight = scaled_height;
+        glyph.imageWidth = scaled_width;
+        glyph.s = (float)*xOut / 256;
+        glyph.t = (float)*yOut / 256;
+        glyph.s2 = glyph.s + (float)scaled_width / 256;
+        glyph.t2 = glyph.t + (float)scaled_height / 256;
+
+        *xOut += scaled_width + 1;
+
+        ri.Free(bitmap->buffer);
+        ri.Free(bitmap);
+    }
+
+    return &glyph;
+}
+#endif
+
+static int fdOffset;
+static byte *fdFile;
+
+int readInt( void ) {
+    int i = fdFile[fdOffset]+(fdFile[fdOffset+1]<<8)+(fdFile[fdOffset+2]<<16)+(fdFile[fdOffset+3]<<24);
+    fdOffset += 4;
+    return i;
+}
+
+typedef union {
+    byte fred[4];
+    float ffred;
+} poor;
+
+float readFloat( void ) {
+    poor me;
+#if defined Q3_BIG_ENDIAN
+    me.fred[0] = fdFile[fdOffset+3];
+    me.fred[1] = fdFile[fdOffset+2];
+    me.fred[2] = fdFile[fdOffset+1];
+    me.fred[3] = fdFile[fdOffset+0];
+#elif defined Q3_LITTLE_ENDIAN
+    me.fred[0] = fdFile[fdOffset+0];
+    me.fred[1] = fdFile[fdOffset+1];
+    me.fred[2] = fdFile[fdOffset+2];
+    me.fred[3] = fdFile[fdOffset+3];
+#endif
+    fdOffset += 4;
+    return me.ffred;
+}
+
+
+void RE_RegisterFont(const char *fontName, int pointSize, fontInfo_t *font)
+{
+#ifdef BUILD_FREETYPE
+    FT_Face face;
+    int j, k, xOut, yOut, lastStart, imageNumber;
+    int scaledSize, newSize, maxHeight, left;
+    unsigned char *out, *imageBuff;
+    glyphInfo_t *glyph;
+    image_t *image;
+    qhandle_t h;
+    float max;
+    float dpi = 72;
+    float glyphScale;
+#endif
+    char *faceData;
+    int i;
+    char name[1024];
+
+    if (!fontName) {
+        ri.Printf(PRINT_ALL, "RE_RegisterFont: called with empty name\n");
+        return;
+    }
+
+    if (pointSize <= 0) {
+        pointSize = 12;
+    }
+
+
+    if (registeredFontCount >= MAX_FONTS) {
+        ri.Printf(PRINT_WARNING, "RE_RegisterFont: Too many fonts registered already.\n");
+        return;
+    }
+
+    snprintf(name, sizeof(name), "fonts/fontImage_%i.dat",pointSize);
+    for (i = 0; i < registeredFontCount; i++) {
+        if (Q_stricmp(name, registeredFont[i].name) == 0) {
+            memcpy(font, &registeredFont[i], sizeof(fontInfo_t));
+            return;
+        }
+    }
+
+    int len = ri.FS_ReadFile(name, NULL);
+    if (len == sizeof(fontInfo_t)) {
+        ri.FS_ReadFile(name, (void**)&faceData);
+        fdOffset = 0;
+        fdFile = (unsigned char*)faceData;
+        for (i = 0; i < GLYPHS_PER_FONT; i++) {
+            font->glyphs[i].height = readInt();
+            font->glyphs[i].top = readInt();
+            font->glyphs[i].bottom = readInt();
+            font->glyphs[i].pitch = readInt();
+            font->glyphs[i].xSkip = readInt();
+            font->glyphs[i].imageWidth = readInt();
+            font->glyphs[i].imageHeight = readInt();
+            font->glyphs[i].s = readFloat();
+            font->glyphs[i].t = readFloat();
+            font->glyphs[i].s2 = readFloat();
+            font->glyphs[i].t2 = readFloat();
+            font->glyphs[i].glyph = readInt();
+            Q_strncpyz(font->glyphs[i].shaderName, (const char *)&fdFile[fdOffset], sizeof(font->glyphs[i].shaderName));
+            fdOffset += sizeof(font->glyphs[i].shaderName);
+        }
+        font->glyphScale = readFloat();
+        memcpy(font->name, &fdFile[fdOffset], MAX_QPATH);
+
+//      memcpy(font, faceData, sizeof(fontInfo_t));
+        Q_strncpyz(font->name, name, sizeof(font->name));
+        for (i = GLYPH_START; i <= GLYPH_END; i++) {
+            font->glyphs[i].glyph = RE_RegisterShaderNoMip(font->glyphs[i].shaderName);
+        }
+        memcpy(&registeredFont[registeredFontCount++], font, sizeof(fontInfo_t));
+        ri.FS_FreeFile(faceData);
+        return;
+    }
+
+#ifndef BUILD_FREETYPE
+    ri.Printf(PRINT_WARNING, "RE_RegisterFont: FreeType code not available\n");
+#else
+    if (ftLibrary == NULL) {
+        ri.Printf(PRINT_WARNING, "RE_RegisterFont: FreeType not initialized.\n");
+        return;
+    }
+
+    len = ri.FS_ReadFile(fontName, &faceData);
+    if (len <= 0) {
+        ri.Printf(PRINT_WARNING, "RE_RegisterFont: Unable to read font file '%s'\n", fontName);
+        return;
+    }
+
+    // allocate on the stack first in case we fail
+    if (FT_New_Memory_Face( ftLibrary, faceData, len, 0, &face )) {
+        ri.Printf(PRINT_WARNING, "RE_RegisterFont: FreeType, unable to allocate new face.\n");
+        return;
+    }
+
+
+    if (FT_Set_Char_Size( face, pointSize << 6, pointSize << 6, dpi, dpi)) {
+        ri.Printf(PRINT_WARNING, "RE_RegisterFont: FreeType, unable to set face char size.\n");
+        return;
+    }
+
+    //*font = &registeredFonts[registeredFontCount++];
+
+    // make a 256x256 image buffer, once it is full, register it, clean it and keep going
+    // until all glyphs are rendered
+
+    out = ri.Malloc(256*256);
+    if (out == NULL) {
+        ri.Printf(PRINT_WARNING, "RE_RegisterFont: ri.Malloc failure during output image creation.\n");
+        return;
+    }
+    memset(out, 0, 256*256);
+
+    maxHeight = 0;
+
+    for (i = GLYPH_START; i <= GLYPH_END; i++) {
+        RE_ConstructGlyphInfo(out, &xOut, &yOut, &maxHeight, face, (unsigned char)i, qtrue);
+    }
+
+    xOut = 0;
+    yOut = 0;
+    i = GLYPH_START;
+    lastStart = i;
+    imageNumber = 0;
+
+    while ( i <= GLYPH_END + 1 ) {
+
+        if ( i == GLYPH_END + 1 ) {
+            // upload/save current image buffer
+            xOut = yOut = -1;
+        } else {
+            glyph = RE_ConstructGlyphInfo(out, &xOut, &yOut, &maxHeight, face, (unsigned char)i, qfalse);
+        }
+
+        if (xOut == -1 || yOut == -1) {
+            // ran out of room
+            // we need to create an image from the bitmap, set all the handles in the glyphs to this point
+            //
+
+            scaledSize = 256*256;
+            newSize = scaledSize * 4;
+            imageBuff = ri.Malloc(newSize);
+            left = 0;
+            max = 0;
+            for ( k = 0; k < (scaledSize) ; k++ ) {
+                if (max < out[k]) {
+                    max = out[k];
+                }
+            }
+
+            if (max > 0) {
+                max = 255/max;
+            }
+
+            for ( k = 0; k < (scaledSize) ; k++ ) {
+                imageBuff[left++] = 255;
+                imageBuff[left++] = 255;
+                imageBuff[left++] = 255;
+
+                imageBuff[left++] = ((float)out[k] * max);
+            }
+
+            snprintf (name, sizeof(name), "fonts/fontImage_%i_%i.tga", imageNumber++, pointSize);
+            if (r_saveFontData->integer) {
+                WriteTGA(name, imageBuff, 256, 256);
+            }
+
+            image = R_CreateImage(name, imageBuff, 256, 256, qfalse, qfalse, GL_CLAMP);
+
+            h = R_RegisterShaderFromImage(name, LIGHTMAP_2D, image, qfalse);
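+            // every glyph constructed since lastStart was packed into this
+            // 256x256 page, so the loop below points each of their shader
+            // handles at the image that was just registered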
+            for (j = lastStart; j < i; j++) {
+                font->glyphs[j].glyph = h;
+                Q_strncpyz(font->glyphs[j].shaderName, name, sizeof(font->glyphs[j].shaderName));
+            }
+            lastStart = i;
+            memset(out, 0, 256*256);
+            xOut = 0;
+            yOut = 0;
+            ri.Free(imageBuff);
+            if ( i == GLYPH_END + 1 )
+                i++;
+        } else {
+            memcpy(&font->glyphs[i], glyph, sizeof(glyphInfo_t));
+            i++;
+        }
+    }
+
+    // change the scale to be relative to 1 based on 72 dpi ( so dpi of 144 means a scale of .5 )
+    glyphScale = 72.0f / dpi;
+
+    // we also need to adjust the scale based on point size relative to 48 points as the ui scaling is based on a 48 point font
+    glyphScale *= 48.0f / pointSize;
+
+    registeredFont[registeredFontCount].glyphScale = glyphScale;
+    font->glyphScale = glyphScale;
+    memcpy(&registeredFont[registeredFontCount++], font, sizeof(fontInfo_t));
+
+    if (r_saveFontData->integer) {
+        ri.FS_WriteFile(va("fonts/fontImage_%i.dat", pointSize), font, sizeof(fontInfo_t));
+    }
+
+    ri.Free(out);
+
+    ri.FS_FreeFile(faceData);
+#endif
+}
+
+
+
+void R_InitFreeType(void)
+{
+#ifdef BUILD_FREETYPE
+    if (FT_Init_FreeType( &ftLibrary )) {
+        ri.Printf(PRINT_WARNING, "R_InitFreeType: Unable to initialize FreeType.\n");
+    }
+#endif
+    registeredFontCount = 0;
+    r_saveFontData = ri.Cvar_Get( "r_saveFontData", "0", 0 );
+}
+
+
+
+
+void R_DoneFreeType(void) {
+#ifdef BUILD_FREETYPE
+    if (ftLibrary) {
+        FT_Done_FreeType( ftLibrary );
+        ftLibrary = NULL;
+    }
+#endif
+    registeredFontCount = 0;
+
+}
+
diff --git a/code/renderer_vulkan/tr_globals.c b/code/renderer_vulkan/tr_globals.c
new file mode 100644
index 0000000000..53263b18a6
--- /dev/null
+++ b/code/renderer_vulkan/tr_globals.c
@@ -0,0 +1,4 @@
+#include "tr_globals.h"
+
+trGlobals_t tr;
+
diff --git a/code/renderer_vulkan/tr_globals.h b/code/renderer_vulkan/tr_globals.h
new file mode 100644
index 0000000000..1845bfa493
--- /dev/null
+++ b/code/renderer_vulkan/tr_globals.h
@@ -0,0 +1,101 @@
+#ifndef TR_GLOBALS_H_
+#define TR_GLOBALS_H_
+
+#include "tr_local.h"
+#include "tr_model.h"
+
+// 14 bits (16384 == 1 << 14), see QSORT_SHADERNUM_SHIFT
+#define MAX_SHADERS 16384
+
+/*
+** trGlobals_t
+**
+** Most renderer globals are defined here.
+** backend functions should never modify any of these fields,
+** but may read fields that aren't dynamically modified
+** by the frontend.
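+** (Counters that the backend does own live in backEnd.pc; the frontend
+** counters kept here are tr.pc and tr.frontEndMsec below.)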
+*/
+
+
+typedef struct {
+    qboolean registered; // cleared at shutdown, set at beginRegistration
+
+    int visCount; // incremented every time a new vis cluster is entered
+    int viewCount; // incremented every view (twice a scene if portaled)
+                   // and every R_MarkFragments call
+
+    qboolean worldMapLoaded;
+    world_t *world;
+
+    const unsigned char *externalVisData; // from RE_SetWorldVisData, shared with CM_Load
+
+    image_t *defaultImage;
+    image_t *scratchImage[32];
+    image_t *fogImage;
+    image_t *dlightImage; // inverse-square highlight for projective adding
+    image_t *whiteImage; // full of 0xff
+    image_t *identityLightImage; // full of tr.identityLightByte
+
+    shader_t *defaultShader;
+    shader_t *cinematicShader;
+    shader_t *shadowShader;
+    shader_t *projectionShadowShader;
+
+    int numLightmaps;
+    image_t *lightmaps[MAX_LIGHTMAPS];
+
+    trRefEntity_t *currentEntity;
+    trRefEntity_t worldEntity; // point currentEntity at this when rendering world
+    int currentEntityNum;
+    int shiftedEntityNum; // currentEntityNum << QSORT_ENTITYNUM_SHIFT
+    model_t *currentModel;
+
+    viewParms_t viewParms;
+
+    float identityLight; // 1.0 / ( 1 << overbrightBits )
+    int identityLightByte; // identityLight * 255
+
+    orientationr_t or; // for current entity
+
+    trRefdef_t refdef;
+
+    int viewCluster;
+
+    vec3_t sunLight; // from the sky shader for this level
+    vec3_t sunDirection;
+
+    frontEndCounters_t pc;
+    int frontEndMsec; // not in pc due to clearing issue
+
+    //
+    // put large tables at the end, so most elements will be
+    // within the +/32K indexed range on risc processors
+    //
+    model_t *models[MAX_MOD_KNOWN];
+    int numModels;
+
+    int numImages;
+    image_t * images[MAX_DRAWIMAGES];
+
+    // shader indexes from other modules will be looked up in tr.shaders[]
+    // shader indexes from drawsurfs will be looked up in sortedShaders[]
+    // lower indexed sortedShaders must be rendered first (opaque surfaces before translucent)
+    int numShaders;
+    shader_t *shaders[MAX_SHADERS];
+    shader_t *sortedShaders[MAX_SHADERS];
+
+    int numSkins;
+    skin_t *skins[MAX_SKINS];
+
+    float sinTable[FUNCTABLE_SIZE];
+    float squareTable[FUNCTABLE_SIZE];
+    float triangleTable[FUNCTABLE_SIZE];
+    float sawToothTable[FUNCTABLE_SIZE];
+    float inverseSawToothTable[FUNCTABLE_SIZE];
+} trGlobals_t;
+
+
+
+extern trGlobals_t tr;
+
+#endif
diff --git a/code/renderer_vulkan/tr_image.c b/code/renderer_vulkan/tr_image.c
new file mode 100644
index 0000000000..d4738381ed
--- /dev/null
+++ b/code/renderer_vulkan/tr_image.c
@@ -0,0 +1,310 @@
+/*
+===========================================================================
+Copyright (C) 1999-2005 Id Software, Inc.
+
+This file is part of Quake III Arena source code.
+
+Quake III Arena source code is free software; you can redistribute it
+and/or modify it under the terms of the GNU General Public License as
+published by the Free Software Foundation; either version 2 of the License,
+or (at your option) any later version.
+
+Quake III Arena source code is distributed in the hope that it will be
+useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Quake III Arena source code; if not, write to the Free Software
+Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+===========================================================================
+*/
+
+/*
+============================================================================
+
+SKINS
+
+============================================================================
+*/
+
+#include "tr_local.h"
+#include "tr_globals.h"
+#include "ref_import.h"
+#include "tr_shader.h"
+
+
+/*
+==================
+CommaParse
+
+This is unfortunate, but the skin files aren't
+compatible with our normal parsing rules.
+==================
+*/
+static char *CommaParse( char **data_p )
+{
+    int c = 0, len;
+    char *data;
+    static char com_token[MAX_TOKEN_CHARS];
+
+    data = *data_p;
+    len = 0;
+    com_token[0] = 0;
+
+    // make sure incoming data is valid
+    if ( !data ) {
+        *data_p = NULL;
+        return com_token;
+    }
+
+    while ( 1 ) {
+        // skip whitespace
+        while( (c = *data) <= ' ') {
+            if( !c ) {
+                break;
+            }
+            data++;
+        }
+
+
+        c = *data;
+
+        // skip double slash comments
+        if ( c == '/' && data[1] == '/' )
+        {
+            while (*data && *data != '\n')
+                data++;
+        }
+        // skip /* */ comments
+        else if ( c=='/' && data[1] == '*' )
+        {
+            while ( *data && ( *data != '*' || data[1] != '/' ) )
+            {
+                data++;
+            }
+            if ( *data )
+            {
+                data += 2;
+            }
+        }
+        else
+        {
+            break;
+        }
+    }
+
+    if ( c == 0 ) {
+        return "";
+    }
+
+    // handle quoted strings
+    if (c == '\"')
+    {
+        data++;
+        while (1)
+        {
+            c = *data++;
+            if (c=='\"' || !c)
+            {
+                com_token[len] = 0;
+                *data_p = ( char * ) data;
+                return com_token;
+            }
+            if (len < MAX_TOKEN_CHARS)
+            {
+                com_token[len] = c;
+                len++;
+            }
+        }
+    }
+
+    // parse a regular word
+    do
+    {
+        if (len < MAX_TOKEN_CHARS)
+        {
+            com_token[len] = c;
+            len++;
+        }
+        data++;
+        c = *data;
+    } while (c>32 && c != ',' );
+
+    if (len == MAX_TOKEN_CHARS)
+    {
+        ri.Printf (PRINT_ALL, "Token exceeded %i chars, discarded.\n", MAX_TOKEN_CHARS);
+        len = 0;
+    }
+    com_token[len] = 0;
+
+    *data_p = ( char * ) data;
+    return com_token;
+}
+
+
+
+qhandle_t RE_RegisterSkin( const char *name )
+{
+    skinSurface_t parseSurfaces[MAX_SKIN_SURFACES];
+    qhandle_t hSkin;
+    skin_t *skin;
+    skinSurface_t *surf;
+    char *text, *text_p;
+    char *token;
+    char surfName[MAX_QPATH];
+
+    if ( !name || !name[0] ) {
+        ri.Printf(PRINT_ALL, "Empty name passed to RE_RegisterSkin\n" );
+        return 0;
+    }
+
+    if ( (int)strlen( name ) >= MAX_QPATH ) {
+        ri.Printf(PRINT_ALL, "Skin name exceeds MAX_QPATH\n" );
+        return 0;
+    }
+
+
+    // see if the skin is already loaded
+    for ( hSkin = 1; hSkin < tr.numSkins ; hSkin++ ) {
+        skin = tr.skins[hSkin];
+        if ( !Q_stricmp( skin->name, name ) ) {
+            if( skin->numSurfaces == 0 ) {
+                return 0; // default skin
+            }
+            return hSkin;
+        }
+    }
+
+    // allocate a new skin
+    if ( tr.numSkins == MAX_SKINS ) {
+        ri.Printf( PRINT_WARNING, "WARNING: RE_RegisterSkin( '%s' ) MAX_SKINS hit\n", name );
+        return 0;
+    }
+    tr.numSkins++;
+    skin = (skin_t*) ri.Hunk_Alloc( sizeof( skin_t ), h_low );
+    tr.skins[hSkin] = skin;
+    Q_strncpyz( skin->name, name, sizeof( skin->name ) );
+    skin->numSurfaces = 0;
+
+
+    // If not a .skin file, load as a single shader
+    if ( strcmp( name + (int)strlen( name ) - 5, ".skin" ) ) {
+        skin->numSurfaces = 1;
+        skin->pSurfaces = (skinSurface_t *) ri.Hunk_Alloc( sizeof(skinSurface_t), h_low );
+        skin->pSurfaces[0].shader = R_FindShader( name, LIGHTMAP_NONE, qtrue );
+        return hSkin;
+    }
+
+    // load and parse the skin file
+    ri.FS_ReadFile( name, (void**)&text );
+    if ( !text ) {
+        return 0;
+    }
+
+    text_p = text;
+    while ( text_p && *text_p ) {
+        // get surface name
+        token = CommaParse( &text_p );
+        Q_strncpyz( surfName, token, sizeof( surfName ) );
+
+        if ( !token[0] ) {
+            break;
+        }
+        // lowercase the surface name so skin compares are faster
+        Q_strlwr( surfName );
+
+        if ( *text_p == ',' ) {
+            text_p++;
+        }
+
+        if ( strstr( token, "tag_" ) ) {
+            continue;
+        }
+
+        // parse the shader name
+        token = CommaParse( &text_p );
+
+//      surf = skin->surfaces[ skin->numSurfaces ] = (skinSurface_t*) ri.Hunk_Alloc( sizeof( *skin->surfaces[0] ), h_low );
+        surf = &parseSurfaces[skin->numSurfaces];
+        Q_strncpyz( surf->name, surfName, sizeof( surf->name ) );
+        surf->shader = R_FindShader( token, LIGHTMAP_NONE, qtrue );
+        skin->numSurfaces++;
+    }
+
+    ri.FS_FreeFile( text );
+
+
+    // never let a skin have 0 shaders
+    if ( skin->numSurfaces == 0 ) {
+        return 0; // use default skin
+    }
+
+//  copy surfaces to skin
+    skin->pSurfaces = ri.Hunk_Alloc( skin->numSurfaces * sizeof( skinSurface_t ), h_low );
+    memcpy( skin->pSurfaces, parseSurfaces, skin->numSurfaces * sizeof( skinSurface_t ) );
+
+
+
+    return hSkin;
+}
+
+/*
+===============
+R_InitSkins
+===============
+*/
+void R_InitSkins( void )
+{
+    skin_t *skin;
+
+    tr.numSkins = 1;
+
+    // make the default skin have all default shaders
+    skin = tr.skins[0] = (skin_t*) ri.Hunk_Alloc( sizeof( skin_t ), h_low );
+    Q_strncpyz( skin->name, "<default skin>", sizeof( skin->name ) );
+    skin->numSurfaces = 1;
+//  skin->surfaces[0] = (skinSurface_t*) ri.Hunk_Alloc( sizeof( *skin->surfaces ), h_low );
+//  skin->surfaces[0]->shader = tr.defaultShader;
+    skin->pSurfaces = ri.Hunk_Alloc( sizeof( skinSurface_t ), h_low );
+    skin->pSurfaces[0].shader = tr.defaultShader;
+}
+
+/*
+===============
+R_GetSkinByHandle
+===============
+*/
+skin_t* R_GetSkinByHandle( qhandle_t hSkin )
+{
+    if ( hSkin < 1 || hSkin >= tr.numSkins ) {
+        return tr.skins[0];
+    }
+    return tr.skins[ hSkin ];
+}
+
+/*
+===============
+R_SkinList_f
+===============
+*/
+void R_SkinList_f( void )
+{
+    int i, j;
+    skin_t *skin;
+
+    ri.Printf (PRINT_ALL, "------------------\n");
+
+    for ( i = 0 ; i < tr.numSkins ; i++ ) {
+        skin = tr.skins[i];
+
+//      ri.Printf( PRINT_ALL, "%3i:%s\n", i, skin->name );
+        ri.Printf( PRINT_ALL, "%3i:%s (%d surfaces)\n", i, skin->name, skin->numSurfaces );
+        for ( j = 0 ; j < skin->numSurfaces ; j++ ) {
+            ri.Printf( PRINT_ALL, "       %s = %s\n",
+                skin->pSurfaces[j].name, skin->pSurfaces[j].shader->name );
+        }
+    }
+    ri.Printf (PRINT_ALL, "------------------\n");
+}
+
diff --git a/code/renderer_vulkan/tr_image.h b/code/renderer_vulkan/tr_image.h
new file mode 100644
index 0000000000..700ece48fc
--- /dev/null
+++ b/code/renderer_vulkan/tr_image.h
@@ -0,0 +1,37 @@
+#ifndef TR_IMAGE_H_
+#define TR_IMAGE_H_
+
+#include "VKimpl.h"
+
+
+typedef struct image_s {
+    char imgName[MAX_QPATH]; // game path, including extension
+    uint32_t width, height; // source image
+    uint32_t uploadWidth, uploadHeight; // after power of two and picmip but not including clamp to MAX_TEXTURE_SIZE
+
+    uint32_t index;
+
+    VkImage handle;
+    // To use any VkImage, including those in the swap chain, in the render
+    // pipeline we have to create a VkImageView object. An image view is quite
+    // literally a view into the image: it describes how to access the image
+    // and which part of it to access, e.g. whether it should be treated as a
+    // 2D color texture or a depth texture, and with how many mipmap levels.
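+    // For illustration only (not taken from this patch), creating such a view
+    // typically looks like the following, where device stands in for the
+    // logical-device handle and the format is assumed to match the upload:
+    //     VkImageViewCreateInfo desc = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO };
+    //     desc.image = image->handle;
+    //     desc.viewType = VK_IMAGE_VIEW_TYPE_2D;
+    //     desc.format = VK_FORMAT_R8G8B8A8_UNORM;
+    //     desc.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+    //     desc.subresourceRange.baseMipLevel = 0;
+    //     desc.subresourceRange.levelCount = image->mipLevels;
+    //     desc.subresourceRange.baseArrayLayer = 0;
+    //     desc.subresourceRange.layerCount = 1;
+    //     vkCreateImageView(device, &desc, NULL, &image->view);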
+ + VkImageView view; + + // Descriptor set that contains single descriptor used to access the given image. + // It is updated only once during image initialization. + VkDescriptorSet descriptor_set; + + int wrapClampMode; // GL_CLAMP or GL_REPEAT, for vulkan + VkBool32 mipmap; // for vulkan + uint32_t mipLevels; // gl texture binding + VkBool32 allowPicmip; // for vulkan + VkBool32 isLightmap; + + struct image_s* next; +} image_t; + + +#endif diff --git a/code/renderer_vulkan/tr_init.c b/code/renderer_vulkan/tr_init.c new file mode 100644 index 0000000000..ed466826a1 --- /dev/null +++ b/code/renderer_vulkan/tr_init.c @@ -0,0 +1,226 @@ +/* +=========================================================================== +Copyright (C) 1999-2005 Id Software, Inc. + +This file is part of Quake III Arena source code. + +Quake III Arena source code is free software; you can redistribute it +and/or modify it under the terms of the GNU General Public License as +published by the Free Software Foundation; either version 2 of the License, +or (at your option) any later version. + +Quake III Arena source code is distributed in the hope that it will be +useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with Foobar; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +=========================================================================== +*/ +// tr_init.c -- functions that are not called every frame + +#include "tr_local.h" +#include "tr_globals.h" +#include "tr_model.h" +#include "tr_cvar.h" + +#include "vk_init.h" + +#include "vk_screenshot.h" +#include "vk_shade_geometry.h" +#include "vk_pipelines.h" +#include "vk_image.h" + +#include "tr_fog.h" +#include "tr_backend.h" +#include "glConfig.h" +#include "ref_import.h" + +extern void RE_ClearScene( void ); + + + +void R_Init( void ) +{ + int i; + + ri.Printf( PRINT_ALL, "----- R_Init -----\n" ); + + + // clear all our internal state + memset( &tr, 0, sizeof( tr ) ); + memset( &tess, 0, sizeof( tess ) ); + + R_ClearBackendState(); + + if ( (intptr_t)tess.xyz & 15 ) { + ri.Printf( PRINT_ALL, "WARNING: tess.xyz not 16 byte aligned\n" ); + } + + + // + // init function tables + // + for ( i = 0; i < FUNCTABLE_SIZE; i++ ) + { + tr.sinTable[i] = sin( DEG2RAD( i * 360.0f / ( ( float ) ( FUNCTABLE_SIZE - 1 ) ) ) ); + tr.squareTable[i] = ( i < FUNCTABLE_SIZE/2 ) ? 
1.0f : -1.0f; + tr.sawToothTable[i] = (float)i / FUNCTABLE_SIZE; + tr.inverseSawToothTable[i] = 1.0f - tr.sawToothTable[i]; + + if ( i < FUNCTABLE_SIZE / 2 ) + { + if ( i < FUNCTABLE_SIZE / 4 ) + { + tr.triangleTable[i] = ( float ) i / ( FUNCTABLE_SIZE / 4 ); + } + else + { + tr.triangleTable[i] = 1.0f - tr.triangleTable[i-FUNCTABLE_SIZE / 4]; + } + } + else + { + tr.triangleTable[i] = -tr.triangleTable[i-FUNCTABLE_SIZE/2]; + } + } + + R_InitDisplayResolution(); + + R_InitFogTable(); + + R_NoiseInit(); + + R_Register(); + + // make sure all the commands added here are also + // removed in R_Shutdown + ri.Cmd_AddCommand( "displayResoList", R_DisplayResolutionList_f ); + + ri.Cmd_AddCommand( "modellist", R_Modellist_f ); + ri.Cmd_AddCommand( "screenshotJPEG", R_ScreenShotJPEG_f ); + ri.Cmd_AddCommand( "screenshot", R_ScreenShot_f ); + ri.Cmd_AddCommand( "shaderlist", R_ShaderList_f ); + ri.Cmd_AddCommand( "skinlist", R_SkinList_f ); + + ri.Cmd_AddCommand( "vkinfo", vulkanInfo_f ); + ri.Cmd_AddCommand( "minimize", vk_minimizeWindow ); + + ri.Cmd_AddCommand( "pipelineList", R_PipelineList_f ); + + ri.Cmd_AddCommand( "gpuMem", gpuMemUsageInfo_f ); + + ri.Cmd_AddCommand( "printOR", R_PrintBackEnd_OR_f ); + + R_InitScene(); + + R_glConfigInit(); + + // VULKAN + if ( !isVKinitialied() ) + { + vk_initialize(); + + // print info + vulkanInfo_f(); + } + + + R_InitImages(); + + R_InitShaders(); + + R_InitSkins(); + + R_ModelInit(); + + R_InitFreeType(); + + ri.Printf( PRINT_ALL, "----- R_Init finished -----\n" ); +} + + + + +void RE_Shutdown( qboolean destroyWindow ) +{ + + ri.Printf( PRINT_ALL, "\nRE_Shutdown( %i )\n", destroyWindow ); + + ri.Cmd_RemoveCommand("displayResoList"); + + ri.Cmd_RemoveCommand("modellist"); + ri.Cmd_RemoveCommand("screenshotJPEG"); + ri.Cmd_RemoveCommand("screenshot"); + ri.Cmd_RemoveCommand("shaderlist"); + ri.Cmd_RemoveCommand("skinlist"); + + ri.Cmd_RemoveCommand("minimize"); + + ri.Cmd_RemoveCommand("vkinfo"); + ri.Cmd_RemoveCommand("pipelineList"); + ri.Cmd_RemoveCommand("gpuMem"); + ri.Cmd_RemoveCommand("printOR"); + + R_DoneFreeType(); + + // VULKAN + // Releases vulkan resources allocated during program execution. + // This effectively puts vulkan subsystem into initial state + // (the state we have after vk_initialize call). + + // contains vulkan resources/state, reinitialized on a map change. + + vk_destroyShaderStagePipeline(); + + + vk_resetGeometryBuffer(); + + if ( tr.registered ) + { + vk_destroyImageRes(); + tr.registered = qfalse; + } + + if (destroyWindow) + { + vk_shutdown(); + vk_destroyWindow(); + + // It is cleared not for renderer_vulkan, + // but fot rendergl1, renderergl2 to create the window + R_glConfigClear(); + } +} + + +void RE_BeginRegistration(glconfig_t * pGlCfg) +{ + R_Init(); + + R_GetGlConfig(pGlCfg); + + tr.viewCluster = -1; // force markleafs to regenerate + + RE_ClearScene(); + + tr.registered = qtrue; + + ri.Printf(PRINT_ALL, "RE_BeginRegistration finished.\n"); +} + +/* +============= +RE_EndRegistration + +Touch all images to make sure they are resident +============= +*/ +void RE_EndRegistration( void ) +{ + if ( tr.registered ) { + R_IssueRenderCommands( qfalse ); + } +} diff --git a/code/renderer_vulkan/tr_light.c b/code/renderer_vulkan/tr_light.c new file mode 100644 index 0000000000..68c95db98c --- /dev/null +++ b/code/renderer_vulkan/tr_light.c @@ -0,0 +1,396 @@ +/* +=========================================================================== +Copyright (C) 1999-2005 Id Software, Inc. 
+
+This file is part of Quake III Arena source code.
+
+Quake III Arena source code is free software; you can redistribute it
+and/or modify it under the terms of the GNU General Public License as
+published by the Free Software Foundation; either version 2 of the License,
+or (at your option) any later version.
+
+Quake III Arena source code is distributed in the hope that it will be
+useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Quake III Arena source code; if not, write to the Free Software
+Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+===========================================================================
+*/
+// tr_light.c
+
+#include "tr_local.h"
+#include "tr_globals.h"
+#include "ref_import.h"
+#include "tr_cvar.h"
+#include "tr_light.h"
+
+
+
+#define DLIGHT_AT_RADIUS 16
+// at the edge of a dlight's influence, this amount of light will be added
+
+#define DLIGHT_MINIMUM_RADIUS 16
+// never calculate a range less than this to prevent huge light numbers
+
+
+/*
+===============
+R_TransformDlights
+
+Transforms the origins of an array of dlights.
+Used by both the front end (for DlightBmodel) and
+the back end (before doing the lighting calculation)
+===============
+*/
+void R_TransformDlights( int count, dlight_t *dl, const orientationr_t * const or)
+{
+    int i;
+
+    for ( i = 0 ; i < count ; i++, dl++ )
+    {
+        vec3_t temp;
+        VectorSubtract( dl->origin, or->origin, temp );
+        dl->transformed[0] = DotProduct( temp, or->axis[0] );
+        dl->transformed[1] = DotProduct( temp, or->axis[1] );
+        dl->transformed[2] = DotProduct( temp, or->axis[2] );
+    }
+}
+
+/*
+=============
+R_DlightBmodel
+
+Determine which dynamic lights may affect this bmodel
+=============
+*/
+void R_DlightBmodel( bmodel_t *bmodel ) {
+    int i, j;
+    dlight_t *dl;
+    int mask;
+    msurface_t *surf;
+
+    // transform all the lights
+    R_TransformDlights( tr.refdef.num_dlights, tr.refdef.dlights, &tr.or );
+
+    mask = 0;
+    for ( i=0 ; i < tr.refdef.num_dlights ; i++ ) {
+        dl = &tr.refdef.dlights[i];
+
+        // see if the point is close enough to the bounds to matter
+        for ( j = 0 ; j < 3 ; j++ ) {
+            if ( dl->transformed[j] - bmodel->bounds[1][j] > dl->radius ) {
+                break;
+            }
+            if ( bmodel->bounds[0][j] - dl->transformed[j] > dl->radius ) {
+                break;
+            }
+        }
+        if ( j < 3 ) {
+            continue;
+        }
+
+        // we need to check this light
+        mask |= 1 << i;
+    }
+
+    tr.currentEntity->needDlights = (qboolean) (mask != 0);
+
+    // set the dlight bits in all the surfaces
+    for ( i = 0 ; i < bmodel->numSurfaces ; i++ ) {
+        surf = bmodel->firstSurface + i;
+
+        if ( *surf->data == SF_FACE ) {
+            ((srfSurfaceFace_t *)surf->data)->dlightBits = mask;
+        } else if ( *surf->data == SF_GRID ) {
+            ((srfGridMesh_t *)surf->data)->dlightBits = mask;
+        } else if ( *surf->data == SF_TRIANGLES ) {
+            ((srfTriangles_t *)surf->data)->dlightBits = mask;
+        }
+    }
+}
+
+
+/*
+=============================================================================
+
+LIGHT SAMPLING
+
+=============================================================================
+*/
+
+
+
+/*
+=================
+R_SetupEntityLightingGrid
+
+=================
+*/
+static void R_SetupEntityLightingGrid( trRefEntity_t *ent ) {
+    vec3_t lightOrigin;
+    int pos[3];
+    int i, j;
+    byte *gridData;
+    float frac[3];
+    int gridStep[3];
+    vec3_t direction;
+    float totalFactor;
+
+    if ( ent->e.renderfx & RF_LIGHTING_ORIGIN ) {
+        // separate lightOrigins are needed so an object that is
+        // sinking into the ground can still be lit, and so
+        // multi-part models can be lit identically
+        VectorCopy( ent->e.lightingOrigin, lightOrigin );
+    } else {
+        VectorCopy( ent->e.origin, lightOrigin );
+    }
+
+    VectorSubtract( lightOrigin, tr.world->lightGridOrigin, lightOrigin );
+    for ( i = 0 ; i < 3 ; i++ ) {
+        float v;
+
+        v = lightOrigin[i]*tr.world->lightGridInverseSize[i];
+        pos[i] = floor( v );
+        frac[i] = v - pos[i];
+        if ( pos[i] < 0 ) {
+            pos[i] = 0;
+        } else if ( pos[i] >= tr.world->lightGridBounds[i] - 1 ) {
+            pos[i] = tr.world->lightGridBounds[i] - 1;
+        }
+    }
+
+    VectorClear( ent->ambientLight );
+    VectorClear( ent->directedLight );
+    VectorClear( direction );
+
+    assert( tr.world->lightGridData ); // bk010103 - NULL with -nolight maps
+
+    // trilerp the light value
+    gridStep[0] = 8;
+    gridStep[1] = 8 * tr.world->lightGridBounds[0];
+    gridStep[2] = 8 * tr.world->lightGridBounds[0] * tr.world->lightGridBounds[1];
+    gridData = tr.world->lightGridData + pos[0] * gridStep[0]
+        + pos[1] * gridStep[1] + pos[2] * gridStep[2];
+
+    totalFactor = 0;
+    for ( i = 0 ; i < 8 ; i++ ) {
+        float factor;
+        byte *data;
+        int lat, lng;
+        vec3_t normal;
+        #if idppc
+        float d0, d1, d2, d3, d4, d5;
+        #endif
+        factor = 1.0;
+        data = gridData;
+        for ( j = 0 ; j < 3 ; j++ ) {
+            if ( i & (1<<j) ) {
+                factor *= frac[j];
+                data += gridStep[j];
+            } else {
+                factor *= (1.0f - frac[j]);
+            }
+        }
+
+        if ( !(data[0]+data[1]+data[2]) ) {
+            continue; // ignore samples in walls
+        }
+        totalFactor += factor;
+        #if idppc
+        d0 = data[0]; d1 = data[1]; d2 = data[2];
+        d3 = data[3]; d4 = data[4]; d5 = data[5];
+
+        ent->ambientLight[0] += factor * d0;
+        ent->ambientLight[1] += factor * d1;
+        ent->ambientLight[2] += factor * d2;
+
+        ent->directedLight[0] += factor * d3;
+        ent->directedLight[1] += factor * d4;
+        ent->directedLight[2] += factor * d5;
+        #else
+        ent->ambientLight[0] += factor * data[0];
+        ent->ambientLight[1] += factor * data[1];
+        ent->ambientLight[2] += factor * data[2];
+
+        ent->directedLight[0] += factor * data[3];
+        ent->directedLight[1] += factor * data[4];
+        ent->directedLight[2] += factor * data[5];
+        #endif
+        lat = data[7];
+        lng = data[6];
+        lat *= (FUNCTABLE_SIZE/256);
+        lng *= (FUNCTABLE_SIZE/256);
+
+        // decode X as cos( lat ) * sin( long )
+        // decode Y as sin( lat ) * sin( long )
+        // decode Z as cos( long )
+
+        normal[0] = tr.sinTable[(lat+(FUNCTABLE_SIZE/4))&FUNCTABLE_MASK] * tr.sinTable[lng];
+        normal[1] = tr.sinTable[lat] * tr.sinTable[lng];
+        normal[2] = tr.sinTable[(lng+(FUNCTABLE_SIZE/4))&FUNCTABLE_MASK];
+
+        VectorMA( direction, factor, normal, direction );
+    }
+
+    if ( totalFactor > 0 && totalFactor < 0.99 ) {
+        totalFactor = 1.0f / totalFactor;
+        VectorScale( ent->ambientLight, totalFactor, ent->ambientLight );
+        VectorScale( ent->directedLight, totalFactor, ent->directedLight );
+    }
+
+    VectorScale( ent->ambientLight, r_ambientScale->value, ent->ambientLight );
+    VectorScale( ent->directedLight, r_directedScale->value, ent->directedLight );
+
+    VectorNormalize2( direction, ent->lightDir );
+}
+
+
+/*
+===============
+LogLight
+===============
+*/
+static void LogLight( trRefEntity_t *ent ) {
+    int max1, max2;
+
+    if ( !(ent->e.renderfx & RF_FIRST_PERSON ) ) {
+        return;
+    }
+
+    max1 = ent->ambientLight[0];
+    if ( ent->ambientLight[1] > max1 ) {
+        max1 = ent->ambientLight[1];
+    } else if ( ent->ambientLight[2] > max1 ) {
+        max1 = ent->ambientLight[2];
+    }
+
+    max2 = ent->directedLight[0];
+    if ( ent->directedLight[1] > max2 ) {
+        max2 = ent->directedLight[1];
+    } else if ( ent->directedLight[2] > max2 ) {
+        max2 = ent->directedLight[2];
+    }
+
+    ri.Printf( PRINT_ALL, "amb:%i  dir:%i\n", max1, max2 );
+}
+
+/*
+=================
+R_SetupEntityLighting
+
+Calculates all the lighting values that will be used
+by the Calc_* functions
+=================
+*/
+void R_SetupEntityLighting( const trRefdef_t *refdef, trRefEntity_t *ent ) {
+    int i;
+    dlight_t *dl;
+    float power;
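+    // A note on the dlight falloff applied below: each light adds
+    // power / (d*d) with power = DLIGHT_AT_RADIUS * radius * radius,
+    // so the contribution is exactly DLIGHT_AT_RADIUS (16) at
+    // d == radius; d is clamped to DLIGHT_MINIMUM_RADIUS so very
+    // close lights can't produce huge numbers.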
vec3_t dir; + float d; + vec3_t lightDir; + vec3_t lightOrigin; + + // lighting calculations + if ( ent->lightingCalculated ) { + return; + } + ent->lightingCalculated = qtrue; + + // + // trace a sample point down to find ambient light + // + if ( ent->e.renderfx & RF_LIGHTING_ORIGIN ) { + // seperate lightOrigins are needed so an object that is + // sinking into the ground can still be lit, and so + // multi-part models can be lit identically + VectorCopy( ent->e.lightingOrigin, lightOrigin ); + } else { + VectorCopy( ent->e.origin, lightOrigin ); + } + + // if NOWORLDMODEL, only use dynamic lights (menu system, etc) + if ( !(refdef->rd.rdflags & RDF_NOWORLDMODEL ) + && tr.world->lightGridData ) { + R_SetupEntityLightingGrid( ent ); + } else { + ent->ambientLight[0] = ent->ambientLight[1] = + ent->ambientLight[2] = tr.identityLight * 150; + ent->directedLight[0] = ent->directedLight[1] = + ent->directedLight[2] = tr.identityLight * 150; + VectorCopy( tr.sunDirection, ent->lightDir ); + } + + // bonus items and view weapons have a fixed minimum add + if ( 1 /* ent->e.renderfx & RF_MINLIGHT */ ) { + // give everything a minimum light add + ent->ambientLight[0] += tr.identityLight * 32; + ent->ambientLight[1] += tr.identityLight * 32; + ent->ambientLight[2] += tr.identityLight * 32; + } + + // + // modify the light by dynamic lights + // + d = VectorLength( ent->directedLight ); + VectorScale( ent->lightDir, d, lightDir ); + + for ( i = 0 ; i < refdef->num_dlights ; i++ ) { + dl = &refdef->dlights[i]; + VectorSubtract( dl->origin, lightOrigin, dir ); + d = VectorNormalize( dir ); + + power = DLIGHT_AT_RADIUS * ( dl->radius * dl->radius ); + if ( d < DLIGHT_MINIMUM_RADIUS ) { + d = DLIGHT_MINIMUM_RADIUS; + } + d = power / ( d * d ); + + VectorMA( ent->directedLight, d, dl->color, ent->directedLight ); + VectorMA( lightDir, d, dir, lightDir ); + } + + // clamp ambient + for ( i = 0 ; i < 3 ; i++ ) { + if ( ent->ambientLight[i] > tr.identityLightByte ) { + ent->ambientLight[i] = tr.identityLightByte; + } + } + + if ( r_debugLight->integer ) { + LogLight( ent ); + } + + // save out the byte packet version + ent->ambientLightRGBA[0] = (unsigned char)( ent->ambientLight[0] ); + ent->ambientLightRGBA[1] = (unsigned char)( ent->ambientLight[1] ); + ent->ambientLightRGBA[2] = (unsigned char)( ent->ambientLight[2] ); + ent->ambientLightRGBA[3] = 0xff; + + // transform the direction to local space + VectorNormalize( lightDir ); + ent->lightDir[0] = DotProduct( lightDir, ent->e.axis[0] ); + ent->lightDir[1] = DotProduct( lightDir, ent->e.axis[1] ); + ent->lightDir[2] = DotProduct( lightDir, ent->e.axis[2] ); +} + +int RE_LightForPoint( vec3_t point, vec3_t ambientLight, vec3_t directedLight, vec3_t lightDir ) +{ + trRefEntity_t ent; + + // bk010103 - this segfaults with -nolight maps + if ( tr.world->lightGridData == NULL ) + return qfalse; + + memset(&ent, 0, sizeof(ent)); + VectorCopy( point, ent.e.origin ); + R_SetupEntityLightingGrid( &ent ); + VectorCopy(ent.ambientLight, ambientLight); + VectorCopy(ent.directedLight, directedLight); + VectorCopy(ent.lightDir, lightDir); + + return qtrue; +} diff --git a/code/renderer_vulkan/tr_light.h b/code/renderer_vulkan/tr_light.h new file mode 100644 index 0000000000..3220f0e886 --- /dev/null +++ b/code/renderer_vulkan/tr_light.h @@ -0,0 +1,23 @@ +#ifndef TR_LIGHT_H_ +#define TR_LIGHT_H_ + + +// can't be increased without changing bit packing for drawsurfs + +typedef struct dlight_s { + float origin[3]; + float color[3]; // range from 0.0 to 1.0, should be 
color normalized + float transformed[3]; // origin in local coordinate system + float radius; + int additive; // texture detail is lost tho when the lightmap is dark +} dlight_t; + + + +void R_DlightBmodel( bmodel_t *bmodel ); +void R_SetupEntityLighting( const trRefdef_t *refdef, trRefEntity_t *ent ); +void R_TransformDlights( int count, dlight_t *dl, const orientationr_t * const or ); + + + +#endif diff --git a/code/renderer_vulkan/tr_local.h b/code/renderer_vulkan/tr_local.h new file mode 100644 index 0000000000..5168ac2e28 --- /dev/null +++ b/code/renderer_vulkan/tr_local.h @@ -0,0 +1,1031 @@ +/* +=========================================================================== +Copyright (C) 1999-2005 Id Software, Inc. + +This file is part of Quake III Arena source code. + +Quake III Arena source code is free software; you can redistribute it +and/or modify it under the terms of the GNU General Public License as +published by the Free Software Foundation; either version 2 of the License, +or (at your option) any later version. + +Quake III Arena source code is distributed in the hope that it will be +useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with Quake III Arena source code; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +=========================================================================== +*/ + + +#ifndef TR_LOCAL_H +#define TR_LOCAL_H + +#include "../qcommon/q_shared.h" +#include "../qcommon/qfiles.h" + +#include "../renderercommon/tr_types.h" + +#include "tr_image.h" + +#include "VKimpl.h" + +// QALIGN macro for aligned struct members (defined in vkQuake3's q_shared.h +// but not in upstream ioquake3) +#ifndef QALIGN + #ifdef __GNUC__ + #define QALIGN(x) __attribute__((aligned(x))) + #else + #define QALIGN(x) + #endif +#endif + + + + +// a trRefEntity_t has all the information passed in by +// the client game, as well as some locally derived info +typedef struct { + refEntity_t e; + + float axisLength; // compensate for non-normalized axis + + qboolean needDlights; // true for bmodels that touch a dlight + qboolean lightingCalculated; + vec3_t lightDir; // normalized direction towards light + vec3_t ambientLight; // color normalized to 0-255 + unsigned char ambientLightRGBA[4]; // 32 bit rgba packed + vec3_t directedLight; +} trRefEntity_t; + + +typedef struct { + float modelMatrix[16] QALIGN(16); + float axis[3][3]; // orientation in world + float origin[3]; // in world coordinates + float viewOrigin[3]; // viewParms->or.origin in local coordinates +} orientationr_t; + +//=============================================================================== + + + + +#define MAX_SHADER_STAGES 8 + +typedef enum { + GF_NONE, + + GF_SIN, + GF_SQUARE, + GF_TRIANGLE, + GF_SAWTOOTH, + GF_INVERSE_SAWTOOTH, + + GF_NOISE + +} genFunc_t; + + +typedef enum { + DEFORM_NONE, + DEFORM_WAVE, + DEFORM_NORMALS, + DEFORM_BULGE, + DEFORM_MOVE, + DEFORM_PROJECTION_SHADOW, + DEFORM_AUTOSPRITE, + DEFORM_AUTOSPRITE2, + DEFORM_TEXT0, + DEFORM_TEXT1, + DEFORM_TEXT2, + DEFORM_TEXT3, + DEFORM_TEXT4, + DEFORM_TEXT5, + DEFORM_TEXT6, + DEFORM_TEXT7 +} deform_t; + +typedef enum { + AGEN_IDENTITY, + AGEN_SKIP, + AGEN_ENTITY, + AGEN_ONE_MINUS_ENTITY, + AGEN_VERTEX, + AGEN_ONE_MINUS_VERTEX, + AGEN_LIGHTING_SPECULAR, + AGEN_WAVEFORM, + AGEN_PORTAL, + 
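+    // (AGEN_PORTAL above scales alpha by the vertex distance to the viewer
+    // relative to the shader's portalRange, reaching fully opaque at and
+    // beyond that range, as in the stock Q3 shade path.)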
AGEN_CONST +} alphaGen_t; + +typedef enum { + CGEN_BAD, + CGEN_IDENTITY_LIGHTING, // tr.identityLight + CGEN_IDENTITY, // always (1,1,1,1) + CGEN_ENTITY, // grabbed from entity's modulate field + CGEN_ONE_MINUS_ENTITY, // grabbed from 1 - entity.modulate + CGEN_EXACT_VERTEX, // tess.vertexColors + CGEN_VERTEX, // tess.vertexColors * tr.identityLight + CGEN_ONE_MINUS_VERTEX, + CGEN_WAVEFORM, // programmatically generated + CGEN_LIGHTING_DIFFUSE, + CGEN_FOG, // standard fog + CGEN_CONST // fixed color +} colorGen_t; + +typedef enum { + TCGEN_BAD, + TCGEN_IDENTITY, // clear to 0,0 + TCGEN_LIGHTMAP, + TCGEN_TEXTURE, + TCGEN_ENVIRONMENT_MAPPED, + TCGEN_FOG, + TCGEN_VECTOR // S and T from world coordinates +} texCoordGen_t; + +typedef enum { + ACFF_NONE, + ACFF_MODULATE_RGB, + ACFF_MODULATE_RGBA, + ACFF_MODULATE_ALPHA +} acff_t; + +typedef struct { + genFunc_t func; + + float base; + float amplitude; + float phase; + float frequency; +} waveForm_t; + +#define TR_MAX_TEXMODS 4 + +typedef enum { + TMOD_NONE, + TMOD_TRANSFORM, + TMOD_TURBULENT, + TMOD_SCROLL, + TMOD_SCALE, + TMOD_STRETCH, + TMOD_ROTATE, + TMOD_ENTITY_TRANSLATE +} texMod_t; + +#define MAX_SHADER_DEFORMS 3 +typedef struct { + deform_t deformation; // vertex coordinate modification type + + vec3_t moveVector; + waveForm_t deformationWave; + float deformationSpread; + + float bulgeWidth; + float bulgeHeight; + float bulgeSpeed; +} deformStage_t; + + +typedef struct { + texMod_t type; + + // used for TMOD_TURBULENT and TMOD_STRETCH + waveForm_t wave; + + // used for TMOD_TRANSFORM + float matrix[2][2]; // s' = s * m[0][0] + t * m[1][0] + trans[0] + float translate[2]; // t' = s * m[0][1] + t * m[0][1] + trans[1] + + // used for TMOD_SCALE + float scale[2]; // s *= scale[0] + // t *= scale[1] + + // used for TMOD_SCROLL + float scroll[2]; // s' = s + scroll[0] * time + // t' = t + scroll[1] * time + + // + = clockwise + // - = counterclockwise + float rotateSpeed; + +} texModInfo_t; + + +#define MAX_IMAGE_ANIMATIONS 8 + +typedef struct { + image_t* image[MAX_IMAGE_ANIMATIONS]; + int numImageAnimations; + float imageAnimationSpeed; + + texCoordGen_t tcGen; + vec3_t tcGenVectors[2]; + + int numTexMods; + texModInfo_t *texMods; + + int videoMapHandle; + qboolean isLightmap; + qboolean isVideoMap; +} textureBundle_t; + +#define NUM_TEXTURE_BUNDLES 2 + +typedef struct { + qboolean active; + + textureBundle_t bundle[NUM_TEXTURE_BUNDLES]; + + waveForm_t rgbWave; + colorGen_t rgbGen; + + waveForm_t alphaWave; + alphaGen_t alphaGen; + + unsigned char constantColor[4]; // for CGEN_CONST and AGEN_CONST + + unsigned int stateBits; // GLS_xxxx mask + + acff_t adjustColorsForFog; + + qboolean isDetail; + + // VULKAN + VkPipeline vk_pipeline; + VkPipeline vk_portal_pipeline; + VkPipeline vk_mirror_pipeline; + +} shaderStage_t; + +struct shaderCommands_s; + +typedef enum { + CT_FRONT_SIDED, + CT_BACK_SIDED, + CT_TWO_SIDED +} cullType_t; + +typedef enum { + FP_NONE, // surface is translucent and will just be adjusted properly + FP_EQUAL, // surface is opaque but possibly alpha tested + FP_LE // surface is trnaslucent, but still needs a fog pass (fog surface) +} fogPass_t; + +typedef struct { + float cloudHeight; + image_t *outerbox[6], *innerbox[6]; +} skyParms_t; + +typedef struct { + vec3_t color; + float depthForOpaque; +} fogParms_t; + + +typedef struct shader_s { + char name[MAX_QPATH]; // game path, including extension + int lightmapIndex; // for a shader to match, both name and lightmapIndex must match + + int index; // this shader == 
tr.shaders[index]
+ int sortedIndex; // this shader == tr.sortedShaders[sortedIndex]
+
+ float sort; // lower numbered shaders draw before higher numbered
+
+ qboolean defaultShader; // we want to return index 0 if the shader failed to
+ // load for some reason, but R_FindShader should
+ // still keep a name allocated for it, so if
+ // something calls RE_RegisterShader again with
+ // the same name, we don't try looking for it again
+
+ qboolean explicitlyDefined; // found in a .shader file
+
+ int surfaceFlags; // if explicitlyDefined, this will have SURF_* flags
+ int contentFlags;
+
+ qboolean entityMergable; // merge across entities optimizable (smoke, blood)
+
+ qboolean isSky;
+ skyParms_t sky;
+ fogParms_t fogParms;
+
+ float portalRange; // distance to fog out at
+
+ int multitextureEnv; // 0, GL_MODULATE, GL_ADD (FIXME: put in stage)
+
+ cullType_t cullType; // CT_FRONT_SIDED, CT_BACK_SIDED, or CT_TWO_SIDED
+ qboolean polygonOffset; // set for decals and other items that must be offset
+ qboolean noMipMaps; // for console fonts, 2D elements, etc.
+ qboolean noPicMip; // for images that must always be full resolution
+
+ fogPass_t fogPass; // draw a blended pass, possibly with depth test equals
+
+ qboolean needsNormal; // not all shaders will need all data to be gathered
+ qboolean needsST1;
+ qboolean needsST2;
+ qboolean needsColor;
+
+ int numDeforms;
+ deformStage_t deforms[MAX_SHADER_DEFORMS];
+
+ int numUnfoggedPasses;
+ shaderStage_t *stages[MAX_SHADER_STAGES];
+
+ float clampTime; // time this shader is clamped to
+ float timeOffset; // current time offset for this shader
+
+ struct shader_s *remappedShader; // current shader this one is remapped to
+
+ struct shader_s *next;
+} shader_t;
+
+// trRefdef_t holds everything that comes in refdef_t,
+// as well as the locally generated scene information
+typedef struct {
+/*
+ int x, y, width, height;
+ float fov_x, fov_y;
+ vec3_t vieworg;
+ float viewaxis[3][3]; // transformation matrix
+
+ int time; // time in milliseconds for shader effects and other time dependent rendering issues
+ int rdflags; // RDF_NOWORLDMODEL, etc
+
+ // 1 bits will prevent the associated area from rendering at all
+ byte areamask[MAX_MAP_AREA_BYTES];
+
+
+ // text messages for deform text shaders
+ char text[MAX_RENDER_STRINGS][MAX_RENDER_STRING_LENGTH];
+*/
+ refdef_t rd;
+ qboolean AreamaskModified; // qtrue if areamask changed since last scene
+ float floatTime; // tr.refdef.time / 1000.0
+ int num_entities;
+ trRefEntity_t *entities;
+
+ int num_dlights;
+ struct dlight_s *dlights;
+
+ int numPolys;
+ struct srfPoly_s *polys;
+
+ int numDrawSurfs;
+ struct drawSurf_s *drawSurfs;
+} trRefdef_t;
+
+
+//=================================================================================
+
+// max surfaces per-skin
+// This is an arbitrary limit. Vanilla Q3 only supported 32 surfaces in skins but failed to
+// enforce the maximum limit when reading skin files. It was possible to use more than 32
+// surfaces which accessed out of bounds memory past the end of the skin->surfaces hunk block.
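+// The larger limit below, together with the dynamically allocated
+// skin_t::pSurfaces array, is meant to avoid that out-of-bounds access.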
+#define MAX_SKIN_SURFACES 256 + + +// skins allow models to be retextured without modifying the model file +typedef struct { + char name[MAX_QPATH]; + shader_t *shader; +} skinSurface_t; + +typedef struct skin_s { + char name[MAX_QPATH]; // game path, including extension + int numSurfaces; + skinSurface_t* pSurfaces; // dynamically allocated array of surfaces +} skin_t; + + +typedef struct { + int originalBrushNumber; + vec3_t bounds[2]; + + unsigned char colorRGBA[4]; // in packed byte format + float tcScale; // texture coordinate vector scales + fogParms_t parms; + + // for clipping distance in fog when outside + qboolean hasSurface; + float surface[4]; +} fog_t; + +typedef struct { + orientationr_t or; + orientationr_t world; + vec3_t pvsOrigin; // may be different than or.origin for portals + qboolean isPortal; // true if this view is through a portal + qboolean isMirror; // the portal is a mirror, invert the face culling +// cplane_t portalPlane; // clip anything behind this if mirroring + int viewportX, viewportY, viewportWidth, viewportHeight; + float fovX, fovY; + float projectionMatrix[16] QALIGN(16); + cplane_t frustum[4]; + vec3_t visBounds[2]; + float zFar; +} viewParms_t; + + +/* +============================================================================== + +SURFACES + +============================================================================== +*/ + +// any changes in surfaceType must be mirrored in rb_surfaceTable[] +typedef enum { + SF_BAD, + SF_SKIP, // ignore + SF_FACE, + SF_GRID, + SF_TRIANGLES, + SF_POLY, + SF_MD3, + SF_MDR, + SF_IQM, + SF_FLARE, + SF_ENTITY, // beams, rails, lightning, etc that can be determined by entity + + SF_NUM_SURFACE_TYPES, + SF_MAX = 0x7fffffff // ensures that sizeof( surfaceType_t ) == sizeof( int ) +} surfaceType_t; + +typedef struct drawSurf_s { + unsigned sort; // bit combination for fast compares + surfaceType_t * surface; // any of surface*_t +} drawSurf_t; + +#define MAX_FACE_POINTS 64 + +#define MAX_PATCH_SIZE 32 // max dimensions of a patch mesh in map file +#define MAX_GRID_SIZE 65 // max dimensions of a grid mesh in memory + +// when cgame directly specifies a polygon, it becomes a srfPoly_t +// as soon as it is called +typedef struct srfPoly_s { + surfaceType_t surfaceType; + qhandle_t hShader; + int fogIndex; + int numVerts; + polyVert_t *verts; +} srfPoly_t; + + +typedef struct srfFlare_s { + surfaceType_t surfaceType; + vec3_t origin; + vec3_t normal; + vec3_t color; +} srfFlare_t; + +typedef struct srfGridMesh_s { + surfaceType_t surfaceType; + + // dynamic lighting information + int dlightBits; + + // culling information + vec3_t meshBounds[2]; + vec3_t localOrigin; + float meshRadius; + + // lod information, which may be different + // than the culling information to allow for + // groups of curves that LOD as a unit + vec3_t lodOrigin; + float lodRadius; + int lodFixed; + int lodStitched; + + // vertexes + int width, height; + float *widthLodError; + float *heightLodError; + drawVert_t verts[1]; // variable sized +} srfGridMesh_t; + + + +#define VERTEXSIZE 8 +typedef struct { + surfaceType_t surfaceType; + cplane_t plane; + + // dynamic lighting information + int dlightBits; + + // triangle definitions (no normals at points) + int numPoints; + int numIndices; + int ofsIndices; + float points[1][VERTEXSIZE]; // variable sized + // there is a variable length list of indices here also +} srfSurfaceFace_t; + + +// misc_models in maps are turned into direct geometry by q3map +typedef struct { + surfaceType_t surfaceType; + 
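+ // Note: every srf*_t variant begins with a surfaceType_t field, so the
+ // backend can dispatch any surface pointer through rb_surfaceTable[ *surf ]
+ // (see the surfaceType_t enum comment above).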
+ // dynamic lighting information + int dlightBits; + + // culling information (FIXME: use this!) + vec3_t bounds[2]; + vec3_t localOrigin; + float radius; + + // triangle definitions + int numIndexes; + int *indexes; + + int numVerts; + drawVert_t *verts; +} srfTriangles_t; + + + + +/* +============================================================================== + +BRUSH MODELS + +============================================================================== +*/ + + +// +// in memory representation +// + +#define SIDE_FRONT 0 +#define SIDE_BACK 1 +#define SIDE_ON 2 + +typedef struct msurface_s { + int viewCount; // if == tr.viewCount, already added + struct shader_s *shader; + int fogIndex; + + surfaceType_t *data; // any of srf*_t +} msurface_t; + + + +#define CONTENTS_NODE -1 +typedef struct mnode_s { + // common with leaf and node + int contents; // -1 for nodes, to differentiate from leafs + int visframe; // node needs to be traversed if current + vec3_t mins, maxs; // for bounding box culling + struct mnode_s *parent; + + // node specific + cplane_t *plane; + struct mnode_s *children[2]; + + // leaf specific + int cluster; + int area; + + msurface_t **firstmarksurface; + int nummarksurfaces; +} mnode_t; + +typedef struct { + vec3_t bounds[2]; // for culling + msurface_t *firstSurface; + int numSurfaces; +} bmodel_t; + +typedef struct { + char name[MAX_QPATH]; // ie: maps/tim_dm2.bsp + char baseName[MAX_QPATH]; // ie: tim_dm2 + + int dataSize; + + int numShaders; + dshader_t *shaders; + + bmodel_t *bmodels; + + int numplanes; + cplane_t *planes; + + int numnodes; // includes leafs + int numDecisionNodes; + mnode_t *nodes; + + int numsurfaces; + msurface_t *surfaces; + + int nummarksurfaces; + msurface_t **marksurfaces; + + int numfogs; + fog_t *fogs; + + vec3_t lightGridOrigin; + vec3_t lightGridSize; + vec3_t lightGridInverseSize; + int lightGridBounds[3]; + byte *lightGridData; + + + int numClusters; + int clusterBytes; + const unsigned char *vis; // may be passed in by CM_LoadMap to save space + + byte *novis; // clusterBytes of 0xff + + char *entityString; + char *entityParsePoint; +} world_t; + +//====================================================================== + + + +#define MAX_DRAWIMAGES 2048 +#define MAX_LIGHTMAPS 256 + +#define MAX_SKINS 1024 +#define MAX_DRAWSURFS 0x10000 +#define DRAWSURF_MASK (MAX_DRAWSURFS-1) + +/* + +the drawsurf sort data is packed into a single 32 bit value so it can be +compared quickly during the qsorting process + +the bits are allocated as follows: + +21 - 31 : sorted shader index +11 - 20 : entity index +2 - 6 : fog index +//2 : used to be clipped flag REMOVED - 03.21.00 rad +0 - 1 : dlightmap index + + TTimo - 1.32 +17-31 : sorted shader index +7-16 : entity index +2-6 : fog index +0-1 : dlightmap index +*/ +#define QSORT_FOGNUM_SHIFT 2 +#define QSORT_ENTITYNUM_SHIFT 7 +#define QSORT_SHADERNUM_SHIFT 17 + + + + +/* +** performanceCounters_t +*/ +typedef struct { + int c_sphere_cull_patch_in, c_sphere_cull_patch_clip, c_sphere_cull_patch_out; + int c_box_cull_patch_in, c_box_cull_patch_clip, c_box_cull_patch_out; + int c_sphere_cull_md3_in, c_sphere_cull_md3_clip, c_sphere_cull_md3_out; + int c_box_cull_md3_in, c_box_cull_md3_clip, c_box_cull_md3_out; + + int c_leafs; + int c_dlightSurfaces; + int c_dlightSurfacesCulled; +} frontEndCounters_t; + + +#define FUNCTABLE_SIZE 1024 +#define FUNCTABLE_SIZE2 10 +#define FUNCTABLE_MASK (FUNCTABLE_SIZE-1) + + + + +float R_NoiseGet4f( float x, float y, float z, float t ); +void R_NoiseInit( void ); + 
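+// Illustration of the 1.32 sort-key packing documented above (an added
+// example, not from the original source). R_AddDrawSurf() builds the key and
+// R_DecomposeSort() unpacks it (both declared below). E.g. sortedIndex 5,
+// entityNum 12, fogNum 0, dlightMap 1:
+//
+// sort = (5 << QSORT_SHADERNUM_SHIFT) | (12 << QSORT_ENTITYNUM_SHIFT)
+// | (0 << QSORT_FOGNUM_SHIFT) | 1
+// = (5 << 17) | (12 << 7) | 1 = 0xA0601
+//
+// entityNum = (sort >> QSORT_ENTITYNUM_SHIFT) & 1023 == 12
+// fogNum = (sort >> QSORT_FOGNUM_SHIFT) & 31 == 0
+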
+void R_RenderView( viewParms_t *parms ); + +void R_AddMD3Surfaces( trRefEntity_t *e ); + +void R_AddPolygonSurfaces( void ); + +void R_DecomposeSort( unsigned sort, int *entityNum, shader_t **shader, + int *fogNum, int *dlightMap ); + +void R_AddDrawSurf( surfaceType_t *surface, shader_t *shader, int fogIndex, int dlightMap ); +void ScanAndLoadShaderFiles( void ); +shader_t * GeneratePermanentShader( void ); +qboolean ParseShader( char **text ); + + + +#define CULL_IN 0 // completely unclipped +#define CULL_CLIP 1 // clipped by one or more planes +#define CULL_OUT 2 // completely outside the clipping planes +//void R_LocalNormalToWorld (vec3_t local, vec3_t world); +//void R_LocalPointToWorld (vec3_t local, vec3_t world); +int R_CullLocalBox (vec3_t bounds[2]); +int R_CullPointAndRadius( vec3_t origin, float radius ); +int R_CullLocalPointAndRadius( vec3_t origin, float radius ); + +void R_RotateForEntity( const trRefEntity_t *ent, const viewParms_t *viewParms, orientationr_t *or ); + + +void R_InitScene(void); +void R_InitNextFrame(void); + + +void R_ImageList_f( void ); +void R_SkinList_f( void ); +// https://zerowing.idsoftware.com/bugzilla/show_bug.cgi?id=516 + + +void R_InitImages( void ); +void R_InitSkins( void ); +skin_t *R_GetSkinByHandle( qhandle_t hSkin ); + + +// +// tr_shader.c +// +// qhandle_t RE_RegisterShaderLightMap( const char *name, int lightmapIndex ); + + +shader_t* R_FindShader( const char *name, int lightmapIndex, qboolean mipRawImage ); +shader_t* R_GetShaderByHandle( qhandle_t hShader ); +//shader_t* R_FindShaderByName( const char *name ); + +void R_InitShaders( void ); +void R_ShaderList_f( void ); +void R_ClearShaderHashTable(void); +void R_SetTheShader( const char *name, int lightmapIndex ); +void R_UpdateShaderHashTable(shader_t* newShader); + +void R_SetDefaultShader( void ); +shader_t *FinishShader( void ); +void R_CreateDefaultShadingCmds(const char* name, image_t* image); + + + +/* +==================================================================== + +TESSELATOR/SHADER DECLARATIONS + +==================================================================== +*/ +typedef byte color4ub_t[4]; + +typedef struct stageVars +{ + color4ub_t colors[SHADER_MAX_VERTEXES]; + vec2_t texcoords[NUM_TEXTURE_BUNDLES][SHADER_MAX_VERTEXES]; +} stageVars_t; + +typedef struct shaderCommands_s +{ + unsigned int indexes[SHADER_MAX_INDEXES]; + vec4_t xyz[SHADER_MAX_VERTEXES]; + vec4_t normal[SHADER_MAX_VERTEXES]; + vec2_t texCoords[SHADER_MAX_VERTEXES][2]; + color4ub_t vertexColors[SHADER_MAX_VERTEXES]; + int vertexDlightBits[SHADER_MAX_VERTEXES]; + + stageVars_t svars; + + color4ub_t constantColor255[SHADER_MAX_VERTEXES]; + + shader_t *shader; + float shaderTime; + int fogNum; + + int dlightBits; // or together of all vertexDlightBits + + int numIndexes; + int numVertexes; + + // info extracted from current shader + int numPasses; + shaderStage_t **xstages; +} shaderCommands_t; + + +void RB_BeginSurface(shader_t *shader, int fogNum ); +void RB_EndSurface(void); +void RB_CheckOverflow( int verts, int indexes ); +#define RB_CHECKOVERFLOW(v,i) if (tess.numVertexes + (v) >= SHADER_MAX_VERTEXES || tess.numIndexes + (i) >= SHADER_MAX_INDEXES ) {RB_CheckOverflow(v,i);} + +void RB_StageIteratorGeneric( void ); +void RB_StageIteratorSky( void ); + +void RB_AddQuadStamp( vec3_t origin, vec3_t left, vec3_t up, byte *color ); +void RB_AddQuadStampExt( vec3_t origin, vec3_t left, vec3_t up, byte *color, float s1, float t1, float s2, float t2 ); + + + + +/* 
+============================================================ + +WORLD MAP + +============================================================ +*/ + +void R_AddBrushModelSurfaces( trRefEntity_t *e ); +void R_AddWorldSurfaces( void ); + + +/* +============================================================ + +SHADOWS + +============================================================ +*/ + +void RB_ShadowTessEnd( void ); +void RB_ShadowFinish( void ); +void RB_ProjectionShadowDeform( void ); + +/* +============================================================ + +SKIES + +============================================================ +*/ +void R_InitSkyTexCoords( float cloudLayerHeight ); + +/* +============================================================ + +CURVE TESSELATION + +============================================================ +*/ + +#define PATCH_STITCHING + +srfGridMesh_t *R_SubdividePatchToGrid( int width, int height, + drawVert_t points[MAX_PATCH_SIZE*MAX_PATCH_SIZE] ); +srfGridMesh_t *R_GridInsertColumn( srfGridMesh_t *grid, int column, int row, vec3_t point, float loderror ); +srfGridMesh_t *R_GridInsertRow( srfGridMesh_t *grid, int row, int column, vec3_t point, float loderror ); +void R_FreeSurfaceGridMesh( srfGridMesh_t *grid ); + + + +/* +============================================================= + +ANIMATED MODELS + +============================================================= +*/ +void R_MDRAddAnimSurfaces( trRefEntity_t *ent ); +void R_AddAnimSurfaces( trRefEntity_t *ent ); +void R_AddIQMSurfaces( trRefEntity_t *ent ); + +/* +============================================================= +============================================================= +*/ + + +void RB_DeformTessGeometry( void ); + +void RB_CalcEnvironmentTexCoords( float *dstTexCoords ); +void RB_CalcFogTexCoords( float *dstTexCoords ); +void RB_CalcScrollTexCoords( const float scroll[2], float *dstTexCoords ); +void RB_CalcRotateTexCoords( float rotSpeed, float *dstTexCoords ); +void RB_CalcScaleTexCoords( const float scale[2], float *dstTexCoords ); +void RB_CalcTurbulentTexCoords( const waveForm_t *wf, float *dstTexCoords ); +void RB_CalcTransformTexCoords( const texModInfo_t *tmi, float *dstTexCoords ); +void RB_CalcModulateColorsByFog( unsigned char *dstColors ); +void RB_CalcModulateAlphasByFog( unsigned char *dstColors ); +void RB_CalcModulateRGBAsByFog( unsigned char *dstColors ); +void RB_CalcWaveAlpha( const waveForm_t *wf, unsigned char *dstColors ); +void RB_CalcWaveColor( const waveForm_t *wf, unsigned char (*dstColors)[4] ); +void RB_CalcAlphaFromEntity( unsigned char *dstColors ); +void RB_CalcAlphaFromOneMinusEntity( unsigned char *dstColors ); +void RB_CalcStretchTexCoords( const waveForm_t *wf, float *texCoords ); +void RB_CalcColorFromEntity( unsigned char (*dstColors)[4] ); +void RB_CalcColorFromOneMinusEntity( unsigned char (*dstColors)[4] ); +void RB_CalcSpecularAlpha( unsigned char *alphas ); +void RB_CalcDiffuseColor( unsigned char (*colors)[4] ); + + + +/* +============================================================= + +RENDERER BACK END COMMAND QUEUE + +============================================================= +*/ + +#define MAX_RENDER_COMMANDS 0x40000 + +typedef struct { + byte cmds[MAX_RENDER_COMMANDS]; + int used; +} renderCommandList_t; + +typedef struct { + int commandId; + float color[4]; +} setColorCommand_t; + +typedef struct { + int commandId; +} drawBufferCommand_t; + + +typedef struct { + int commandId; +} swapBuffersCommand_t; + +typedef struct { + int commandId; +} 
endFrameCommand_t; + +typedef struct { + int commandId; + shader_t *shader; + float x, y; + float w, h; + float s1, t1; + float s2, t2; +} stretchPicCommand_t; + +typedef struct { + int commandId; + trRefdef_t refdef; + viewParms_t viewParms; + drawSurf_t *drawSurfs; + int numDrawSurfs; +} drawSurfsCommand_t; + + +typedef enum { + RC_END_OF_LIST, + RC_SET_COLOR, + RC_STRETCH_PIC, + RC_DRAW_SURFS, + RC_DRAW_BUFFER, + RC_SWAP_BUFFERS, + RC_SCREENSHOT, + RC_VIDEOFRAME +} renderCommand_t; + + + + + +/* +============================================================= + +RENDERER BACK END FUNCTIONS + +============================================================= +*/ + + +void *R_GetCommandBuffer( int bytes ); +void RB_ExecuteRenderCommands( const void *data ); + + +void R_IssueRenderCommands( qboolean runPerformanceCounters ); +void FixRenderCommandList( int newShader ); +void R_AddDrawSurfCmd( drawSurf_t *drawSurfs, int numDrawSurfs ); + + +/* +============================================================ + +SCENE GENERATION + +============================================================ +*/ + + +// font stuff +void R_InitFreeType(void); +void R_DoneFreeType(void); + + +extern void (*rb_surfaceTable[SF_NUM_SURFACE_TYPES])(void *); + +extern shaderCommands_t tess; + + + + +#endif //TR_LOCAL_H diff --git a/code/renderer_vulkan/tr_main.c b/code/renderer_vulkan/tr_main.c new file mode 100644 index 0000000000..785e015525 --- /dev/null +++ b/code/renderer_vulkan/tr_main.c @@ -0,0 +1,1312 @@ +/* +=========================================================================== +Copyright (C) 1999-2005 Id Software, Inc. + +This file is part of Quake III Arena source code. + +Quake III Arena source code is free software; you can redistribute it +and/or modify it under the terms of the GNU General Public License as +published by the Free Software Foundation; either version 2 of the License, +or (at your option) any later version. + +Quake III Arena source code is distributed in the hope that it will be +useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with Quake III Arena source code; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +=========================================================================== +*/ +// tr_main.c -- main control flow for each frame + +#include "tr_local.h" +#include "tr_globals.h" +#include "tr_cvar.h" +#include "tr_shader.h" + +#include "vk_shade_geometry.h" + +#include "vk_image.h" +#include "matrix_multiplication.h" +#include "ref_import.h" + +#include "R_PrintMat.h" +#include "R_PortalPlane.h" +#include "R_DebugGraphics.h" + + +// x: 1x3 +// x^T: 3x1 +// mat: 3x3 +// out: 1*3 +// out = x * mat = (mat^T * x^T)^T +static inline void R_LocalVecToWorld (const float in[3], const float mat[3][3], float out[3]) +{ + out[0] = in[0] * mat[0][0] + in[1] * mat[1][0] + in[2] * mat[2][0]; + out[1] = in[0] * mat[0][1] + in[1] * mat[1][1] + in[2] * mat[2][1]; + out[2] = in[0] * mat[0][2] + in[1] * mat[1][2] + in[2] * mat[2][2]; +} + + +/* +static void R_LocalNormalToWorld (const vec3_t local, const orientationr_t * const pRT, vec3_t world) +{ + world[0] = local[0] * pRT->axis[0][0] + local[1] * pRT->axis[1][0] + local[2] * pRT->axis[2][0]; + world[1] = local[0] * pRT->axis[0][1] + local[1] * pRT->axis[1][1] + local[2] * pRT->axis[2][1]; + world[2] = local[0] * pRT->axis[0][2] + local[1] * pRT->axis[1][2] + local[2] * pRT->axis[2][2]; +} +*/ + +// x: 1x3 +// x^T: 3x1 +// mat: 3x3 +// out: 1*3 +// out = (x * mat^T) = (mat * x^T)^T +static inline void R_WorldVectorToLocal (const float in[3], const float mat[3][3], float out[3]) +{ +// out[0] = DotProduct(in, mat[0]); +// out[1] = DotProduct(in, mat[1]); +// out[2] = DotProduct(in, mat[2]); + out[0] = in[0] * mat[0][0] + in[1] * mat[0][1] + in[2] * mat[0][2]; + out[1] = in[0] * mat[1][0] + in[1] * mat[1][1] + in[2] * mat[1][2]; + out[2] = in[0] * mat[2][0] + in[1] * mat[2][1] + in[2] * mat[2][2]; +} + + +static void R_WorldPointToLocal (const vec3_t world, const orientationr_t * const pRT, float out[3]) +{ + float delta[3]; + VectorSubtract( world, pRT->origin, delta ); + R_WorldVectorToLocal(delta, pRT->axis, out); +} + +/* +static void R_MirrorVector (vec3_t in, orientation_t *surface, orientation_t *camera, vec3_t out) +{ + int i; + + VectorClear( out ); + for ( i = 0 ; i < 3 ; i++ ) + { + float d = DotProduct(in, surface->axis[i]); + VectorMA( out, d, camera->axis[i], out ); + } +} +*/ + +static inline void R_MirrorVector (vec3_t in, orientation_t *surface, orientation_t *camera, vec3_t out) +{ + vec3_t local; + R_WorldVectorToLocal(in, surface->axis, local); + R_LocalVecToWorld(local, camera->axis, out); +} + +static void R_MirrorPoint (vec3_t in, orientation_t *surface, orientation_t *camera, vec3_t out) +{ + // ri.Printf(PRINT_ALL, "R_MirrorPoint\n"); + vec3_t vectmp; + VectorSubtract( in, surface->origin, vectmp ); + + // vec3_t transformed; + vec3_t local; + R_WorldVectorToLocal(vectmp, surface->axis, local); + R_LocalVecToWorld(local, camera->axis, vectmp); + VectorAdd( vectmp, camera->origin, out ); +} + + + +/* +================= +R_RotateForEntity + +Generates an orientation for an entity and viewParms +Does NOT produce any GL calls +Called by both the front end and the back end + +typedef struct { + float modelMatrix[16] QALIGN(16); + float axis[3][3]; // orientation in world + float origin[3]; // in world coordinates + float viewOrigin[3]; // viewParms->or.origin in local coordinates +} orientationr_t; + +================= 
+*/
+void R_RotateForEntity(const trRefEntity_t* const ent, const viewParms_t* const viewParms, orientationr_t* const or)
+{
+
+ if ( ent->e.reType != RT_MODEL )
+ {
+ *or = viewParms->world;
+ return;
+ }
+
+ //VectorCopy( ent->e.origin, or->origin );
+ //VectorCopy( ent->e.axis[0], or->axis[0] );
+ //VectorCopy( ent->e.axis[1], or->axis[1] );
+ //VectorCopy( ent->e.axis[2], or->axis[2] );
+ memcpy(or->origin, ent->e.origin, 12);
+ memcpy(or->axis, ent->e.axis, 36);
+
+ float glMatrix[16] QALIGN(16);
+
+ glMatrix[0] = or->axis[0][0];
+ glMatrix[1] = or->axis[0][1];
+ glMatrix[2] = or->axis[0][2];
+ glMatrix[3] = 0;
+
+ glMatrix[4] = or->axis[1][0];
+ glMatrix[5] = or->axis[1][1];
+ glMatrix[6] = or->axis[1][2];
+ glMatrix[7] = 0;
+
+ glMatrix[8] = or->axis[2][0];
+ glMatrix[9] = or->axis[2][1];
+ glMatrix[10] = or->axis[2][2];
+ glMatrix[11] = 0;
+
+ glMatrix[12] = or->origin[0];
+ glMatrix[13] = or->origin[1];
+ glMatrix[14] = or->origin[2];
+ glMatrix[15] = 1;
+
+ MatrixMultiply4x4_SSE( glMatrix, viewParms->world.modelMatrix, or->modelMatrix );
+
+ // calculate the viewer origin in the model's space
+ // needed for fog, specular, and environment mapping
+
+ R_WorldPointToLocal(viewParms->or.origin, or, or->viewOrigin);
+
+ if ( ent->e.nonNormalizedAxes )
+ {
+ const float * v = ent->e.axis[0];
+ float axisLength = v[0] * v[0] + v[1] * v[1] + v[2] * v[2];
+ if ( axisLength ) {
+ axisLength = 1.0f / sqrtf(axisLength);
+ }
+
+ or->viewOrigin[0] *= axisLength;
+ or->viewOrigin[1] *= axisLength;
+ or->viewOrigin[2] *= axisLength;
+ }
+/*
+ vec3_t delta;
+
+ VectorSubtract( viewParms->or.origin, or->origin, delta );
+
+ R_WorldVectorToLocal(delta, or->axis, or->viewOrigin);
+
+ // compensate for scale in the axes if necessary
+ float axisLength = 1.0f;
+ if ( ent->e.nonNormalizedAxes )
+ {
+ axisLength = VectorLength( ent->e.axis[0] );
+ if ( axisLength ) {
+ axisLength = 1.0f / axisLength;
+ }
+ }
+
+ or->viewOrigin[0] = DotProduct( delta, or->axis[0] ) * axisLength;
+ or->viewOrigin[1] = DotProduct( delta, or->axis[1] ) * axisLength;
+ or->viewOrigin[2] = DotProduct( delta, or->axis[2] ) * axisLength;
+
+*/
+ // printMat1x3f("viewOrigin", or->viewOrigin);
+ // printMat4x4f("modelMatrix", or->modelMatrix);
+
+}
+
+/*
+=================
+typedef struct {
+ float modelMatrix[16] QALIGN(16);
+ float axis[3][3]; // orientation in world
+ float origin[3]; // in world coordinates
+ float viewOrigin[3]; // viewParms->or.origin in local coordinates
+} orientationr_t;
+
+
+Sets up the modelview matrix for a given viewParm
+
+IN: tr.viewParms
+OUT: tr.or
+=================
+*/
+static void R_RotateForViewer ( viewParms_t * const pViewParams, orientationr_t * const pEntityPose)
+{
+ //const viewParms_t * const pViewParams = &tr.viewParms;
+ // for current entity
+ // orientationr_t * const pEntityPose = &tr.or;
+
+ const static float s_flipMatrix[16] QALIGN(16) = {
+ // convert from our coordinate system (looking down X)
+ // to OpenGL's coordinate system (looking down -Z)
+ 0, 0, -1, 0,
+ -1, 0, 0, 0,
+ 0, 1, 0, 0,
+ 0, 0, 0, 1
+ };
+
+
+ float o0, o1, o2;
+
+ pEntityPose->origin[0] = pEntityPose->origin[1] = pEntityPose->origin[2] = 0;
+ Mat3x3Identity(pEntityPose->axis);
+
+
+ // transform by the camera placement
+ // VectorCopy( tr.viewParms.or.origin, tr.or.viewOrigin );
+ // VectorCopy( tr.viewParms.or.origin, origin );
+
+ pEntityPose->viewOrigin[0] = o0 = pViewParams->or.origin[0];
+ pEntityPose->viewOrigin[1] = o1 = pViewParams->or.origin[1];
+ pEntityPose->viewOrigin[2] = o2 = pViewParams->or.origin[2];
+
+
+ float viewerMatrix[16] QALIGN(16);
+ viewerMatrix[0] = pViewParams->or.axis[0][0];
+ viewerMatrix[1] = pViewParams->or.axis[1][0];
+ viewerMatrix[2] = pViewParams->or.axis[2][0];
+ viewerMatrix[3] = 0;
+
+ viewerMatrix[4] = pViewParams->or.axis[0][1];
+ viewerMatrix[5] = pViewParams->or.axis[1][1];
+ viewerMatrix[6] = pViewParams->or.axis[2][1];
+ viewerMatrix[7] = 0;
+
+ viewerMatrix[8] = pViewParams->or.axis[0][2];
+ viewerMatrix[9] = pViewParams->or.axis[1][2];
+ viewerMatrix[10] = pViewParams->or.axis[2][2];
+ viewerMatrix[11] = 0;
+
+ viewerMatrix[12] = - o0 * viewerMatrix[0] - o1 * viewerMatrix[4] - o2 * viewerMatrix[8];
+ viewerMatrix[13] = - o0 * viewerMatrix[1] - o1 * viewerMatrix[5] - o2 * viewerMatrix[9];
+ viewerMatrix[14] = - o0 * viewerMatrix[2] - o1 * viewerMatrix[6] - o2 * viewerMatrix[10];
+ viewerMatrix[15] = 1;
+
+ // convert from our coordinate system (looking down X)
+ // to OpenGL's coordinate system (looking down -Z)
+ MatrixMultiply4x4_SSE( viewerMatrix, s_flipMatrix, pEntityPose->modelMatrix );
+
+ pViewParams->world = *pEntityPose;
+}
+
+
+/*
+=================
+Set up the culling frustum planes for the current view
+=================
+*/
+static void R_SetupFrustum (viewParms_t * const pViewParams)
+{
+
+ {
+ float ang = pViewParams->fovX * (float)(M_PI / 360.0f);
+ float xs = sin( ang );
+ float xc = cos( ang );
+
+ float temp1[3];
+ float temp2[3];
+
+ VectorScale( pViewParams->or.axis[0], xs, temp1 );
+ VectorScale( pViewParams->or.axis[1], xc, temp2);
+
+ VectorAdd(temp1, temp2, pViewParams->frustum[0].normal);
+ pViewParams->frustum[0].dist = DotProduct (pViewParams->or.origin, pViewParams->frustum[0].normal);
+ pViewParams->frustum[0].type = PLANE_NON_AXIAL;
+
+ VectorSubtract(temp1, temp2, pViewParams->frustum[1].normal);
+ pViewParams->frustum[1].dist = DotProduct (pViewParams->or.origin, pViewParams->frustum[1].normal);
+ pViewParams->frustum[1].type = PLANE_NON_AXIAL;
+ }
+
+
+ {
+ float ang = pViewParams->fovY * (float)(M_PI / 360.0f);
+ float xs = sin( ang );
+ float xc = cos( ang );
+ float temp1[3];
+ float temp2[3];
+
+ VectorScale( pViewParams->or.axis[0], xs, temp1);
+ VectorScale( pViewParams->or.axis[2], xc, temp2);
+
+ VectorAdd(temp1, temp2, pViewParams->frustum[2].normal);
+ pViewParams->frustum[2].dist = DotProduct (pViewParams->or.origin, pViewParams->frustum[2].normal);
+ pViewParams->frustum[2].type = PLANE_NON_AXIAL;
+
+ VectorSubtract(temp1, temp2, pViewParams->frustum[3].normal);
+ pViewParams->frustum[3].dist = DotProduct (pViewParams->or.origin, pViewParams->frustum[3].normal);
+ pViewParams->frustum[3].type = PLANE_NON_AXIAL;
+ }
+
+
+ uint32_t i = 0;
+ for (i=0; i < 4; i++)
+ {
+ // SetPlaneSignbits( &pViewParams->frustum[i] );
+ // cplane_t* out = &pViewParams->frustum[i];
+ int bits = 0, j;
+
+ // for fast box on planeside test
+
+ for (j=0 ; j<3 ; j++)
+ {
+ if (pViewParams->frustum[i].normal[j] < 0) {
+ bits |= 1 << j;
+ }
+ }
+ pViewParams->frustum[i].signbits = bits;
+ }
+}
+
+
+/*
+=============
+R_PlaneForSurface
+=============
+*/
+void R_PlaneForSurface (surfaceType_t *surfType, cplane_t *plane)
+{
+ srfTriangles_t *tri;
+ srfPoly_t *poly;
+ drawVert_t *v1, *v2, *v3;
+ vec4_t plane4;
+
+ if (!surfType) {
+ memset (plane, 0, sizeof(*plane));
+ plane->normal[0] = 1;
+ return;
+ }
+ switch (*surfType)
+ {
+ case SF_FACE:
+ *plane = ((srfSurfaceFace_t *)surfType)->plane;
+ return;
+ case SF_TRIANGLES:
+ tri = (srfTriangles_t *)surfType;
+ v1 = tri->verts + tri->indexes[0];
+ v2 = 
tri->verts + tri->indexes[1]; + v3 = tri->verts + tri->indexes[2]; + PlaneFromPoints( plane4, v1->xyz, v2->xyz, v3->xyz ); + VectorCopy( plane4, plane->normal ); + plane->dist = plane4[3]; + return; + case SF_POLY: + poly = (srfPoly_t *)surfType; + PlaneFromPoints( plane4, poly->verts[0].xyz, poly->verts[1].xyz, poly->verts[2].xyz ); + VectorCopy( plane4, plane->normal ); + plane->dist = plane4[3]; + return; + default: + memset (plane, 0, sizeof(*plane)); + plane->normal[0] = 1; + return; + } +} + +/* +================= +entityNum is the entity that the portal surface is a part of, which may +be moving and rotating. + +Returns qtrue if it should be mirrored +================= +*/ +static qboolean R_GetPortalOrientations( drawSurf_t *drawSurf, int entityNum, + orientation_t *surface, orientation_t *camera, + vec3_t pvsOrigin, qboolean *mirror ) +{ + int i; + cplane_t originalPlane, plane; + vec3_t transformed; + + // create plane axis for the portal we are seeing + R_PlaneForSurface( drawSurf->surface, &originalPlane ); + + // rotate the plane if necessary + if ( entityNum != REFENTITYNUM_WORLD ) { + tr.currentEntityNum = entityNum; + tr.currentEntity = &tr.refdef.entities[entityNum]; + + // get the orientation of the entity + R_RotateForEntity( tr.currentEntity, &tr.viewParms, &tr.or ); + + // rotate the plane, but keep the non-rotated version for matching + // against the portalSurface entities + // R_LocalNormalToWorld( originalPlane.normal, &tr.or, plane.normal ); + R_LocalVecToWorld(originalPlane.normal, tr.or.axis, plane.normal); + plane.dist = originalPlane.dist + DotProduct( plane.normal, tr.or.origin ); + + // translate the original plane + originalPlane.dist = originalPlane.dist + DotProduct( originalPlane.normal, tr.or.origin ); + } else { + plane = originalPlane; + } + + VectorCopy( plane.normal, surface->axis[0] ); + //VectorPerp( plane.normal, surface->axis[1] ); + PerpendicularVector( surface->axis[1], surface->axis[0] ); + CrossProduct( surface->axis[0], surface->axis[1], surface->axis[2] ); + + // locate the portal entity closest to this plane. 
+ // origin will be the origin of the portal, origin2 will be + // the origin of the camera + for ( i = 0 ; i < tr.refdef.num_entities ; i++ ) + { + trRefEntity_t* e = &tr.refdef.entities[i]; + if ( e->e.reType != RT_PORTALSURFACE ) { + continue; + } + + float d = DotProduct( e->e.origin, originalPlane.normal ) - originalPlane.dist; + if ( d > 64 || d < -64) { + continue; + } + + // get the pvsOrigin from the entity + VectorCopy( e->e.oldorigin, pvsOrigin ); + + // if the entity is just a mirror, don't use as a camera point + if ( e->e.oldorigin[0] == e->e.origin[0] && + e->e.oldorigin[1] == e->e.origin[1] && + e->e.oldorigin[2] == e->e.origin[2] ) { + VectorScale( plane.normal, plane.dist, surface->origin ); + VectorCopy( surface->origin, camera->origin ); + VectorSubtract( vec3_origin, surface->axis[0], camera->axis[0] ); + VectorCopy( surface->axis[1], camera->axis[1] ); + VectorCopy( surface->axis[2], camera->axis[2] ); + + *mirror = qtrue; + return qtrue; + } + + // project the origin onto the surface plane to get + // an origin point we can rotate around + d = DotProduct( e->e.origin, plane.normal ) - plane.dist; + VectorMA( e->e.origin, -d, surface->axis[0], surface->origin ); + + // now get the camera origin and orientation + VectorCopy( e->e.oldorigin, camera->origin ); + memcpy(camera->axis, e->e.axis, 36); + VectorSubtract( vec3_origin, camera->axis[0], camera->axis[0] ); + VectorSubtract( vec3_origin, camera->axis[1], camera->axis[1] ); + + // optionally rotate + if ( e->e.oldframe ) { + // if a speed is specified + if ( e->e.frame ) { + // continuous rotate + d = (tr.refdef.rd.time/1000.0f) * e->e.frame; + VectorCopy( camera->axis[1], transformed ); + RotatePointAroundVector( camera->axis[1], camera->axis[0], transformed, d ); + CrossProduct( camera->axis[0], camera->axis[1], camera->axis[2] ); + } else { + // bobbing rotate, with skinNum being the rotation offset + d = sin( tr.refdef.rd.time * 0.003f ); + d = e->e.skinNum + d * 4; + VectorCopy( camera->axis[1], transformed ); + RotatePointAroundVector( camera->axis[1], camera->axis[0], transformed, d ); + CrossProduct( camera->axis[0], camera->axis[1], camera->axis[2] ); + } + } + else if ( e->e.skinNum ) { + d = e->e.skinNum; + VectorCopy( camera->axis[1], transformed ); + RotatePointAroundVector( camera->axis[1], camera->axis[0], transformed, d ); + CrossProduct( camera->axis[0], camera->axis[1], camera->axis[2] ); + } + *mirror = qfalse; + return qtrue; + } + + // if we didn't locate a portal entity, don't render anything. + // We don't want to just treat it as a mirror, because without a + // portal entity the server won't have communicated a proper entity set + // in the snapshot + + // unfortunately, with local movement prediction it is easily possible + // to see a surface before the server has communicated the matching + // portal surface entity, so we don't want to print anything here... 
+ + //ri.Printf( PRINT_ALL, "Portal surface without a portal entity\n" ); + + return qfalse; +} + + + +static qboolean IsMirror( const drawSurf_t *drawSurf, int entityNum ) +{ + int i; + cplane_t originalPlane, plane; + + // create plane axis for the portal we are seeing + R_PlaneForSurface( drawSurf->surface, &originalPlane ); + + // rotate the plane if necessary + if ( entityNum != REFENTITYNUM_WORLD ) + { + tr.currentEntityNum = entityNum; + tr.currentEntity = &tr.refdef.entities[entityNum]; + + // get the orientation of the entity + R_RotateForEntity( tr.currentEntity, &tr.viewParms, &tr.or ); + + // rotate the plane, but keep the non-rotated version for matching + // against the portalSurface entities + // R_LocalNormalToWorld( originalPlane.normal, &tr.or, plane.normal ); + R_LocalVecToWorld(originalPlane.normal, tr.or.axis, plane.normal); + plane.dist = originalPlane.dist + DotProduct( plane.normal, tr.or.origin ); + + // translate the original plane + originalPlane.dist = originalPlane.dist + DotProduct( originalPlane.normal, tr.or.origin ); + } + else + { + plane = originalPlane; + } + + // locate the portal entity closest to this plane. + // origin will be the origin of the portal, origin2 will be + // the origin of the camera + for ( i = 0 ; i < tr.refdef.num_entities ; i++ ) + { + trRefEntity_t* e = &tr.refdef.entities[i]; + if ( e->e.reType != RT_PORTALSURFACE ) { + continue; + } + + float d = DotProduct( e->e.origin, originalPlane.normal ) - originalPlane.dist; + if ( d > 64 || d < -64) { + continue; + } + + // if the entity is just a mirror, don't use as a camera point + if ( e->e.oldorigin[0] == e->e.origin[0] && + e->e.oldorigin[1] == e->e.origin[1] && + e->e.oldorigin[2] == e->e.origin[2] ) + { + return qtrue; + } + + return qfalse; + } + return qfalse; +} + + +/* +** SurfIsOffscreen +** +** Determines if a surface is completely offscreen. +*/ +static qboolean SurfIsOffscreen( const drawSurf_t *drawSurf, vec4_t clipDest[128] ) +{ + float shortest = 100000000; + int entityNum; + int numTriangles; + shader_t *shader; + int fogNum; + int dlighted; + vec4_t clip; + int i; + unsigned int pointOr = 0; + unsigned int pointAnd = (unsigned int)~0; + (void)pointOr; // accumulated but only pointAnd is checked + + R_RotateForViewer(&tr.viewParms, &tr.or); + + R_DecomposeSort( drawSurf->sort, &entityNum, &shader, &fogNum, &dlighted ); + RB_BeginSurface( shader, fogNum ); + rb_surfaceTable[ *drawSurf->surface ]( drawSurf->surface ); + + assert( tess.numVertexes < 128 ); + + for ( i = 0; i < tess.numVertexes; i++ ) + { + int j; + unsigned int pointFlags = 0; + + TransformModelToClip_SSE(tess.xyz[i], tr.or.modelMatrix, tr.viewParms.projectionMatrix, clip); + for ( j = 0; j < 3; j++ ) + { + if ( clip[j] >= clip[3] ) + { + pointFlags |= (1 << (j*2)); + } + else if ( clip[j] <= -clip[3] ) + { + pointFlags |= ( 1 << (j*2+1)); + } + } + pointAnd &= pointFlags; + pointOr |= pointFlags; + } + + // trivially reject + if ( pointAnd ) + { + tess.numIndexes = 0; + return qtrue; + } + + // determine if this surface is backfaced and also determine the distance + // to the nearest vertex so we can cull based on portal range. Culling + // based on vertex distance isn't 100% correct (we should be checking for + // range to the surface), but it's good enough for the types of portals + // we have in the game right now. 
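+ // The loop below counts front-facing triangles: numTriangles starts at the
+ // total and is decremented for every triangle whose normal points away
+ // from the view origin. If none remain, the whole surface is backfaced
+ // and the portal view can be skipped.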
+ numTriangles = tess.numIndexes / 3; + + for ( i = 0; i < tess.numIndexes; i += 3 ) + { + vec3_t normal; + float len; + + VectorSubtract( tess.xyz[tess.indexes[i]], tr.viewParms.or.origin, normal ); + + len = VectorLengthSquared( normal ); // lose the sqrt + if ( len < shortest ) + { + shortest = len; + } + + if ( DotProduct( normal, tess.normal[tess.indexes[i]] ) >= 0 ) + { + numTriangles--; + } + } + tess.numIndexes = 0; + if ( !numTriangles ) + { + return qtrue; + } + + // mirrors can early out at this point, since we don't do a fade over distance + // with them (although we could) + if ( IsMirror( drawSurf, entityNum ) ) + { + return qfalse; + } + + if ( shortest > (tess.shader->portalRange*tess.shader->portalRange) ) + { + return qtrue; + } + + return qfalse; +} + +/* +======================== +R_MirrorViewBySurface + +Returns qtrue if another view has been rendered +======================== +*/ +static qboolean R_MirrorViewBySurface (drawSurf_t *drawSurf, int entityNum) +{ + vec4_t clipDest[128]; + orientation_t surface, camera; + + // don't recursively mirror + if (tr.viewParms.isPortal) { + ri.Printf( PRINT_DEVELOPER, "WARNING: recursive mirror/portal found\n" ); + return qfalse; + } + + if ( r_noportals->integer) { + return qfalse; + } + + // trivially reject portal/mirror + if ( SurfIsOffscreen( drawSurf, clipDest ) ) { + //ri.Printf(PRINT_ALL, "isSurfOffscreen: 1\n"); + return qfalse; + } + + // save old viewParms so we can return to it after the mirror view + viewParms_t oldParms = tr.viewParms; + + viewParms_t newParms = tr.viewParms; + newParms.isPortal = qtrue; + + if ( !R_GetPortalOrientations( drawSurf, entityNum, &surface, &camera, + newParms.pvsOrigin, &newParms.isMirror ) ) + { + return qfalse; // bad portal, no portalentity + } + + R_MirrorPoint (oldParms.or.origin, &surface, &camera, newParms.or.origin ); + + // VectorSubtract( vec3_origin, camera.axis[0], newParms.portalPlane.normal ); + // newParms.portalPlane.dist = DotProduct( camera.origin, newParms.portalPlane.normal ); + R_SetupPortalPlane(camera.axis, camera.origin); + + R_MirrorVector (oldParms.or.axis[0], &surface, &camera, newParms.or.axis[0]); + R_MirrorVector (oldParms.or.axis[1], &surface, &camera, newParms.or.axis[1]); + R_MirrorVector (oldParms.or.axis[2], &surface, &camera, newParms.or.axis[2]); + + // OPTIMIZE: restrict the viewport on the mirrored view + // render the mirror view + R_RenderView (&newParms); + + tr.viewParms = oldParms; + + return qtrue; +} + +/* +================= +R_SpriteFogNum + +See if a sprite is inside a fog volume +================= +*/ +int R_SpriteFogNum( trRefEntity_t *ent ) { + int i, j; + fog_t *fog; + + if ( tr.refdef.rd.rdflags & RDF_NOWORLDMODEL ) { + return 0; + } + + for ( i = 1 ; i < tr.world->numfogs ; i++ ) { + fog = &tr.world->fogs[i]; + for ( j = 0 ; j < 3 ; j++ ) { + if ( ent->e.origin[j] - ent->e.radius >= fog->bounds[1][j] ) { + break; + } + if ( ent->e.origin[j] + ent->e.radius <= fog->bounds[0][j] ) { + break; + } + } + if ( j == 3 ) { + return i; + } + } + + return 0; +} + +/* +========================================================================================== + +DRAWSURF SORTING + +========================================================================================== +*/ + +/* +================= +qsort replacement + +================= +*/ +void SWAP_DRAW_SURF(void* a, void* b) +{ + char buf[sizeof(drawSurf_t)]; + memcpy(buf, a, sizeof(drawSurf_t)); + memcpy(a, b, sizeof(drawSurf_t)); + memcpy(b, buf, sizeof(drawSurf_t)); +} + + +/* this 
parameter defines the cutoff between using quick sort and
+ insertion sort for arrays; arrays with lengths shorter or equal to the
+ below value use insertion sort */
+
+#define CUTOFF 8 /* testing shows that this is a good value */
+
+static void shortsort( drawSurf_t *lo, drawSurf_t *hi )
+{
+ drawSurf_t *p, *max;
+
+ while (hi > lo) {
+ max = lo;
+ for (p = lo + 1; p <= hi; p++ ) {
+ if ( p->sort > max->sort ) {
+ max = p;
+ }
+ }
+ SWAP_DRAW_SURF(max, hi);
+ hi--;
+ }
+}
+
+
+/* sort the array between lo and hi (inclusive)
+FIXME: this was lifted and modified from the microsoft lib source...
+ */
+
+void qsortFast (
+ void *base,
+ unsigned num,
+ unsigned width
+ )
+{
+ char *lo, *hi; /* ends of sub-array currently sorting */
+ char *mid; /* points to middle of subarray */
+ char *loguy, *higuy; /* traveling pointers for partition step */
+ unsigned size; /* size of the sub-array */
+ char *lostk[30], *histk[30];
+ int stkptr; /* stack for saving sub-array to be processed */
+
+ /* Note: the number of stack entries required is no more than
+ 1 + log2(size), so 30 is sufficient for any array */
+
+ if (num < 2 || width == 0)
+ return; /* nothing to do */
+
+ stkptr = 0; /* initialize stack */
+
+ lo = (char*) base;
+ hi = (char *)base + width * (num-1); /* initialize limits */
+
+ /* this entry point is for pseudo-recursion calling: setting
+ lo and hi and jumping to here is like recursion, but stkptr is
+ preserved, locals aren't, so we preserve stuff on the stack */
+recurse:
+
+ size = (hi - lo) / width + 1; /* number of el's to sort */
+
+ /* below a certain size, it is faster to use an O(n^2) sorting method */
+ if (size <= CUTOFF) {
+ shortsort((drawSurf_t *)lo, (drawSurf_t *)hi);
+ }
+ else {
+ /* First we pick a partitioning element. The efficiency of the
+ algorithm demands that we find one that is approximately the
+ median of the values, but also that we select one fast. Using
+ the first one produces bad performance if the array is already
+ sorted, so we use the middle one, which would require a very
+ weirdly arranged array for worst case performance. Testing shows
+ that a median-of-three algorithm does not, in general, increase
+ performance. */
+
+ mid = lo + (size / 2) * width; /* find middle element */
+ SWAP_DRAW_SURF(mid, lo); /* swap it to beginning of array */
+
+ /* We now wish to partition the array into three pieces, one
+ consisting of elements <= partition element, one of elements
+ equal to the partition element, and one of elements >= to it. This
+ is done below; comments indicate conditions established at every
+ step. */
+
+ loguy = lo;
+ higuy = hi + width;
+
+ /* Note that higuy decreases and loguy increases on every iteration,
+ so loop must terminate. */
+ for (;;) {
+ /* lo <= loguy < hi, lo < higuy <= hi + 1,
+ A[i] <= A[lo] for lo <= i <= loguy,
+ A[i] >= A[lo] for higuy <= i <= hi */
+
+ do {
+ loguy += width;
+ } while (loguy <= hi &&
+ ( ((drawSurf_t *)loguy)->sort <= ((drawSurf_t *)lo)->sort ) );
+
+ /* lo < loguy <= hi+1, A[i] <= A[lo] for lo <= i < loguy,
+ either loguy > hi or A[loguy] > A[lo] */
+
+ do {
+ higuy -= width;
+ } while (higuy > lo &&
+ ( ((drawSurf_t *)higuy)->sort >= ((drawSurf_t *)lo)->sort ) );
+
+ /* lo-1 <= higuy <= hi, A[i] >= A[lo] for higuy < i <= hi,
+ either higuy <= lo or A[higuy] < A[lo] */
+
+ if (higuy < loguy)
+ break;
+
+ /* if loguy > hi or higuy <= lo, then we would have exited, so
+ A[loguy] > A[lo], A[higuy] < A[lo],
+ loguy < hi, higuy > lo */
+
+ SWAP_DRAW_SURF(loguy, higuy);
+
+ /* A[loguy] < A[lo], A[higuy] > A[lo]; so condition at top
+ of loop is re-established */
+ }
+
+ /* A[i] >= A[lo] for higuy < i <= hi,
+ A[i] <= A[lo] for lo <= i < loguy,
+ higuy < loguy, lo <= higuy <= hi
+ implying:
+ A[i] >= A[lo] for loguy <= i <= hi,
+ A[i] <= A[lo] for lo <= i <= higuy,
+ A[i] = A[lo] for higuy < i < loguy */
+
+ SWAP_DRAW_SURF(lo, higuy); /* put partition element in place */
+
+ /* OK, now we have the following:
+ A[i] >= A[higuy] for loguy <= i <= hi,
+ A[i] <= A[higuy] for lo <= i < higuy
+ A[i] = A[lo] for higuy <= i < loguy */
+
+ /* We've finished the partition, now we want to sort the subarrays
+ [lo, higuy-1] and [loguy, hi].
+ We do the smaller one first to minimize stack usage.
+ We only sort arrays of length 2 or more.*/
+
+ if ( higuy - 1 - lo >= hi - loguy ) {
+ if (lo + width < higuy) {
+ lostk[stkptr] = lo;
+ histk[stkptr] = higuy - width;
+ ++stkptr;
+ } /* save big recursion for later */
+
+ if (loguy < hi) {
+ lo = loguy;
+ goto recurse; /* do small recursion */
+ }
+ }
+ else {
+ if (loguy < hi) {
+ lostk[stkptr] = loguy;
+ histk[stkptr] = hi;
+ ++stkptr; /* save big recursion for later */
+ }
+
+ if (lo + width < higuy) {
+ hi = higuy - width;
+ goto recurse; /* do small recursion */
+ }
+ }
+ }
+
+ /* We have sorted the array, except for any pending sorts on the stack.
+ Check if there are any, and do them. 
*/ + + --stkptr; + if (stkptr >= 0) { + lo = lostk[stkptr]; + hi = histk[stkptr]; + goto recurse; /* pop subarray from stack */ + } + else + return; /* all subarrays done */ +} + + +//========================================================================================== +/* +================= +R_AddDrawSurf +================= +*/ +void R_AddDrawSurf( surfaceType_t *surface, shader_t *shader, int fogIndex, int dlightMap ) +{ + // instead of checking for overflow, we just mask the index so it wraps around + int index = tr.refdef.numDrawSurfs & DRAWSURF_MASK; + // the sort data is packed into a single 32 bit value so it can be + // compared quickly during the qsorting process + tr.refdef.drawSurfs[index].sort = (shader->sortedIndex << QSORT_SHADERNUM_SHIFT) + | tr.shiftedEntityNum | ( fogIndex << QSORT_FOGNUM_SHIFT ) | (int)dlightMap; + tr.refdef.drawSurfs[index].surface = surface; + tr.refdef.numDrawSurfs++; +} + +/* +================= +R_DecomposeSort +================= +*/ +void R_DecomposeSort( unsigned sort, int *entityNum, shader_t **shader, + int *fogNum, int *dlightMap ) { + *fogNum = ( sort >> QSORT_FOGNUM_SHIFT ) & 31; + *shader = tr.sortedShaders[ ( sort >> QSORT_SHADERNUM_SHIFT ) & (MAX_SHADERS-1) ]; + *entityNum = ( sort >> QSORT_ENTITYNUM_SHIFT ) & 1023; + *dlightMap = sort & 3; +} + + +static void R_SortDrawSurfs( drawSurf_t *drawSurfs, int numDrawSurfs ) +{ + shader_t *shader; + int fogNum; + int entityNum; + int dlighted; + int i; + + // it is possible for some views to not have any surfaces + if ( numDrawSurfs < 1 ) { + // we still need to add it for hyperspace cases + R_AddDrawSurfCmd( drawSurfs, numDrawSurfs ); + return; + } + + // if we overflowed MAX_DRAWSURFS, the drawsurfs + // wrapped around in the buffer and we will be missing + // the first surfaces, not the last ones + if ( numDrawSurfs > MAX_DRAWSURFS ) { + numDrawSurfs = MAX_DRAWSURFS; + ri.Printf(PRINT_WARNING, " numDrawSurfs overflowed. 
\n");
+
+ }
+
+ // sort the drawsurfs by sort type, then orientation, then shader
+ qsortFast (drawSurfs, numDrawSurfs, sizeof(drawSurf_t) );
+
+ // check for any pass through drawing, which
+ // may cause another view to be rendered first
+ for ( i = 0 ; i < numDrawSurfs ; i++ )
+ {
+ R_DecomposeSort( (drawSurfs+i)->sort, &entityNum, &shader, &fogNum, &dlighted );
+
+ if ( shader->sort > SS_PORTAL ) {
+ break;
+ }
+
+ // no shader should ever have this sort type
+ if ( shader->sort == SS_BAD ) {
+ ri.Error (ERR_DROP, "Shader '%s' with sort == SS_BAD", shader->name );
+ }
+
+ // if the mirror was completely clipped away, we may need to check another surface
+ if ( R_MirrorViewBySurface( (drawSurfs+i), entityNum) ) {
+ // this is a debug option to see exactly what is being mirrored
+ if ( r_portalOnly->integer ) {
+ return;
+ }
+ break; // only one mirror view at a time
+ }
+ }
+
+ R_AddDrawSurfCmd( drawSurfs, numDrawSurfs );
+}
+
+
+
+void R_AddEntitySurfaces (viewParms_t * const pViewParam)
+{
+ // entities that will have procedurally generated surfaces will just
+ // point at this for their sorting surface
+ static surfaceType_t entitySurface = SF_ENTITY;
+ if ( !r_drawentities->integer ) {
+ return;
+ }
+
+ for ( tr.currentEntityNum = 0;
+ tr.currentEntityNum < tr.refdef.num_entities;
+ tr.currentEntityNum++ )
+ {
+ shader_t* shader;
+
+ trRefEntity_t* ent = tr.currentEntity = &tr.refdef.entities[tr.currentEntityNum];
+
+ ent->needDlights = qfalse;
+
+ // preshift the value we are going to OR into the drawsurf sort
+ tr.shiftedEntityNum = tr.currentEntityNum << QSORT_ENTITYNUM_SHIFT;
+
+ //
+ // the weapon model must be handled specially --
+ // we don't want the hacked weapon position showing in
+ // mirrors, because the true body position will already be drawn
+ //
+ if ( (ent->e.renderfx & RF_FIRST_PERSON) && pViewParam->isPortal)
+ {
+ continue;
+ }
+
+ // simple generated models, like sprites and beams, are not culled
+ switch ( ent->e.reType )
+ {
+ case RT_PORTALSURFACE:
+ break; // don't draw anything
+ case RT_SPRITE:
+ case RT_BEAM:
+ case RT_LIGHTNING:
+ case RT_RAIL_CORE:
+ case RT_RAIL_RINGS:
+ // self blood sprites, talk balloons, etc should not be drawn in the primary
+ // view. We can't just do this check for all entities, because md3
+ // entities may still want to cast shadows from them
+ if ( (ent->e.renderfx & RF_THIRD_PERSON) && !pViewParam->isPortal)
+ {
+ continue;
+ }
+ shader = R_GetShaderByHandle( ent->e.customShader );
+ R_AddDrawSurf( &entitySurface, shader, R_SpriteFogNum( ent ), 0 );
+ break;
+
+ case RT_MODEL:
+ // we must set up parts of tr.or for model culling
+ R_RotateForEntity( ent, pViewParam, &tr.or );
+
+ tr.currentModel = R_GetModelByHandle( ent->e.hModel );
+ if (!tr.currentModel)
+ {
+ R_AddDrawSurf( &entitySurface, tr.defaultShader, 0, 0 );
+ }
+ else
+ {
+ switch ( tr.currentModel->type )
+ {
+ case MOD_MESH:
+ R_AddMD3Surfaces( ent );
+ break;
+ case MOD_MDR:
+ R_MDRAddAnimSurfaces( ent );
+ break;
+ case MOD_IQM:
+ R_AddIQMSurfaces( ent );
+ break;
+ case MOD_BRUSH:
+ R_AddBrushModelSurfaces( ent );
+ break;
+ case MOD_BAD: // null model axis
+ if ( (ent->e.renderfx & RF_THIRD_PERSON) && !pViewParam->isPortal) {
+ break;
+ }
+ shader = R_GetShaderByHandle( ent->e.customShader );
+ R_AddDrawSurf( &entitySurface, tr.defaultShader, 0, 0 );
+ break;
+ default:
+ ri.Error( ERR_DROP, "Add entity surfaces: Bad modeltype" );
+ break;
+ }
+ }
+ break;
+ default:
+ ri.Error( ERR_DROP, "Add entity surfaces: Bad reType" );
+ }
+ }
+}
+
+
+static void R_SetupProjection( viewParms_t * const pViewParams)
+{
+ float zFar;
+
+ // set the projection matrix with the minimum zfar
+ // now that we have the world bounded
+ // this needs to be done before entities are added,
+ // because they use the projection matrix for lod calculation
+
+ // dynamically compute far clip plane distance
+ // if not rendering the world (icons, menus, etc), set a 2k far clip plane
+
+ if ( tr.refdef.rd.rdflags & RDF_NOWORLDMODEL )
+ {
+ pViewParams->zFar = zFar = 2048.0f;
+ }
+ else
+ {
+ float o[3];
+
+ o[0] = pViewParams->or.origin[0];
+ o[1] = pViewParams->or.origin[1];
+ o[2] = pViewParams->or.origin[2];
+
+ float farthestCornerDistance = 0;
+ uint32_t i;
+
+ // set far clipping planes dynamically
+ for ( i = 0; i < 8; i++ )
+ {
+ float v[3];
+
+ v[0] = ((i & 1) ? pViewParams->visBounds[0][0] : pViewParams->visBounds[1][0]) - o[0];
+ v[1] = ((i & 2) ? pViewParams->visBounds[0][1] : pViewParams->visBounds[1][1]) - o[1];
+ v[2] = ((i & 4) ? pViewParams->visBounds[0][2] : pViewParams->visBounds[1][2]) - o[2];
+
+ float distance = v[0]*v[0] + v[1]*v[1] + v[2]*v[2];
+
+ if( distance > farthestCornerDistance )
+ {
+ farthestCornerDistance = distance;
+ }
+ }
+
+ pViewParams->zFar = zFar = sqrtf(farthestCornerDistance);
+ }
+
+ // set up projection matrix
+ // update q3's proj matrix (opengl) to vulkan conventions: z in [0, 1] instead of [-1, 1] and invert y direction
+
+ // Vulkan clip space has inverted Y and half Z.
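+ // Depth mapping check for the matrix below (added note, not from the
+ // original source): with p10 = -zFar / (zFar - zNear), the third column
+ // yields clip.z = p10 * z_eye + zNear * p10 and clip.w = -z_eye, so
+ // z_eye = -zNear gives depth = clip.z / clip.w = 0.0
+ // z_eye = -zFar gives depth = clip.z / clip.w = 1.0
+ // which is the Vulkan [0, 1] range; the negated [5] element provides the
+ // Y flip.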
+ float zNear = r_znear->value; + float p10 = -zFar / (zFar - zNear); + + float py = tan(pViewParams->fovY * (M_PI / 360.0f)); + float px = tan(pViewParams->fovX * (M_PI / 360.0f)); + + pViewParams->projectionMatrix[0] = 1.0f / px; + pViewParams->projectionMatrix[1] = 0; + pViewParams->projectionMatrix[2] = 0; + pViewParams->projectionMatrix[3] = 0; + + pViewParams->projectionMatrix[4] = 0; + pViewParams->projectionMatrix[5] = -1.0f / py; + pViewParams->projectionMatrix[6] = 0; + pViewParams->projectionMatrix[7] = 0; + + pViewParams->projectionMatrix[8] = 0; // normally 0 + pViewParams->projectionMatrix[9] = 0; + pViewParams->projectionMatrix[10] = p10; + pViewParams->projectionMatrix[11] = -1.0f; + + pViewParams->projectionMatrix[12] = 0; + pViewParams->projectionMatrix[13] = 0; + pViewParams->projectionMatrix[14] = zNear * p10; + pViewParams->projectionMatrix[15] = 0; +} + + + +/* +================ +R_RenderView + +A view may be either the actual camera view, +or a mirror / remote location +================ +*/ +void R_RenderView (viewParms_t *parms) +{ + int firstDrawSurf; + + tr.viewCount++; + + tr.viewParms = *parms; + + firstDrawSurf = tr.refdef.numDrawSurfs; + + tr.viewCount++; + + // set viewParms.world + R_RotateForViewer (&tr.viewParms, &tr.or); + // Setup that culling frustum planes for the current view + R_SetupFrustum (&tr.viewParms); + + R_AddWorldSurfaces (); + + R_AddPolygonSurfaces(); + + // set the projection matrix with the minimum zfar + // now that we have the world bounded + // this needs to be done before entities are + // added, because they use the projection matrix for LOD calculation + R_SetupProjection (&tr.viewParms); + + R_AddEntitySurfaces (&tr.viewParms); + + R_SortDrawSurfs( tr.refdef.drawSurfs + firstDrawSurf, tr.refdef.numDrawSurfs - firstDrawSurf ); + + if ( r_debugSurface->integer ) + { + // draw main system development information (surface outlines, etc) + R_DebugGraphics(); + } +} diff --git a/code/renderer_vulkan/tr_marks.c b/code/renderer_vulkan/tr_marks.c new file mode 100644 index 0000000000..353254ad32 --- /dev/null +++ b/code/renderer_vulkan/tr_marks.c @@ -0,0 +1,441 @@ +/* +=========================================================================== +Copyright (C) 1999-2005 Id Software, Inc. + +This file is part of Quake III Arena source code. + +Quake III Arena source code is free software; you can redistribute it +and/or modify it under the terms of the GNU General Public License as +published by the Free Software Foundation; either version 2 of the License, +or (at your option) any later version. + +Quake III Arena source code is distributed in the hope that it will be +useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+
+You should have received a copy of the GNU General Public License
+along with Quake III Arena source code; if not, write to the Free Software
+Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+===========================================================================
+*/
+// tr_marks.c -- polygon projection on the world polygons
+
+#include "tr_local.h"
+#include "tr_globals.h"
+#include "matrix_multiplication.h"
+
+
+#define MAX_VERTS_ON_POLY 64
+
+#define MARKER_OFFSET 0 // 1
+
+/*
+=============
+R_ChopPolyBehindPlane
+
+Out must have space for two more vertexes than in
+=============
+*/
+#define SIDE_FRONT 0
+#define SIDE_BACK 1
+#define SIDE_ON 2
+static void R_ChopPolyBehindPlane( int numInPoints, vec3_t inPoints[MAX_VERTS_ON_POLY],
+ int *numOutPoints, vec3_t outPoints[MAX_VERTS_ON_POLY],
+ vec3_t normal, vec_t dist, vec_t epsilon) {
+ float dists[MAX_VERTS_ON_POLY+4];
+ int sides[MAX_VERTS_ON_POLY+4];
+ int counts[3];
+ float dot;
+ int i, j;
+ float *p1, *p2, *clip;
+ float d;
+
+ // don't clip if it might overflow
+ if ( numInPoints >= MAX_VERTS_ON_POLY - 2 ) {
+ *numOutPoints = 0;
+ return;
+ }
+
+ counts[0] = counts[1] = counts[2] = 0;
+
+ // determine sides for each point
+ for ( i = 0 ; i < numInPoints ; i++ ) {
+ dot = DotProduct( inPoints[i], normal );
+ dot -= dist;
+ dists[i] = dot;
+ if ( dot > epsilon ) {
+ sides[i] = SIDE_FRONT;
+ } else if ( dot < -epsilon ) {
+ sides[i] = SIDE_BACK;
+ } else {
+ sides[i] = SIDE_ON;
+ }
+ counts[sides[i]]++;
+ }
+ sides[i] = sides[0];
+ dists[i] = dists[0];
+
+ *numOutPoints = 0;
+
+ if ( !counts[0] ) {
+ return;
+ }
+ if ( !counts[1] ) {
+ *numOutPoints = numInPoints;
+ memcpy( outPoints, inPoints, numInPoints * sizeof(vec3_t) );
+ return;
+ }
+
+ for ( i = 0 ; i < numInPoints ; i++ ) {
+ p1 = inPoints[i];
+ clip = outPoints[ *numOutPoints ];
+
+ if ( sides[i] == SIDE_ON ) {
+ VectorCopy( p1, clip );
+ (*numOutPoints)++;
+ continue;
+ }
+
+ if ( sides[i] == SIDE_FRONT ) {
+ VectorCopy( p1, clip );
+ (*numOutPoints)++;
+ clip = outPoints[ *numOutPoints ];
+ }
+
+ if ( sides[i+1] == SIDE_ON || sides[i+1] == sides[i] ) {
+ continue;
+ }
+
+ // generate a split point
+ p2 = inPoints[ (i+1) % numInPoints ];
+
+ d = dists[i] - dists[i+1];
+ if ( d == 0 ) {
+ dot = 0;
+ } else {
+ dot = dists[i] / d;
+ }
+
+ // clip xyz
+
+ for (j=0 ; j<3 ; j++) {
+ clip[j] = p1[j] + dot * ( p2[j] - p1[j] );
+ }
+
+ (*numOutPoints)++;
+ }
+}
+
+/*
+=================
+R_BoxSurfaces_r
+
+=================
+*/
+void R_BoxSurfaces_r(mnode_t *node, vec3_t mins, vec3_t maxs, surfaceType_t **list, int listsize, int *listlength, vec3_t dir) {
+
+ int s, c;
+ msurface_t *surf, **mark;
+
+ // do the tail recursion in a loop
+ while ( node->contents == -1 ) {
+ s = BoxOnPlaneSide( mins, maxs, node->plane );
+ if (s == 1) {
+ node = node->children[0];
+ } else if (s == 2) {
+ node = node->children[1];
+ } else {
+ R_BoxSurfaces_r(node->children[0], mins, maxs, list, listsize, listlength, dir);
+ node = node->children[1];
+ }
+ }
+
+ // add the individual surfaces
+ mark = node->firstmarksurface;
+ c = node->nummarksurfaces;
+ while (c--) {
+ //
+ if (*listlength >= listsize) break;
+ //
+ surf = *mark;
+ // check if the surface has NOIMPACT or NOMARKS set
+ if ( ( surf->shader->surfaceFlags & ( SURF_NOIMPACT | SURF_NOMARKS ) )
+ || ( surf->shader->contentFlags & CONTENTS_FOG ) ) {
+ surf->viewCount = tr.viewCount;
+ }
+ // extra check for surfaces to avoid list overflows
+ else if (*(surf->data) == SF_FACE) {
+ // the face plane should go through the box
+ s = 
BoxOnPlaneSide( mins, maxs, &(( srfSurfaceFace_t * ) surf->data)->plane ); + if (s == 1 || s == 2) { + surf->viewCount = tr.viewCount; + } else if (DotProduct((( srfSurfaceFace_t * ) surf->data)->plane.normal, dir) > -0.5) { + // don't add faces that make sharp angles with the projection direction + surf->viewCount = tr.viewCount; + } + } + else if (*(surfaceType_t *) (surf->data) != SF_GRID) surf->viewCount = tr.viewCount; + // check the viewCount because the surface may have + // already been added if it spans multiple leafs + if (surf->viewCount != tr.viewCount) { + surf->viewCount = tr.viewCount; + list[*listlength] = (surfaceType_t *) surf->data; + (*listlength)++; + } + mark++; + } +} + +/* +================= +R_AddMarkFragments + +================= +*/ +void R_AddMarkFragments(int numClipPoints, vec3_t clipPoints[2][MAX_VERTS_ON_POLY], + int numPlanes, vec3_t *normals, float *dists, + int maxPoints, vec3_t pointBuffer, + int maxFragments, markFragment_t *fragmentBuffer, + int *returnedPoints, int *returnedFragments, + vec3_t mins, vec3_t maxs) { + int pingPong, i; + markFragment_t *mf; + + // chop the surface by all the bounding planes of the to be projected polygon + pingPong = 0; + + for ( i = 0 ; i < numPlanes ; i++ ) { + + R_ChopPolyBehindPlane( numClipPoints, clipPoints[pingPong], + &numClipPoints, clipPoints[!pingPong], + normals[i], dists[i], 0.5 ); + pingPong ^= 1; + if ( numClipPoints == 0 ) { + break; + } + } + // completely clipped away? + if ( numClipPoints == 0 ) { + return; + } + + // add this fragment to the returned list + if ( numClipPoints + (*returnedPoints) > maxPoints ) { + return; // not enough space for this polygon + } + /* + // all the clip points should be within the bounding box + for ( i = 0 ; i < numClipPoints ; i++ ) { + int j; + for ( j = 0 ; j < 3 ; j++ ) { + if (clipPoints[pingPong][i][j] < mins[j] - 0.5) break; + if (clipPoints[pingPong][i][j] > maxs[j] + 0.5) break; + } + if (j < 3) break; + } + if (i < numClipPoints) return; + */ + + mf = fragmentBuffer + (*returnedFragments); + mf->firstPoint = (*returnedPoints); + mf->numPoints = numClipPoints; + memcpy( pointBuffer + (*returnedPoints) * 3, clipPoints[pingPong], numClipPoints * sizeof(vec3_t) ); + + (*returnedPoints) += numClipPoints; + (*returnedFragments)++; +} + + +int RE_MarkFragments( int numPoints, const vec3_t *points, const vec3_t projection, + int maxPoints, vec3_t pointBuffer, int maxFragments, markFragment_t *fragmentBuffer ) +{ + int numsurfaces, numPlanes; + int i, j, k, m, n; + surfaceType_t *surfaces[64]; + vec3_t mins, maxs; + int returnedFragments; + int returnedPoints; + vec3_t normals[MAX_VERTS_ON_POLY+2]; + float dists[MAX_VERTS_ON_POLY+2]; + vec3_t clipPoints[2][MAX_VERTS_ON_POLY]; + int numClipPoints; + float *v; + srfSurfaceFace_t *surf; + srfGridMesh_t *cv; + drawVert_t *dv; + vec3_t normal; + vec3_t projectionDir; + vec3_t v1, v2; + int *indexes; + + //increment view count for double check prevention + tr.viewCount++; + + // + VectorNormalize2( projection, projectionDir ); + // find all the brushes that are to be considered + ClearBounds( mins, maxs ); + for ( i = 0 ; i < numPoints ; i++ ) { + vec3_t temp; + + AddPointToBounds( points[i], mins, maxs ); + VectorAdd( points[i], projection, temp ); + AddPointToBounds( temp, mins, maxs ); + // make sure we get all the leafs (also the one(s) in front of the hit surface) + VectorMA( points[i], -20, projectionDir, temp ); + AddPointToBounds( temp, mins, maxs ); + } + + if (numPoints > MAX_VERTS_ON_POLY) numPoints = 
MAX_VERTS_ON_POLY;
+	// create the bounding planes for the to be projected polygon
+	for ( i = 0 ; i < numPoints ; i++ ) {
+		VectorSubtract(points[(i+1)%numPoints], points[i], v1);
+		VectorAdd(points[i], projection, v2);
+		VectorSubtract(points[i], v2, v2);
+		CrossProduct(v1, v2, normals[i]);
+		VectorNorm(normals[i]);
+		dists[i] = DotProduct(normals[i], points[i]);
+	}
+	// add near and far clipping planes for projection
+	VectorCopy(projectionDir, normals[numPoints]);
+	dists[numPoints] = DotProduct(normals[numPoints], points[0]) - 32;
+	VectorCopy(projectionDir, normals[numPoints+1]);
+	VectorInverse(normals[numPoints+1]);
+	dists[numPoints+1] = DotProduct(normals[numPoints+1], points[0]) - 20;
+	numPlanes = numPoints + 2;
+
+	numsurfaces = 0;
+	R_BoxSurfaces_r(tr.world->nodes, mins, maxs, surfaces, 64, &numsurfaces, projectionDir);
+	//assert(numsurfaces <= 64);
+	//assert(numsurfaces != 64);
+
+	returnedPoints = 0;
+	returnedFragments = 0;
+
+	for ( i = 0 ; i < numsurfaces ; i++ ) {
+
+		if (*surfaces[i] == SF_GRID) {
+
+			cv = (srfGridMesh_t *) surfaces[i];
+			for ( m = 0 ; m < cv->height - 1 ; m++ ) {
+				for ( n = 0 ; n < cv->width - 1 ; n++ ) {
+					// We triangulate the grid and chop all triangles within
+					// the bounding planes of the to be projected polygon.
+					// LOD is not taken into account, not such a big deal though.
+					//
+					// It's probably much nicer to chop the grid itself and deal
+					// with this grid as a normal SF_GRID surface so LOD will
+					// be applied. However the LOD of that chopped grid must
+					// be synced with the LOD of the original curve.
+					// One way to do this: the chopped grid shares vertices with
+					// the original curve. When LOD is applied to the original
+					// curve the unused vertices are flagged. Now the chopped curve
+					// should skip the flagged vertices. This still leaves the
+					// problems with the vertices at the chopped grid edges.
+					//
+					// To avoid issues when LOD is applied to "hollow curves" (like
+					// the ones around many jump pads) we now just add a 2 unit
+					// offset to the triangle vertices.
+					// The offset is added in the vertex normal vector direction
+					// so all triangles will still fit together.
+					// The 2 unit offset should avoid pretty much all LOD problems.
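+					//
+					// A sketch of the cell triangulation used below: dv points
+					// at the cell's upper-left vertex and cv->width is the row
+					// stride, so the four corners of cell (m, n) are
+					//
+					//    dv[0] ---------- dv[1]             (row m)
+					//      |                |
+					//    dv[width] --- dv[width+1]          (row m+1)
+					//
+					// triangle 1 = dv[0], dv[width], dv[1]
+					// triangle 2 = dv[1], dv[width], dv[width+1]
+					//
+					// Each corner is nudged MARKER_OFFSET units along its vertex
+					// normal (currently 0, see the #define above) before the
+					// triangle is handed to R_AddMarkFragments for clipping.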
+
+					numClipPoints = 3;
+
+					dv = cv->verts + m * cv->width + n;
+
+					VectorCopy(dv[0].xyz, clipPoints[0][0]);
+					VectorMA(clipPoints[0][0], MARKER_OFFSET, dv[0].normal, clipPoints[0][0]);
+					VectorCopy(dv[cv->width].xyz, clipPoints[0][1]);
+					VectorMA(clipPoints[0][1], MARKER_OFFSET, dv[cv->width].normal, clipPoints[0][1]);
+					VectorCopy(dv[1].xyz, clipPoints[0][2]);
+					VectorMA(clipPoints[0][2], MARKER_OFFSET, dv[1].normal, clipPoints[0][2]);
+					// check the normal of this triangle
+					VectorSubtract(clipPoints[0][0], clipPoints[0][1], v1);
+					VectorSubtract(clipPoints[0][2], clipPoints[0][1], v2);
+					CrossProduct(v1, v2, normal);
+					VectorNorm(normal);
+					if (DotProduct(normal, projectionDir) < -0.1) {
+						// add the fragments of this triangle
+						R_AddMarkFragments(numClipPoints, clipPoints,
+								numPlanes, normals, dists,
+								maxPoints, pointBuffer,
+								maxFragments, fragmentBuffer,
+								&returnedPoints, &returnedFragments, mins, maxs);
+
+						if ( returnedFragments == maxFragments ) {
+							return returnedFragments;	// not enough space for more fragments
+						}
+					}
+
+					VectorCopy(dv[1].xyz, clipPoints[0][0]);
+					VectorMA(clipPoints[0][0], MARKER_OFFSET, dv[1].normal, clipPoints[0][0]);
+					VectorCopy(dv[cv->width].xyz, clipPoints[0][1]);
+					VectorMA(clipPoints[0][1], MARKER_OFFSET, dv[cv->width].normal, clipPoints[0][1]);
+					VectorCopy(dv[cv->width+1].xyz, clipPoints[0][2]);
+					VectorMA(clipPoints[0][2], MARKER_OFFSET, dv[cv->width+1].normal, clipPoints[0][2]);
+					// check the normal of this triangle
+					VectorSubtract(clipPoints[0][0], clipPoints[0][1], v1);
+					VectorSubtract(clipPoints[0][2], clipPoints[0][1], v2);
+					CrossProduct(v1, v2, normal);
+					VectorNorm(normal);
+					if (DotProduct(normal, projectionDir) < -0.05) {
+						// add the fragments of this triangle
+						R_AddMarkFragments(numClipPoints, clipPoints,
+								numPlanes, normals, dists,
+								maxPoints, pointBuffer,
+								maxFragments, fragmentBuffer,
+								&returnedPoints, &returnedFragments, mins, maxs);
+
+						if ( returnedFragments == maxFragments ) {
+							return returnedFragments;	// not enough space for more fragments
+						}
+					}
+				}
+			}
+		}
+		else if (*surfaces[i] == SF_FACE) {
+
+			surf = ( srfSurfaceFace_t * ) surfaces[i];
+			// check the normal of this face
+			if (DotProduct(surf->plane.normal, projectionDir) > -0.5) {
+				continue;
+			}
+
+			/*
+			VectorSubtract(clipPoints[0][0], clipPoints[0][1], v1);
+			VectorSubtract(clipPoints[0][2], clipPoints[0][1], v2);
+			CrossProduct(v1, v2, normal);
+			VectorNormalize(normal);
+			if (DotProduct(normal, projectionDir) > -0.5) continue;
+			*/
+			indexes = (int *)( (byte *)surf + surf->ofsIndices );
+			for ( k = 0 ; k < surf->numIndices ; k += 3 ) {
+				for ( j = 0 ; j < 3 ; j++ ) {
+					v = surf->points[0] + VERTEXSIZE * indexes[k+j];
+					VectorMA( v, MARKER_OFFSET, surf->plane.normal, clipPoints[0][j] );
+				}
+				// add the fragments of this face
+				R_AddMarkFragments( 3, clipPoints,
+						numPlanes, normals, dists,
+						maxPoints, pointBuffer,
+						maxFragments, fragmentBuffer,
+						&returnedPoints, &returnedFragments, mins, maxs);
+				if ( returnedFragments == maxFragments ) {
+					return returnedFragments;	// not enough space for more fragments
+				}
+			}
+			continue;
+		}
+		else {
+			// ignore all other world surfaces
+			// might be cool to also project polygons on a triangle soup
+			// however this will probably create huge amounts of extra polys
+			// even more than the projection onto curves
+			continue;
+		}
+	}
+	return returnedFragments;
+}
+
diff --git a/code/renderer_vulkan/tr_mesh.c b/code/renderer_vulkan/tr_mesh.c
new file mode 100644
index 0000000000..11b8867325
--- /dev/null
+++ 
b/code/renderer_vulkan/tr_mesh.c @@ -0,0 +1,347 @@ +/* +=========================================================================== +Copyright (C) 1999-2005 Id Software, Inc. + +This file is part of Quake III Arena source code. + +Quake III Arena source code is free software; you can redistribute it +and/or modify it under the terms of the GNU General Public License as +published by the Free Software Foundation; either version 2 of the License, +or (at your option) any later version. + +Quake III Arena source code is distributed in the hope that it will be +useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with Quake III Arena source code; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +=========================================================================== +*/ +// tr_mesh.c: triangle model functions + +#include "tr_local.h" +#include "tr_globals.h" +#include "tr_cvar.h" +#include "vk_shade_geometry.h" + +#include "ref_import.h" +#include "tr_light.h" + + +static int R_CullModel( md3Header_t *header, trRefEntity_t *ent ) +{ + vec3_t bounds[2]; + int i; + + // compute frame pointers + md3Frame_t* newFrame = ( md3Frame_t * ) ( ( byte * ) header + header->ofsFrames ) + ent->e.frame; + md3Frame_t* oldFrame = ( md3Frame_t * ) ( ( byte * ) header + header->ofsFrames ) + ent->e.oldframe; + + // cull bounding sphere ONLY if this is not an upscaled entity + if ( !ent->e.nonNormalizedAxes ) + { + if ( ent->e.frame == ent->e.oldframe ) + { + switch ( R_CullLocalPointAndRadius( newFrame->localOrigin, newFrame->radius ) ) + { + case CULL_OUT: + tr.pc.c_sphere_cull_md3_out++; + return CULL_OUT; + + case CULL_IN: + tr.pc.c_sphere_cull_md3_in++; + return CULL_IN; + + case CULL_CLIP: + tr.pc.c_sphere_cull_md3_clip++; + break; + } + } + else + { + int sphereCull, sphereCullB; + + sphereCull = R_CullLocalPointAndRadius( newFrame->localOrigin, newFrame->radius ); + if ( newFrame == oldFrame ) { + sphereCullB = sphereCull; + } else { + sphereCullB = R_CullLocalPointAndRadius( oldFrame->localOrigin, oldFrame->radius ); + } + + if ( sphereCull == sphereCullB ) + { + if ( sphereCull == CULL_OUT ) + { + tr.pc.c_sphere_cull_md3_out++; + return CULL_OUT; + } + else if ( sphereCull == CULL_IN ) + { + tr.pc.c_sphere_cull_md3_in++; + return CULL_IN; + } + else + { + tr.pc.c_sphere_cull_md3_clip++; + } + } + } + } + + // calculate a bounding box in the current coordinate system + for (i = 0 ; i < 3 ; i++) { + bounds[0][i] = oldFrame->bounds[0][i] < newFrame->bounds[0][i] ? oldFrame->bounds[0][i] : newFrame->bounds[0][i]; + bounds[1][i] = oldFrame->bounds[1][i] > newFrame->bounds[1][i] ? 
oldFrame->bounds[1][i] : newFrame->bounds[1][i];
+	}
+
+	switch ( R_CullLocalBox( bounds ) )
+	{
+		case CULL_IN:
+			tr.pc.c_box_cull_md3_in++;
+			return CULL_IN;
+		case CULL_CLIP:
+			tr.pc.c_box_cull_md3_clip++;
+			return CULL_CLIP;
+		case CULL_OUT:
+		default:
+			tr.pc.c_box_cull_md3_out++;
+			return CULL_OUT;
+	}
+}
+
+
+
+int R_ComputeLOD( trRefEntity_t *ent )
+{
+
+	int lod = 0;
+
+	float radius;
+	// radius is guaranteed to be greater than 0
+
+	// multiple LODs exist, so compute projected bounding sphere
+	// and use that as a criteria for selecting LOD
+	if(tr.currentModel->type == MOD_MDR)
+	{
+		mdrHeader_t * mdr = (mdrHeader_t *) tr.currentModel->modelData;
+		int frameSize = (size_t) (&((mdrFrame_t *)0)->bones[mdr->numBones]);
+
+		mdrFrame_t * mdrframe = (mdrFrame_t *) ((byte *) mdr + mdr->ofsFrames + frameSize * ent->e.frame);
+
+		radius = RadiusFromBounds(mdrframe->bounds[0], mdrframe->bounds[1]);
+	}
+	else
+	{
+		md3Frame_t * frame = ( md3Frame_t * ) ( ( ( unsigned char * ) tr.currentModel->md3[0] ) + tr.currentModel->md3[0]->ofsFrames );
+
+		frame += ent->e.frame;
+
+		radius = RadiusFromBounds( frame->bounds[0], frame->bounds[1] );
+	}
+
+	float tmpVec[3];
+	VectorSubtract(ent->e.origin, tr.viewParms.or.origin, tmpVec);
+	float dist = DotProduct( tr.viewParms.or.axis[0], tmpVec);
+	if ( dist > 0 )
+	{
+
+		// vec3_t p;
+		// p[0] = 0;
+		// p[1] = r ;
+		// p[2] = -dist;
+		// p[3] = 1;
+
+		// pMatProj = tr.viewParms.projectionMatrix
+		// float projected[4];
+		// projected[0] = p[0] * pMatProj[0] + p[1] * pMatProj[4] + p[2] * pMatProj[8] + pMatProj[12];
+		// projected[1] = p[0] * pMatProj[1] - p[1] * pMatProj[5] + p[2] * pMatProj[9] + pMatProj[13];
+		// projected[2] = p[0] * pMatProj[2] + p[1] * pMatProj[6] + p[2] * pMatProj[10] + pMatProj[14];
+		// projected[3] = p[0] * pMatProj[3] + p[1] * pMatProj[7] + p[2] * pMatProj[11] + pMatProj[15];
+		// perspective divide
+		// pr = projected[1] / projected[3];
+
+		float p1 = - radius * tr.viewParms.projectionMatrix[5] - dist * tr.viewParms.projectionMatrix[9] + tr.viewParms.projectionMatrix[13];
+		float p3 = radius * tr.viewParms.projectionMatrix[7] - dist * tr.viewParms.projectionMatrix[11] + tr.viewParms.projectionMatrix[15];
+
+		float projectedRadius = p1 / p3;
+
+		//ri.Printf( PRINT_ALL, "%f: \n", projectedRadius);
+
+		lod = (1.0f - projectedRadius * 6 ) * tr.currentModel->numLods;
+
+
+		if ( lod < 0 )
+		{
+			lod = 0;
+		}
+		else if ( lod >= tr.currentModel->numLods )
+		{
+			lod = tr.currentModel->numLods - 1;
+		}
+
+	}
+
+
+	return lod;
+}
+
+/*
+=================
+R_ComputeFogNum
+
+=================
+*/
+int R_ComputeFogNum( md3Header_t *header, trRefEntity_t *ent ) {
+	int				i, j;
+	fog_t			*fog;
+	md3Frame_t		*md3Frame;
+	vec3_t			localOrigin;
+
+	if ( tr.refdef.rd.rdflags & RDF_NOWORLDMODEL ) {
+		return 0;
+	}
+
+	// FIXME: non-normalized axis issues
+	md3Frame = ( md3Frame_t * ) ( ( byte * ) header + header->ofsFrames ) + ent->e.frame;
+	VectorAdd( ent->e.origin, md3Frame->localOrigin, localOrigin );
+	for ( i = 1 ; i < tr.world->numfogs ; i++ ) {
+		fog = &tr.world->fogs[i];
+		for ( j = 0 ; j < 3 ; j++ ) {
+			if ( localOrigin[j] - md3Frame->radius >= fog->bounds[1][j] ) {
+				break;
+			}
+			if ( localOrigin[j] + md3Frame->radius <= fog->bounds[0][j] ) {
+				break;
+			}
+		}
+		if ( j == 3 ) {
+			return i;
+		}
+	}
+
+	return 0;
+}
+
+
+
+void R_AddMD3Surfaces( trRefEntity_t *ent )
+{
+	int	i;
+	md3Header_t		*header = NULL;
+	md3Surface_t	*surface = NULL;
+	md3Shader_t		*md3Shader = NULL;
+	shader_t		*shader = NULL;
+	int				cull;
+	int				lod = 0;
+	int				fogNum;
+
+	// 
don't add third_person objects if not in a portal + qboolean personalModel = (ent->e.renderfx & RF_THIRD_PERSON) && !tr.viewParms.isPortal; + + if ( ent->e.renderfx & RF_WRAP_FRAMES ) { + ent->e.frame %= tr.currentModel->md3[0]->numFrames; + ent->e.oldframe %= tr.currentModel->md3[0]->numFrames; + } + + // + // Validate the frames so there is no chance of a crash. + // This will write directly into the entity structure, so + // when the surfaces are rendered, they don't need to be + // range checked again. + + + if ( (ent->e.frame >= tr.currentModel->md3[0]->numFrames) + || (ent->e.frame < 0) + || (ent->e.oldframe >= tr.currentModel->md3[0]->numFrames) + || (ent->e.oldframe < 0) ) + { + ri.Printf( PRINT_ALL, "R_AddMD3Surfaces: no such frame %d to %d for '%s'\n", + ent->e.oldframe, ent->e.frame, + tr.currentModel->name ); + ent->e.frame = 0; + ent->e.oldframe = 0; + } + + // + // compute LOD + // model has only 1 LOD level, skip computations and bias + if ( tr.currentModel->numLods > 1 ) + lod = R_ComputeLOD( ent ); + + header = tr.currentModel->md3[lod]; + + // + // cull the entire model if merged bounding box of both frames + // is outside the view frustum. + // + cull = R_CullModel ( header, ent ); + if ( cull == CULL_OUT ) { + return; + } + + + // + // set up lighting now that we know we aren't culled + // + if ( !personalModel) { + R_SetupEntityLighting( &tr.refdef, ent ); + } + + // + // draw all surfaces + // + surface = (md3Surface_t *)( (byte *)header + header->ofsSurfaces ); + for ( i = 0 ; i < header->numSurfaces ; i++ ) + { + if ( ent->e.customShader ) + { + shader = R_GetShaderByHandle( ent->e.customShader ); + } + else if ( ent->e.customSkin > 0 && ent->e.customSkin < tr.numSkins ) + { + skin_t *skin = R_GetSkinByHandle( ent->e.customSkin ); + + // match the surface name to something in the skin file + shader = tr.defaultShader; + + int j; + + for ( j = 0 ; j < skin->numSurfaces ; j++ ) + { + // the names have both been lowercased + if ( !strcmp( skin->pSurfaces[j].name, surface->name ) ) { + shader = skin->pSurfaces[j].shader; + break; + } + } + if (shader == tr.defaultShader) { + ri.Printf( PRINT_DEVELOPER, "no shader for surface %s in skin %s\n", surface->name, skin->name); + } + else if (shader->defaultShader) { + ri.Printf( PRINT_DEVELOPER, "WARNING: shader %s in skin %s not found\n", shader->name, skin->name); + } + } else if ( surface->numShaders <= 0 ) { + shader = tr.defaultShader; + } else { + md3Shader = (md3Shader_t *) ( (byte *)surface + surface->ofsShaders ); + md3Shader += ent->e.skinNum % surface->numShaders; + shader = tr.shaders[ md3Shader->shaderIndex ]; + } + + + // we will add shadows even if the main object isn't visible in the view + + // don't add third_person objects if not viewing through a portal + if ( !personalModel ) + { + // see if we are in a fog volume + fogNum = R_ComputeFogNum( header, ent ); + R_AddDrawSurf( (void *)surface, shader, fogNum, qfalse ); + } + + surface = (md3Surface_t *)( (byte *)surface + surface->ofsEnd ); + } +} + diff --git a/code/renderer_vulkan/tr_model.c b/code/renderer_vulkan/tr_model.c new file mode 100644 index 0000000000..9438b4e448 --- /dev/null +++ b/code/renderer_vulkan/tr_model.c @@ -0,0 +1,81 @@ +/* +=========================================================================== +Copyright (C) 1999-2005 Id Software, Inc. + +This file is part of Quake III Arena source code. 
+
+Quake III Arena source code is free software; you can redistribute it
+and/or modify it under the terms of the GNU General Public License as
+published by the Free Software Foundation; either version 2 of the License,
+or (at your option) any later version.
+
+Quake III Arena source code is distributed in the hope that it will be
+useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Quake III Arena source code; if not, write to the Free Software
+Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+===========================================================================
+*/
+// tr_models.c -- model loading and caching
+
+#include "tr_local.h"
+#include "tr_globals.h"
+
+#include "tr_model.h"
+#include "ref_import.h"
+
+
+model_t* R_GetModelByHandle( qhandle_t index )
+{
+	if ( (index < 0) || (index >= tr.numModels) )
+	{
+		ri.Printf(PRINT_WARNING, "index = %d out of range, returning the default model.\n", index);
+		return tr.models[0];
+	}
+
+	return tr.models[index];
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+
+void R_ModelInit( void )
+{
+	ri.Printf( PRINT_ALL, "R_ModelInit: \n");
+
+	// leave a space for NULL model
+	model_t* mod = ri.Hunk_Alloc( sizeof( model_t ), h_low );
+	mod->index = tr.numModels = 0;
+	mod->type = MOD_BAD;
+
+	tr.models[tr.numModels] = mod;
+	tr.numModels++;
+}
+
+
+void R_Modellist_f( void )
+{
+	int i;
+	int total = 0;
+
+	for ( i = 1 ; i < tr.numModels; i++ )
+	{
+		model_t* mod = tr.models[i];
+		int lods = 1;
+		int j;
+		for ( j = 1 ; j < MD3_MAX_LODS ; j++ )
+		{
+			if ( mod->md3[j] && mod->md3[j] != mod->md3[j-1] )
+			{
+				lods++;
+			}
+		}
+		ri.Printf( PRINT_ALL, "%8i : (%i) %s\n",mod->dataSize, lods, mod->name );
+		total += mod->dataSize;
+	}
+	ri.Printf( PRINT_ALL, "%8i : Total models\n", total );
+}
diff --git a/code/renderer_vulkan/tr_model.h b/code/renderer_vulkan/tr_model.h
new file mode 100644
index 0000000000..b2c0416933
--- /dev/null
+++ b/code/renderer_vulkan/tr_model.h
@@ -0,0 +1,102 @@
+#ifndef TR_MODEL_H_
+#define TR_MODEL_H_
+
+#include "../renderercommon/iqm.h"
+
+
+
+
+typedef enum {
+	MOD_BAD,
+	MOD_BRUSH,
+	MOD_MESH,
+	MOD_MDR,
+	MOD_IQM
+} modtype_t;
+
+typedef struct model_s {
+	char		name[MAX_QPATH];
+	modtype_t	type;
+	int			index;		// model = tr.models[model->index]
+
+	int			dataSize;	// just for listing purposes
+	bmodel_t	*bmodel;	// only if type == MOD_BRUSH
+	md3Header_t	*md3[MD3_MAX_LODS];	// only if type == MOD_MESH
+	void	*modelData;		// only if type == (MOD_MDR | MOD_IQM)
+
+	int			 numLods;
+} model_t;
+
+
+#define MAX_MOD_KNOWN 1024
+
+void R_ModelInit( void );
+model_t* R_GetModelByHandle( qhandle_t hModel );
+void R_Modellist_f( void );
+
+//====================================================
+
+qhandle_t R_RegisterMD3(const char *name, model_t *mod);
+
+qboolean R_LoadMDR( model_t *mod, void *buffer, int filesize, const char *mod_name );
+qhandle_t R_RegisterMDR(const char *name, model_t *mod);
+
+qboolean R_LoadIQM (model_t *mod, void *buffer, int filesize, const char *name );
+qhandle_t R_RegisterIQM(const char *name, model_t *mod);
+
+
+//====================================================
+// IQM
+
+// inter-quake-model
+typedef struct {
+	int		num_vertexes;
+	int		num_triangles;
+	int		num_frames;
+	int		num_surfaces;
+	int		num_joints;
+	int		num_poses;
+	struct 
srfIQModel_s *surfaces; + + float *positions; + float *texcoords; + float *normals; + float *tangents; + byte *blendIndexes; + union { + float *f; + byte *b; + } blendWeights; + byte *colors; + int *triangles; + + // depending upon the exporter, blend indices and weights might be int/float + // as opposed to the recommended byte/byte, for example Noesis exports + // int/float whereas the official IQM tool exports byte/byte + byte blendWeightsType; // IQM_UBYTE or IQM_FLOAT + + int *jointParents; + float *jointMats; + float *poseMats; + float *bounds; + char *names; +} iqmData_t; + +// inter-quake-model surface +typedef struct srfIQModel_s { + surfaceType_t surfaceType; + char name[MAX_QPATH]; + shader_t *shader; + iqmData_t *data; + int first_vertex, num_vertexes; + int first_triangle, num_triangles; +} srfIQModel_t; + + +void R_AddIQMSurfaces( trRefEntity_t *ent ); +void RB_IQMSurfaceAnim( surfaceType_t *surface ); +void ComputePoseMats( iqmData_t *data, int frame, int oldframe, float backlerp, float *mat ); + + + +#endif diff --git a/code/renderer_vulkan/tr_model_iqm.c b/code/renderer_vulkan/tr_model_iqm.c new file mode 100644 index 0000000000..e0336b8978 --- /dev/null +++ b/code/renderer_vulkan/tr_model_iqm.c @@ -0,0 +1,1168 @@ +/* +=========================================================================== +Copyright (C) 2011 Thilo Schulz +Copyright (C) 2011 Matthias Bentrup + +This file is part of Quake III Arena source code. + +Quake III Arena source code is free software; you can redistribute it +and/or modify it under the terms of the GNU General Public License as +published by the Free Software Foundation; either version 2 of the License, +or (at your option) any later version. + +Quake III Arena source code is distributed in the hope that it will be +useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with Quake III Arena source code; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +=========================================================================== +*/ + +#include "tr_local.h" +#include "tr_globals.h" +#include "tr_backend.h" +#include "tr_model.h" +#include "tr_cvar.h" +#include "ref_import.h" +#include "matrix_multiplication.h" +#include "tr_shader.h" +#include "tr_light.h" + +#define LL(x) x=LittleLong(x) + +// 3x4 identity matrix +const static float identityMatrix[12] = { + 1, 0, 0, 0, + 0, 1, 0, 0, + 0, 0, 1, 0 +}; + +static qboolean IQM_CheckRange( iqmHeader_t *header, int offset, + int count,int size ) { + // return true if the range specified by offset, count and size + // doesn't fit into the file + return ( count <= 0 || + offset < 0 || + offset > header->filesize || + offset + count * size < 0 || + offset + count * size > header->filesize ); +} +// "multiply" 3x4 matrices, these are assumed to be the top 3 rows +// of a 4x4 matrix with the last row = (0 0 0 1) +static void Matrix34Multiply( float *a, float const *b, float *out ) { + out[ 0] = a[0] * b[0] + a[1] * b[4] + a[ 2] * b[ 8]; + out[ 1] = a[0] * b[1] + a[1] * b[5] + a[ 2] * b[ 9]; + out[ 2] = a[0] * b[2] + a[1] * b[6] + a[ 2] * b[10]; + out[ 3] = a[0] * b[3] + a[1] * b[7] + a[ 2] * b[11] + a[ 3]; + out[ 4] = a[4] * b[0] + a[5] * b[4] + a[ 6] * b[ 8]; + out[ 5] = a[4] * b[1] + a[5] * b[5] + a[ 6] * b[ 9]; + out[ 6] = a[4] * b[2] + a[5] * b[6] + a[ 6] * b[10]; + out[ 7] = a[4] * b[3] + a[5] * b[7] + a[ 6] * b[11] + a[ 7]; + out[ 8] = a[8] * b[0] + a[9] * b[4] + a[10] * b[ 8]; + out[ 9] = a[8] * b[1] + a[9] * b[5] + a[10] * b[ 9]; + out[10] = a[8] * b[2] + a[9] * b[6] + a[10] * b[10]; + out[11] = a[8] * b[3] + a[9] * b[7] + a[10] * b[11] + a[11]; +} + +static void InterpolateMatrix( float *a, float *b, float lerp, float *mat ) { + float unLerp = 1.0f - lerp; + + mat[ 0] = a[ 0] * unLerp + b[ 0] * lerp; + mat[ 1] = a[ 1] * unLerp + b[ 1] * lerp; + mat[ 2] = a[ 2] * unLerp + b[ 2] * lerp; + mat[ 3] = a[ 3] * unLerp + b[ 3] * lerp; + mat[ 4] = a[ 4] * unLerp + b[ 4] * lerp; + mat[ 5] = a[ 5] * unLerp + b[ 5] * lerp; + mat[ 6] = a[ 6] * unLerp + b[ 6] * lerp; + mat[ 7] = a[ 7] * unLerp + b[ 7] * lerp; + mat[ 8] = a[ 8] * unLerp + b[ 8] * lerp; + mat[ 9] = a[ 9] * unLerp + b[ 9] * lerp; + mat[10] = a[10] * unLerp + b[10] * lerp; + mat[11] = a[11] * unLerp + b[11] * lerp; +} +static void JointToMatrix( vec4_t rot, vec3_t scale, vec3_t trans, + float *mat ) { + float xx = 2.0f * rot[0] * rot[0]; + float yy = 2.0f * rot[1] * rot[1]; + float zz = 2.0f * rot[2] * rot[2]; + float xy = 2.0f * rot[0] * rot[1]; + float xz = 2.0f * rot[0] * rot[2]; + float yz = 2.0f * rot[1] * rot[2]; + float wx = 2.0f * rot[3] * rot[0]; + float wy = 2.0f * rot[3] * rot[1]; + float wz = 2.0f * rot[3] * rot[2]; + + mat[ 0] = scale[0] * (1.0f - (yy + zz)); + mat[ 1] = scale[0] * (xy - wz); + mat[ 2] = scale[0] * (xz + wy); + mat[ 3] = trans[0]; + mat[ 4] = scale[1] * (xy + wz); + mat[ 5] = scale[1] * (1.0f - (xx + zz)); + mat[ 6] = scale[1] * (yz - wx); + mat[ 7] = trans[1]; + mat[ 8] = scale[2] * (xz - wy); + mat[ 9] = scale[2] * (yz + wx); + mat[10] = scale[2] * (1.0f - (xx + yy)); + mat[11] = trans[2]; +} +static void Matrix34Invert( float *inMat, float *outMat ) +{ + vec3_t trans; + float invSqrLen, *v; + + outMat[ 0] = inMat[ 0]; outMat[ 1] = inMat[ 4]; outMat[ 2] = inMat[ 8]; + outMat[ 4] = inMat[ 1]; 
outMat[ 5] = inMat[ 5]; outMat[ 6] = inMat[ 9];
+	outMat[ 8] = inMat[ 2]; outMat[ 9] = inMat[ 6]; outMat[10] = inMat[10];
+
+	v = outMat + 0; invSqrLen = 1.0f / DotProduct(v, v); VectorScale(v, invSqrLen, v);
+	v = outMat + 4; invSqrLen = 1.0f / DotProduct(v, v); VectorScale(v, invSqrLen, v);
+	v = outMat + 8; invSqrLen = 1.0f / DotProduct(v, v); VectorScale(v, invSqrLen, v);
+
+	trans[0] = inMat[ 3];
+	trans[1] = inMat[ 7];
+	trans[2] = inMat[11];
+
+	outMat[ 3] = -DotProduct(outMat + 0, trans);
+	outMat[ 7] = -DotProduct(outMat + 4, trans);
+	outMat[11] = -DotProduct(outMat + 8, trans);
+}
+
+/*
+=================
+R_LoadIQM
+
+Load an IQM model and compute the joint matrices for every frame.
+=================
+*/
+qboolean R_LoadIQM( model_t *mod, void *buffer, int filesize, const char *mod_name ) {
+	iqmHeader_t		*header;
+	iqmVertexArray_t	*vertexarray;
+	iqmTriangle_t	*triangle;
+	iqmMesh_t		*mesh;
+	iqmJoint_t		*joint;
+	iqmPose_t		*pose;
+	iqmBounds_t		*bounds;
+	unsigned short	*framedata;
+	char			*str;
+	int			i, j;
+	float		jointInvMats[IQM_MAX_JOINTS * 12] = {0.0f};
+	float		*mat, *matInv;
+	size_t		size, joint_names;
+	iqmData_t		*iqmData;
+	srfIQModel_t	*surface;
+	char		meshName[MAX_QPATH];
+	byte		blendIndexesType, blendWeightsType;
+
+	if( filesize < sizeof(iqmHeader_t) ) {
+		return qfalse;
+	}
+
+	header = (iqmHeader_t *)buffer;
+	if( Q_strncmp( header->magic, IQM_MAGIC, sizeof(header->magic) ) ) {
+		return qfalse;
+	}
+
+	LL( header->version );
+	if( header->version != IQM_VERSION ) {
+		ri.Printf(PRINT_WARNING, "R_LoadIQM: %s is an unsupported IQM version (%d), only version %d is supported.\n",
+				mod_name, header->version, IQM_VERSION);
+		return qfalse;
+	}
+
+	LL( header->filesize );
+	if( header->filesize > filesize || header->filesize > 16<<20 ) {
+		return qfalse;
+	}
+
+	LL( header->flags );
+	LL( header->num_text );
+	LL( header->ofs_text );
+	LL( header->num_meshes );
+	LL( header->ofs_meshes );
+	LL( header->num_vertexarrays );
+	LL( header->num_vertexes );
+	LL( header->ofs_vertexarrays );
+	LL( header->num_triangles );
+	LL( header->ofs_triangles );
+	LL( header->ofs_adjacency );
+	LL( header->num_joints );
+	LL( header->ofs_joints );
+	LL( header->num_poses );
+	LL( header->ofs_poses );
+	LL( header->num_anims );
+	LL( header->ofs_anims );
+	LL( header->num_frames );
+	LL( header->num_framechannels );
+	LL( header->ofs_frames );
+	LL( header->ofs_bounds );
+	LL( header->num_comment );
+	LL( header->ofs_comment );
+	LL( header->num_extensions );
+	LL( header->ofs_extensions );
+
+	// check ioq3 joint limit
+	if ( header->num_joints > IQM_MAX_JOINTS ) {
+		ri.Printf(PRINT_WARNING, "R_LoadIQM: %s has more than %d joints (%d).\n",
+				mod_name, IQM_MAX_JOINTS, header->num_joints);
+		return qfalse;
+	}
+
+	blendIndexesType = blendWeightsType = IQM_UBYTE;
+
+	// check and swap vertex arrays
+	if( IQM_CheckRange( header, header->ofs_vertexarrays,
+				header->num_vertexarrays,
+				sizeof(iqmVertexArray_t) ) ) {
+		return qfalse;
+	}
+	vertexarray = (iqmVertexArray_t *)((byte *)header + header->ofs_vertexarrays);
+	for( i = 0; i < header->num_vertexarrays; i++, vertexarray++ ) {
+		int	n, *intPtr;
+
+		if( vertexarray->size <= 0 || vertexarray->size > 4 ) {
+			return qfalse;
+		}
+
+		// total number of values
+		n = header->num_vertexes * vertexarray->size;
+
+		switch( vertexarray->format ) {
+		case IQM_BYTE:
+		case IQM_UBYTE:
+			// 1 byte, no swapping necessary
+			if( IQM_CheckRange( header, vertexarray->offset,
+					n, sizeof(byte) ) ) {
+				return qfalse;
+			}
+			break;
+		case IQM_INT:
+		case IQM_UINT: 
+ case IQM_FLOAT: + // 4-byte swap + if( IQM_CheckRange( header, vertexarray->offset, + n, sizeof(float) ) ) { + return qfalse; + } + intPtr = (int *)((byte *)header + vertexarray->offset); + for( j = 0; j < n; j++, intPtr++ ) { + LL( *intPtr ); + } + break; + default: + // not supported + return qfalse; + break; + } + + switch( vertexarray->type ) { + case IQM_POSITION: + case IQM_NORMAL: + if( vertexarray->format != IQM_FLOAT || + vertexarray->size != 3 ) { + return qfalse; + } + break; + case IQM_TANGENT: + if( vertexarray->format != IQM_FLOAT || + vertexarray->size != 4 ) { + return qfalse; + } + break; + case IQM_TEXCOORD: + if( vertexarray->format != IQM_FLOAT || + vertexarray->size != 2 ) { + return qfalse; + } + break; + case IQM_BLENDINDEXES: + if( (vertexarray->format != IQM_INT && + vertexarray->format != IQM_UBYTE) || + vertexarray->size != 4 ) { + return qfalse; + } + blendIndexesType = vertexarray->format; + break; + case IQM_BLENDWEIGHTS: + if( (vertexarray->format != IQM_FLOAT && + vertexarray->format != IQM_UBYTE) || + vertexarray->size != 4 ) { + return qfalse; + } + blendWeightsType = vertexarray->format; + break; + case IQM_COLOR: + if( vertexarray->format != IQM_UBYTE || + vertexarray->size != 4 ) { + return qfalse; + } + break; + } + } + + // check and swap triangles + if( IQM_CheckRange( header, header->ofs_triangles, + header->num_triangles, sizeof(iqmTriangle_t) ) ) { + return qfalse; + } + triangle = (iqmTriangle_t *)((byte *)header + header->ofs_triangles); + for( i = 0; i < header->num_triangles; i++, triangle++ ) { + LL( triangle->vertex[0] ); + LL( triangle->vertex[1] ); + LL( triangle->vertex[2] ); + + if( triangle->vertex[0] > header->num_vertexes || + triangle->vertex[1] > header->num_vertexes || + triangle->vertex[2] > header->num_vertexes ) { + return qfalse; + } + } + + // check and swap meshes + if( IQM_CheckRange( header, header->ofs_meshes, + header->num_meshes, sizeof(iqmMesh_t) ) ) { + return qfalse; + } + mesh = (iqmMesh_t *)((byte *)header + header->ofs_meshes); + for( i = 0; i < header->num_meshes; i++, mesh++) { + LL( mesh->name ); + LL( mesh->material ); + LL( mesh->first_vertex ); + LL( mesh->num_vertexes ); + LL( mesh->first_triangle ); + LL( mesh->num_triangles ); + + if ( mesh->name < header->num_text ) { + Q_strncpyz( meshName, (char*)header + header->ofs_text + mesh->name, sizeof (meshName) ); + } else { + meshName[0] = '\0'; + } + + // check ioq3 limits + if ( mesh->num_vertexes >= SHADER_MAX_VERTEXES ) + { + ri.Printf(PRINT_WARNING, "R_LoadIQM: %s has more than %i verts on %s (%i).\n", + mod_name, SHADER_MAX_VERTEXES - 1, meshName[0] ? meshName : "a surface", + mesh->num_vertexes ); + return qfalse; + } + if ( mesh->num_triangles*3 >= SHADER_MAX_INDEXES ) + { + ri.Printf(PRINT_WARNING, "R_LoadIQM: %s has more than %i triangles on %s (%i).\n", + mod_name, ( SHADER_MAX_INDEXES / 3 ) - 1, meshName[0] ? 
meshName : "a surface", + mesh->num_triangles ); + return qfalse; + } + + if( mesh->first_vertex >= header->num_vertexes || + mesh->first_vertex + mesh->num_vertexes > header->num_vertexes || + mesh->first_triangle >= header->num_triangles || + mesh->first_triangle + mesh->num_triangles > header->num_triangles || + mesh->name >= header->num_text || + mesh->material >= header->num_text ) { + return qfalse; + } + } + + if( header->num_poses != header->num_joints && header->num_poses != 0 ) { + ri.Printf(PRINT_WARNING, "R_LoadIQM: %s has %d poses and %d joints, must have the same number or 0 poses\n", + mod_name, header->num_poses, header->num_joints ); + return qfalse; + } + + joint_names = 0; + + if ( header->num_joints ) + { + // check and swap joints + if( IQM_CheckRange( header, header->ofs_joints, + header->num_joints, sizeof(iqmJoint_t) ) ) { + return qfalse; + } + joint = (iqmJoint_t *)((byte *)header + header->ofs_joints); + for( i = 0; i < header->num_joints; i++, joint++ ) { + LL( joint->name ); + LL( joint->parent ); + LL( joint->translate[0] ); + LL( joint->translate[1] ); + LL( joint->translate[2] ); + LL( joint->rotate[0] ); + LL( joint->rotate[1] ); + LL( joint->rotate[2] ); + LL( joint->rotate[3] ); + LL( joint->scale[0] ); + LL( joint->scale[1] ); + LL( joint->scale[2] ); + + if( joint->parent < -1 || + joint->parent >= (int)header->num_joints || + joint->name >= (int)header->num_text ) { + return qfalse; + } + joint_names += strlen( (char *)header + header->ofs_text + + joint->name ) + 1; + } + } + + if ( header->num_poses ) + { + // check and swap poses + if( IQM_CheckRange( header, header->ofs_poses, + header->num_poses, sizeof(iqmPose_t) ) ) { + return qfalse; + } + pose = (iqmPose_t *)((byte *)header + header->ofs_poses); + for( i = 0; i < header->num_poses; i++, pose++ ) { + LL( pose->parent ); + LL( pose->mask ); + LL( pose->channeloffset[0] ); + LL( pose->channeloffset[1] ); + LL( pose->channeloffset[2] ); + LL( pose->channeloffset[3] ); + LL( pose->channeloffset[4] ); + LL( pose->channeloffset[5] ); + LL( pose->channeloffset[6] ); + LL( pose->channeloffset[7] ); + LL( pose->channeloffset[8] ); + LL( pose->channeloffset[9] ); + LL( pose->channelscale[0] ); + LL( pose->channelscale[1] ); + LL( pose->channelscale[2] ); + LL( pose->channelscale[3] ); + LL( pose->channelscale[4] ); + LL( pose->channelscale[5] ); + LL( pose->channelscale[6] ); + LL( pose->channelscale[7] ); + LL( pose->channelscale[8] ); + LL( pose->channelscale[9] ); + } + } + + if (header->ofs_bounds) + { + // check and swap model bounds + if(IQM_CheckRange(header, header->ofs_bounds, + header->num_frames, sizeof(*bounds))) + { + return qfalse; + } + bounds = (iqmBounds_t *) ((byte *) header + header->ofs_bounds); + for(i = 0; i < header->num_frames; i++) + { + LL(bounds->bbmin[0]); + LL(bounds->bbmin[1]); + LL(bounds->bbmin[2]); + LL(bounds->bbmax[0]); + LL(bounds->bbmax[1]); + LL(bounds->bbmax[2]); + + bounds++; + } + } + + // allocate the model and copy the data + size = sizeof(iqmData_t); + size += header->num_meshes * sizeof( srfIQModel_t ); + size += header->num_joints * 12 * sizeof( float ); // joint mats + size += header->num_poses * header->num_frames * 12 * sizeof( float ); // pose mats + if(header->ofs_bounds) + size += header->num_frames * 6 * sizeof(float); // model bounds + size += header->num_vertexes * 3 * sizeof(float); // positions + size += header->num_vertexes * 2 * sizeof(float); // texcoords + size += header->num_vertexes * 3 * sizeof(float); // normals + size += 
header->num_vertexes * 4 * sizeof(float); // tangents + size += header->num_vertexes * 4 * sizeof(byte); // blendIndexes + size += header->num_vertexes * 4 * sizeof(byte); // colors + size += header->num_joints * sizeof(int); // parents + size += header->num_triangles * 3 * sizeof(int); // triangles + size += joint_names; // joint names + + // blendWeights + if (blendWeightsType == IQM_FLOAT) { + size += header->num_vertexes * 4 * sizeof(float); + } else { + size += header->num_vertexes * 4 * sizeof(byte); + } + + mod->type = MOD_IQM; + iqmData = (iqmData_t *)ri.Hunk_Alloc( size, h_low ); + mod->modelData = iqmData; + + // fill header + iqmData->num_vertexes = header->num_vertexes; + iqmData->num_triangles = header->num_triangles; + iqmData->num_frames = header->num_frames; + iqmData->num_surfaces = header->num_meshes; + iqmData->num_joints = header->num_joints; + iqmData->num_poses = header->num_poses; + iqmData->blendWeightsType = blendWeightsType; + iqmData->surfaces = (srfIQModel_t *)(iqmData + 1); + iqmData->jointMats = (float *) (iqmData->surfaces + iqmData->num_surfaces); + iqmData->poseMats = iqmData->jointMats + 12 * header->num_joints; + if(header->ofs_bounds) + { + iqmData->bounds = iqmData->poseMats + 12 * header->num_poses * header->num_frames; + iqmData->positions = iqmData->bounds + 6 * header->num_frames; + } + else + iqmData->positions = iqmData->poseMats + 12 * header->num_poses * header->num_frames; + iqmData->texcoords = iqmData->positions + 3 * header->num_vertexes; + iqmData->normals = iqmData->texcoords + 2 * header->num_vertexes; + iqmData->tangents = iqmData->normals + 3 * header->num_vertexes; + iqmData->blendIndexes = (byte *)(iqmData->tangents + 4 * header->num_vertexes); + + if(blendWeightsType == IQM_FLOAT) { + iqmData->blendWeights.f = (float *)(iqmData->blendIndexes + 4 * header->num_vertexes); + iqmData->colors = (byte *)(iqmData->blendWeights.f + 4 * header->num_vertexes); + } else { + iqmData->blendWeights.b = iqmData->blendIndexes + 4 * header->num_vertexes; + iqmData->colors = iqmData->blendWeights.b + 4 * header->num_vertexes; + } + + iqmData->jointParents = (int *)(iqmData->colors + 4 * header->num_vertexes); + iqmData->triangles = iqmData->jointParents + header->num_joints; + iqmData->names = (char *)(iqmData->triangles + 3 * header->num_triangles); + + if ( header->num_joints == 0 ) + iqmData->jointMats = NULL; + + if ( header->num_poses == 0 ) + iqmData->poseMats = NULL; + + // calculate joint matrices and their inverses + // joint inverses are needed only until the pose matrices are calculated + mat = iqmData->jointMats; + matInv = jointInvMats; + joint = (iqmJoint_t *)((byte *)header + header->ofs_joints); + for( i = 0; i < header->num_joints; i++, joint++ ) { + float baseFrame[12], invBaseFrame[12]; + + JointToMatrix( joint->rotate, joint->scale, joint->translate, baseFrame ); + Matrix34Invert( baseFrame, invBaseFrame ); + + if ( joint->parent >= 0 ) + { + Matrix34Multiply( iqmData->jointMats + 12 * joint->parent, baseFrame, mat ); + mat += 12; + Matrix34Multiply( invBaseFrame, jointInvMats + 12 * joint->parent, matInv ); + matInv += 12; + } + else + { + memcpy( mat, baseFrame, sizeof(baseFrame) ); + mat += 12; + memcpy( matInv, invBaseFrame, sizeof(invBaseFrame) ); + matInv += 12; + } + } + + // calculate pose matrices + framedata = (unsigned short *)((byte *)header + header->ofs_frames); + mat = iqmData->poseMats; + for( i = 0; i < header->num_frames; i++ ) { + pose = (iqmPose_t *)((byte *)header + header->ofs_poses); + for( j = 0; j < 
header->num_poses; j++, pose++ ) { + vec3_t translate; + vec4_t rotate; + vec3_t scale; + float mat1[12], mat2[12]; + + translate[0] = pose->channeloffset[0]; + if( pose->mask & 0x001) + translate[0] += *framedata++ * pose->channelscale[0]; + translate[1] = pose->channeloffset[1]; + if( pose->mask & 0x002) + translate[1] += *framedata++ * pose->channelscale[1]; + translate[2] = pose->channeloffset[2]; + if( pose->mask & 0x004) + translate[2] += *framedata++ * pose->channelscale[2]; + + rotate[0] = pose->channeloffset[3]; + if( pose->mask & 0x008) + rotate[0] += *framedata++ * pose->channelscale[3]; + rotate[1] = pose->channeloffset[4]; + if( pose->mask & 0x010) + rotate[1] += *framedata++ * pose->channelscale[4]; + rotate[2] = pose->channeloffset[5]; + if( pose->mask & 0x020) + rotate[2] += *framedata++ * pose->channelscale[5]; + rotate[3] = pose->channeloffset[6]; + if( pose->mask & 0x040) + rotate[3] += *framedata++ * pose->channelscale[6]; + + scale[0] = pose->channeloffset[7]; + if( pose->mask & 0x080) + scale[0] += *framedata++ * pose->channelscale[7]; + scale[1] = pose->channeloffset[8]; + if( pose->mask & 0x100) + scale[1] += *framedata++ * pose->channelscale[8]; + scale[2] = pose->channeloffset[9]; + if( pose->mask & 0x200) + scale[2] += *framedata++ * pose->channelscale[9]; + + // construct transformation matrix + JointToMatrix( rotate, scale, translate, mat1 ); + + if( pose->parent >= 0 ) { + Matrix34Multiply( iqmData->jointMats + 12 * pose->parent, + mat1, mat2 ); + } else { + memcpy( mat2, mat1, sizeof(mat1) ); + } + + Matrix34Multiply( mat2, jointInvMats + 12 * j, mat ); + mat += 12; + } + } + + // register shaders + // overwrite the material offset with the shader index + mesh = (iqmMesh_t *)((byte *)header + header->ofs_meshes); + surface = iqmData->surfaces; + str = (char *)header + header->ofs_text; + for( i = 0; i < header->num_meshes; i++, mesh++, surface++ ) { + surface->surfaceType = SF_IQM; + Q_strncpyz(surface->name, str + mesh->name, sizeof (surface->name)); + Q_strlwr(surface->name); // lowercase the surface name so skin compares are faster + surface->shader = R_FindShader( str + mesh->material, LIGHTMAP_NONE, qtrue ); + if( surface->shader->defaultShader ) + surface->shader = tr.defaultShader; + surface->data = iqmData; + surface->first_vertex = mesh->first_vertex; + surface->num_vertexes = mesh->num_vertexes; + surface->first_triangle = mesh->first_triangle; + surface->num_triangles = mesh->num_triangles; + } + + // copy vertexarrays and indexes + vertexarray = (iqmVertexArray_t *)((byte *)header + header->ofs_vertexarrays); + for( i = 0; i < header->num_vertexarrays; i++, vertexarray++ ) { + int n; + + // total number of values + n = header->num_vertexes * vertexarray->size; + + switch( vertexarray->type ) { + case IQM_POSITION: + memcpy( iqmData->positions, + (byte *)header + vertexarray->offset, + n * sizeof(float) ); + break; + case IQM_NORMAL: + memcpy( iqmData->normals, + (byte *)header + vertexarray->offset, + n * sizeof(float) ); + break; + case IQM_TANGENT: + memcpy( iqmData->tangents, + (byte *)header + vertexarray->offset, + n * sizeof(float) ); + break; + case IQM_TEXCOORD: + memcpy( iqmData->texcoords, + (byte *)header + vertexarray->offset, + n * sizeof(float) ); + break; + case IQM_BLENDINDEXES: + if( blendIndexesType == IQM_INT ) { + int *data = (int*)((byte*)header + vertexarray->offset); + for ( j = 0; j < n; j++ ) { + iqmData->blendIndexes[j] = (byte)data[j]; + } + } else { + memcpy( iqmData->blendIndexes, + (byte *)header + 
vertexarray->offset, + n * sizeof(byte) ); + } + break; + case IQM_BLENDWEIGHTS: + if( blendWeightsType == IQM_FLOAT ) { + memcpy( iqmData->blendWeights.f, + (byte *)header + vertexarray->offset, + n * sizeof(float) ); + } else { + memcpy( iqmData->blendWeights.b, + (byte *)header + vertexarray->offset, + n * sizeof(byte) ); + } + break; + case IQM_COLOR: + memcpy( iqmData->colors, + (byte *)header + vertexarray->offset, + n * sizeof(byte) ); + break; + } + } + + // copy joint parents + joint = (iqmJoint_t *)((byte *)header + header->ofs_joints); + for( i = 0; i < header->num_joints; i++, joint++ ) { + iqmData->jointParents[i] = joint->parent; + } + + // copy triangles + triangle = (iqmTriangle_t *)((byte *)header + header->ofs_triangles); + for( i = 0; i < header->num_triangles; i++, triangle++ ) { + iqmData->triangles[3*i+0] = triangle->vertex[0]; + iqmData->triangles[3*i+1] = triangle->vertex[1]; + iqmData->triangles[3*i+2] = triangle->vertex[2]; + } + + // copy joint names + str = iqmData->names; + joint = (iqmJoint_t *)((byte *)header + header->ofs_joints); + for( i = 0; i < header->num_joints; i++, joint++ ) { + char *name = (char *)header + header->ofs_text + + joint->name; + int len = strlen( name ) + 1; + memcpy( str, name, len ); + str += len; + } + + // copy model bounds + if(header->ofs_bounds) + { + mat = iqmData->bounds; + bounds = (iqmBounds_t *) ((byte *) header + header->ofs_bounds); + for(i = 0; i < header->num_frames; i++) + { + mat[0] = bounds->bbmin[0]; + mat[1] = bounds->bbmin[1]; + mat[2] = bounds->bbmin[2]; + mat[3] = bounds->bbmax[0]; + mat[4] = bounds->bbmax[1]; + mat[5] = bounds->bbmax[2]; + + mat += 6; + bounds++; + } + } + + return qtrue; +} + +/* +============= +R_CullIQM +============= +*/ +static int R_CullIQM( iqmData_t *data, trRefEntity_t *ent ) { + vec3_t bounds[2]; + vec_t *oldBounds, *newBounds; + int i; + + if (!data->bounds) { + tr.pc.c_box_cull_md3_clip++; + return CULL_CLIP; + } + + // compute bounds pointers + oldBounds = data->bounds + 6*ent->e.oldframe; + newBounds = data->bounds + 6*ent->e.frame; + + // calculate a bounding box in the current coordinate system + for (i = 0 ; i < 3 ; i++) { + bounds[0][i] = oldBounds[i] < newBounds[i] ? oldBounds[i] : newBounds[i]; + bounds[1][i] = oldBounds[i+3] > newBounds[i+3] ? 
oldBounds[i+3] : newBounds[i+3]; + } + + switch ( R_CullLocalBox( bounds ) ) + { + case CULL_IN: + tr.pc.c_box_cull_md3_in++; + return CULL_IN; + case CULL_CLIP: + tr.pc.c_box_cull_md3_clip++; + return CULL_CLIP; + case CULL_OUT: + default: + tr.pc.c_box_cull_md3_out++; + return CULL_OUT; + } +} + +/* +================= +R_ComputeIQMFogNum + +================= +*/ +int R_ComputeIQMFogNum( iqmData_t *data, trRefEntity_t *ent ) { + int i, j; + fog_t *fog; + const vec_t *bounds; + const vec_t defaultBounds[6] = { -8, -8, -8, 8, 8, 8 }; + vec3_t diag, center; + vec3_t localOrigin; + vec_t radius; + + if ( tr.refdef.rd.rdflags & RDF_NOWORLDMODEL ) { + return 0; + } + + // FIXME: non-normalized axis issues + if (data->bounds) { + bounds = data->bounds + 6*ent->e.frame; + } else { + bounds = defaultBounds; + } + VectorSubtract( bounds+3, bounds, diag ); + VectorMA( bounds, 0.5f, diag, center ); + VectorAdd( ent->e.origin, center, localOrigin ); + radius = 0.5f * VectorLen( diag ); + + for ( i = 1 ; i < tr.world->numfogs ; i++ ) { + fog = &tr.world->fogs[i]; + for ( j = 0 ; j < 3 ; j++ ) { + if ( localOrigin[j] - radius >= fog->bounds[1][j] ) { + break; + } + if ( localOrigin[j] + radius <= fog->bounds[0][j] ) { + break; + } + } + if ( j == 3 ) { + return i; + } + } + + return 0; +} + +/* +================= +R_AddIQMSurfaces + +Add all surfaces of this model +================= +*/ +void R_AddIQMSurfaces( trRefEntity_t *ent ) +{ + // + ri.Printf( PRINT_ALL, "Add IQM Surfaces. \n"); + + iqmData_t *data; + srfIQModel_t *surface; + int i, j; + qboolean personalModel; + int cull; + int fogNum; + shader_t *shader; + skin_t *skin; + + data = tr.currentModel->modelData; + surface = data->surfaces; + + // don't add third_person objects if not in a portal + personalModel = (ent->e.renderfx & RF_THIRD_PERSON) && !tr.viewParms.isPortal; + + if ( ent->e.renderfx & RF_WRAP_FRAMES ) { + ent->e.frame %= data->num_frames; + ent->e.oldframe %= data->num_frames; + } + + // + // Validate the frames so there is no chance of a crash. + // This will write directly into the entity structure, so + // when the surfaces are rendered, they don't need to be + // range checked again. + // + if ( (ent->e.frame >= data->num_frames) + || (ent->e.frame < 0) + || (ent->e.oldframe >= data->num_frames) + || (ent->e.oldframe < 0) ) { + ri.Printf( PRINT_DEVELOPER, "R_AddIQMSurfaces: no such frame %d to %d for '%s'\n", + ent->e.oldframe, ent->e.frame, + tr.currentModel->name ); + ent->e.frame = 0; + ent->e.oldframe = 0; + } + + // + // cull the entire model if merged bounding box of both frames + // is outside the view frustum. 
+	//
+	cull = R_CullIQM ( data, ent );
+	if ( cull == CULL_OUT ) {
+		return;
+	}
+
+	//
+	// set up lighting now that we know we aren't culled
+	//
+	if ( !personalModel || r_shadows->integer > 1 ) {
+		R_SetupEntityLighting( &tr.refdef, ent );
+	}
+
+	//
+	// see if we are in a fog volume
+	//
+	fogNum = R_ComputeIQMFogNum( data, ent );
+
+	for ( i = 0 ; i < data->num_surfaces ; i++ ) {
+		if(ent->e.customShader)
+			shader = R_GetShaderByHandle( ent->e.customShader );
+		else if(ent->e.customSkin > 0 && ent->e.customSkin < tr.numSkins)
+		{
+			skin = R_GetSkinByHandle(ent->e.customSkin);
+			shader = tr.defaultShader;
+
+			for(j = 0; j < skin->numSurfaces; j++)
+			{
+				if (!strcmp(skin->pSurfaces[j].name, surface->name))
+				{
+					shader = skin->pSurfaces[j].shader;
+					break;
+				}
+			}
+		} else {
+			shader = surface->shader;
+		}
+
+		// we will add shadows even if the main object isn't visible in the view
+
+		// stencil shadows can't do personal models unless I polyhedron clip
+		if ( !personalModel
+			&& r_shadows->integer == 2
+			&& fogNum == 0
+			&& !(ent->e.renderfx & ( RF_NOSHADOW | RF_DEPTHHACK ) )
+			&& shader->sort == SS_OPAQUE ) {
+			R_AddDrawSurf( (void *)surface, tr.shadowShader, 0, 0 );
+		}
+
+		// projection shadows work fine with personal models
+		if ( r_shadows->integer == 3
+			&& fogNum == 0
+			&& (ent->e.renderfx & RF_SHADOW_PLANE )
+			&& shader->sort == SS_OPAQUE ) {
+			R_AddDrawSurf( (void *)surface, tr.projectionShadowShader, 0, 0 );
+		}
+
+		if( !personalModel ) {
+			R_AddDrawSurf( (void *)surface, shader, fogNum, 0 );
+		}
+
+		surface++;
+	}
+}
+
+
+void ComputePoseMats( iqmData_t *data, int frame, int oldframe,
+			float backlerp, float *mat ) {
+	float	*mat1, *mat2;
+	int	*joint = data->jointParents;
+	int	i;
+
+	if ( data->num_poses == 0 ) {
+		for( i = 0; i < data->num_joints; i++, joint++ ) {
+			if( *joint >= 0 ) {
+				Matrix34Multiply( mat + 12 * *joint,
+						identityMatrix, mat + 12*i );
+			} else {
+				memcpy( mat + 12*i, identityMatrix, 12 * sizeof(float) );
+			}
+		}
+		return;
+	}
+
+	if ( oldframe == frame ) {
+		mat1 = data->poseMats + 12 * data->num_poses * frame;
+		for( i = 0; i < data->num_poses; i++, joint++ ) {
+			if( *joint >= 0 ) {
+				Matrix34Multiply( mat + 12 * *joint,
+						mat1 + 12*i, mat + 12*i );
+			} else {
+				memcpy( mat + 12*i, mat1 + 12*i, 12 * sizeof(float) );
+			}
+		}
+	} else {
+		mat1 = data->poseMats + 12 * data->num_poses * frame;
+		mat2 = data->poseMats + 12 * data->num_poses * oldframe;
+
+		for( i = 0; i < data->num_poses; i++, joint++ ) {
+			if( *joint >= 0 ) {
+				float tmpMat[12];
+				InterpolateMatrix( mat1 + 12*i, mat2 + 12*i,
+						backlerp, tmpMat );
+				Matrix34Multiply( mat + 12 * *joint,
+						tmpMat, mat + 12*i );
+
+			} else {
+				// write into this joint's slot, not the start of mat
+				InterpolateMatrix( mat1 + 12*i, mat2 + 12*i,
+						backlerp, mat + 12*i );
+			}
+		}
+	}
+}
+
+
+/*
+=================
+RB_IQMSurfaceAnim
+
+Compute vertices for this model surface
+=================
+*/
+void RB_IQMSurfaceAnim( surfaceType_t *surface )
+{
+	srfIQModel_t	*surf = (srfIQModel_t *)surface;
+	iqmData_t	*data = surf->data;
+	float		jointMats[IQM_MAX_JOINTS * 12];
+	int		i;
+
+	vec4_t		*outXYZ;
+	vec4_t		*outNormal;
+	vec2_t		(*outTexCoord)[2];
+	color4ub_t	*outColor;
+
+	int	frame = data->num_frames ? backEnd.currentEntity->e.frame % data->num_frames : 0;
+	int	oldframe = data->num_frames ? 
backEnd.currentEntity->e.oldframe % data->num_frames : 0; + float backlerp = backEnd.currentEntity->e.backlerp; + + int *tri; + unsigned int* ptr; + unsigned int base; + + RB_CHECKOVERFLOW( surf->num_vertexes, surf->num_triangles * 3 ); + + outXYZ = &tess.xyz[tess.numVertexes]; + outNormal = &tess.normal[tess.numVertexes]; + outTexCoord = &tess.texCoords[tess.numVertexes]; + outColor = &tess.vertexColors[tess.numVertexes]; + + // compute interpolated joint matrices + if ( data->num_poses > 0 ) { + ComputePoseMats( data, frame, oldframe, backlerp, jointMats ); + } + + // transform vertexes and fill other data + for( i = 0; i < surf->num_vertexes; + i++, outXYZ++, outNormal++, outTexCoord++, outColor++ ) { + int j, k; + float vtxMat[12]; + float nrmMat[9]; + int vtx = i + surf->first_vertex; + float blendWeights[4]; + int numWeights; + + for ( numWeights = 0; numWeights < 4; numWeights++ ) { + if ( data->blendWeightsType == IQM_FLOAT ) + blendWeights[numWeights] = data->blendWeights.f[4*vtx + numWeights]; + else + blendWeights[numWeights] = (float)data->blendWeights.b[4*vtx + numWeights] / 255.0f; + + if ( blendWeights[numWeights] <= 0 ) + break; + } + + if ( data->num_poses == 0 || numWeights == 0 ) { + // no blend joint, use identity matrix. + memcpy( vtxMat, identityMatrix, 12 * sizeof (float) ); + } else { + // compute the vertex matrix by blending the up to + // four blend weights + memset( vtxMat, 0, 12 * sizeof (float) ); + for( j = 0; j < numWeights; j++ ) { + for( k = 0; k < 12; k++ ) { + vtxMat[k] += blendWeights[j] * jointMats[12*data->blendIndexes[4*vtx + j] + k]; + } + } + } + + // compute the normal matrix as transpose of the adjoint + // of the vertex matrix + nrmMat[ 0] = vtxMat[ 5]*vtxMat[10] - vtxMat[ 6]*vtxMat[ 9]; + nrmMat[ 1] = vtxMat[ 6]*vtxMat[ 8] - vtxMat[ 4]*vtxMat[10]; + nrmMat[ 2] = vtxMat[ 4]*vtxMat[ 9] - vtxMat[ 5]*vtxMat[ 8]; + nrmMat[ 3] = vtxMat[ 2]*vtxMat[ 9] - vtxMat[ 1]*vtxMat[10]; + nrmMat[ 4] = vtxMat[ 0]*vtxMat[10] - vtxMat[ 2]*vtxMat[ 8]; + nrmMat[ 5] = vtxMat[ 1]*vtxMat[ 8] - vtxMat[ 0]*vtxMat[ 9]; + nrmMat[ 6] = vtxMat[ 1]*vtxMat[ 6] - vtxMat[ 2]*vtxMat[ 5]; + nrmMat[ 7] = vtxMat[ 2]*vtxMat[ 4] - vtxMat[ 0]*vtxMat[ 6]; + nrmMat[ 8] = vtxMat[ 0]*vtxMat[ 5] - vtxMat[ 1]*vtxMat[ 4]; + + (*outTexCoord)[0][0] = data->texcoords[2*vtx + 0]; + (*outTexCoord)[0][1] = data->texcoords[2*vtx + 1]; + (*outTexCoord)[1][0] = (*outTexCoord)[0][0]; + (*outTexCoord)[1][1] = (*outTexCoord)[0][1]; + + (*outXYZ)[0] = + vtxMat[ 0] * data->positions[3*vtx+0] + + vtxMat[ 1] * data->positions[3*vtx+1] + + vtxMat[ 2] * data->positions[3*vtx+2] + + vtxMat[ 3]; + (*outXYZ)[1] = + vtxMat[ 4] * data->positions[3*vtx+0] + + vtxMat[ 5] * data->positions[3*vtx+1] + + vtxMat[ 6] * data->positions[3*vtx+2] + + vtxMat[ 7]; + (*outXYZ)[2] = + vtxMat[ 8] * data->positions[3*vtx+0] + + vtxMat[ 9] * data->positions[3*vtx+1] + + vtxMat[10] * data->positions[3*vtx+2] + + vtxMat[11]; + (*outXYZ)[3] = 1.0f; + + (*outNormal)[0] = + nrmMat[ 0] * data->normals[3*vtx+0] + + nrmMat[ 1] * data->normals[3*vtx+1] + + nrmMat[ 2] * data->normals[3*vtx+2]; + (*outNormal)[1] = + nrmMat[ 3] * data->normals[3*vtx+0] + + nrmMat[ 4] * data->normals[3*vtx+1] + + nrmMat[ 5] * data->normals[3*vtx+2]; + (*outNormal)[2] = + nrmMat[ 6] * data->normals[3*vtx+0] + + nrmMat[ 7] * data->normals[3*vtx+1] + + nrmMat[ 8] * data->normals[3*vtx+2]; + (*outNormal)[3] = 0.0f; + + (*outColor)[0] = data->colors[4*vtx+0]; + (*outColor)[1] = data->colors[4*vtx+1]; + (*outColor)[2] = data->colors[4*vtx+2]; + (*outColor)[3] = 
data->colors[4*vtx+3]; + } + + tri = data->triangles + 3 * surf->first_triangle; + ptr = &tess.indexes[tess.numIndexes]; + base = tess.numVertexes; + + for( i = 0; i < surf->num_triangles; i++ ) { + *ptr++ = base + (*tri++ - surf->first_vertex); + *ptr++ = base + (*tri++ - surf->first_vertex); + *ptr++ = base + (*tri++ - surf->first_vertex); + } + + tess.numIndexes += 3 * surf->num_triangles; + tess.numVertexes += surf->num_vertexes; +} + +qhandle_t R_RegisterIQM(const char *name, model_t *mod) +{ + char* buf; + + qboolean loaded = qfalse; + int filesize; + + filesize = ri.FS_ReadFile(name, (void**)&buf); + if(!buf) + { + mod->type = MOD_BAD; + return 0; + } + + loaded = R_LoadIQM(mod, buf, filesize, name); + + ri.FS_FreeFile (buf); + + if(!loaded) + { + ri.Printf(PRINT_WARNING,"R_RegisterIQM: couldn't load iqm file %s\n", name); + mod->type = MOD_BAD; + return 0; + } + + return mod->index; +} diff --git a/code/renderer_vulkan/tr_noise.c b/code/renderer_vulkan/tr_noise.c new file mode 100644 index 0000000000..1bd331cbd9 --- /dev/null +++ b/code/renderer_vulkan/tr_noise.c @@ -0,0 +1,96 @@ +/* +=========================================================================== +Copyright (C) 1999-2005 Id Software, Inc. + +This file is part of Quake III Arena source code. + +Quake III Arena source code is free software; you can redistribute it +and/or modify it under the terms of the GNU General Public License as +published by the Free Software Foundation; either version 2 of the License, +or (at your option) any later version. + +Quake III Arena source code is distributed in the hope that it will be +useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with Foobar; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +=========================================================================== +*/ +// tr_noise.c +#include "tr_local.h" + +#define NOISE_SIZE 256 +#define NOISE_MASK ( NOISE_SIZE - 1 ) + +#define VAL( a ) s_noise_perm[ ( a ) & ( NOISE_MASK )] +#define INDEX( x, y, z, t ) VAL( x + VAL( y + VAL( z + VAL( t ) ) ) ) + +static float s_noise_table[NOISE_SIZE]; +static int s_noise_perm[NOISE_SIZE]; + +inline static float LerpF(float a, float b, float w ) +{ + return ( a + (b - a) * w ); +} + +static float GetNoiseValue( int x, int y, int z, int t ) +{ + int index = INDEX( ( int ) x, ( int ) y, ( int ) z, ( int ) t ); + + return s_noise_table[index]; +} + +void R_NoiseInit( void ) +{ + int i; + + srand( 1001 ); + + for ( i = 0; i < NOISE_SIZE; i++ ) + { + s_noise_table[i] = ( float ) ( ( ( rand() / ( float ) RAND_MAX ) * 2.0 - 1.0 ) ); + s_noise_perm[i] = ( unsigned char ) ( rand() / ( float ) RAND_MAX * 255 ); + } +} + +float R_NoiseGet4f( float x, float y, float z, float t ) +{ + int i; + int ix, iy, iz, it; + float fx, fy, fz, ft; + float front[4]; + float back[4]; + float fvalue, bvalue, value[2]; + + ix = ( int ) floor( x ); + fx = x - ix; + iy = ( int ) floor( y ); + fy = y - iy; + iz = ( int ) floor( z ); + fz = z - iz; + it = ( int ) floor( t ); + ft = t - it; + + for ( i = 0; i < 2; i++ ) + { + front[0] = GetNoiseValue( ix, iy, iz, it + i ); + front[1] = GetNoiseValue( ix+1, iy, iz, it + i ); + front[2] = GetNoiseValue( ix, iy+1, iz, it + i ); + front[3] = GetNoiseValue( ix+1, iy+1, iz, it + i ); + + back[0] = GetNoiseValue( ix, iy, iz + 1, it + i ); + back[1] = GetNoiseValue( ix+1, iy, iz + 1, it + i ); + back[2] = GetNoiseValue( ix, iy+1, iz + 1, it + i ); + back[3] = GetNoiseValue( ix+1, iy+1, iz + 1, it + i ); + + fvalue = LerpF( LerpF( front[0], front[1], fx ), LerpF( front[2], front[3], fx ), fy ); + bvalue = LerpF( LerpF( back[0], back[1], fx ), LerpF( back[2], back[3], fx ), fy ); + + value[i] = LerpF( fvalue, bvalue, fz ); + } + + return LerpF( value[0], value[1], ft ); +} diff --git a/code/renderer_vulkan/tr_scene.c b/code/renderer_vulkan/tr_scene.c new file mode 100644 index 0000000000..f9859b4b1f --- /dev/null +++ b/code/renderer_vulkan/tr_scene.c @@ -0,0 +1,441 @@ +/* +=========================================================================== +Copyright (C) 1999-2005 Id Software, Inc. + +This file is part of Quake III Arena source code. + +Quake III Arena source code is free software; you can redistribute it +and/or modify it under the terms of the GNU General Public License as +published by the Free Software Foundation; either version 2 of the License, +or (at your option) any later version. + +Quake III Arena source code is distributed in the hope that it will be +useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with Foobar; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +=========================================================================== +*/ + +#include "tr_globals.h" +#include "tr_cvar.h" +#include "matrix_multiplication.h" +#include "ref_import.h" +#include "tr_light.h" +// these are sort of arbitrary limits. +// the limits apply to the sum of all scenes in a frame -- +// the main view, all the 3D icons, etc +#define MAX_POLYS 600 +#define MAX_POLYVERTS 3000 + +static int max_polys; +static int max_polyverts; + +static int r_firstSceneDrawSurf; + +static int r_numdlights; +static int r_firstSceneDlight; + +static int r_numentities; +static int r_firstSceneEntity; + +static int r_numpolys; +static int r_firstScenePoly; + +static int r_numpolyverts; + +static int r_frameCount; // incremented every frame + + + +// All of the information needed by the back end must be contained in a backEndData_t. +// This entire structure is duplicated so the front and back end can run in parallel +// on an SMP machine + +typedef struct +{ + drawSurf_t drawSurfs[MAX_DRAWSURFS]; + dlight_t dlights[MAX_DLIGHTS]; + trRefEntity_t entities[MAX_REFENTITIES]; + srfPoly_t *polys;//[MAX_POLYS]; + polyVert_t *polyVerts;//[MAX_POLYVERTS]; +// renderCommandList_t commands; +} backEndData_t; + + +static backEndData_t* backEndData; + + +void R_InitNextFrame(void) +{ + r_firstSceneDrawSurf = 0; + + r_numdlights = 0; + r_firstSceneDlight = 0; + + r_numentities = 0; + r_firstSceneEntity = 0; + + r_numpolys = 0; + r_firstScenePoly = 0; + + r_numpolyverts = 0; + + r_frameCount++; +} + + +void R_InitScene(void) +{ + max_polys = r_maxpolys->integer; + if (max_polys < MAX_POLYS) + max_polys = MAX_POLYS; + + max_polyverts = r_maxpolyverts->integer; + if (max_polyverts < MAX_POLYVERTS) + max_polyverts = MAX_POLYVERTS; + + unsigned int len = sizeof( backEndData_t ) + sizeof(srfPoly_t) * max_polys + sizeof(polyVert_t) * max_polyverts; + + char* ptr = ri.Hunk_Alloc( len, h_low); + memset(ptr, 0, len); + + backEndData = (backEndData_t *) ptr; + backEndData->polys = (srfPoly_t *) (ptr + sizeof( backEndData_t )); + backEndData->polyVerts = (polyVert_t *) (ptr + sizeof( backEndData_t ) + sizeof(srfPoly_t) * max_polys); + + R_InitNextFrame(); +} + + + +void RE_ClearScene( void ) { + r_firstSceneDlight = r_numdlights; + r_firstSceneEntity = r_numentities; + r_firstScenePoly = r_numpolys; +} + +/* +=========================================================================== + +DISCRETE POLYS + +=========================================================================== +*/ + +/* +===================== +R_AddPolygonSurfaces + +Adds all the scene's polys into this view's drawsurf list +===================== +*/ +void R_AddPolygonSurfaces( void ) +{ + int i; + shader_t *sh; + srfPoly_t *poly; + + tr.currentEntityNum = REFENTITYNUM_WORLD; + tr.shiftedEntityNum = tr.currentEntityNum << QSORT_ENTITYNUM_SHIFT; + + for ( i = 0, poly = tr.refdef.polys; i < tr.refdef.numPolys ; i++, poly++ ) { + sh = R_GetShaderByHandle( poly->hShader ); + R_AddDrawSurf( (surfaceType_t*) ( void * )poly, sh, poly->fogIndex, qfalse ); + } +} + +/* +===================== +RE_AddPolyToScene + +===================== +*/ +void RE_AddPolyToScene( qhandle_t hShader, int numVerts, const polyVert_t *verts, int numPolys ) { + srfPoly_t *poly; + int i, j; + int fogIndex; + fog_t *fog; + vec3_t bounds[2]; + + if ( !tr.registered ) { + return; + 
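+		/* Usage sketch (illustrative, not part of this file): a caller
+		   fills polyVert_t data and submits it through this refexport
+		   (game modules reach it via trap_R_AddPolyToScene). For an
+		   opaque textured quad, assuming `corners' is the caller's
+		   vec3_t[4] and `shaderHandle' a registered shader:
+
+		       polyVert_t verts[4];
+		       int k;
+		       for ( k = 0; k < 4; k++ ) {
+		           VectorCopy( corners[k], verts[k].xyz );
+		           verts[k].st[0] = ( k == 1 || k == 2 );  // 0/1 texcoords
+		           verts[k].st[1] = ( k >= 2 );
+		           verts[k].modulate[0] = verts[k].modulate[1] =
+		           verts[k].modulate[2] = verts[k].modulate[3] = 255;
+		       }
+		       RE_AddPolyToScene( shaderHandle, 4, verts, 1 );
+
+		   The numPolys loop below lets one call submit several polys
+		   that share a vertex count. */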
} + + if ( !hShader ) { + ri.Printf( PRINT_WARNING, "WARNING: RE_AddPolyToScene: NULL poly shader\n"); + return; + } + + for ( j = 0; j < numPolys; j++ ) { + if ( r_numpolyverts + numVerts > max_polyverts || r_numpolys >= max_polys ) { + /* + NOTE TTimo this was initially a PRINT_WARNING + but it happens a lot with high fighting scenes and particles + since we don't plan on changing the const and making for room for those effects + simply cut this message to developer only + */ + ri.Printf( PRINT_DEVELOPER, "WARNING: RE_AddPolyToScene: r_max_polys or r_max_polyverts reached\n"); + return; + } + + poly = &backEndData->polys[r_numpolys]; + poly->surfaceType = SF_POLY; + poly->hShader = hShader; + poly->numVerts = numVerts; + poly->verts = &backEndData->polyVerts[r_numpolyverts]; + + memcpy( poly->verts, &verts[numVerts*j], numVerts * sizeof( *verts ) ); + + // done. + r_numpolys++; + r_numpolyverts += numVerts; + + // if no world is loaded + if ( tr.world == NULL ) { + fogIndex = 0; + } + // see if it is in a fog volume + else if ( tr.world->numfogs == 1 ) { + fogIndex = 0; + } else { + // find which fog volume the poly is in + VectorCopy( poly->verts[0].xyz, bounds[0] ); + VectorCopy( poly->verts[0].xyz, bounds[1] ); + for ( i = 1 ; i < poly->numVerts ; i++ ) { + AddPointToBounds( poly->verts[i].xyz, bounds[0], bounds[1] ); + } + for ( fogIndex = 1 ; fogIndex < tr.world->numfogs ; fogIndex++ ) { + fog = &tr.world->fogs[fogIndex]; + if ( bounds[1][0] >= fog->bounds[0][0] + && bounds[1][1] >= fog->bounds[0][1] + && bounds[1][2] >= fog->bounds[0][2] + && bounds[0][0] <= fog->bounds[1][0] + && bounds[0][1] <= fog->bounds[1][1] + && bounds[0][2] <= fog->bounds[1][2] ) { + break; + } + } + if ( fogIndex == tr.world->numfogs ) { + fogIndex = 0; + } + } + poly->fogIndex = fogIndex; + } +} + + +//================================================================================= + + +/* +===================== +RE_AddRefEntityToScene + +===================== +*/ +void RE_AddRefEntityToScene( const refEntity_t *ent ) { + if ( !tr.registered ) { + return; + } + // https://zerowing.idsoftware.com/bugzilla/show_bug.cgi?id=402 + if ( r_numentities >= REFENTITYNUM_WORLD ) { + return; + } + if ( ent->reType < 0 || ent->reType >= RT_MAX_REF_ENTITY_TYPE ) { + ri.Error( ERR_DROP, "RE_AddRefEntityToScene: bad reType %i", ent->reType ); + } + + backEndData->entities[r_numentities].e = *ent; + backEndData->entities[r_numentities].lightingCalculated = qfalse; + + r_numentities++; +} + + +/* +===================== +RE_AddDynamicLightToScene + +===================== +*/ +void RE_AddDynamicLightToScene( const vec3_t org, float intensity, float r, float g, float b, int additive ) { + dlight_t *dl; + + if ( !tr.registered ) { + return; + } + if ( r_numdlights >= MAX_DLIGHTS ) { + return; + } + if ( intensity <= 0 ) { + return; + } + dl = &backEndData->dlights[r_numdlights++]; + VectorCopy (org, dl->origin); + dl->radius = intensity; + dl->color[0] = r; + dl->color[1] = g; + dl->color[2] = b; + dl->additive = additive; +} + +/* +===================== +RE_AddLightToScene + +===================== +*/ +void RE_AddLightToScene( const vec3_t org, float intensity, float r, float g, float b ) { + RE_AddDynamicLightToScene( org, intensity, r, g, b, qfalse ); +} + +/* +===================== +RE_AddAdditiveLightToScene + +===================== +*/ +void RE_AddAdditiveLightToScene( const vec3_t org, float intensity, float r, float g, float b ) { + RE_AddDynamicLightToScene( org, intensity, r, g, b, qtrue ); +} + + +/* 
+@@@@@@@@@@@@@@@@@@@@@ +RE_RenderScene + +Draw a 3D view into a part of the window, then return +to 2D drawing. + +Rendering a scene may require multiple views to be rendered +to handle mirrors, +@@@@@@@@@@@@@@@@@@@@@ +*/ +void RE_RenderScene( const refdef_t *fd ) +{ + if ( !tr.registered ) { + return; + } + + if ( r_norefresh->integer ) { + return; + } + + int startTime = ri.Milliseconds(); + + tr.refdef.AreamaskModified = qfalse; + + if ( ! (fd->rdflags & RDF_NOWORLDMODEL) ) + { + int i; + // check if the areamask data has changed, which will force + // a reset of the visible leafs even if the view hasn't moved + // compare the area bits + for (i = 0 ; i < MAX_MAP_AREA_BYTES; i++) + { + + if( tr.refdef.rd.areamask[i] ^ fd->areamask[i] ) + { + tr.refdef.AreamaskModified = qtrue; + //ri.Printf(PRINT_ALL, "%d:%d,%d\n", i, tr.refdef.rd.areamask[i], fd->areamask[i]); + break; + } + } + } + + tr.refdef.rd = *fd; + + // a single frame may have multiple scenes draw inside it -- + // a 3D game view, 3D status bar renderings, 3D menus, etc. + // They need to be distinguished by the light flare code, because + // the visibility state for a given surface may be different in + // each scene / view. + + // derived info + + tr.refdef.floatTime = tr.refdef.rd.time * 0.001f; + + tr.refdef.numDrawSurfs = r_firstSceneDrawSurf; + tr.refdef.drawSurfs = backEndData->drawSurfs; + + tr.refdef.num_entities = r_numentities - r_firstSceneEntity; + tr.refdef.entities = &backEndData->entities[r_firstSceneEntity]; + + tr.refdef.num_dlights = r_numdlights - r_firstSceneDlight; + tr.refdef.dlights = &backEndData->dlights[r_firstSceneDlight]; + + tr.refdef.numPolys = r_numpolys - r_firstScenePoly; + tr.refdef.polys = &backEndData->polys[r_firstScenePoly]; + + // turn off dynamic lighting globally by clearing all the + // dlights if it needs to be disabled or if vertex lighting is enabled + if ( r_dynamiclight->integer == 0 || r_vertexLight->integer == 1 ) { + tr.refdef.num_dlights = 0; + } + + // ri.Printf(PRINT_ALL, "(%d, %d, %d, %d)\n", tr.refdef.x, tr.refdef.y, tr.refdef.width, tr.refdef.height); + // setup view parms for the initial view + // + // set up viewport + // The refdef takes 0-at-the-top y coordinates + // 0 +-------> x + // | + // | + // | + // y + viewParms_t parms; + memset( &parms, 0, sizeof( parms ) ); + + + parms.viewportX = fd->x; + parms.viewportY = fd->y; + + parms.viewportWidth = fd->width; + parms.viewportHeight = fd->height; + + parms.fovX = fd->fov_x; + parms.fovY = fd->fov_y; + + VectorCopy( fd->vieworg, parms.or.origin ); + //VectorCopy( fd->viewaxis[0], parms.or.axis[0] ); + //VectorCopy( fd->viewaxis[1], parms.or.axis[1] ); + //VectorCopy( fd->viewaxis[2], parms.or.axis[2] ); + VectorCopy( fd->vieworg, parms.pvsOrigin ); + + Mat3x3Copy(parms.or.axis, fd->viewaxis); + parms.isPortal = qfalse; + + if ( (parms.viewportWidth > 0) && (parms.viewportHeight > 0) ) + { + R_RenderView( &parms ); + } + + // the next scene rendered in this frame will tack on after this one + r_firstSceneDrawSurf = tr.refdef.numDrawSurfs; + r_firstSceneEntity = r_numentities; + r_firstSceneDlight = r_numdlights; + r_firstScenePoly = r_numpolys; + + tr.frontEndMsec += ri.Milliseconds() - startTime; +} + +/* +typedef struct { +/ orientationr_t or; + orientationr_t world; +// vec3_t pvsOrigin; // may be different than or.origin for portals +// qboolean isPortal; // true if this view is through a portal + qboolean isMirror; // the portal is a mirror, invert the face culling + cplane_t portalPlane; // clip anything behind 
this if mirroring +// int viewportX, viewportY, viewportWidth, viewportHeight; +// float fovX, fovY; + float projectionMatrix[16] QALIGN(16); + cplane_t frustum[4]; + vec3_t visBounds[2]; + float zFar; +} viewParms_t; +*/ diff --git a/code/renderer_vulkan/tr_shade.c b/code/renderer_vulkan/tr_shade.c new file mode 100644 index 0000000000..b983b7ea23 --- /dev/null +++ b/code/renderer_vulkan/tr_shade.c @@ -0,0 +1,138 @@ +/* +=========================================================================== +Copyright (C) 1999-2005 Id Software, Inc. + +This file is part of Quake III Arena source code. + +Quake III Arena source code is free software; you can redistribute it +and/or modify it under the terms of the GNU General Public License as +published by the Free Software Foundation; either version 2 of the License, +or (at your option) any later version. + +Quake III Arena source code is distributed in the hope that it will be +useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with Foobar; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +=========================================================================== +*/ +// tr_shade.c + +#include "tr_globals.h" +#include "vk_shade_geometry.h" + +#include "ref_import.h" +#include "tr_backend.h" +#include "tr_cvar.h" +#include "RB_DrawTris.h" +#include "RB_DrawNormals.h" + +/* + + THIS ENTIRE FILE IS BACK END + + This file deals with applying shaders to surface data in the tess struct. +*/ + + + +/* +============================================================= + +SURFACE SHADERS + +============================================================= +*/ + +shaderCommands_t tess; + + +/* +============== +RB_BeginSurface + +We must set some things up before beginning any tesselation, +because a surface may be forced to perform a RB_End due to overflow. +============== +*/ +void RB_BeginSurface( shader_t *shader, int fogNum ) +{ + + shader_t *state = (shader->remappedShader) ? 
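+	/* Remapping note: shaders can be retargeted at runtime through the
+	   refexport RemapShader facility, which is why the back end resolves
+	   remappedShader here rather than trusting the handle recorded when
+	   the surface was queued. A hypothetical call from game code:
+
+	       trap_R_RemapShader( "textures/ctf/blue_banner",
+	                           "textures/ctf/blue_banner_on", "0" );
+
+	   after which every surface using the first shader draws with the
+	   second until it is remapped back. Shader names are illustrative. */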
shader->remappedShader : shader; + + tess.numIndexes = 0; + tess.numVertexes = 0; + tess.shader = state; + tess.fogNum = fogNum; + tess.dlightBits = 0; // will be OR'd in by surface functions + tess.xstages = state->stages; + tess.numPasses = state->numUnfoggedPasses; + + tess.shaderTime = backEnd.refdef.floatTime - tess.shader->timeOffset; + + if (tess.shader->clampTime && tess.shaderTime >= tess.shader->clampTime) { + tess.shaderTime = tess.shader->clampTime; + } +} + + + +void RB_EndSurface( void ) +{ + if (tess.numIndexes == 0) { + return; + } + + if (tess.indexes[SHADER_MAX_INDEXES-1] != 0) { + ri.Error (ERR_DROP, "RB_EndSurface() - SHADER_MAX_INDEXES hit"); + } + if (tess.xyz[SHADER_MAX_VERTEXES-1][0] != 0) { + ri.Error (ERR_DROP, "RB_EndSurface() - SHADER_MAX_VERTEXES hit"); + } + + if ( tess.shader == tr.shadowShader ) { + RB_ShadowTessEnd(); + return; + } + + // for debugging of sort order issues, stop rendering after a given sort value + if ( r_debugSort->integer && r_debugSort->integer < tess.shader->sort ) { + return; + } + + // + // update performance counters + // + backEnd.pc.c_shaders++; + backEnd.pc.c_vertexes += tess.numVertexes; + backEnd.pc.c_indexes += tess.numIndexes; + backEnd.pc.c_totalIndexes += tess.numIndexes * tess.numPasses; + + // + // call off to shader specific tess end function + // + if (tess.shader->isSky) + RB_StageIteratorSky(); + else + RB_StageIteratorGeneric(); + + // + // draw debugging stuff + // + if ( r_showtris->integer ) + { + RB_DrawTris (&tess); + } + if ( r_shownormals->integer ) + { + RB_DrawNormals (&tess, tess.numVertexes); + } + // clear shader so we can tell we don't have any unclosed surfaces + tess.numIndexes = 0; + tess.numVertexes = 0; + +} + diff --git a/code/renderer_vulkan/tr_shade_calc.c b/code/renderer_vulkan/tr_shade_calc.c new file mode 100644 index 0000000000..9563eafd0e --- /dev/null +++ b/code/renderer_vulkan/tr_shade_calc.c @@ -0,0 +1,1232 @@ +/* +=========================================================================== +Copyright (C) 1999-2005 Id Software, Inc. + +This file is part of Quake III Arena source code. + +Quake III Arena source code is free software; you can redistribute it +and/or modify it under the terms of the GNU General Public License as +published by the Free Software Foundation; either version 2 of the License, +or (at your option) any later version. + +Quake III Arena source code is distributed in the hope that it will be +useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with Foobar; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +=========================================================================== +*/ +// tr_shade_calc.c + +#include "tr_local.h" +#include "tr_globals.h" +#include "tr_fog.h" +#include "tr_backend.h" +#include "ref_import.h" +#include "matrix_multiplication.h" + +#define WAVEVALUE( table, base, amplitude, phase, freq ) ((base) + table[ (int)( ( ( (phase) + tess.shaderTime * (freq) ) * FUNCTABLE_SIZE ) ) & FUNCTABLE_MASK ] * (amplitude)) + +static float *TableForFunc( genFunc_t func ) +{ + switch ( func ) + { + case GF_SIN: + return tr.sinTable; + case GF_TRIANGLE: + return tr.triangleTable; + case GF_SQUARE: + return tr.squareTable; + case GF_SAWTOOTH: + return tr.sawToothTable; + case GF_INVERSE_SAWTOOTH: + return tr.inverseSawToothTable; + case GF_NONE: + default: + break; + } + + ri.Error( ERR_DROP, "TableForFunc called with invalid function '%d' in shader '%s'\n", func, tess.shader->name ); + return NULL; +} + +/* +** EvalWaveForm +** +** Evaluates a given waveForm_t, referencing backEnd.refdef.time directly +*/ +static float EvalWaveForm( const waveForm_t *wf ) +{ + float *table; + + table = TableForFunc( wf->func ); + + return WAVEVALUE( table, wf->base, wf->amplitude, wf->phase, wf->frequency ); +} + +static float EvalWaveFormClamped( const waveForm_t *wf ) +{ + float glow = EvalWaveForm( wf ); + + if ( glow < 0 ) + { + return 0; + } + + if ( glow > 1 ) + { + return 1; + } + + return glow; +} + +/* +** RB_CalcStretchTexCoords +*/ +void RB_CalcStretchTexCoords( const waveForm_t *wf, float *st ) +{ + float p; + texModInfo_t tmi; + + p = 1.0f / EvalWaveForm( wf ); + + tmi.matrix[0][0] = p; + tmi.matrix[1][0] = 0; + tmi.translate[0] = 0.5f - 0.5f * p; + + tmi.matrix[0][1] = 0; + tmi.matrix[1][1] = p; + tmi.translate[1] = 0.5f - 0.5f * p; + + RB_CalcTransformTexCoords( &tmi, st ); +} + +/* +==================================================================== + +DEFORMATIONS + +==================================================================== +*/ + +/* +======================== +RB_CalcDeformVertexes + +======================== +*/ +void RB_CalcDeformVertexes( deformStage_t *ds ) +{ + int i; + vec3_t offset; + float scale; + float *xyz = ( float * ) tess.xyz; + float *normal = ( float * ) tess.normal; + float *table; + + if ( ds->deformationWave.frequency == 0 ) + { + scale = EvalWaveForm( &ds->deformationWave ); + + for ( i = 0; i < tess.numVertexes; i++, xyz += 4, normal += 4 ) + { + VectorScale( normal, scale, offset ); + + xyz[0] += offset[0]; + xyz[1] += offset[1]; + xyz[2] += offset[2]; + } + } + else + { + table = TableForFunc( ds->deformationWave.func ); + + for ( i = 0; i < tess.numVertexes; i++, xyz += 4, normal += 4 ) + { + float off = ( xyz[0] + xyz[1] + xyz[2] ) * ds->deformationSpread; + + scale = WAVEVALUE( table, ds->deformationWave.base, + ds->deformationWave.amplitude, + ds->deformationWave.phase + off, + ds->deformationWave.frequency ); + + VectorScale( normal, scale, offset ); + + xyz[0] += offset[0]; + xyz[1] += offset[1]; + xyz[2] += offset[2]; + } + } +} + +/* +========================= +RB_CalcDeformNormals + +Wiggle the normals for wavy environment mapping +========================= +*/ +void RB_CalcDeformNormals( deformStage_t *ds ) { + int i; + float scale; + float *xyz = ( float * ) tess.xyz; + float *normal = ( float * ) tess.normal; + + for ( i = 0; i < 
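+	/* Worked example of the WAVEVALUE macro defined above (assuming the
+	   stock table size of FUNCTABLE_SIZE == 1024): with the sin table,
+	   base 0.5, amplitude 0.5, phase 0 and frequency 1, at shaderTime
+	   0.25 the index is (0 + 0.25 * 1) * 1024 = 256, a quarter of the way
+	   through the table, where sin peaks at 1.0, so the wave value is
+	   0.5 + 1.0 * 0.5 = 1.0. The & FUNCTABLE_MASK wraps the index, making
+	   the wave repeat every 1/frequency seconds. */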
tess.numVertexes; i++, xyz += 4, normal += 4 ) { + scale = 0.98f; + scale = R_NoiseGet4f( xyz[0] * scale, xyz[1] * scale, xyz[2] * scale, + tess.shaderTime * ds->deformationWave.frequency ); + normal[ 0 ] += ds->deformationWave.amplitude * scale; + + scale = 0.98f; + scale = R_NoiseGet4f( 100 + xyz[0] * scale, xyz[1] * scale, xyz[2] * scale, + tess.shaderTime * ds->deformationWave.frequency ); + normal[ 1 ] += ds->deformationWave.amplitude * scale; + + scale = 0.98f; + scale = R_NoiseGet4f( 200 + xyz[0] * scale, xyz[1] * scale, xyz[2] * scale, + tess.shaderTime * ds->deformationWave.frequency ); + normal[ 2 ] += ds->deformationWave.amplitude * scale; + + VectorNorm( normal ); + } +} + +/* +======================== +RB_CalcBulgeVertexes + +======================== +*/ +void RB_CalcBulgeVertexes( deformStage_t *ds ) { + int i; + const float *st = ( const float * ) tess.texCoords[0]; + float *xyz = ( float * ) tess.xyz; + float *normal = ( float * ) tess.normal; + float now; + + now = backEnd.refdef.rd.time * ds->bulgeSpeed * 0.001f; + + for ( i = 0; i < tess.numVertexes; i++, xyz += 4, st += 4, normal += 4 ) { + int off; + float scale; + + off = (float)( FUNCTABLE_SIZE / (M_PI*2) ) * ( st[0] * ds->bulgeWidth + now ); + + scale = tr.sinTable[ off & FUNCTABLE_MASK ] * ds->bulgeHeight; + + xyz[0] += normal[0] * scale; + xyz[1] += normal[1] * scale; + xyz[2] += normal[2] * scale; + } +} + + +/* +====================== +RB_CalcMoveVertexes + +A deformation that can move an entire surface along a wave path +====================== +*/ +void RB_CalcMoveVertexes( deformStage_t *ds ) { + int i; + float *xyz; + float *table; + float scale; + vec3_t offset; + + table = TableForFunc( ds->deformationWave.func ); + + scale = WAVEVALUE( table, ds->deformationWave.base, + ds->deformationWave.amplitude, + ds->deformationWave.phase, + ds->deformationWave.frequency ); + + VectorScale( ds->moveVector, scale, offset ); + + xyz = ( float * ) tess.xyz; + for ( i = 0; i < tess.numVertexes; i++, xyz += 4 ) { + VectorAdd( xyz, offset, xyz ); + } +} + + +/* +============= +DeformText + +Change a polygon into a bunch of text polygons +============= +*/ +void DeformText( const char *text ) { + int i; + vec3_t origin, width, height; + int len; + int ch; + byte color[4]; + float bottom, top; + vec3_t mid; + + height[0] = 0; + height[1] = 0; + height[2] = -1; + CrossProduct( tess.normal[0], height, width ); + + // find the midpoint of the box + VectorClear( mid ); + bottom = 999999; + top = -999999; + for ( i = 0 ; i < 4 ; i++ ) { + VectorAdd( tess.xyz[i], mid, mid ); + if ( tess.xyz[i][2] < bottom ) { + bottom = tess.xyz[i][2]; + } + if ( tess.xyz[i][2] > top ) { + top = tess.xyz[i][2]; + } + } + VectorScale( mid, 0.25f, origin ); + + // determine the individual character size + height[0] = 0; + height[1] = 0; + height[2] = ( top - bottom ) * 0.5f; + + VectorScale( width, height[2] * -0.75f, width ); + + // determine the starting position + len = (int)strlen( text ); + VectorMA( origin, (len-1), width, origin ); + + // clear the shader indexes + tess.numIndexes = 0; + tess.numVertexes = 0; + + color[0] = color[1] = color[2] = color[3] = 255; + + // draw each character + for ( i = 0 ; i < len ; i++ ) { + ch = text[i]; + ch &= 255; + + if ( ch != ' ' ) { + int row, col; + float frow, fcol, size; + + row = ch>>4; + col = ch&15; + + frow = row*0.0625f; + fcol = col*0.0625f; + size = 0.0625f; + + RB_AddQuadStampExt( origin, width, height, color, fcol, frow, fcol + size, frow + size ); + } + VectorMA( origin, -2, width, origin 
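+	/* Glyph addressing: the charset texture is a 16x16 grid, so
+	   row = ch >> 4 and col = ch & 15 pick a cell, and 0.0625 (= 1/16)
+	   converts cell indices to texture coordinates. For example 'A'
+	   (ASCII 65) gives row 4, col 1, i.e. frow = 0.25, fcol = 0.0625.
+	   The VectorMA being completed here then steps the draw origin
+	   across to the next character cell. */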
); + } +} + +/* +================== +GlobalVectorToLocal +================== +*/ +static void GlobalVectorToLocal( const vec3_t in, vec3_t out ) { + out[0] = DotProduct( in, backEnd.or.axis[0] ); + out[1] = DotProduct( in, backEnd.or.axis[1] ); + out[2] = DotProduct( in, backEnd.or.axis[2] ); +} + +/* +===================== +AutospriteDeform + +Assuming all the triangles for this shader are independant +quads, rebuild them as forward facing sprites +===================== +*/ +static void AutospriteDeform( void ) { + int i; + int oldVerts; + float *xyz; + vec3_t mid, delta; + float radius; + vec3_t left, up; + vec3_t leftDir, upDir; + + if ( tess.numVertexes & 3 ) { + ri.Printf( PRINT_WARNING, "Autosprite shader %s had odd vertex count", tess.shader->name ); + } + if ( tess.numIndexes != ( tess.numVertexes >> 2 ) * 6 ) { + ri.Printf( PRINT_WARNING, "Autosprite shader %s had odd index count", tess.shader->name ); + } + + oldVerts = tess.numVertexes; + tess.numVertexes = 0; + tess.numIndexes = 0; + + if ( backEnd.currentEntity != &tr.worldEntity ) { + GlobalVectorToLocal( backEnd.viewParms.or.axis[1], leftDir ); + GlobalVectorToLocal( backEnd.viewParms.or.axis[2], upDir ); + } else { + VectorCopy( backEnd.viewParms.or.axis[1], leftDir ); + VectorCopy( backEnd.viewParms.or.axis[2], upDir ); + } + + for ( i = 0 ; i < oldVerts ; i+=4 ) { + // find the midpoint + xyz = tess.xyz[i]; + + mid[0] = 0.25f * (xyz[0] + xyz[4] + xyz[8] + xyz[12]); + mid[1] = 0.25f * (xyz[1] + xyz[5] + xyz[9] + xyz[13]); + mid[2] = 0.25f * (xyz[2] + xyz[6] + xyz[10] + xyz[14]); + + VectorSubtract( xyz, mid, delta ); + radius = VectorLength( delta ) * 0.707f; // / sqrt(2) + + VectorScale( leftDir, radius, left ); + VectorScale( upDir, radius, up ); + + if ( backEnd.viewParms.isMirror ) { + VectorSubtract( vec3_origin, left, left ); + } + + // compensate for scale in the axes if necessary + if ( backEnd.currentEntity->e.nonNormalizedAxes ) { + float axisLength; + axisLength = VectorLength( backEnd.currentEntity->e.axis[0] ); + if ( !axisLength ) { + axisLength = 0; + } else { + axisLength = 1.0f / axisLength; + } + VectorScale(left, axisLength, left); + VectorScale(up, axisLength, up); + } + + RB_AddQuadStamp( mid, left, up, tess.vertexColors[i] ); + } +} + + +/* +===================== +Autosprite2Deform + +Autosprite2 will pivot a rectangular quad along the center of its long axis +===================== +*/ +int edgeVerts[6][2] = { + { 0, 1 }, + { 0, 2 }, + { 0, 3 }, + { 1, 2 }, + { 1, 3 }, + { 2, 3 } +}; + +static void Autosprite2Deform( void ) { + int i, j, k; + int indexes; + float *xyz; + vec3_t forward; + + if ( tess.numVertexes & 3 ) { + ri.Printf( PRINT_WARNING, "Autosprite2 shader %s had odd vertex count", tess.shader->name ); + } + if ( tess.numIndexes != ( tess.numVertexes >> 2 ) * 6 ) { + ri.Printf( PRINT_WARNING, "Autosprite2 shader %s had odd index count", tess.shader->name ); + } + + if ( backEnd.currentEntity != &tr.worldEntity ) { + GlobalVectorToLocal( backEnd.viewParms.or.axis[0], forward ); + } else { + VectorCopy( backEnd.viewParms.or.axis[0], forward ); + } + + // this is a lot of work for two triangles... 
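+	/*
+	   How the loop below recovers the quad's orientation, as a sketch: of
+	   the six vertex-pair "edges" of a quad (4 sides + 2 diagonals), the
+	   two shortest are the short ends of the rectangle. The midpoints of
+	   those ends define the long (major) axis, and crossing the major
+	   axis with the view direction gives the minor axis used to
+	   re-project the end verts so the quad stays edge-on to the viewer:
+
+	       major = mid(shortEdge1) - mid(shortEdge0);
+	       minor = normalize( cross( major, forward ) );
+	       endVert = mid +/- 0.5 * sqrt(shortEdgeLengthSq) * minor;
+	*/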
+ // we could precalculate a lot of it is an issue, but it would mess up + // the shader abstraction + for ( i = 0, indexes = 0 ; i < tess.numVertexes ; i+=4, indexes+=6 ) { + float lengths[2]; + int nums[2]; + vec3_t mid[2]; + vec3_t major, minor; + float *v1, *v2; + + // find the midpoint + xyz = tess.xyz[i]; + + // identify the two shortest edges + nums[0] = nums[1] = 0; + lengths[0] = lengths[1] = 999999; + + for ( j = 0 ; j < 6 ; j++ ) { + float l; + vec3_t temp; + + v1 = xyz + 4 * edgeVerts[j][0]; + v2 = xyz + 4 * edgeVerts[j][1]; + + VectorSubtract( v1, v2, temp ); + + l = DotProduct( temp, temp ); + if ( l < lengths[0] ) { + nums[1] = nums[0]; + lengths[1] = lengths[0]; + nums[0] = j; + lengths[0] = l; + } else if ( l < lengths[1] ) { + nums[1] = j; + lengths[1] = l; + } + } + + for ( j = 0 ; j < 2 ; j++ ) { + v1 = xyz + 4 * edgeVerts[nums[j]][0]; + v2 = xyz + 4 * edgeVerts[nums[j]][1]; + + mid[j][0] = 0.5f * (v1[0] + v2[0]); + mid[j][1] = 0.5f * (v1[1] + v2[1]); + mid[j][2] = 0.5f * (v1[2] + v2[2]); + } + + // find the vector of the major axis + VectorSubtract( mid[1], mid[0], major ); + + // cross this with the view direction to get minor axis + CrossProduct( major, forward, minor ); + VectorNormalize( minor ); + + // re-project the points + for ( j = 0 ; j < 2 ; j++ ) { + float l; + + v1 = xyz + 4 * edgeVerts[nums[j]][0]; + v2 = xyz + 4 * edgeVerts[nums[j]][1]; + + l = 0.5 * sqrt( lengths[j] ); + + // we need to see which direction this edge + // is used to determine direction of projection + for ( k = 0 ; k < 5 ; k++ ) { + if ( (int)tess.indexes[ indexes + k ] == i + edgeVerts[nums[j]][0] + && (int)tess.indexes[ indexes + k + 1 ] == i + edgeVerts[nums[j]][1] ) { + break; + } + } + + if ( k == 5 ) { + VectorMA( mid[j], l, minor, v1 ); + VectorMA( mid[j], -l, minor, v2 ); + } else { + VectorMA( mid[j], -l, minor, v1 ); + VectorMA( mid[j], l, minor, v2 ); + } + } + } +} + + +/* +===================== +RB_DeformTessGeometry + +===================== +*/ +void RB_DeformTessGeometry( void ) { + int i; + deformStage_t *ds; + + for ( i = 0 ; i < tess.shader->numDeforms ; i++ ) { + ds = &tess.shader->deforms[ i ]; + + switch ( ds->deformation ) { + case DEFORM_NONE: + break; + case DEFORM_NORMALS: + RB_CalcDeformNormals( ds ); + break; + case DEFORM_WAVE: + RB_CalcDeformVertexes( ds ); + break; + case DEFORM_BULGE: + RB_CalcBulgeVertexes( ds ); + break; + case DEFORM_MOVE: + RB_CalcMoveVertexes( ds ); + break; + case DEFORM_PROJECTION_SHADOW: + RB_ProjectionShadowDeform(); + break; + case DEFORM_AUTOSPRITE: + AutospriteDeform(); + break; + case DEFORM_AUTOSPRITE2: + Autosprite2Deform(); + break; + case DEFORM_TEXT0: + case DEFORM_TEXT1: + case DEFORM_TEXT2: + case DEFORM_TEXT3: + case DEFORM_TEXT4: + case DEFORM_TEXT5: + case DEFORM_TEXT6: + case DEFORM_TEXT7: + DeformText( backEnd.refdef.rd.text[ds->deformation - DEFORM_TEXT0] ); + break; + } + } +} + +/* +==================================================================== + +COLORS + +==================================================================== +*/ + + +void RB_CalcColorFromEntity( unsigned char (*dstColors)[4] ) +{ + if ( backEnd.currentEntity ) + { + uint32_t i; + uint32_t nVerts = tess.numVertexes; + + unsigned char srColor[4]; + + memcpy(srColor, backEnd.currentEntity->e.shaderRGBA, 4); + for ( i = 0; i < nVerts; i++) + { + // dstColors[i][0]=backEnd.currentEntity->e.shaderRGBA[0]; + // dstColors[i][1]=backEnd.currentEntity->e.shaderRGBA[1]; + // dstColors[i][2]=backEnd.currentEntity->e.shaderRGBA[2]; + // 
dstColors[i][3]=backEnd.currentEntity->e.shaderRGBA[3]; + memcpy(dstColors[i], srColor, 4); + } + } +} + + +/* +** RB_CalcColorFromOneMinusEntity +*/ +void RB_CalcColorFromOneMinusEntity( unsigned char (*dstColors)[4] ) +{ + if ( backEnd.currentEntity ) + { + unsigned char invModulate[4]; + + invModulate[0] = 255 - backEnd.currentEntity->e.shaderRGBA[0]; + invModulate[1] = 255 - backEnd.currentEntity->e.shaderRGBA[1]; + invModulate[2] = 255 - backEnd.currentEntity->e.shaderRGBA[2]; + invModulate[3] = 255 - backEnd.currentEntity->e.shaderRGBA[3]; + // this trashes alpha, but the AGEN block fixes it + + uint32_t nVerts = tess.numVertexes; + uint32_t i; + for ( i = 0; i < nVerts; i++ ) + { + memcpy(dstColors[i], invModulate, 4); + } + } +} + +/* +** RB_CalcAlphaFromEntity +*/ +void RB_CalcAlphaFromEntity( unsigned char *dstColors ) +{ + int i; + + if ( !backEnd.currentEntity ) + return; + + dstColors += 3; + + for ( i = 0; i < tess.numVertexes; i++, dstColors += 4 ) + { + *dstColors = backEnd.currentEntity->e.shaderRGBA[3]; + } +} + +/* +** RB_CalcAlphaFromOneMinusEntity +*/ +void RB_CalcAlphaFromOneMinusEntity( unsigned char *dstColors ) +{ + int i; + + if ( !backEnd.currentEntity ) + return; + + dstColors += 3; + + for ( i = 0; i < tess.numVertexes; i++, dstColors += 4 ) + { + *dstColors = 0xff - backEnd.currentEntity->e.shaderRGBA[3]; + } +} + +/* +** RB_CalcWaveColor +void RB_CalcWaveColor( const waveForm_t *wf, unsigned char *dstColors ) +{ + int i; + + float glow; + int *colors = ( int * ) dstColors; + uint8_t color[4]; + + + if ( wf->func == GF_NOISE ) { + glow = wf->base + R_NoiseGet4f( 0, 0, 0, ( tess.shaderTime + wf->phase ) * wf->frequency ) * wf->amplitude; + } else { + glow = EvalWaveForm( wf ) * tr.identityLight; + } + + if ( glow < 0 ) { + glow = 0; + } + else if ( glow > 1 ) { + glow = 1; + } + + int v = 255 * glow; + color[0] = color[1] = color[2] = v; + color[3] = 255; + v = *(int *)color; + + for ( i = 0; i < tess.numVertexes; i++, colors++ ) + { + *colors = v; + } +} +*/ + +void RB_CalcWaveColor( const waveForm_t* wf, unsigned char (*dstColors)[4] ) +{ + float glow; + + if ( wf->func == GF_NOISE ) + glow = wf->base + R_NoiseGet4f( 0, 0, 0, ( tess.shaderTime + wf->phase ) * wf->frequency ) * wf->amplitude; + else + glow = EvalWaveForm( wf ) * tr.identityLight; + + if( glow < 0 ) + glow = 0; + else if( glow > 1 ) + glow = 1; + + + uint8_t color = glow * 255; + + uint32_t i; + for(i = 0; i < tess.numVertexes; i++) + { + dstColors[i][0] = color; + dstColors[i][1] = color; + dstColors[i][2] = color; + dstColors[i][3] = 255; + } +} + + +/* +** RB_CalcWaveAlpha +*/ +void RB_CalcWaveAlpha( const waveForm_t *wf, unsigned char *dstColors ) +{ + int i; + int v; + float glow; + + glow = EvalWaveFormClamped( wf ); + + v = 255 * glow; + + for ( i = 0; i < tess.numVertexes; i++, dstColors += 4 ) + { + dstColors[3] = v; + } +} + +/* +** RB_CalcModulateColorsByFog +*/ +void RB_CalcModulateColorsByFog( unsigned char *colors ) { + int i; + float texCoords[SHADER_MAX_VERTEXES][2]; + + // calculate texcoords so we can derive density + // this is not wasted, because it would only have + // been previously called if the surface was opaque + RB_CalcFogTexCoords( texCoords[0] ); + + for ( i = 0; i < tess.numVertexes; i++, colors += 4 ) { + float f = 1.0 - R_FogFactor( texCoords[i][0], texCoords[i][1] ); + colors[0] *= f; + colors[1] *= f; + colors[2] *= f; + } +} + +/* +** RB_CalcModulateAlphasByFog +*/ +void RB_CalcModulateAlphasByFog( unsigned char *colors ) { + int i; + float 
texCoords[SHADER_MAX_VERTEXES][2]; + + // calculate texcoords so we can derive density + // this is not wasted, because it would only have + // been previously called if the surface was opaque + RB_CalcFogTexCoords( texCoords[0] ); + + for ( i = 0; i < tess.numVertexes; i++, colors += 4 ) { + float f = 1.0 - R_FogFactor( texCoords[i][0], texCoords[i][1] ); + colors[3] *= f; + } +} + +/* +** RB_CalcModulateRGBAsByFog +*/ +void RB_CalcModulateRGBAsByFog( unsigned char *colors ) { + int i; + float texCoords[SHADER_MAX_VERTEXES][2]; + + // calculate texcoords so we can derive density + // this is not wasted, because it would only have + // been previously called if the surface was opaque + RB_CalcFogTexCoords( texCoords[0] ); + + for ( i = 0; i < tess.numVertexes; i++, colors += 4 ) { + float f = 1.0 - R_FogFactor( texCoords[i][0], texCoords[i][1] ); + colors[0] *= f; + colors[1] *= f; + colors[2] *= f; + colors[3] *= f; + } +} + + +/* +==================================================================== + +TEX COORDS + +==================================================================== +*/ + +/* +======================== +RB_CalcFogTexCoords + +To do the clipped fog plane really correctly, we should use +projected textures, but I don't trust the drivers and it +doesn't fit our shader data. +======================== +*/ +void RB_CalcFogTexCoords( float *st ) { + int i; + float *v; + float s, t; + float eyeT; + qboolean eyeOutside; + fog_t *fog; + vec3_t local; + vec4_t fogDistanceVector, fogDepthVector = {0}; + + fog = tr.world->fogs + tess.fogNum; + + // all fogging distance is based on world Z units + VectorSubtract( backEnd.or.origin, backEnd.viewParms.or.origin, local ); + fogDistanceVector[0] = -backEnd.or.modelMatrix[2]; + fogDistanceVector[1] = -backEnd.or.modelMatrix[6]; + fogDistanceVector[2] = -backEnd.or.modelMatrix[10]; + fogDistanceVector[3] = DotProduct( local, backEnd.viewParms.or.axis[0] ); + + // scale the fog vectors based on the fog's thickness + fogDistanceVector[0] *= fog->tcScale; + fogDistanceVector[1] *= fog->tcScale; + fogDistanceVector[2] *= fog->tcScale; + fogDistanceVector[3] *= fog->tcScale; + + // rotate the gradient vector for this orientation + if ( fog->hasSurface ) { + fogDepthVector[0] = fog->surface[0] * backEnd.or.axis[0][0] + + fog->surface[1] * backEnd.or.axis[0][1] + fog->surface[2] * backEnd.or.axis[0][2]; + fogDepthVector[1] = fog->surface[0] * backEnd.or.axis[1][0] + + fog->surface[1] * backEnd.or.axis[1][1] + fog->surface[2] * backEnd.or.axis[1][2]; + fogDepthVector[2] = fog->surface[0] * backEnd.or.axis[2][0] + + fog->surface[1] * backEnd.or.axis[2][1] + fog->surface[2] * backEnd.or.axis[2][2]; + fogDepthVector[3] = -fog->surface[3] + DotProduct( backEnd.or.origin, fog->surface ); + + eyeT = DotProduct( backEnd.or.viewOrigin, fogDepthVector ) + fogDepthVector[3]; + } else { + eyeT = 1; // non-surface fog always has eye inside + } + + // see if the viewpoint is outside + // this is needed for clipping distance even for constant fog + + if ( eyeT < 0 ) { + eyeOutside = qtrue; + } else { + eyeOutside = qfalse; + } + + fogDistanceVector[3] += 1.0/512; + + // calculate density for each point + for (i = 0, v = tess.xyz[0] ; i < tess.numVertexes ; i++, v += 4) { + // calculate the length in fog + s = DotProduct( v, fogDistanceVector ) + fogDistanceVector[3]; + t = DotProduct( v, fogDepthVector ) + fogDepthVector[3]; + + // partially clipped fogs use the T axis + if ( eyeOutside ) { + if ( t < 1.0 ) { + t = 1.0/32; // point is outside, so no fogging + } 
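+		/* Here s measures distance into the fog along the view axis and
+		   t measures depth relative to the fog surface plane, both scaled
+		   by fog->tcScale so the fog texture spans the volume. The branch
+		   being completed below remaps t when the eye is outside the
+		   volume, using t / (t - eyeT) so fogging begins at the fog plane
+		   rather than at the viewer. */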
else { + t = 1.0/32 + 30.0/32 * t / ( t - eyeT ); // cut the distance at the fog plane + } + } else { + if ( t < 0 ) { + t = 1.0/32; // point is outside, so no fogging + } else { + t = 31.0/32; + } + } + + st[0] = s; + st[1] = t; + st += 2; + } +} + + + +/* +** RB_CalcEnvironmentTexCoords +*/ +void RB_CalcEnvironmentTexCoords( float *st ) +{ + int i; + float *v, *normal; + vec3_t viewer, reflected; + float d; + + v = tess.xyz[0]; + normal = tess.normal[0]; + + for (i = 0 ; i < tess.numVertexes ; i++, v += 4, normal += 4, st += 2 ) + { + VectorSubtract (backEnd.or.viewOrigin, v, viewer); + VectorNorm(viewer); + + d = DotProduct (normal, viewer); + + reflected[0] = normal[0]*2*d - viewer[0]; + reflected[1] = normal[1]*2*d - viewer[1]; + reflected[2] = normal[2]*2*d - viewer[2]; + + st[0] = 0.5 + reflected[1] * 0.5; + st[1] = 0.5 - reflected[2] * 0.5; + } +} + +/* +** RB_CalcTurbulentTexCoords +*/ +void RB_CalcTurbulentTexCoords( const waveForm_t *wf, float *st ) +{ + int i; + float now; + + now = ( wf->phase + tess.shaderTime * wf->frequency ); + + for ( i = 0; i < tess.numVertexes; i++, st += 2 ) + { + float s = st[0]; + float t = st[1]; + + st[0] = s + tr.sinTable[ ( ( int ) ( ( ( tess.xyz[i][0] + tess.xyz[i][2] )* 1.0/128 * 0.125 + now ) * FUNCTABLE_SIZE ) ) & ( FUNCTABLE_MASK ) ] * wf->amplitude; + st[1] = t + tr.sinTable[ ( ( int ) ( ( tess.xyz[i][1] * 1.0/128 * 0.125 + now ) * FUNCTABLE_SIZE ) ) & ( FUNCTABLE_MASK ) ] * wf->amplitude; + } +} + +/* +** RB_CalcScaleTexCoords +*/ +void RB_CalcScaleTexCoords( const float scale[2], float *st ) +{ + int i; + + for ( i = 0; i < tess.numVertexes; i++, st += 2 ) + { + st[0] *= scale[0]; + st[1] *= scale[1]; + } +} + +/* +** RB_CalcScrollTexCoords +*/ +void RB_CalcScrollTexCoords( const float scrollSpeed[2], float *st ) +{ + int i; + float timeScale = tess.shaderTime; + float adjustedScrollS, adjustedScrollT; + + adjustedScrollS = scrollSpeed[0] * timeScale; + adjustedScrollT = scrollSpeed[1] * timeScale; + + // clamp so coordinates don't continuously get larger, causing problems + // with hardware limits + adjustedScrollS = adjustedScrollS - floor( adjustedScrollS ); + adjustedScrollT = adjustedScrollT - floor( adjustedScrollT ); + + for ( i = 0; i < tess.numVertexes; i++, st += 2 ) + { + st[0] += adjustedScrollS; + st[1] += adjustedScrollT; + } +} + +/* +** RB_CalcTransformTexCoords +*/ +void RB_CalcTransformTexCoords( const texModInfo_t *tmi, float *st ) +{ + int i; + + for ( i = 0; i < tess.numVertexes; i++, st += 2 ) + { + float s = st[0]; + float t = st[1]; + + st[0] = s * tmi->matrix[0][0] + t * tmi->matrix[1][0] + tmi->translate[0]; + st[1] = s * tmi->matrix[0][1] + t * tmi->matrix[1][1] + tmi->translate[1]; + } +} + +/* +** RB_CalcRotateTexCoords +*/ +void RB_CalcRotateTexCoords( float degsPerSecond, float *st ) +{ + float timeScale = tess.shaderTime; + float degs; + int index; + float sinValue, cosValue; + texModInfo_t tmi; + + degs = -degsPerSecond * timeScale; + index = degs * ( FUNCTABLE_SIZE / 360.0f ); + + sinValue = tr.sinTable[ index & FUNCTABLE_MASK ]; + cosValue = tr.sinTable[ ( index + FUNCTABLE_SIZE / 4 ) & FUNCTABLE_MASK ]; + + tmi.matrix[0][0] = cosValue; + tmi.matrix[1][0] = -sinValue; + tmi.translate[0] = 0.5 - 0.5 * cosValue + 0.5 * sinValue; + + tmi.matrix[0][1] = sinValue; + tmi.matrix[1][1] = cosValue; + tmi.translate[1] = 0.5 - 0.5 * sinValue - 0.5 * cosValue; + + RB_CalcTransformTexCoords( &tmi, st ); +} + + + +/* +** RB_CalcSpecularAlpha +** +** Calculates specular coefficient and places it in the alpha channel 
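+**
+** A sketch of the math, for reference: with unit normal n and unit light
+** direction l, the reflection vector is r = 2(n.l)n - l; the specular term
+** is (r.v)^4 with v the normalized vertex-to-eye direction (the two
+** l = l*l squarings below), scaled to 0..255 and clamped. The hardcoded
+** lightOrigin below stands in for a real light direction -- note the FIXME.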
+*/
+vec3_t lightOrigin = { -960, 1980, 96 };		// FIXME: track dynamically
+
+void RB_CalcSpecularAlpha( unsigned char *alphas ) {
+	int			i;
+	float		*v, *normal;
+	vec3_t		viewer, reflected;
+	float		l, d;
+	int			b;
+	vec3_t		lightDir;
+	int			numVertexes;
+
+	v = tess.xyz[0];
+	normal = tess.normal[0];
+
+	alphas += 3;
+
+	numVertexes = tess.numVertexes;
+	for (i = 0 ; i < numVertexes ; i++, v += 4, normal += 4, alphas += 4) {
+		float ilength;
+
+		VectorSubtract( lightOrigin, v, lightDir );
+//		ilength = Q_rsqrt( DotProduct( lightDir, lightDir ) );
+		VectorNorm( lightDir );
+
+		// calculate the specular color
+		d = DotProduct (normal, lightDir);
+//		d *= ilength;
+
+		// we don't optimize for the d < 0 case since this tends to
+		// cause visual artifacts such as faceted "snapping"
+		reflected[0] = normal[0]*2*d - lightDir[0];
+		reflected[1] = normal[1]*2*d - lightDir[1];
+		reflected[2] = normal[2]*2*d - lightDir[2];
+
+		VectorSubtract (backEnd.or.viewOrigin, v, viewer);
+		// normalize with the reciprocal of the view distance;
+		// multiplying by the raw length would make the specular
+		// term grow with distance instead of staying a unit dot
+		ilength = 1.0f / sqrtf( DotProduct( viewer, viewer ) );
+		l = DotProduct (reflected, viewer);
+		l *= ilength;
+
+		if (l < 0) {
+			b = 0;
+		} else {
+			l = l*l;
+			l = l*l;
+			b = l * 255;
+			if (b > 255) {
+				b = 255;
+			}
+		}
+
+		*alphas = b;
+	}
+}
+
+/*
+** The basic vertex lighting calc
+*/
+void RB_CalcDiffuseColor( unsigned char (*colors)[4] )
+{
+	int				i;
+	float			*v, *normal;
+	float			incoming;
+	trRefEntity_t	*ent;
+//	unsigned char	ambientLightRGBA[4];
+	vec3_t			ambientLight;
+	vec3_t			lightDir;
+	vec3_t			directedLight;
+	int				numVertexes;
+#if idppc_altivec
+	vector unsigned char vSel = (vector unsigned char)(0x00, 0x00, 0x00, 0xff,
+							   0x00, 0x00, 0x00, 0xff,
+							   0x00, 0x00, 0x00, 0xff,
+							   0x00, 0x00, 0x00, 0xff);
+	vector float ambientLightVec;
+	vector float directedLightVec;
+	vector float lightDirVec;
+	vector float normalVec0, normalVec1;
+	vector float incomingVec0, incomingVec1, incomingVec2;
+	vector float zero, jVec;
+	vector signed int jVecInt;
+	vector signed short jVecShort;
+	vector unsigned char jVecChar, normalPerm;
+#endif
+	ent = backEnd.currentEntity;
+//	ambientLightRGBA[0] = ent->ambientLightRGBA[0];
+//	ambientLightRGBA[1] = ent->ambientLightRGBA[1];
+//	ambientLightRGBA[2] = ent->ambientLightRGBA[2];
+//	ambientLightRGBA[3] = ent->ambientLightRGBA[3];
+
+#if idppc_altivec
+	// A lot of this could be simplified if we made sure
+	// entities light info was 16-byte aligned.
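+	// The vec_lvsl/vec_ld/vec_perm triple below is the standard AltiVec
+	// idiom for loading unaligned data: vec_ld only fetches from 16-byte
+	// aligned addresses, so two overlapping aligned loads are taken and
+	// vec_perm shuffles the wanted bytes into a single register, driven
+	// by the permute mask that vec_lvsl derives from the address.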
+ jVecChar = vec_lvsl(0, ent->ambientLight); + ambientLightVec = vec_ld(0, (vector float *)ent->ambientLight); + jVec = vec_ld(11, (vector float *)ent->ambientLight); + ambientLightVec = vec_perm(ambientLightVec,jVec,jVecChar); + + jVecChar = vec_lvsl(0, ent->directedLight); + directedLightVec = vec_ld(0,(vector float *)ent->directedLight); + jVec = vec_ld(11,(vector float *)ent->directedLight); + directedLightVec = vec_perm(directedLightVec,jVec,jVecChar); + + jVecChar = vec_lvsl(0, ent->lightDir); + lightDirVec = vec_ld(0,(vector float *)ent->lightDir); + jVec = vec_ld(11,(vector float *)ent->lightDir); + lightDirVec = vec_perm(lightDirVec,jVec,jVecChar); + + zero = (vector float)vec_splat_s8(0); + VectorCopy( ent->lightDir, lightDir ); +#else + VectorCopy( ent->ambientLight, ambientLight ); + VectorCopy( ent->directedLight, directedLight ); + VectorCopy( ent->lightDir, lightDir ); +#endif + + v = tess.xyz[0]; + normal = tess.normal[0]; + +#if idppc_altivec + normalPerm = vec_lvsl(0,normal); +#endif + numVertexes = tess.numVertexes; + for (i = 0 ; i < numVertexes ; i++, v += 4, normal += 4) { +#if idppc_altivec + normalVec0 = vec_ld(0,(vector float *)normal); + normalVec1 = vec_ld(11,(vector float *)normal); + normalVec0 = vec_perm(normalVec0,normalVec1,normalPerm); + incomingVec0 = vec_madd(normalVec0, lightDirVec, zero); + incomingVec1 = vec_sld(incomingVec0,incomingVec0,4); + incomingVec2 = vec_add(incomingVec0,incomingVec1); + incomingVec1 = vec_sld(incomingVec1,incomingVec1,4); + incomingVec2 = vec_add(incomingVec2,incomingVec1); + incomingVec0 = vec_splat(incomingVec2,0); + incomingVec0 = vec_max(incomingVec0,zero); + normalPerm = vec_lvsl(12,normal); + jVec = vec_madd(incomingVec0, directedLightVec, ambientLightVec); + jVecInt = vec_cts(jVec,0); // RGBx + jVecShort = vec_pack(jVecInt,jVecInt); // RGBxRGBx + jVecChar = vec_packsu(jVecShort,jVecShort); // RGBxRGBxRGBxRGBx + jVecChar = vec_sel(jVecChar,vSel,vSel); // RGBARGBARGBARGBA replace alpha with 255 + vec_ste((vector unsigned int)jVecChar,0,(unsigned int *)&colors[i*4]); // store color +#else + incoming = DotProduct (normal, lightDir); + if ( incoming <= 0 ) + { + colors[i][0] = ent->ambientLightRGBA[0]; + colors[i][1] = ent->ambientLightRGBA[1]; + colors[i][2] = ent->ambientLightRGBA[2]; + colors[i][3] = ent->ambientLightRGBA[3]; + + continue; + } + + int R = (int)( ambientLight[0] + incoming * directedLight[0] ); + if ( R > 255 ) { + R = 255; + } + colors[i][0] = R; + + int G = (int)( ambientLight[1] + incoming * directedLight[1] ); + if ( G > 255 ) { + G = 255; + } + colors[i][1] = G; + + int B = (int)( ambientLight[2] + incoming * directedLight[2] ); + if ( B > 255 ) { + B = 255; + } + colors[i][2] = B; + + colors[i][3] = 255; +#endif + } +} + diff --git a/code/renderer_vulkan/tr_shader.c b/code/renderer_vulkan/tr_shader.c new file mode 100644 index 0000000000..bbd7e6b055 --- /dev/null +++ b/code/renderer_vulkan/tr_shader.c @@ -0,0 +1,2216 @@ +/* +=========================================================================== +Copyright (C) 1999-2005 Id Software, Inc. + +This file is part of Quake III Arena source code. + +Quake III Arena source code is free software; you can redistribute it +and/or modify it under the terms of the GNU General Public License as +published by the Free Software Foundation; either version 2 of the License, +or (at your option) any later version. 
+ +Quake III Arena source code is distributed in the hope that it will be +useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with Quake III Arena source code; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +=========================================================================== +*/ +#include "tr_globals.h" +#include "vk_image.h" +#include "vk_pipelines.h" +#include "vk_shaders.h" +#include "tr_cvar.h" +#include "tr_shader.h" +#include "R_Parser.h" +#include "ref_import.h" + +// tr_shader.c -- this file deals with the parsing and definition of shaders + + +// the shader is parsed into these global variables, then copied into +// dynamically allocated memory if it is valid. +static shaderStage_t stages[MAX_SHADER_STAGES] = {0}; +static shader_t shader; +static texModInfo_t texMods[MAX_SHADER_STAGES][TR_MAX_TEXMODS]; + + +/* +=============== +ParseVector +=============== +*/ +static qboolean ParseVector( char **text, int count, float *v ) { + char *token; + int i; + + // FIXME: spaces are currently required after parens, should change parseext... + token = R_ParseExt( text, qfalse ); + if ( strcmp( token, "(" ) ) { + ri.Printf( PRINT_WARNING, "WARNING: missing parenthesis in shader '%s'\n", shader.name ); + return qfalse; + } + + for ( i = 0 ; i < count ; i++ ) { + token = R_ParseExt( text, qfalse ); + if ( !token[0] ) { + ri.Printf( PRINT_WARNING, "WARNING: missing vector element in shader '%s'\n", shader.name ); + return qfalse; + } + v[i] = atof( token ); + } + + token = R_ParseExt( text, qfalse ); + if ( strcmp( token, ")" ) ) { + ri.Printf( PRINT_WARNING, "WARNING: missing parenthesis in shader '%s'\n", shader.name ); + return qfalse; + } + + return qtrue; +} + + +/* +=============== +NameToAFunc +=============== +*/ +static unsigned NameToAFunc( const char *funcname ) +{ + if ( !Q_stricmp( funcname, "GT0" ) ) + { + return GLS_ATEST_GT_0; + } + else if ( !Q_stricmp( funcname, "LT128" ) ) + { + return GLS_ATEST_LT_80; + } + else if ( !Q_stricmp( funcname, "GE128" ) ) + { + return GLS_ATEST_GE_80; + } + + ri.Printf( PRINT_WARNING, "WARNING: invalid alphaFunc name '%s' in shader '%s'\n", funcname, shader.name ); + return 0; +} + + +/* +=============== +NameToSrcBlendMode +=============== +*/ +static int NameToSrcBlendMode( const char *name ) +{ + if ( !Q_stricmp( name, "GL_ONE" ) ) + { + return GLS_SRCBLEND_ONE; + } + else if ( !Q_stricmp( name, "GL_ZERO" ) ) + { + return GLS_SRCBLEND_ZERO; + } + else if ( !Q_stricmp( name, "GL_DST_COLOR" ) ) + { + return GLS_SRCBLEND_DST_COLOR; + } + else if ( !Q_stricmp( name, "GL_ONE_MINUS_DST_COLOR" ) ) + { + return GLS_SRCBLEND_ONE_MINUS_DST_COLOR; + } + else if ( !Q_stricmp( name, "GL_SRC_ALPHA" ) ) + { + return GLS_SRCBLEND_SRC_ALPHA; + } + else if ( !Q_stricmp( name, "GL_ONE_MINUS_SRC_ALPHA" ) ) + { + return GLS_SRCBLEND_ONE_MINUS_SRC_ALPHA; + } + else if ( !Q_stricmp( name, "GL_DST_ALPHA" ) ) + { + return GLS_SRCBLEND_DST_ALPHA; + } + else if ( !Q_stricmp( name, "GL_ONE_MINUS_DST_ALPHA" ) ) + { + return GLS_SRCBLEND_ONE_MINUS_DST_ALPHA; + } + else if ( !Q_stricmp( name, "GL_SRC_ALPHA_SATURATE" ) ) + { + return GLS_SRCBLEND_ALPHA_SATURATE; + } + + ri.Printf( PRINT_WARNING, "WARNING: unknown blend mode '%s' in shader '%s', substituting GL_ONE\n", name, shader.name ); + return 
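+	/* Example of what this parser consumes: a .shader stage such as
+
+	       {
+	           map textures/sfx/flame1.tga
+	           blendFunc GL_SRC_ALPHA GL_ONE_MINUS_SRC_ALPHA
+	       }
+
+	   routes "GL_SRC_ALPHA" through this function and
+	   "GL_ONE_MINUS_SRC_ALPHA" through NameToDstBlendMode below, OR-ing
+	   the resulting GLS_ bits into the stage's state flags. The texture
+	   path here is illustrative. */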
GLS_SRCBLEND_ONE; +} + +/* +=============== +NameToDstBlendMode +=============== +*/ +static int NameToDstBlendMode( const char *name ) +{ + if ( !Q_stricmp( name, "GL_ONE" ) ) + { + return GLS_DSTBLEND_ONE; + } + else if ( !Q_stricmp( name, "GL_ZERO" ) ) + { + return GLS_DSTBLEND_ZERO; + } + else if ( !Q_stricmp( name, "GL_SRC_ALPHA" ) ) + { + return GLS_DSTBLEND_SRC_ALPHA; + } + else if ( !Q_stricmp( name, "GL_ONE_MINUS_SRC_ALPHA" ) ) + { + return GLS_DSTBLEND_ONE_MINUS_SRC_ALPHA; + } + else if ( !Q_stricmp( name, "GL_DST_ALPHA" ) ) + { + return GLS_DSTBLEND_DST_ALPHA; + } + else if ( !Q_stricmp( name, "GL_ONE_MINUS_DST_ALPHA" ) ) + { + return GLS_DSTBLEND_ONE_MINUS_DST_ALPHA; + } + else if ( !Q_stricmp( name, "GL_SRC_COLOR" ) ) + { + return GLS_DSTBLEND_SRC_COLOR; + } + else if ( !Q_stricmp( name, "GL_ONE_MINUS_SRC_COLOR" ) ) + { + return GLS_DSTBLEND_ONE_MINUS_SRC_COLOR; + } + + ri.Printf( PRINT_WARNING, "WARNING: unknown blend mode '%s' in shader '%s', substituting GL_ONE\n", name, shader.name ); + return GLS_DSTBLEND_ONE; +} + +/* +=============== +NameToGenFunc +=============== +*/ +static genFunc_t NameToGenFunc( const char *funcname ) +{ + if ( !Q_stricmp( funcname, "sin" ) ) + { + return GF_SIN; + } + else if ( !Q_stricmp( funcname, "square" ) ) + { + return GF_SQUARE; + } + else if ( !Q_stricmp( funcname, "triangle" ) ) + { + return GF_TRIANGLE; + } + else if ( !Q_stricmp( funcname, "sawtooth" ) ) + { + return GF_SAWTOOTH; + } + else if ( !Q_stricmp( funcname, "inversesawtooth" ) ) + { + return GF_INVERSE_SAWTOOTH; + } + else if ( !Q_stricmp( funcname, "noise" ) ) + { + return GF_NOISE; + } + + ri.Printf( PRINT_WARNING, "WARNING: invalid genfunc name '%s' in shader '%s'\n", funcname, shader.name ); + return GF_SIN; +} + + +/* +=================== +ParseWaveForm +=================== +*/ +static void ParseWaveForm( char **text, waveForm_t *wave ) +{ + char *token; + + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing waveform parm in shader '%s'\n", shader.name ); + return; + } + wave->func = NameToGenFunc( token ); + + // BASE, AMP, PHASE, FREQ + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing waveform parm in shader '%s'\n", shader.name ); + return; + } + wave->base = atof( token ); + + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing waveform parm in shader '%s'\n", shader.name ); + return; + } + wave->amplitude = atof( token ); + + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing waveform parm in shader '%s'\n", shader.name ); + return; + } + wave->phase = atof( token ); + + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing waveform parm in shader '%s'\n", shader.name ); + return; + } + wave->frequency = atof( token ); +} + + +/* +=================== +ParseTexMod +=================== +*/ +static void ParseTexMod( char *_text, shaderStage_t *stage ) +{ + const char *token; + char **text = &_text; + texModInfo_t *tmi; + + if ( stage->bundle[0].numTexMods == TR_MAX_TEXMODS ) { + ri.Error( ERR_DROP, "ERROR: too many tcMod stages in shader '%s'\n", shader.name ); + return; + } + + tmi = &stage->bundle[0].texMods[stage->bundle[0].numTexMods]; + stage->bundle[0].numTexMods++; + + token = R_ParseExt( text, qfalse ); + + // + // turb + // + if ( !Q_stricmp( token, "turb" ) ) + { + token = 
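+	/* Example input for this branch: a shader line such as
+
+	       tcMod turb 0 0.25 0 1.6
+
+	   parses, in order, base 0, amplitude 0.25, phase 0, frequency 1.6 --
+	   the four token reads that follow. (Values are illustrative.) */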
R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing tcMod turb parms in shader '%s'\n", shader.name ); + return; + } + tmi->wave.base = atof( token ); + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing tcMod turb in shader '%s'\n", shader.name ); + return; + } + tmi->wave.amplitude = atof( token ); + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing tcMod turb in shader '%s'\n", shader.name ); + return; + } + tmi->wave.phase = atof( token ); + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing tcMod turb in shader '%s'\n", shader.name ); + return; + } + tmi->wave.frequency = atof( token ); + + tmi->type = TMOD_TURBULENT; + } + // + // scale + // + else if ( !Q_stricmp( token, "scale" ) ) + { + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing scale parms in shader '%s'\n", shader.name ); + return; + } + tmi->scale[0] = atof( token ); + + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing scale parms in shader '%s'\n", shader.name ); + return; + } + tmi->scale[1] = atof( token ); + tmi->type = TMOD_SCALE; + } + // + // scroll + // + else if ( !Q_stricmp( token, "scroll" ) ) + { + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing scale scroll parms in shader '%s'\n", shader.name ); + return; + } + tmi->scroll[0] = atof( token ); + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing scale scroll parms in shader '%s'\n", shader.name ); + return; + } + tmi->scroll[1] = atof( token ); + tmi->type = TMOD_SCROLL; + } + // + // stretch + // + else if ( !Q_stricmp( token, "stretch" ) ) + { + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing stretch parms in shader '%s'\n", shader.name ); + return; + } + tmi->wave.func = NameToGenFunc( token ); + + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing stretch parms in shader '%s'\n", shader.name ); + return; + } + tmi->wave.base = atof( token ); + + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing stretch parms in shader '%s'\n", shader.name ); + return; + } + tmi->wave.amplitude = atof( token ); + + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing stretch parms in shader '%s'\n", shader.name ); + return; + } + tmi->wave.phase = atof( token ); + + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing stretch parms in shader '%s'\n", shader.name ); + return; + } + tmi->wave.frequency = atof( token ); + + tmi->type = TMOD_STRETCH; + } + // + // transform + // + else if ( !Q_stricmp( token, "transform" ) ) + { + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing transform parms in shader '%s'\n", shader.name ); + return; + } + tmi->matrix[0][0] = atof( token ); + + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing transform parms in shader '%s'\n", shader.name ); + return; + } + tmi->matrix[0][1] = atof( 
token ); + + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing transform parms in shader '%s'\n", shader.name ); + return; + } + tmi->matrix[1][0] = atof( token ); + + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing transform parms in shader '%s'\n", shader.name ); + return; + } + tmi->matrix[1][1] = atof( token ); + + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing transform parms in shader '%s'\n", shader.name ); + return; + } + tmi->translate[0] = atof( token ); + + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing transform parms in shader '%s'\n", shader.name ); + return; + } + tmi->translate[1] = atof( token ); + + tmi->type = TMOD_TRANSFORM; + } + // + // rotate + // + else if ( !Q_stricmp( token, "rotate" ) ) + { + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing tcMod rotate parms in shader '%s'\n", shader.name ); + return; + } + tmi->rotateSpeed = atof( token ); + tmi->type = TMOD_ROTATE; + } + // + // entityTranslate + // + else if ( !Q_stricmp( token, "entityTranslate" ) ) + { + tmi->type = TMOD_ENTITY_TRANSLATE; + } + else + { + ri.Printf( PRINT_WARNING, "WARNING: unknown tcMod '%s' in shader '%s'\n", token, shader.name ); + } +} + + + +static qboolean ParseStage( shaderStage_t *stage, char **text ) +{ + int depthMaskBits = GLS_DEPTHMASK_TRUE, blendSrcBits = 0, blendDstBits = 0, atestBits = 0, depthFuncBits = 0; + qboolean depthMaskExplicit = qfalse; + + stage->active = qtrue; + + while ( 1 ) + { + char* token = R_ParseExt( text, qtrue ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: no matching '}' found\n" ); + return qfalse; + } + + if ( token[0] == '}' ) + { + break; + } + else if ( !Q_stricmp( token, "map" ) ) + { + // + // map + // + token = R_ParseExt( text, qfalse ); + if ( !token[0] ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing parameter for 'map' keyword in shader '%s'\n", shader.name ); + return qfalse; + } + + if ( !Q_stricmp( token, "$whiteimage" ) ) + { + stage->bundle[0].image[0] = tr.whiteImage; + continue; + } + else if ( !Q_stricmp( token, "$lightmap" ) ) + { + stage->bundle[0].isLightmap = qtrue; + if ( shader.lightmapIndex < 0 ) { + stage->bundle[0].image[0] = tr.whiteImage; + } else { + stage->bundle[0].image[0] = tr.lightmaps[shader.lightmapIndex]; + } + continue; + } + else + { + stage->bundle[0].image[0] = R_FindImageFile(token, !shader.noMipMaps, !shader.noPicMip, GL_REPEAT); + if ( !stage->bundle[0].image[0] ) + { + ri.Printf( PRINT_WARNING, "WARNING: R_FindImageFile could not find '%s' in shader '%s'\n", token, shader.name ); + return qfalse; + } + } + } + else if ( !Q_stricmp( token, "clampmap" ) ) + { + // + // clampmap + // + token = R_ParseExt( text, qfalse ); + if ( 0 == token[0] ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing parameter for 'clampmap' keyword in shader '%s'\n", shader.name ); + return qfalse; + } + + //ri.Printf( PRINT_ALL, "CLAMPMAP: \n"); + + stage->bundle[0].image[0] = R_FindImageFile(token, !shader.noMipMaps, !shader.noPicMip, GL_CLAMP); + if ( !stage->bundle[0].image[0] ) + { + ri.Printf( PRINT_WARNING, "WARNING: R_FindImageFile could not find '%s' in shader '%s'\n", token, shader.name ); + return qfalse; + } + } + else if ( !Q_stricmp( token, "animMap" ) ) + { + // + // animMap .... 
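+			// animMap <frequency> <image1> ... <imageN>: cycle through the listed
+			// frames at <frequency> frames per second, up to MAX_IMAGE_ANIMATIONS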
+			//
+
+			token = R_ParseExt( text, qfalse );
+			if ( !token[0] )
+			{
+				ri.Printf( PRINT_WARNING, "WARNING: missing parameter for 'animMap' keyword in shader '%s'\n", shader.name );
+				return qfalse;
+			}
+			stage->bundle[0].imageAnimationSpeed = atof( token );
+
+			// parse up to MAX_IMAGE_ANIMATIONS animations
+			while ( 1 ) {
+
+				token = R_ParseExt( text, qfalse );
+				if ( !token[0] ) {
+					break;
+				}
+				int num = stage->bundle[0].numImageAnimations;
+				if ( num < MAX_IMAGE_ANIMATIONS ) {
+					stage->bundle[0].image[num] = R_FindImageFile(token, !shader.noMipMaps, !shader.noPicMip, GL_REPEAT);
+					if ( !stage->bundle[0].image[num] )
+					{
+						ri.Printf( PRINT_WARNING, "WARNING: R_FindImageFile could not find '%s' in shader '%s'\n", token, shader.name );
+						return qfalse;
+					}
+					stage->bundle[0].numImageAnimations++;
+				}
+			}
+		}
+		else if ( !Q_stricmp( token, "videoMap" ) )
+		{
+			token = R_ParseExt( text, qfalse );
+			if ( !token[0] )
+			{
+				ri.Printf( PRINT_WARNING, "WARNING: missing parameter for 'videoMap' keyword in shader '%s'\n", shader.name );
+				return qfalse;
+			}
+			stage->bundle[0].videoMapHandle = ri.CIN_PlayCinematic( token, 0, 0, 256, 256, (CIN_loop | CIN_silent | CIN_shader));
+			if (stage->bundle[0].videoMapHandle != -1) {
+				stage->bundle[0].isVideoMap = qtrue;
+				stage->bundle[0].image[0] = tr.scratchImage[stage->bundle[0].videoMapHandle];
+			}
+		}
+		//
+		// alphafunc <func>
+		//
+		else if ( !Q_stricmp( token, "alphaFunc" ) )
+		{
+			token = R_ParseExt( text, qfalse );
+			if ( !token[0] )
+			{
+				ri.Printf( PRINT_WARNING, "WARNING: missing parameter for 'alphaFunc' keyword in shader '%s'\n", shader.name );
+				return qfalse;
+			}
+
+			atestBits = NameToAFunc( token );
+		}
+		//
+		// depthFunc <func>
+		//
+		else if ( !Q_stricmp( token, "depthfunc" ) )
+		{
+			token = R_ParseExt( text, qfalse );
+
+			if ( !token[0] )
+			{
+				ri.Printf( PRINT_WARNING, "WARNING: missing parameter for 'depthfunc' keyword in shader '%s'\n", shader.name );
+				return qfalse;
+			}
+
+			if ( !Q_stricmp( token, "lequal" ) )
+			{
+				depthFuncBits = 0;
+			}
+			else if ( !Q_stricmp( token, "equal" ) )
+			{
+				depthFuncBits = GLS_DEPTHFUNC_EQUAL;
+			}
+			else
+			{
+				ri.Printf( PRINT_WARNING, "WARNING: unknown depthfunc '%s' in shader '%s'\n", token, shader.name );
+				continue;
+			}
+		}
+		else if ( !Q_stricmp( token, "detail" ) )
+		{
+			//
+			// detail
+			//
+			stage->isDetail = qtrue;
+		}
+		else if ( !Q_stricmp( token, "blendfunc" ) )
+		{
+			//
+			// blendfunc <srcFactor> <dstFactor>
+			// or blendfunc <add|filter|blend>
+			//
+			token = R_ParseExt( text, qfalse );
+			if ( token[0] == 0 )
+			{
+				ri.Printf( PRINT_WARNING, "WARNING: missing parm for blendFunc in shader '%s'\n", shader.name );
+				continue;
+			}
+			// check for "simple" blends first
+			if ( !Q_stricmp( token, "add" ) ) {
+				blendSrcBits = GLS_SRCBLEND_ONE;
+				blendDstBits = GLS_DSTBLEND_ONE;
+			} else if ( !Q_stricmp( token, "filter" ) ) {
+				blendSrcBits = GLS_SRCBLEND_DST_COLOR;
+				blendDstBits = GLS_DSTBLEND_ZERO;
+			} else if ( !Q_stricmp( token, "blend" ) ) {
+				blendSrcBits = GLS_SRCBLEND_SRC_ALPHA;
+				blendDstBits = GLS_DSTBLEND_ONE_MINUS_SRC_ALPHA;
+			} else {
+				// complex double blends
+				blendSrcBits = NameToSrcBlendMode( token );
+
+				token = R_ParseExt( text, qfalse );
+				if ( token[0] == 0 )
+				{
+					ri.Printf( PRINT_WARNING, "WARNING: missing parm for blendFunc in shader '%s'\n", shader.name );
+					continue;
+				}
+				blendDstBits = NameToDstBlendMode( token );
+			}
+
+			// clear depth mask for blended surfaces
+			if ( !depthMaskExplicit )
+			{
+				depthMaskBits = 0;
+			}
+		}
+		//
+		// rgbGen
+		//
+		else if ( !Q_stricmp( token, "rgbGen" ) )
+		{
+			token = R_ParseExt( text, qfalse
); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing parameters for rgbGen in shader '%s'\n", shader.name ); + continue; + } + + if ( !Q_stricmp( token, "wave" ) ) + { + ParseWaveForm( text, &stage->rgbWave ); + stage->rgbGen = CGEN_WAVEFORM; + } + else if ( !Q_stricmp( token, "const" ) ) + { + vec3_t color; + + ParseVector( text, 3, color ); + stage->constantColor[0] = 255 * color[0]; + stage->constantColor[1] = 255 * color[1]; + stage->constantColor[2] = 255 * color[2]; + + stage->rgbGen = CGEN_CONST; + } + else if ( !Q_stricmp( token, "identity" ) ) + { + stage->rgbGen = CGEN_IDENTITY; + } + else if ( !Q_stricmp( token, "identityLighting" ) ) + { + stage->rgbGen = CGEN_IDENTITY_LIGHTING; + } + else if ( !Q_stricmp( token, "entity" ) ) + { + stage->rgbGen = CGEN_ENTITY; + } + else if ( !Q_stricmp( token, "oneMinusEntity" ) ) + { + stage->rgbGen = CGEN_ONE_MINUS_ENTITY; + } + else if ( !Q_stricmp( token, "vertex" ) ) + { + stage->rgbGen = CGEN_VERTEX; + if ( stage->alphaGen == 0 ) { + stage->alphaGen = AGEN_VERTEX; + } + } + else if ( !Q_stricmp( token, "exactVertex" ) ) + { + stage->rgbGen = CGEN_EXACT_VERTEX; + } + else if ( !Q_stricmp( token, "lightingDiffuse" ) ) + { + stage->rgbGen = CGEN_LIGHTING_DIFFUSE; + } + else if ( !Q_stricmp( token, "oneMinusVertex" ) ) + { + stage->rgbGen = CGEN_ONE_MINUS_VERTEX; + } + else + { + ri.Printf( PRINT_WARNING, "WARNING: unknown rgbGen parameter '%s' in shader '%s'\n", token, shader.name ); + continue; + } + } + // + // alphaGen + // + else if ( !Q_stricmp( token, "alphaGen" ) ) + { + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing parameters for alphaGen in shader '%s'\n", shader.name ); + continue; + } + + if ( !Q_stricmp( token, "wave" ) ) + { + ParseWaveForm( text, &stage->alphaWave ); + stage->alphaGen = AGEN_WAVEFORM; + } + else if ( !Q_stricmp( token, "const" ) ) + { + token = R_ParseExt( text, qfalse ); + stage->constantColor[3] = 255 * atof( token ); + stage->alphaGen = AGEN_CONST; + } + else if ( !Q_stricmp( token, "identity" ) ) + { + stage->alphaGen = AGEN_IDENTITY; + } + else if ( !Q_stricmp( token, "entity" ) ) + { + stage->alphaGen = AGEN_ENTITY; + } + else if ( !Q_stricmp( token, "oneMinusEntity" ) ) + { + stage->alphaGen = AGEN_ONE_MINUS_ENTITY; + } + else if ( !Q_stricmp( token, "vertex" ) ) + { + stage->alphaGen = AGEN_VERTEX; + } + else if ( !Q_stricmp( token, "lightingSpecular" ) ) + { + stage->alphaGen = AGEN_LIGHTING_SPECULAR; + } + else if ( !Q_stricmp( token, "oneMinusVertex" ) ) + { + stage->alphaGen = AGEN_ONE_MINUS_VERTEX; + } + else if ( !Q_stricmp( token, "portal" ) ) + { + stage->alphaGen = AGEN_PORTAL; + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + shader.portalRange = 256; + ri.Printf( PRINT_WARNING, "WARNING: missing range parameter for alphaGen portal in shader '%s', defaulting to 256\n", shader.name ); + } + else + { + shader.portalRange = atof( token ); + } + } + else + { + ri.Printf( PRINT_WARNING, "WARNING: unknown alphaGen parameter '%s' in shader '%s'\n", token, shader.name ); + continue; + } + } + // + // tcGen + // + else if ( !Q_stricmp(token, "texgen") || !Q_stricmp( token, "tcGen" ) ) + { + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing texgen parm in shader '%s'\n", shader.name ); + continue; + } + + if ( !Q_stricmp( token, "environment" ) ) + { + stage->bundle[0].tcGen = TCGEN_ENVIRONMENT_MAPPED; + } + else if ( !Q_stricmp( token, 
"lightmap" ) ) + { + stage->bundle[0].tcGen = TCGEN_LIGHTMAP; + } + else if ( !Q_stricmp( token, "texture" ) || !Q_stricmp( token, "base" ) ) + { + stage->bundle[0].tcGen = TCGEN_TEXTURE; + } + else if ( !Q_stricmp( token, "vector" ) ) + { + ParseVector( text, 3, stage->bundle[0].tcGenVectors[0] ); + ParseVector( text, 3, stage->bundle[0].tcGenVectors[1] ); + + stage->bundle[0].tcGen = TCGEN_VECTOR; + } + else + { + ri.Printf( PRINT_WARNING, "WARNING: unknown texgen parm in shader '%s'\n", shader.name ); + } + } + // + // tcMod <...> + // + else if ( !Q_stricmp( token, "tcMod" ) ) + { + char buffer[1024] = ""; + + while ( 1 ) + { + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + break; + strcat( buffer, token ); + strcat( buffer, " " ); + } + + ParseTexMod( buffer, stage ); + + continue; + } + // + // depthmask + // + else if ( !Q_stricmp( token, "depthwrite" ) ) + { + depthMaskBits = GLS_DEPTHMASK_TRUE; + depthMaskExplicit = qtrue; + + continue; + } + else + { + ri.Printf( PRINT_WARNING, "WARNING: unknown parameter '%s' in shader '%s'\n", token, shader.name ); + return qfalse; + } + } + + // + // if cgen isn't explicitly specified, use either identity or identitylighting + // + if ( stage->rgbGen == CGEN_BAD ) { + if ( blendSrcBits == 0 || + blendSrcBits == GLS_SRCBLEND_ONE || + blendSrcBits == GLS_SRCBLEND_SRC_ALPHA ) { + stage->rgbGen = CGEN_IDENTITY_LIGHTING; + } else { + stage->rgbGen = CGEN_IDENTITY; + } + } + + + // + // implicitly assume that a GL_ONE GL_ZERO blend mask disables blending + // + if ( ( blendSrcBits == GLS_SRCBLEND_ONE ) && + ( blendDstBits == GLS_DSTBLEND_ZERO ) ) + { + blendDstBits = blendSrcBits = 0; + depthMaskBits = GLS_DEPTHMASK_TRUE; + } + + // decide which agens we can skip + if ( stage->alphaGen == AGEN_IDENTITY ){ + if ( stage->rgbGen == CGEN_IDENTITY || stage->rgbGen == CGEN_LIGHTING_DIFFUSE ) + { + stage->alphaGen = AGEN_SKIP; + } + } + + // + // compute state bits + // + stage->stateBits = depthMaskBits | + blendSrcBits | blendDstBits | + atestBits | + depthFuncBits; + + return qtrue; +} + +/* +=============== +ParseDeform + +deformVertexes wave +deformVertexes normal +deformVertexes move +deformVertexes bulge +deformVertexes projectionShadow +deformVertexes autoSprite +deformVertexes autoSprite2 +deformVertexes text[0-7] +=============== +*/ +static void ParseDeform( char **text ) { + char *token; + deformStage_t *ds; + + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing deform parm in shader '%s'\n", shader.name ); + return; + } + + if ( shader.numDeforms == MAX_SHADER_DEFORMS ) { + ri.Printf( PRINT_WARNING, "WARNING: MAX_SHADER_DEFORMS in '%s'\n", shader.name ); + return; + } + + ds = &shader.deforms[ shader.numDeforms ]; + shader.numDeforms++; + + if ( !Q_stricmp( token, "projectionShadow" ) ) { + ds->deformation = DEFORM_PROJECTION_SHADOW; + return; + } + + if ( !Q_stricmp( token, "autosprite" ) ) { + ds->deformation = DEFORM_AUTOSPRITE; + return; + } + + if ( !Q_stricmp( token, "autosprite2" ) ) { + ds->deformation = DEFORM_AUTOSPRITE2; + return; + } + + if ( !Q_stricmpn( token, "text", 4 ) ) { + int n; + + n = token[4] - '0'; + if ( n < 0 || n > 7 ) { + n = 0; + } + ds->deformation = DEFORM_TEXT0 + n; + return; + } + + if ( !Q_stricmp( token, "bulge" ) ) { + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing deformVertexes bulge parm in shader '%s'\n", shader.name ); + return; + } + ds->bulgeWidth = atof( token ); + + 
token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing deformVertexes bulge parm in shader '%s'\n", shader.name ); + return; + } + ds->bulgeHeight = atof( token ); + + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing deformVertexes bulge parm in shader '%s'\n", shader.name ); + return; + } + ds->bulgeSpeed = atof( token ); + + ds->deformation = DEFORM_BULGE; + return; + } + + if ( !Q_stricmp( token, "wave" ) ) + { + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing deformVertexes parm in shader '%s'\n", shader.name ); + return; + } + + if ( atof( token ) != 0 ) + { + ds->deformationSpread = 1.0f / atof( token ); + } + else + { + ds->deformationSpread = 100.0f; + ri.Printf( PRINT_WARNING, "WARNING: illegal div value of 0 in deformVertexes command for shader '%s'\n", shader.name ); + } + + ParseWaveForm( text, &ds->deformationWave ); + ds->deformation = DEFORM_WAVE; + return; + } + + if ( !Q_stricmp( token, "normal" ) ) + { + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing deformVertexes parm in shader '%s'\n", shader.name ); + return; + } + ds->deformationWave.amplitude = atof( token ); + + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing deformVertexes parm in shader '%s'\n", shader.name ); + return; + } + ds->deformationWave.frequency = atof( token ); + + ds->deformation = DEFORM_NORMALS; + return; + } + + if ( !Q_stricmp( token, "move" ) ) { + int i; + + for ( i = 0 ; i < 3 ; i++ ) { + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) { + ri.Printf( PRINT_WARNING, "WARNING: missing deformVertexes parm in shader '%s'\n", shader.name ); + return; + } + ds->moveVector[i] = atof( token ); + } + + ParseWaveForm( text, &ds->deformationWave ); + ds->deformation = DEFORM_MOVE; + return; + } + + ri.Printf( PRINT_WARNING, "WARNING: unknown deformVertexes subtype '%s' found in shader '%s'\n", token, shader.name ); +} + + +/* +=============== +ParseSkyParms + +skyParms +=============== +*/ +static void ParseSkyParms( char **text ) { + char *token; + static char *suf[6] = {"rt", "bk", "lf", "ft", "up", "dn"}; + char pathname[MAX_QPATH]; + int i; + + // outerbox + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) { + ri.Printf( PRINT_WARNING, "WARNING: 'skyParms' missing parameter in shader '%s'\n", shader.name ); + return; + } + if ( strcmp( token, "-" ) ) { + for (i=0 ; i<6 ; i++) { + snprintf( pathname, sizeof(pathname), "%s_%s.tga", token, suf[i] ); + shader.sky.outerbox[i] = R_FindImageFile( pathname, qtrue, qtrue, GL_CLAMP ); + if ( !shader.sky.outerbox[i] ) { + shader.sky.outerbox[i] = tr.defaultImage; + } + } + } + + // cloudheight + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) { + ri.Printf( PRINT_WARNING, "WARNING: 'skyParms' missing parameter in shader '%s'\n", shader.name ); + return; + } + shader.sky.cloudHeight = atof( token ); + if ( !shader.sky.cloudHeight ) { + shader.sky.cloudHeight = 512; + } + R_InitSkyTexCoords( shader.sky.cloudHeight ); + + + // innerbox + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) { + ri.Printf( PRINT_WARNING, "WARNING: 'skyParms' missing parameter in shader '%s'\n", shader.name ); + return; + } + if ( strcmp( token, "-" ) ) { + for (i=0 ; i<6 ; i++) { + snprintf( pathname, sizeof(pathname), "%s_%s.tga", token, suf[i] ); + 
shader.sky.innerbox[i] = R_FindImageFile( ( char * ) pathname, qtrue, qtrue, GL_CLAMP ); + if ( !shader.sky.innerbox[i] ) { + shader.sky.innerbox[i] = tr.defaultImage; + } + } + } + + shader.isSky = qtrue; +} + + +/* +================= +ParseSort +================= +*/ +void ParseSort( char **text ) { + char *token; + + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) { + ri.Printf( PRINT_WARNING, "WARNING: missing sort parameter in shader '%s'\n", shader.name ); + return; + } + + if ( !Q_stricmp( token, "portal" ) ) { + shader.sort = SS_PORTAL; + } else if ( !Q_stricmp( token, "sky" ) ) { + shader.sort = SS_ENVIRONMENT; + } else if ( !Q_stricmp( token, "opaque" ) ) { + shader.sort = SS_OPAQUE; + }else if ( !Q_stricmp( token, "decal" ) ) { + shader.sort = SS_DECAL; + } else if ( !Q_stricmp( token, "seeThrough" ) ) { + shader.sort = SS_SEE_THROUGH; + } else if ( !Q_stricmp( token, "banner" ) ) { + shader.sort = SS_BANNER; + } else if ( !Q_stricmp( token, "additive" ) ) { + shader.sort = SS_BLEND1; + } else if ( !Q_stricmp( token, "nearest" ) ) { + shader.sort = SS_NEAREST; + } else if ( !Q_stricmp( token, "underwater" ) ) { + shader.sort = SS_UNDERWATER; + } else { + shader.sort = atof( token ); + } +} + + + +// this table is also present in q3map + +typedef struct { + char *name; + int clearSolid; + unsigned int surfaceFlags, contents; +} infoParm_t; + +infoParm_t infoParms[] = { + // server relevant contents + {"water", 1, 0, CONTENTS_WATER }, + {"slime", 1, 0, CONTENTS_SLIME }, // mildly damaging + {"lava", 1, 0, CONTENTS_LAVA }, // very damaging + {"playerclip", 1, 0, CONTENTS_PLAYERCLIP }, + {"monsterclip", 1, 0, CONTENTS_MONSTERCLIP }, + {"nodrop", 1, 0, CONTENTS_NODROP }, // don't drop items or leave bodies (death fog, lava, etc) + {"nonsolid", 1, SURF_NONSOLID, 0}, // clears the solid flag + + // utility relevant attributes + {"origin", 1, 0, CONTENTS_ORIGIN }, // center of rotating brushes + {"trans", 0, 0, CONTENTS_TRANSLUCENT }, // don't eat contained surfaces + {"detail", 0, 0, CONTENTS_DETAIL }, // don't include in structural bsp + {"structural", 0, 0, CONTENTS_STRUCTURAL }, // force into structural bsp even if trnas + {"areaportal", 1, 0, CONTENTS_AREAPORTAL }, // divides areas + {"clusterportal", 1,0, CONTENTS_CLUSTERPORTAL }, // for bots + {"donotenter", 1, 0, CONTENTS_DONOTENTER }, // for bots + + {"fog", 1, 0, CONTENTS_FOG}, // carves surfaces entering + {"sky", 0, SURF_SKY, 0 }, // emit light from an environment map + {"lightfilter", 0, SURF_LIGHTFILTER, 0 }, // filter light going through it + {"alphashadow", 0, SURF_ALPHASHADOW, 0 }, // test light on a per-pixel basis + {"hint", 0, SURF_HINT, 0 }, // use as a primary splitter + + // server attributes + {"slick", 0, SURF_SLICK, 0 }, + {"noimpact", 0, SURF_NOIMPACT, 0 }, // don't make impact explosions or marks + {"nomarks", 0, SURF_NOMARKS, 0 }, // don't make impact marks, but still explode + {"ladder", 0, SURF_LADDER, 0 }, + {"nodamage", 0, SURF_NODAMAGE, 0 }, + {"metalsteps", 0, SURF_METALSTEPS,0 }, + {"flesh", 0, SURF_FLESH, 0 }, + {"nosteps", 0, SURF_NOSTEPS, 0 }, + + // drawsurf attributes + {"nodraw", 0, SURF_NODRAW, 0 }, // don't generate a drawsurface (or a lightmap) + {"pointlight", 0, SURF_POINTLIGHT, 0 }, // sample lighting at vertexes + {"nolightmap", 0, SURF_NOLIGHTMAP,0 }, // don't generate a lightmap + {"nodlight", 0, SURF_NODLIGHT, 0 }, // don't ever add dynamic lights + {"dust", 0, SURF_DUST, 0} // leave a dust trail when walking on this surface +}; + + +/* +=============== +ParseSurfaceParm + 
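+applies a surfaceparm keyword from the infoParms[] table to the shader's
+surfaceFlags and contentFlags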
+surfaceparm +=============== +*/ +static void ParseSurfaceParm( char **text ) +{ + int numInfoParms = sizeof(infoParms) / sizeof(infoParms[0]); + int i; + + char* token = R_ParseExt( text, qfalse ); + for ( i = 0 ; i < numInfoParms ; i++ ) + { + if ( !Q_stricmp( token, infoParms[i].name ) ) + { + shader.surfaceFlags |= (int)infoParms[i].surfaceFlags; + shader.contentFlags |= (int)infoParms[i].contents; +#if 0 + if ( infoParms[i].clearSolid ) { + si->contents &= ~CONTENTS_SOLID; + } +#endif + break; + } + } +} + +/* +================= +ParseShader + +The current text pointer is at the explicit text definition of the shader. +Parse it into the global shader variable. Later functions will optimize it. +================= +*/ +qboolean ParseShader( char **text ) +{ + + int s = 0; + + char *token = R_ParseExt( text, qtrue ); + if ( token[0] != '{' ) + { + ri.Printf( PRINT_WARNING, "WARNING: expecting '{', found '%s' instead in shader '%s'\n", token, shader.name ); + return qfalse; + } + + while ( 1 ) + { + token = R_ParseExt( text, qtrue ); + if ( !token[0] ) + { + ri.Printf( PRINT_WARNING, "WARNING: no concluding '}' in shader %s\n", shader.name ); + return qfalse; + } + + // end of shader definition + if ( token[0] == '}' ) + { + break; + } + // stage definition + else if ( token[0] == '{' ) + { + if ( !ParseStage( &stages[s], text ) ) + { + return qfalse; + } + stages[s].active = qtrue; + s++; + continue; + } + // skip stuff that only the QuakeEdRadient needs + else if ( !Q_stricmpn( token, "qer", 3 ) ) { + SkipRestOfLine( text ); + continue; + } + // sun parms + else if ( !Q_stricmp( token, "q3map_sun" ) ) { + float a, b; + + token = R_ParseExt( text, qfalse ); + tr.sunLight[0] = atof( token ); + token = R_ParseExt( text, qfalse ); + tr.sunLight[1] = atof( token ); + token = R_ParseExt( text, qfalse ); + tr.sunLight[2] = atof( token ); + + VectorNormalize( tr.sunLight ); + + token = R_ParseExt( text, qfalse ); + a = atof( token ); + VectorScale( tr.sunLight, a, tr.sunLight); + + token = R_ParseExt( text, qfalse ); + a = atof( token ); + a *= (M_PI/180.0f); + + token = R_ParseExt( text, qfalse ); + b = atof( token ); + b *= (M_PI/180.0f); + + tr.sunDirection[0] = cos( a ) * cos( b ); + tr.sunDirection[1] = sin( a ) * cos( b ); + tr.sunDirection[2] = sin( b ); + } + else if ( !Q_stricmp( token, "deformVertexes" ) ) { + ParseDeform( text ); + continue; + } + else if ( !Q_stricmp( token, "tesssize" ) ) { + SkipRestOfLine( text ); + continue; + } + else if ( !Q_stricmp( token, "clampTime" ) ) + { + token = R_ParseExt( text, qfalse ); + if (token[0]) { + shader.clampTime = atof(token); + } + } + // skip stuff that only the q3map needs + else if ( !Q_stricmpn( token, "q3map", 5 ) ) { + SkipRestOfLine( text ); + continue; + } + // skip stuff that only q3map or the server needs + else if ( !Q_stricmp( token, "surfaceParm" ) ) { + ParseSurfaceParm( text ); + continue; + } + // no mip maps + else if ( !Q_stricmp( token, "nomipmaps" ) ) + { + shader.noMipMaps = qtrue; + shader.noPicMip = qtrue; + continue; + } + // no picmip adjustment + else if ( !Q_stricmp( token, "nopicmip" ) ) + { + shader.noPicMip = qtrue; + continue; + } + // polygonOffset + else if ( !Q_stricmp( token, "polygonOffset" ) ) + { + shader.polygonOffset = qtrue; + continue; + } + // entityMergable, allowing sprite surfaces from multiple entities + // to be merged into one batch. 
This is a savings for smoke + // puffs and blood, but can't be used for anything where the + // shader calcs (not the surface function) reference the entity color or scroll + else if ( !Q_stricmp( token, "entityMergable" ) ) + { + shader.entityMergable = qtrue; + continue; + } + // fogParms + else if ( !Q_stricmp( token, "fogParms" ) ) + { + if ( !ParseVector( text, 3, shader.fogParms.color ) ) { + return qfalse; + } + + token = R_ParseExt( text, qfalse ); + if ( !token[0] ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing parm for 'fogParms' keyword in shader '%s'\n", shader.name ); + continue; + } + shader.fogParms.depthForOpaque = atof( token ); + + // skip any old gradient directions + SkipRestOfLine( text ); + continue; + } + // portal + else if ( !Q_stricmp(token, "portal") ) + { + shader.sort = SS_PORTAL; + continue; + } + // skyparms + else if ( !Q_stricmp( token, "skyparms" ) ) + { + ParseSkyParms( text ); + continue; + } + // light determines flaring in q3map, not needed here + else if ( !Q_stricmp(token, "light") ) + { + token = R_ParseExt( text, qfalse ); + continue; + } + // cull + else if ( !Q_stricmp( token, "cull") ) + { + token = R_ParseExt( text, qfalse ); + if ( token[0] == 0 ) + { + ri.Printf( PRINT_WARNING, "WARNING: missing cull parms in shader '%s'\n", shader.name ); + continue; + } + + if ( !Q_stricmp( token, "none" ) || !Q_stricmp( token, "twosided" ) || !Q_stricmp( token, "disable" ) ) + { + shader.cullType = CT_TWO_SIDED; + } + else if ( !Q_stricmp( token, "back" ) || !Q_stricmp( token, "backside" ) || !Q_stricmp( token, "backsided" ) ) + { + shader.cullType = CT_BACK_SIDED; + } + else + { + ri.Printf( PRINT_WARNING, "WARNING: invalid cull parm '%s' in shader '%s'\n", token, shader.name ); + } + continue; + } + // sort + else if ( !Q_stricmp( token, "sort" ) ) + { + ParseSort( text ); + continue; + } + else + { + ri.Printf( PRINT_WARNING, "WARNING: unknown general shader parameter '%s' in '%s'\n", token, shader.name ); + return qfalse; + } + } + + // + // ignore shaders that don't have any stages, unless it is a sky or fog + // + if ( s == 0 && !shader.isSky && !(shader.contentFlags & CONTENTS_FOG ) ) { + return qfalse; + } + + shader.explicitlyDefined = qtrue; + + return qtrue; +} + +/* +======================================================================================== + +SHADER OPTIMIZATION AND FOGGING + +======================================================================================== +*/ + +typedef struct { + int blendA; + int blendB; + + int multitextureEnv; + int multitextureBlend; +} collapse_t; + +static collapse_t collapse[] = { + { 0, GLS_DSTBLEND_SRC_COLOR | GLS_SRCBLEND_ZERO, + GL_MODULATE, 0 }, + + { 0, GLS_DSTBLEND_ZERO | GLS_SRCBLEND_DST_COLOR, + GL_MODULATE, 0 }, + + { GLS_DSTBLEND_ZERO | GLS_SRCBLEND_DST_COLOR, GLS_DSTBLEND_ZERO | GLS_SRCBLEND_DST_COLOR, + GL_MODULATE, GLS_DSTBLEND_ZERO | GLS_SRCBLEND_DST_COLOR }, + + { GLS_DSTBLEND_SRC_COLOR | GLS_SRCBLEND_ZERO, GLS_DSTBLEND_ZERO | GLS_SRCBLEND_DST_COLOR, + GL_MODULATE, GLS_DSTBLEND_ZERO | GLS_SRCBLEND_DST_COLOR }, + + { GLS_DSTBLEND_ZERO | GLS_SRCBLEND_DST_COLOR, GLS_DSTBLEND_SRC_COLOR | GLS_SRCBLEND_ZERO, + GL_MODULATE, GLS_DSTBLEND_ZERO | GLS_SRCBLEND_DST_COLOR }, + + { GLS_DSTBLEND_SRC_COLOR | GLS_SRCBLEND_ZERO, GLS_DSTBLEND_SRC_COLOR | GLS_SRCBLEND_ZERO, + GL_MODULATE, GLS_DSTBLEND_ZERO | GLS_SRCBLEND_DST_COLOR }, + + { 0, GLS_DSTBLEND_ONE | GLS_SRCBLEND_ONE, + GL_ADD, 0 }, + + { GLS_DSTBLEND_ONE | GLS_SRCBLEND_ONE, GLS_DSTBLEND_ONE | GLS_SRCBLEND_ONE, + GL_ADD, 
GLS_DSTBLEND_ONE | GLS_SRCBLEND_ONE },
+
+	{ -1 }
+};
+
+/*
+================
+CollapseMultitexture
+
+Attempt to combine two stages into a single multitexture stage
+FIXME: I think modulated add + modulated add collapses incorrectly
+================
+*/
+static qboolean CollapseMultitexture( void ) {
+	int abits, bbits;
+	int i;
+	textureBundle_t tmpBundle;
+
+	// make sure both stages are active
+	if ( !stages[0].active || !stages[1].active ) {
+		return qfalse;
+	}
+
+	abits = stages[0].stateBits;
+	bbits = stages[1].stateBits;
+
+	// make sure that both stages have identical state other than blend modes
+	if ( ( abits & ~( GLS_DSTBLEND_BITS | GLS_SRCBLEND_BITS | GLS_DEPTHMASK_TRUE ) ) !=
+		( bbits & ~( GLS_DSTBLEND_BITS | GLS_SRCBLEND_BITS | GLS_DEPTHMASK_TRUE ) ) ) {
+		return qfalse;
+	}
+
+	abits &= ( GLS_DSTBLEND_BITS | GLS_SRCBLEND_BITS );
+	bbits &= ( GLS_DSTBLEND_BITS | GLS_SRCBLEND_BITS );
+
+	// search for a valid multitexture blend function
+	for ( i = 0; collapse[i].blendA != -1 ; i++ ) {
+		if ( abits == collapse[i].blendA
+			&& bbits == collapse[i].blendB ) {
+			break;
+		}
+	}
+
+	// nothing found
+	if ( collapse[i].blendA == -1 ) {
+		return qfalse;
+	}
+
+	// GL_ADD is a separate extension
+
+
+	// make sure waveforms have identical parameters
+	if ( ( stages[0].rgbGen != stages[1].rgbGen ) ||
+		( stages[0].alphaGen != stages[1].alphaGen ) ) {
+		return qfalse;
+	}
+
+	// an add collapse can only have identity colors
+	if ( collapse[i].multitextureEnv == GL_ADD && stages[0].rgbGen != CGEN_IDENTITY ) {
+		return qfalse;
+	}
+
+	if ( stages[0].rgbGen == CGEN_WAVEFORM )
+	{
+		if ( memcmp( &stages[0].rgbWave,
+					&stages[1].rgbWave,
+					sizeof( stages[0].rgbWave ) ) )
+		{
+			return qfalse;
+		}
+	}
+	if ( stages[0].alphaGen == AGEN_WAVEFORM )
+	{
+		if ( memcmp( &stages[0].alphaWave,
+					&stages[1].alphaWave,
+					sizeof( stages[0].alphaWave ) ) )
+		{
+			return qfalse;
+		}
+	}
+
+
+	// make sure that lightmaps are in bundle 1 for 3dfx
+	if ( stages[0].bundle[0].isLightmap )
+	{
+		tmpBundle = stages[0].bundle[0];
+		stages[0].bundle[0] = stages[1].bundle[0];
+		stages[0].bundle[1] = tmpBundle;
+	}
+	else
+	{
+		stages[0].bundle[1] = stages[1].bundle[0];
+	}
+
+	// set the new blend state bits
+	shader.multitextureEnv = collapse[i].multitextureEnv;
+	stages[0].stateBits &= ~( GLS_DSTBLEND_BITS | GLS_SRCBLEND_BITS );
+	stages[0].stateBits |= collapse[i].multitextureBlend;
+
+	//
+	// move down subsequent shaders
+	//
+	memmove( &stages[1], &stages[2], sizeof( stages[0] ) * ( MAX_SHADER_STAGES - 2 ) );
+	memset( &stages[MAX_SHADER_STAGES-1], 0, sizeof( stages[0] ) );
+
+	return qtrue;
+}
+
+
+
+
+/*
+================
+VertexLightingCollapse
+
+If vertex lighting is enabled, only render a single
+pass, trying to guess which is the correct one to best approximate
+what it is supposed to look like.
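+
+For opaque shaders the highest-ranked texture stage is kept; otherwise the
+first non-lightmap pass is kept.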
+================= +*/ +static void VertexLightingCollapse( void ) +{ + int stage; + shaderStage_t *bestStage; + int bestImageRank; + int rank; + + // if we aren't opaque, just use the first pass + if ( shader.sort == SS_OPAQUE ) { + + // pick the best texture for the single pass + bestStage = &stages[0]; + bestImageRank = -999999; + + for ( stage = 0; stage < MAX_SHADER_STAGES; stage++ ) { + shaderStage_t *pStage = &stages[stage]; + + if ( !pStage->active ) { + break; + } + rank = 0; + + if ( pStage->bundle[0].isLightmap ) { + rank -= 100; + } + if ( pStage->bundle[0].tcGen != TCGEN_TEXTURE ) { + rank -= 5; + } + if ( pStage->bundle[0].numTexMods ) { + rank -= 5; + } + if ( pStage->rgbGen != CGEN_IDENTITY && pStage->rgbGen != CGEN_IDENTITY_LIGHTING ) { + rank -= 3; + } + + if ( rank > bestImageRank ) { + bestImageRank = rank; + bestStage = pStage; + } + } + + stages[0].bundle[0] = bestStage->bundle[0]; + stages[0].stateBits &= ~( GLS_DSTBLEND_BITS | GLS_SRCBLEND_BITS ); + stages[0].stateBits |= GLS_DEPTHMASK_TRUE; + if ( shader.lightmapIndex == LIGHTMAP_NONE ) { + stages[0].rgbGen = CGEN_LIGHTING_DIFFUSE; + } else { + stages[0].rgbGen = CGEN_EXACT_VERTEX; + } + stages[0].alphaGen = AGEN_SKIP; + } else { + // don't use a lightmap (tesla coils) + if ( stages[0].bundle[0].isLightmap ) { + stages[0] = stages[1]; + } + + // if we were in a cross-fade cgen, hack it to normal + if ( stages[0].rgbGen == CGEN_ONE_MINUS_ENTITY || stages[1].rgbGen == CGEN_ONE_MINUS_ENTITY ) { + stages[0].rgbGen = CGEN_IDENTITY_LIGHTING; + } + if ( ( stages[0].rgbGen == CGEN_WAVEFORM && stages[0].rgbWave.func == GF_SAWTOOTH ) + && ( stages[1].rgbGen == CGEN_WAVEFORM && stages[1].rgbWave.func == GF_INVERSE_SAWTOOTH ) ) { + stages[0].rgbGen = CGEN_IDENTITY_LIGHTING; + } + if ( ( stages[0].rgbGen == CGEN_WAVEFORM && stages[0].rgbWave.func == GF_INVERSE_SAWTOOTH ) + && ( stages[1].rgbGen == CGEN_WAVEFORM && stages[1].rgbWave.func == GF_SAWTOOTH ) ) { + stages[0].rgbGen = CGEN_IDENTITY_LIGHTING; + } + } + + for ( stage = 1; stage < MAX_SHADER_STAGES; stage++ ) { + shaderStage_t *pStage = &stages[stage]; + + if ( !pStage->active ) { + break; + } + + memset( pStage, 0, sizeof( *pStage ) ); + } +} + +/* +========================= +FinishShader + +Returns a freshly allocated shader with all the needed info +from the current global working shader +========================= +*/ +shader_t* FinishShader( void ) +{ + qboolean hasLightmapStage = qfalse; + // + // set sky stuff appropriate + // + if ( shader.isSky ) { + shader.sort = SS_ENVIRONMENT; + } + + // + // set polygon offset + // + if ( shader.polygonOffset && !shader.sort ) { + shader.sort = SS_DECAL; + } + + // + // set appropriate stage information + // + int iStage; + for ( iStage = 0; iStage < MAX_SHADER_STAGES; iStage++ ) + { + shaderStage_t *pStage = &stages[iStage]; + + if ( !pStage->active ) { + break; + } + + // check for a missing texture + if ( !pStage->bundle[0].image[0] ) + { + ri.Printf( PRINT_WARNING, "Shader %s has a stage with no image\n", shader.name ); + pStage->active = qfalse; + continue; + } + + + // + // default texture coordinate generation + // + if ( pStage->bundle[0].isLightmap ) + { + if ( pStage->bundle[0].tcGen == TCGEN_BAD ) { + pStage->bundle[0].tcGen = TCGEN_LIGHTMAP; + } + hasLightmapStage = qtrue; + } + else + { + if ( pStage->bundle[0].tcGen == TCGEN_BAD ) { + pStage->bundle[0].tcGen = TCGEN_TEXTURE; + } + } + + // + // determine sort order and fog color adjustment + // + if ( ( pStage->stateBits & ( GLS_SRCBLEND_BITS | GLS_DSTBLEND_BITS 
) ) &&
+			( stages[0].stateBits & ( GLS_SRCBLEND_BITS | GLS_DSTBLEND_BITS ) ) )
+		{
+			int blendSrcBits = pStage->stateBits & GLS_SRCBLEND_BITS;
+			int blendDstBits = pStage->stateBits & GLS_DSTBLEND_BITS;
+
+			// fog color adjustment only works for blend modes that have a contribution
+			// that approaches 0 as the modulate values approach 0 --
+			// GL_ONE, GL_ONE
+			// GL_ZERO, GL_ONE_MINUS_SRC_COLOR
+			// GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA
+
+			// modulate, additive
+			if ( ( ( blendSrcBits == GLS_SRCBLEND_ONE ) && ( blendDstBits == GLS_DSTBLEND_ONE ) ) ||
+				( ( blendSrcBits == GLS_SRCBLEND_ZERO ) && ( blendDstBits == GLS_DSTBLEND_ONE_MINUS_SRC_COLOR ) ) ) {
+				pStage->adjustColorsForFog = ACFF_MODULATE_RGB;
+			}
+			// strict blend
+			else if ( ( blendSrcBits == GLS_SRCBLEND_SRC_ALPHA ) && ( blendDstBits == GLS_DSTBLEND_ONE_MINUS_SRC_ALPHA ) )
+			{
+				pStage->adjustColorsForFog = ACFF_MODULATE_ALPHA;
+			}
+			// premultiplied alpha
+			else if ( ( blendSrcBits == GLS_SRCBLEND_ONE ) && ( blendDstBits == GLS_DSTBLEND_ONE_MINUS_SRC_ALPHA ) )
+			{
+				pStage->adjustColorsForFog = ACFF_MODULATE_RGBA;
+			} else {
+				// we can't adjust this one correctly, so it won't be exactly correct in fog
+			}
+
+			// don't screw with sort order if this is a portal or environment
+			if ( !shader.sort )
+			{
+				// see through item, like a grill or grate
+				if ( pStage->stateBits & GLS_DEPTHMASK_TRUE ) {
+					shader.sort = SS_SEE_THROUGH;
+				} else {
+					shader.sort = SS_BLEND0;
+				}
+			}
+		}
+	}
+
+	// there are times when you will need to manually apply a sort to
+	// opaque alpha tested shaders that have later blend passes
+	if ( !shader.sort ) {
+		shader.sort = SS_OPAQUE;
+	}
+
+	//
+	// if we are in r_vertexLight mode, never use a lightmap texture
+	//
+	if ( iStage > 1 && ( r_vertexLight->integer && !r_uiFullScreen->integer ) ) {
+		VertexLightingCollapse();
+		iStage = 1;
+		hasLightmapStage = qfalse;
+	}
+
+	//
+	// look for multitexture potential
+	//
+	if ( iStage > 1 && CollapseMultitexture() ) {
+		iStage--;
+	}
+
+	if ( shader.lightmapIndex >= 0 && !hasLightmapStage ) {
+		ri.Printf( PRINT_DEVELOPER, "WARNING: shader '%s' has lightmap but no lightmap stage!\n", shader.name );
+		shader.lightmapIndex = LIGHTMAP_NONE;
+	}
+
+
+	//
+	// compute number of passes
+	//
+	shader.numUnfoggedPasses = iStage;
+
+	// fogonly shaders don't have any normal passes
+	if ( iStage == 0 ) {
+		shader.sort = SS_FOG;
+	}
+
+
+	// VULKAN: create pipelines for each shader stage
+	int i = 0;
+	for (i=0; i < iStage; i++)
+	{
+		create_pipelines_for_each_stage(&stages[i], &shader);
+	}
+
+	return GeneratePermanentShader();
+}
+
+//========================================================================================
+
+
+void R_SetTheShader( const char *name, int lightmapIndex )
+{
+
+	// clear the global shader
+	memset( &shader, 0, sizeof( shader ) );
+
+	strncpy( shader.name, name, sizeof( shader.name ) );
+
+	shader.lightmapIndex = lightmapIndex;
+	// FIXME: set these "need" values appropriately
+	shader.needsNormal = qtrue;
+	shader.needsST1 = qtrue;
+	shader.needsST2 = qtrue;
+	shader.needsColor = qtrue;
+
+
+	// stages
+	memset( &stages, 0, sizeof( stages ) );
+	int i;
+	for ( i = 0 ; i < MAX_SHADER_STAGES ; i++ )
+	{
+		stages[i].bundle[0].texMods = texMods[i];
+	}
+
+}
+
+
+void R_SetDefaultShader( void )
+{
+	shader.defaultShader = qtrue;
+}
+
+
+/*
+==============
+SortNewShader
+
+Positions the most recently created shader in the tr.sortedShaders[]
+array so that the shader->sort key is sorted relative to the other
+shaders.
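+Runs one insertion-sort pass from the end of the array, shifting
+higher-sorted shaders up a slot and bumping their sortedIndex.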
+ +Sets shader->sortedIndex +============== +*/ +static void SortNewShader( void ) +{ + int i; + shader_t* newShader = tr.shaders[ tr.numShaders - 1 ]; + float sort = newShader->sort; + + for ( i = tr.numShaders - 2 ; i >= 0 ; i-- ) + { + if ( tr.sortedShaders[ i ]->sort <= sort ) { + break; + } + tr.sortedShaders[i+1] = tr.sortedShaders[i]; + tr.sortedShaders[i+1]->sortedIndex++; + } + + // Arnout: fix rendercommandlist + // https://zerowing.idsoftware.com/bugzilla/show_bug.cgi?id=493 + FixRenderCommandList( i+1 ); + + newShader->sortedIndex = i+1; + tr.sortedShaders[i+1] = newShader; +} + + + + +shader_t* GeneratePermanentShader( void ) +{ + int i; + + if ( tr.numShaders == MAX_SHADERS ) { + ri.Printf( PRINT_WARNING, "WARNING: GeneratePermanentShader - MAX_SHADERS hit\n"); + return tr.defaultShader; + } + + shader_t* newShader = (shader_t*) ri.Hunk_Alloc( sizeof( shader_t ), h_low ); + + *newShader = shader; + + if ( newShader->sort <= SS_OPAQUE ) + newShader->fogPass = FP_EQUAL; + else if ( newShader->contentFlags & CONTENTS_FOG ) + newShader->fogPass = FP_LE; + + tr.shaders[ tr.numShaders ] = newShader; + newShader->index = tr.numShaders; + + tr.sortedShaders[ tr.numShaders ] = newShader; + newShader->sortedIndex = tr.numShaders; + + tr.numShaders++; + + for ( i = 0 ; i < newShader->numUnfoggedPasses ; i++ ) + { + if ( !stages[i].active ) { + break; + } + newShader->stages[i] = (shaderStage_t*) ri.Hunk_Alloc( sizeof( stages[i] ), h_low ); + *newShader->stages[i] = stages[i]; + + int b; + for ( b = 0 ; b < NUM_TEXTURE_BUNDLES ; b++ ) + { + int size = newShader->stages[i]->bundle[b].numTexMods * sizeof( texModInfo_t ); + newShader->stages[i]->bundle[b].texMods = (texModInfo_t*) ri.Hunk_Alloc( size, h_low ); + memcpy( newShader->stages[i]->bundle[b].texMods, stages[i].bundle[b].texMods, size ); + } + } + + SortNewShader(); + + R_UpdateShaderHashTable(newShader); + + return newShader; +} + +void setDefaultShader(void) +{ + shader.defaultShader = qtrue; +} + +void R_CreateDefaultShadingCmds(const char* name, image_t* image) +{ + // ri.Printf( PRINT_ALL, "R_CreateDefaultShade: shader %s, image: %s\n", name, image->imgName ); + + if ( shader.lightmapIndex == LIGHTMAP_NONE ) + { + // dynamic colors at vertexes + stages[0].bundle[0].image[0] = image; + stages[0].active = qtrue; + stages[0].rgbGen = CGEN_LIGHTING_DIFFUSE; + stages[0].stateBits = GLS_DEFAULT; + } + else if ( shader.lightmapIndex == LIGHTMAP_BY_VERTEX ) + { + // explicit colors at vertexes + stages[0].bundle[0].image[0] = image; + stages[0].active = qtrue; + stages[0].rgbGen = CGEN_EXACT_VERTEX; + stages[0].alphaGen = AGEN_SKIP; + stages[0].stateBits = GLS_DEFAULT; + } + else if ( shader.lightmapIndex == LIGHTMAP_2D ) + { + // GUI elements + stages[0].bundle[0].image[0] = image; + stages[0].active = qtrue; + stages[0].rgbGen = CGEN_VERTEX; + stages[0].alphaGen = AGEN_VERTEX; + stages[0].stateBits = GLS_DEPTHTEST_DISABLE | + GLS_SRCBLEND_SRC_ALPHA | + GLS_DSTBLEND_ONE_MINUS_SRC_ALPHA; + } + else if ( shader.lightmapIndex == LIGHTMAP_WHITEIMAGE ) + { + // fullbright level + stages[0].bundle[0].image[0] = tr.whiteImage; + stages[0].active = qtrue; + stages[0].rgbGen = CGEN_IDENTITY_LIGHTING; + stages[0].stateBits = GLS_DEFAULT; + + stages[1].bundle[0].image[0] = image; + stages[1].active = qtrue; + stages[1].rgbGen = CGEN_IDENTITY; + stages[1].stateBits |= GLS_SRCBLEND_DST_COLOR | GLS_DSTBLEND_ZERO; + } + else + { + // two pass lightmap + stages[0].bundle[0].image[0] = tr.lightmaps[shader.lightmapIndex]; + 
stages[0].bundle[0].isLightmap = qtrue;
+		stages[0].active = qtrue;
+		stages[0].rgbGen = CGEN_IDENTITY;	// lightmaps are scaled on creation
+											// for identitylight
+		stages[0].stateBits = GLS_DEFAULT;
+
+		// second pass modulates the base texture over the lightmap
+		stages[1].bundle[0].image[0] = image;
+		stages[1].active = qtrue;
+		stages[1].rgbGen = CGEN_IDENTITY;
+		stages[1].stateBits |= GLS_SRCBLEND_DST_COLOR | GLS_DSTBLEND_ZERO;
+	}
+
+}
+
+
+
+
+
+
+/*
+====================
+R_GetShaderByHandle
+
+When a handle is passed in by another module, this range checks
+it and returns a valid (possibly default) shader_t to be used internally.
+====================
+*/
+shader_t *R_GetShaderByHandle( qhandle_t hShader )
+{
+	if ( hShader < 0 ) {
+		ri.Printf( PRINT_WARNING, "R_GetShaderByHandle: out of range hShader '%d'\n", hShader ); // bk: FIXME name
+		return tr.defaultShader;
+	}
+	if ( hShader >= tr.numShaders ) {
+		ri.Printf( PRINT_WARNING, "R_GetShaderByHandle: out of range hShader '%d'\n", hShader );
+		return tr.defaultShader;
+	}
+	return tr.shaders[hShader];
+}
+
+
+
+/*
+====================
+CreateInternalShaders
+====================
+*/
+static void CreateInternalShaders( void )
+{
+	ri.Printf( PRINT_ALL, "CreateInternalShaders\n" );
+
+	tr.numShaders = 0;
+
+	// init the default shader
+	memset( &shader, 0, sizeof( shader ) );
+	memset( &stages, 0, sizeof( stages ) );
+
+	strncpy( shader.name, "<default>", sizeof( shader.name ) );
+	shader.lightmapIndex = LIGHTMAP_NONE;
+
+	stages[0].bundle[0].image[0] = tr.defaultImage;
+	stages[0].active = qtrue;
+	stages[0].stateBits = GLS_DEFAULT;
+
+	tr.defaultShader = FinishShader();
+
+	// shadow shader is just a marker
+	strncpy( shader.name, "<stencil shadow>", sizeof( shader.name ) );
+	shader.sort = SS_STENCIL_SHADOW;
+
+	tr.shadowShader = FinishShader();
+
+	// cinematic shader
+	memset( &shader, 0, sizeof( shader ) );
+	memset( &stages, 0, sizeof( stages ) );
+
+	strncpy( shader.name, "<cinematic>", sizeof( shader.name ) );
+	shader.lightmapIndex = LIGHTMAP_NONE;
+
+	stages[0].bundle[0].image[0] = tr.defaultImage;	// will be updated by specific cinematic images
+	stages[0].active = qtrue;
+	stages[0].rgbGen = CGEN_IDENTITY_LIGHTING;
+	stages[0].stateBits = GLS_DEPTHTEST_DISABLE;
+
+	tr.cinematicShader = FinishShader();
+}
+
+static void CreateExternalShaders( void )
+{
+	ri.Printf( PRINT_ALL, "CreateExternalShaders\n" );
+
+	tr.projectionShadowShader = R_FindShader( "projectionShadow", LIGHTMAP_NONE, qtrue );
+}
+
+
+void R_InitShaders( void )
+{
+
+	ri.Printf( PRINT_ALL, "Initializing Shaders\n" );
+
+	R_ClearShaderHashTable();
+
+	CreateInternalShaders();
+
+	ScanAndLoadShaderFiles();
+
+	CreateExternalShaders();
+}
diff --git a/code/renderer_vulkan/tr_shader.h b/code/renderer_vulkan/tr_shader.h
new file mode 100644
index 0000000000..5d48238756
--- /dev/null
+++ b/code/renderer_vulkan/tr_shader.h
@@ -0,0 +1,49 @@
+#ifndef TR_SHADER_H_
+#define TR_SHADER_H_
+
+
+#ifndef GL_MODULATE
+#define GL_MODULATE 0x2100
+#endif
+
+#ifndef GL_ADD
+#define GL_ADD 0x0104
+#endif
+
+
+
+// any change in the LIGHTMAP_* defines here MUST be reflected in
+// R_FindShader() in tr_bsp.c
+#define LIGHTMAP_2D			-4	// shader is for 2D rendering
+#define LIGHTMAP_BY_VERTEX	-3	// pre-lit triangle models
+#define LIGHTMAP_WHITEIMAGE	-2
+#define LIGHTMAP_NONE		-1
+
+// shaders are drawn in ascending sort order; see SortNewShader()
+typedef enum {
+	SS_BAD,
+	SS_PORTAL,			// mirrors, portals, viewscreens
+	SS_ENVIRONMENT,		// sky box
+	SS_OPAQUE,			// opaque
+
+	SS_DECAL,			// scorch marks, etc.
+	SS_SEE_THROUGH,		// ladders, grates, grills that may have small blended edges
+						// in addition to alpha test
+	SS_BANNER,
+
+	SS_FOG,
+
+	SS_UNDERWATER,		// for items that should be drawn in front of the water plane
+
+	SS_BLEND0,			// regular transparency and filters
+	SS_BLEND1,			// generally only used for additive type effects
+	SS_BLEND2,
+	SS_BLEND3,
+
+	SS_BLEND6,
+	SS_STENCIL_SHADOW,
+	SS_ALMOST_NEAREST,	// gun smoke puffs
+
+	SS_NEAREST			// blood blobs
+} shaderSort_t;
+
+#endif
diff --git a/code/renderer_vulkan/tr_shadows.c b/code/renderer_vulkan/tr_shadows.c
new file mode 100644
index 0000000000..9bbf520db2
--- /dev/null
+++ b/code/renderer_vulkan/tr_shadows.c
@@ -0,0 +1,335 @@
+/*
+===========================================================================
+Copyright (C) 1999-2005 Id Software, Inc.
+
+This file is part of Quake III Arena source code.
+
+Quake III Arena source code is free software; you can redistribute it
+and/or modify it under the terms of the GNU General Public License as
+published by the Free Software Foundation; either version 2 of the License,
+or (at your option) any later version.
+
+Quake III Arena source code is distributed in the hope that it will be
+useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Quake III Arena source code; if not, write to the Free Software
+Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+===========================================================================
+*/
+#include "tr_globals.h"
+#include "vk_shade_geometry.h"
+#include "vk_instance.h"
+#include "vk_pipelines.h"
+#include "vk_image.h"
+#include "tr_cvar.h"
+#include "tr_backend.h"
+/*
+
+  for a projection shadow:
+
+  point[x] += light vector * ( z - shadow plane )
+  point[y] +=
+  point[z] = shadow plane
+
+  1 0 light[x] / light[z]
+
+*/
+
+typedef struct {
+	int		i2;
+	int		facing;
+} edgeDef_t;
+
+#define MAX_EDGE_DEFS	32
+
+static edgeDef_t edgeDefs[SHADER_MAX_VERTEXES][MAX_EDGE_DEFS];
+static int numEdgeDefs[SHADER_MAX_VERTEXES];
+static int facing[SHADER_MAX_INDEXES/3];
+static vec4_t extrudedEdges[SHADER_MAX_VERTEXES * 4];
+static int numExtrudedEdges;
+
+static void R_AddEdgeDef( int i1, int i2, int facingDir ) {
+	int c = numEdgeDefs[ i1 ];
+	if ( c == MAX_EDGE_DEFS ) {
+		return;		// overflow
+	}
+	edgeDefs[ i1 ][ c ].i2 = i2;
+	edgeDefs[ i1 ][ c ].facing = facingDir;
+
+	numEdgeDefs[ i1 ]++;
+}
+
+static void R_ExtrudeShadowEdges( void ) {
+	int		i;
+	int		c, c2;
+	int		j, k;
+	int		i2;
+
+	numExtrudedEdges = 0;
+
+	// an edge is NOT a silhouette edge if its face doesn't face the light,
+	// or if it has a reverse paired edge that also faces the light.
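+	// (an edge shared by two light-facing triangles is interior, not on the
+	// silhouette, so it must not be extruded)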
+ // A well behaved polyhedron would have exactly two faces for each edge, + // but lots of models have dangling edges or overfanned edges + for ( i = 0 ; i < tess.numVertexes ; i++ ) { + c = numEdgeDefs[ i ]; + for ( j = 0 ; j < c ; j++ ) { + if ( !edgeDefs[ i ][ j ].facing ) { + continue; + } + + qboolean sil_edge = qtrue; + i2 = edgeDefs[ i ][ j ].i2; + c2 = numEdgeDefs[ i2 ]; + for ( k = 0 ; k < c2 ; k++ ) { + if ( edgeDefs[ i2 ][ k ].i2 == i && edgeDefs[ i2 ][ k ].facing) { + sil_edge = qfalse; + break; + } + } + + // if it doesn't share the edge with another front facing + // triangle, it is a sil edge + if ( sil_edge ) { + VectorCopy(tess.xyz[ i ], extrudedEdges[numExtrudedEdges * 4 + 0]); + VectorCopy(tess.xyz[ i + tess.numVertexes ], extrudedEdges[numExtrudedEdges * 4 + 1]); + VectorCopy(tess.xyz[ i2 ], extrudedEdges[numExtrudedEdges * 4 + 2]); + VectorCopy(tess.xyz[ i2 + tess.numVertexes ], extrudedEdges[numExtrudedEdges * 4 + 3]); + numExtrudedEdges++; + } + } + } +} + + + +// VULKAN +static void vk_renderShadowEdges(VkPipeline vk_pipeline) +{ + + int i = 0; + while (i < numExtrudedEdges) { + int count = numExtrudedEdges - i; + if (count > (SHADER_MAX_VERTEXES - 1) / 4) + count = (SHADER_MAX_VERTEXES - 1) / 4; + + memcpy(tess.xyz, extrudedEdges[i*4], 4 * count * sizeof(vec4_t)); + tess.numVertexes = count * 4; + int k = 0; + + for (k = 0; k < count; k++) + { + tess.indexes[k * 6 + 0] = k * 4 + 0; + tess.indexes[k * 6 + 1] = k * 4 + 2; + tess.indexes[k * 6 + 2] = k * 4 + 1; + + tess.indexes[k * 6 + 3] = k * 4 + 2; + tess.indexes[k * 6 + 4] = k * 4 + 3; + tess.indexes[k * 6 + 5] = k * 4 + 1; + } + tess.numIndexes = count * 6; + + for (k = 0; k < tess.numVertexes; k++) + { + VectorSet(tess.svars.colors[k], 50, 50, 50); + tess.svars.colors[k][3] = 255; + } + + vk_UploadXYZI(tess.xyz, tess.numVertexes, tess.indexes, tess.numIndexes); + updateMVP(backEnd.viewParms.isPortal, backEnd.projection2D, getptr_modelview_matrix()); + + vk_shade_geometry(vk_pipeline, VK_FALSE, DEPTH_RANGE_NORMAL, VK_TRUE); + + + i += count; + } +} + +/* +================= +RB_ShadowTessEnd + +triangleFromEdge[ v1 ][ v2 ] + + + set triangle from edge( v1, v2, tri ) + if ( facing[ triangleFromEdge[ v1 ][ v2 ] ] && !facing[ triangleFromEdge[ v2 ][ v1 ] ) { + } +================= +*/ +void RB_ShadowTessEnd( void ) { + int i; + int numTris; + vec3_t lightDir; + + // we can only do this if we have enough space in the vertex buffers + if ( tess.numVertexes >= SHADER_MAX_VERTEXES / 2 ) { + return; + } + + + VectorCopy( backEnd.currentEntity->lightDir, lightDir ); + + // project vertexes away from light direction + for ( i = 0 ; i < tess.numVertexes ; i++ ) { + VectorMA( tess.xyz[i], -512, lightDir, tess.xyz[i+tess.numVertexes] ); + } + + // decide which triangles face the light + memset( numEdgeDefs, 0, 4 * tess.numVertexes ); + + numTris = tess.numIndexes / 3; + for ( i = 0 ; i < numTris ; i++ ) + { + int i1, i2, i3; + vec3_t d1, d2, normal; + float *v1, *v2, *v3; + float d; + + i1 = tess.indexes[ i*3 + 0 ]; + i2 = tess.indexes[ i*3 + 1 ]; + i3 = tess.indexes[ i*3 + 2 ]; + + v1 = tess.xyz[ i1 ]; + v2 = tess.xyz[ i2 ]; + v3 = tess.xyz[ i3 ]; + + VectorSubtract( v2, v1, d1 ); + VectorSubtract( v3, v1, d2 ); + CrossProduct( d1, d2, normal ); + + d = DotProduct( normal, lightDir ); + if ( d > 0 ) { + facing[ i ] = 1; + } else { + facing[ i ] = 0; + } + + // create the edges + R_AddEdgeDef( i1, i2, facing[ i ] ); + R_AddEdgeDef( i2, i3, facing[ i ] ); + R_AddEdgeDef( i3, i1, facing[ i ] ); + } + + // draw the silhouette 
edges
+
+	updateCurDescriptor( tr.whiteImage->descriptor_set, 0);
+
+	R_ExtrudeShadowEdges();
+
+	// mirrors have the culling order reversed
+
+	// VULKAN
+	vk_renderShadowEdges(g_stdPipelines.shadow_volume_pipelines[0][backEnd.viewParms.isMirror]);
+	vk_renderShadowEdges(g_stdPipelines.shadow_volume_pipelines[1][backEnd.viewParms.isMirror]);
+
+}
+
+
+/*
+=================
+RB_ShadowFinish
+
+Darken everything that is in a shadow volume.
+We have to delay this until everything has been shadowed,
+because otherwise shadows from different body parts would
+overlap and double darken.
+=================
+*/
+void RB_ShadowFinish( void )
+{
+	if ( r_shadows->integer != 2 ) {
+		return;
+	}
+
+	updateCurDescriptor( tr.whiteImage->descriptor_set, 0);
+
+	// VULKAN
+
+	tess.indexes[0] = 0;
+	tess.indexes[1] = 1;
+	tess.indexes[2] = 2;
+	tess.indexes[3] = 0;
+	tess.indexes[4] = 2;
+	tess.indexes[5] = 3;
+	tess.numIndexes = 6;
+
+	VectorSet(tess.xyz[0], -100, 100, -10);
+	VectorSet(tess.xyz[1], 100, 100, -10);
+	VectorSet(tess.xyz[2], 100, -100, -10);
+	VectorSet(tess.xyz[3], -100, -100, -10);
+	int i = 0;
+
+	for (i = 0; i < 4; i++)
+	{
+		VectorSet(tess.svars.colors[i], 153, 153, 153);
+		tess.svars.colors[i][3] = 255;
+	}
+	tess.numVertexes = 4;
+
+	//PushModelView();
+
+	// Com_Memcpy(tmp, vk_world.modelview_transform, 64);
+
+	float tmp[16] = { 1, 0, 0, 0,
+			0, 1, 0, 0,
+			0, 0, 1, 0,
+			0, 0, 0, 1};
+
+	vk_UploadXYZI(tess.xyz, tess.numVertexes, tess.indexes, tess.numIndexes);
+	updateMVP(backEnd.viewParms.isPortal, backEnd.projection2D, tmp);
+	vk_shade_geometry(g_stdPipelines.shadow_finish_pipeline, VK_FALSE, DEPTH_RANGE_NORMAL, VK_TRUE);
+
+	tess.numIndexes = 0;
+	tess.numVertexes = 0;
+}
+
+
+/*
+=================
+RB_ProjectionShadowDeform
+
+=================
+*/
+void RB_ProjectionShadowDeform( void )
+{
+	vec3_t	ground;
+	vec3_t	light;
+	vec3_t	lightDir;
+
+	float* xyz = ( float * ) tess.xyz;
+
+	ground[0] = backEnd.or.axis[0][2];
+	ground[1] = backEnd.or.axis[1][2];
+	ground[2] = backEnd.or.axis[2][2];
+
+	float groundDist = backEnd.or.origin[2] - backEnd.currentEntity->e.shadowPlane;
+
+	VectorCopy( backEnd.currentEntity->lightDir, lightDir );
+
+	float d = DotProduct( lightDir, ground );
+	// don't let the shadows get too long or go negative
+	if ( d < 0.5 )
+	{
+		VectorMA( lightDir, (0.5 - d), ground, lightDir );
+		d = DotProduct( lightDir, ground );
+	}
+	d = 1.0 / d;
+
+	light[0] = lightDir[0] * d;
+	light[1] = lightDir[1] * d;
+	light[2] = lightDir[2] * d;
+
+	int i;
+	for ( i = 0; i < tess.numVertexes; i++, xyz += 4 )
+	{
+		float h = DotProduct( xyz, ground ) + groundDist;
+
+		xyz[0] -= light[0] * h;
+		xyz[1] -= light[1] * h;
+		xyz[2] -= light[2] * h;
+	}
+}
diff --git a/code/renderer_vulkan/tr_sky.c b/code/renderer_vulkan/tr_sky.c
new file mode 100644
index 0000000000..c9054fe22c
--- /dev/null
+++ b/code/renderer_vulkan/tr_sky.c
@@ -0,0 +1,723 @@
+/*
+===========================================================================
+Copyright (C) 1999-2005 Id Software, Inc.
+
+This file is part of Quake III Arena source code.
+
+Quake III Arena source code is free software; you can redistribute it
+and/or modify it under the terms of the GNU General Public License as
+published by the Free Software Foundation; either version 2 of the License,
+or (at your option) any later version.
+
+Quake III Arena source code is distributed in the hope that it will be
+useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Quake III Arena source code; if not, write to the Free Software
+Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+===========================================================================
+*/
+// tr_sky.c
+#include "tr_globals.h"
+#include "vk_shade_geometry.h"
+
+#include "vk_instance.h"
+#include "vk_pipelines.h"
+#include "vk_image.h"
+#include "tr_cvar.h"
+#include "matrix_multiplication.h"
+#include "ref_import.h"
+#include "tr_backend.h"
+#define SKY_SUBDIVISIONS		8
+#define HALF_SKY_SUBDIVISIONS	(SKY_SUBDIVISIONS/2)
+
+
+
+
+static float s_cloudTexCoords[6][SKY_SUBDIVISIONS+1][SKY_SUBDIVISIONS+1][2];
+
+/*
+===================================================================================
+
+POLYGON TO BOX SIDE PROJECTION
+
+===================================================================================
+*/
+
+static vec3_t sky_clip[6] =
+{
+	{1,1,0},
+	{1,-1,0},
+	{0,-1,1},
+	{0,1,1},
+	{1,0,1},
+	{-1,0,1}
+};
+
+static float	sky_mins[2][6], sky_maxs[2][6];
+static float	sky_min, sky_max;
+
+/*
+================
+AddSkyPolygon
+================
+*/
+static void AddSkyPolygon (int nump, vec3_t vecs)
+{
+	int	i,j;
+	vec3_t	v, av;
+	float	s, t, dv;
+	int	axis;
+	float	*vp;
+	// s = [0]/[2], t = [1]/[2]
+	static int	vec_to_st[6][3] =
+	{
+		{-2,3,1},
+		{2,3,-1},
+
+		{1,3,2},
+		{-1,3,-2},
+
+		{-2,-1,3},
+		{-2,1,-3}
+
+	//	{-1,2,3},
+	//	{1,2,-3}
+	};
+
+	// decide which face it maps to
+	VectorCopy (vec3_origin, v);
+	for (i=0, vp=vecs ; i<nump ; i++, vp+=3)
+	{
+		VectorAdd (vp, v, v);
+	}
+	av[0] = fabs(v[0]);
+	av[1] = fabs(v[1]);
+	av[2] = fabs(v[2]);
+	if (av[0] > av[1] && av[0] > av[2])
+	{
+		if (v[0] < 0)
+			axis = 1;
+		else
+			axis = 0;
+	}
+	else if (av[1] > av[2] && av[1] > av[0])
+	{
+		if (v[1] < 0)
+			axis = 3;
+		else
+			axis = 2;
+	}
+	else
+	{
+		if (v[2] < 0)
+			axis = 5;
+		else
+			axis = 4;
+	}
+
+	// project new texture coords
+	for (i=0 ; i<nump ; i++, vecs+=3)
+	{
+		j = vec_to_st[axis][2];
+		if (j > 0)
+			dv = vecs[j - 1];
+		else
+			dv = -vecs[-j - 1];
+		if (dv < 0.001)
+			continue;	// don't divide by zero
+		j = vec_to_st[axis][0];
+		if (j < 0)
+			s = -vecs[-j -1] / dv;
+		else
+			s = vecs[j-1] / dv;
+		j = vec_to_st[axis][1];
+		if (j < 0)
+			t = -vecs[-j -1] / dv;
+		else
+			t = vecs[j-1] / dv;
+
+		if (s < sky_mins[0][axis])
+			sky_mins[0][axis] = s;
+		if (t < sky_mins[1][axis])
+			sky_mins[1][axis] = t;
+		if (s > sky_maxs[0][axis])
+			sky_maxs[0][axis] = s;
+		if (t > sky_maxs[1][axis])
+			sky_maxs[1][axis] = t;
+	}
+}
+
+#define	ON_EPSILON		0.1f	// point on plane side epsilon
+#define	MAX_CLIP_VERTS	64
+/*
+================
+ClipSkyPolygon
+================
+*/
+static void ClipSkyPolygon (int nump, vec3_t vecs, int stage)
+{
+	float	*norm;
+	float	*v;
+	qboolean	front, back;
+	float	d, e;
+	float	dists[MAX_CLIP_VERTS];
+	int		sides[MAX_CLIP_VERTS];
+	vec3_t	newv[2][MAX_CLIP_VERTS];
+	int		newc[2];
+	int		i, j;
+
+	if (nump > MAX_CLIP_VERTS-2)
+		ri.Error (ERR_DROP, "ClipSkyPolygon: MAX_CLIP_VERTS");
+	if (stage == 6)
+	{	// fully clipped, so draw it
+		AddSkyPolygon (nump, vecs);
+		return;
+	}
+
+	front = back = qfalse;
+	norm = sky_clip[stage];
+	for (i=0, v = vecs ; i<nump ; i++, v+=3)
+	{
+		d = DotProduct (v, norm);
+		if (d > ON_EPSILON)
+		{
+			front = qtrue;
+			sides[i] = SIDE_FRONT;
+		}
+		else if (d < -ON_EPSILON)
+		{
+			back = qtrue;
+			sides[i] = SIDE_BACK;
+		}
+		else
+			sides[i] = SIDE_ON;
+		dists[i] = d;
+	}
+
+	if (!front || !back)
+	{	// not clipped
+		ClipSkyPolygon (nump, vecs, stage+1);
+		return;
+	}
+
+	// clip it
+	sides[i] = sides[0];
+	dists[i] = dists[0];
+	VectorCopy (vecs, (vecs+(i*3)) );
+	newc[0] = newc[1] = 0;
+
+	for (i=0, v = vecs ; i<nump ; i++, v+=3)
+	{
+		switch (sides[i])
+		{
+		case SIDE_FRONT:
+			VectorCopy (v, newv[0][newc[0]]);
+			newc[0]++;
+			break;
+		case SIDE_BACK:
+			VectorCopy (v, newv[1][newc[1]]);
+			newc[1]++;
+			break;
+		case SIDE_ON:
+			VectorCopy (v, newv[0][newc[0]]);
+			newc[0]++;
+			VectorCopy (v, newv[1][newc[1]]);
+			newc[1]++;
+			break;
+		}
+
+		if (sides[i] == SIDE_ON || sides[i+1] == SIDE_ON || sides[i+1] == sides[i])
+			continue;
+
+		d = dists[i] / (dists[i] - dists[i+1]);
+		for (j=0 ; j<3 ; j++)
+		{
+			e = v[j] + d*(v[j+3] - v[j]);
+			newv[0][newc[0]][j] = e;
+			newv[1][newc[1]][j] = e;
+		}
+		newc[0]++;
+		newc[1]++;
+	}
+
+	// continue
+	ClipSkyPolygon (newc[0], newv[0][0], stage+1);
+	ClipSkyPolygon (newc[1], newv[1][0], stage+1);
+}
+
+/*
+==============
+ClearSkyBox
+==============
+*/
+static void ClearSkyBox (void)
+{
+	int	i;
+
+	for (i=0 ; i<6 ; i++) {
+		sky_mins[0][i] = sky_mins[1][i] = 9999;
+		sky_maxs[0][i] = sky_maxs[1][i] = -9999;
+	}
+}
+
+/*
+==============
+RB_ClipSkyPolygons
+==============
+*/
+void RB_ClipSkyPolygons( shaderCommands_t *input )
+{
+	vec3_t	p[5];	// need one extra point for clipping
+	int	i, j;
+
+	ClearSkyBox();
+
+	for ( i = 0; i < input->numIndexes; i += 3 )
+	{
+		for (j = 0 ; j < 3 
; j++) + { + VectorSubtract( input->xyz[input->indexes[i+j]], + backEnd.viewParms.or.origin, + p[j] ); + } + ClipSkyPolygon( 3, p[0], 0 ); + } +} + +/* +=================================================================================== + +CLOUD VERTEX GENERATION + +=================================================================================== +*/ + +/* +** MakeSkyVec +** +** Parms: s, t range from -1 to 1 +*/ +static void MakeSkyVec( float s, float t, int axis, float outSt[2], vec3_t outXYZ ) +{ + // 1 = s, 2 = t, 3 = 2048 + static int st_to_vec[6][3] = + { + {3,-1,2}, + {-3,1,2}, + + {1,3,2}, + {-1,-3,2}, + + {-2,-1,3}, // 0 degrees yaw, look straight up + {2,-1,-3} // look straight down + }; + + vec3_t b; + int j, k; + float boxSize; + + boxSize = backEnd.viewParms.zFar / 1.75; // div sqrt(3) + b[0] = s*boxSize; + b[1] = t*boxSize; + b[2] = boxSize; + + for (j=0 ; j<3 ; j++) + { + k = st_to_vec[axis][j]; + if (k < 0) + { + outXYZ[j] = -b[-k - 1]; + } + else + { + outXYZ[j] = b[k - 1]; + } + } + + // avoid bilerp seam + s = (s+1)*0.5; + t = (t+1)*0.5; + if (s < sky_min) + { + s = sky_min; + } + else if (s > sky_max) + { + s = sky_max; + } + + if (t < sky_min) + { + t = sky_min; + } + else if (t > sky_max) + { + t = sky_max; + } + + t = 1.0 - t; + + + if ( outSt ) + { + outSt[0] = s; + outSt[1] = t; + } +} + +static int sky_texorder[6] = {0,2,1,3,4,5}; +static vec3_t s_skyPoints[SKY_SUBDIVISIONS+1][SKY_SUBDIVISIONS+1]; +static float s_skyTexCoords[SKY_SUBDIVISIONS+1][SKY_SUBDIVISIONS+1][2]; + + + +static void FillCloudySkySide( const int mins[2], const int maxs[2] ) +{ + int s, t; + int vertexStart = tess.numVertexes; + int tHeight, sWidth; + + tHeight = maxs[1] - mins[1] + 1; + sWidth = maxs[0] - mins[0] + 1; + + for ( t = mins[1]+HALF_SKY_SUBDIVISIONS; t <= maxs[1]+HALF_SKY_SUBDIVISIONS; t++ ) + { + for ( s = mins[0]+HALF_SKY_SUBDIVISIONS; s <= maxs[0]+HALF_SKY_SUBDIVISIONS; s++ ) + { + VectorAdd( s_skyPoints[t][s], backEnd.viewParms.or.origin, tess.xyz[tess.numVertexes] ); + tess.texCoords[tess.numVertexes][0][0] = s_skyTexCoords[t][s][0]; + tess.texCoords[tess.numVertexes][0][1] = s_skyTexCoords[t][s][1]; + + tess.numVertexes++; + + if ( tess.numVertexes >= SHADER_MAX_VERTEXES ) + { + ri.Error( ERR_DROP, "SHADER_MAX_VERTEXES hit in FillCloudySkySide()\n" ); + } + } + } + + for ( t = 0; t < tHeight-1; t++ ) + { + for ( s = 0; s < sWidth-1; s++ ) + { + tess.indexes[tess.numIndexes] = vertexStart + s + t * ( sWidth ); + tess.numIndexes++; + tess.indexes[tess.numIndexes] = vertexStart + s + ( t + 1 ) * ( sWidth ); + tess.numIndexes++; + tess.indexes[tess.numIndexes] = vertexStart + s + 1 + t * ( sWidth ); + tess.numIndexes++; + + tess.indexes[tess.numIndexes] = vertexStart + s + ( t + 1 ) * ( sWidth ); + tess.numIndexes++; + tess.indexes[tess.numIndexes] = vertexStart + s + 1 + ( t + 1 ) * ( sWidth ); + tess.numIndexes++; + tess.indexes[tess.numIndexes] = vertexStart + s + 1 + t * ( sWidth ); + tess.numIndexes++; + } + } +} + +static void FillCloudBox(void) +{ + int i=0; + for ( i = 0; i < 5; ++i ) + { + int sky_mins_subd[2], sky_maxs_subd[2]; + int s, t; + + float MIN_T = -HALF_SKY_SUBDIVISIONS; + + sky_mins[0][i] = floor( sky_mins[0][i] * HALF_SKY_SUBDIVISIONS ) / HALF_SKY_SUBDIVISIONS; + sky_mins[1][i] = floor( sky_mins[1][i] * HALF_SKY_SUBDIVISIONS ) / HALF_SKY_SUBDIVISIONS; + sky_maxs[0][i] = ceil( sky_maxs[0][i] * HALF_SKY_SUBDIVISIONS ) / HALF_SKY_SUBDIVISIONS; + sky_maxs[1][i] = ceil( sky_maxs[1][i] * HALF_SKY_SUBDIVISIONS ) / HALF_SKY_SUBDIVISIONS; + + if ( ( sky_mins[0][i] 
>= sky_maxs[0][i] ) || + ( sky_mins[1][i] >= sky_maxs[1][i] ) ) + { + continue; + } + + sky_mins_subd[0] = ( sky_mins[0][i] * HALF_SKY_SUBDIVISIONS ); + sky_mins_subd[1] = ( sky_mins[1][i] * HALF_SKY_SUBDIVISIONS ); + sky_maxs_subd[0] = ( sky_maxs[0][i] * HALF_SKY_SUBDIVISIONS ); + sky_maxs_subd[1] = ( sky_maxs[1][i] * HALF_SKY_SUBDIVISIONS ); + + if ( sky_mins_subd[0] < -HALF_SKY_SUBDIVISIONS ) + sky_mins_subd[0] = -HALF_SKY_SUBDIVISIONS; + else if ( sky_mins_subd[0] > HALF_SKY_SUBDIVISIONS ) + sky_mins_subd[0] = HALF_SKY_SUBDIVISIONS; + if ( sky_mins_subd[1] < MIN_T ) + sky_mins_subd[1] = MIN_T; + else if ( sky_mins_subd[1] > HALF_SKY_SUBDIVISIONS ) + sky_mins_subd[1] = HALF_SKY_SUBDIVISIONS; + + if ( sky_maxs_subd[0] < -HALF_SKY_SUBDIVISIONS ) + sky_maxs_subd[0] = -HALF_SKY_SUBDIVISIONS; + else if ( sky_maxs_subd[0] > HALF_SKY_SUBDIVISIONS ) + sky_maxs_subd[0] = HALF_SKY_SUBDIVISIONS; + if ( sky_maxs_subd[1] < MIN_T ) + sky_maxs_subd[1] = MIN_T; + else if ( sky_maxs_subd[1] > HALF_SKY_SUBDIVISIONS ) + sky_maxs_subd[1] = HALF_SKY_SUBDIVISIONS; + + // + // iterate through the subdivisions + // + for ( t = sky_mins_subd[1]+HALF_SKY_SUBDIVISIONS; t <= sky_maxs_subd[1]+HALF_SKY_SUBDIVISIONS; t++ ) + { + for ( s = sky_mins_subd[0]+HALF_SKY_SUBDIVISIONS; s <= sky_maxs_subd[0]+HALF_SKY_SUBDIVISIONS; s++ ) + { + MakeSkyVec( ( s - HALF_SKY_SUBDIVISIONS ) / ( float ) HALF_SKY_SUBDIVISIONS, + ( t - HALF_SKY_SUBDIVISIONS ) / ( float ) HALF_SKY_SUBDIVISIONS, + i, + NULL, + s_skyPoints[t][s] ); + + s_skyTexCoords[t][s][0] = s_cloudTexCoords[i][t][s][0]; + s_skyTexCoords[t][s][1] = s_cloudTexCoords[i][t][s][1]; + } + } + + // only add indexes for first stage + FillCloudySkySide( sky_mins_subd, sky_maxs_subd); + } +} + +/* +** R_BuildCloudData +*/ +void R_BuildCloudData( shaderCommands_t *input ) +{ +// assert( shader->isSky ); + sky_min = 1.0 / 256.0f; // FIXME: not correct? 
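+	// sky_min / sky_max are consumed by MakeSkyVec, which clamps the
+	// final cloud texture coordinates into [sky_min, sky_max] so that
+	// bilinear filtering does not pull in texels from across the
+	// texture border (the "avoid bilerp seam" clamp); the 1/256 inset
+	// assumes a 256-pixel cloud texture, hence the FIXME above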
+ sky_max = 255.0 / 256.0f; + + // set up for drawing + tess.numIndexes = 0; + tess.numVertexes = 0; + + if ( input->shader->sky.cloudHeight && tess.xstages[0] ) + { + FillCloudBox(); + } +} + +/* +** R_InitSkyTexCoords +** Called when a sky shader is parsed +*/ +#define SQR( a ) ((a)*(a)) +void R_InitSkyTexCoords( float heightCloud ) +{ + int i, s, t; + float radiusWorld = 4096; + float p; + float sRad, tRad; + vec3_t skyVec; + vec3_t v; + + // init zfar so MakeSkyVec works even though + // a world hasn't been bounded + backEnd.viewParms.zFar = 1024; + + for ( i = 0; i < 6; i++ ) + { + for ( t = 0; t <= SKY_SUBDIVISIONS; t++ ) + { + for ( s = 0; s <= SKY_SUBDIVISIONS; s++ ) + { + // compute vector from view origin to sky side integral point + MakeSkyVec( ( s - HALF_SKY_SUBDIVISIONS ) / ( float ) HALF_SKY_SUBDIVISIONS, + ( t - HALF_SKY_SUBDIVISIONS ) / ( float ) HALF_SKY_SUBDIVISIONS, + i, + NULL, + skyVec ); + + // compute parametric value 'p' that intersects with cloud layer + p = ( 1.0f / ( 2 * DotProduct( skyVec, skyVec ) ) ) * + ( -2 * skyVec[2] * radiusWorld + + 2 * sqrt( SQR( skyVec[2] ) * SQR( radiusWorld ) + + 2 * SQR( skyVec[0] ) * radiusWorld * heightCloud + + SQR( skyVec[0] ) * SQR( heightCloud ) + + 2 * SQR( skyVec[1] ) * radiusWorld * heightCloud + + SQR( skyVec[1] ) * SQR( heightCloud ) + + 2 * SQR( skyVec[2] ) * radiusWorld * heightCloud + + SQR( skyVec[2] ) * SQR( heightCloud ) ) ); + + // compute intersection point based on p + VectorScale( skyVec, p, v ); + v[2] += radiusWorld; + + // compute vector from world origin to intersection point 'v' + VectorNormalize( v ); + + sRad = acos( v[0] ); + tRad = acos( v[1] ); + + s_cloudTexCoords[i][t][s][0] = sRad; + s_cloudTexCoords[i][t][s][1] = tRad; + } + } + } +} + +/* +================ +RB_StageIteratorSky + +All of the visible sky triangles are in tess + +Other things could be stuck in here, like birds in the sky, etc +================ +*/ +void RB_StageIteratorSky( void ) +{ + + // go through all the polygons and project them onto + // the sky box to see which blocks on each side need + // to be drawn + RB_ClipSkyPolygons( &tess ); + + // r_showsky will let all the sky blocks be drawn in + // front of everything to allow developers to see how + // much sky is getting sucked in draw the outer skybox + if ( tess.shader->sky.outerbox[0] && (tess.shader->sky.outerbox[0] != tr.defaultImage) ) + { + + int i; + + sky_min = 0; + sky_max = 1; + + memset( s_skyTexCoords, 0, sizeof( s_skyTexCoords ) ); + + for (i=0 ; i<6 ; i++) + { + int sky_mins_subd[2], sky_maxs_subd[2]; + int s, t; + + sky_mins[0][i] = floor( sky_mins[0][i] * HALF_SKY_SUBDIVISIONS ) / HALF_SKY_SUBDIVISIONS; + sky_mins[1][i] = floor( sky_mins[1][i] * HALF_SKY_SUBDIVISIONS ) / HALF_SKY_SUBDIVISIONS; + sky_maxs[0][i] = ceil( sky_maxs[0][i] * HALF_SKY_SUBDIVISIONS ) / HALF_SKY_SUBDIVISIONS; + sky_maxs[1][i] = ceil( sky_maxs[1][i] * HALF_SKY_SUBDIVISIONS ) / HALF_SKY_SUBDIVISIONS; + + if ( ( sky_mins[0][i] >= sky_maxs[0][i] ) || + ( sky_mins[1][i] >= sky_maxs[1][i] ) ) + { + continue; + } + + sky_mins_subd[0] = sky_mins[0][i] * HALF_SKY_SUBDIVISIONS; + sky_mins_subd[1] = sky_mins[1][i] * HALF_SKY_SUBDIVISIONS; + sky_maxs_subd[0] = sky_maxs[0][i] * HALF_SKY_SUBDIVISIONS; + sky_maxs_subd[1] = sky_maxs[1][i] * HALF_SKY_SUBDIVISIONS; + + if ( sky_mins_subd[0] < -HALF_SKY_SUBDIVISIONS ) + sky_mins_subd[0] = -HALF_SKY_SUBDIVISIONS; + else if ( sky_mins_subd[0] > HALF_SKY_SUBDIVISIONS ) + sky_mins_subd[0] = HALF_SKY_SUBDIVISIONS; + if ( sky_mins_subd[1] < -HALF_SKY_SUBDIVISIONS 
) + sky_mins_subd[1] = -HALF_SKY_SUBDIVISIONS; + else if ( sky_mins_subd[1] > HALF_SKY_SUBDIVISIONS ) + sky_mins_subd[1] = HALF_SKY_SUBDIVISIONS; + + if ( sky_maxs_subd[0] < -HALF_SKY_SUBDIVISIONS ) + sky_maxs_subd[0] = -HALF_SKY_SUBDIVISIONS; + else if ( sky_maxs_subd[0] > HALF_SKY_SUBDIVISIONS ) + sky_maxs_subd[0] = HALF_SKY_SUBDIVISIONS; + if ( sky_maxs_subd[1] < -HALF_SKY_SUBDIVISIONS ) + sky_maxs_subd[1] = -HALF_SKY_SUBDIVISIONS; + else if ( sky_maxs_subd[1] > HALF_SKY_SUBDIVISIONS ) + sky_maxs_subd[1] = HALF_SKY_SUBDIVISIONS; + + // + // iterate through the subdivisions + // + for ( t = sky_mins_subd[1]+HALF_SKY_SUBDIVISIONS; t <= sky_maxs_subd[1]+HALF_SKY_SUBDIVISIONS; t++ ) + { + for ( s = sky_mins_subd[0]+HALF_SKY_SUBDIVISIONS; s <= sky_maxs_subd[0]+HALF_SKY_SUBDIVISIONS; s++ ) + { + MakeSkyVec( ( s - HALF_SKY_SUBDIVISIONS ) / ( float ) HALF_SKY_SUBDIVISIONS, + ( t - HALF_SKY_SUBDIVISIONS ) / ( float ) HALF_SKY_SUBDIVISIONS, + i, + s_skyTexCoords[t][s], + s_skyPoints[t][s] ); + } + } + + + // VULKAN: draw skybox side + + updateCurDescriptor(tess.shader->sky.outerbox[sky_texorder[i]]->descriptor_set, 0); + + tess.numVertexes = 0; + tess.numIndexes = 0; + + for ( t = sky_mins_subd[1]+HALF_SKY_SUBDIVISIONS; t < sky_maxs_subd[1]+HALF_SKY_SUBDIVISIONS; t++ ) + { + for ( s = sky_mins_subd[0]+HALF_SKY_SUBDIVISIONS; s < sky_maxs_subd[0]+HALF_SKY_SUBDIVISIONS; s++ ) + { + int ndx = tess.numVertexes; + + tess.indexes[ tess.numIndexes ] = ndx; + tess.indexes[ tess.numIndexes + 1 ] = ndx + 1; + tess.indexes[ tess.numIndexes + 2 ] = ndx + 2; + + tess.indexes[ tess.numIndexes + 3 ] = ndx + 2; + tess.indexes[ tess.numIndexes + 4 ] = ndx + 1; + tess.indexes[ tess.numIndexes + 5 ] = ndx + 3; + tess.numIndexes += 6; + + VectorCopy(s_skyPoints[t][s], tess.xyz[ndx]); + tess.svars.texcoords[0][ndx][0] = s_skyTexCoords[t][s][0]; + tess.svars.texcoords[0][ndx][1] = s_skyTexCoords[t][s][1]; + + VectorCopy(s_skyPoints[t + 1][s], tess.xyz[ndx + 1]); + tess.svars.texcoords[0][ndx + 1][0] = s_skyTexCoords[t + 1][s][0]; + tess.svars.texcoords[0][ndx + 1][1] = s_skyTexCoords[t + 1][s][1]; + + VectorCopy(s_skyPoints[t][s + 1], tess.xyz[ndx + 2]); + tess.svars.texcoords[0][ndx + 2][0] = s_skyTexCoords[t][s + 1][0]; + tess.svars.texcoords[0][ndx + 2][1] = s_skyTexCoords[t][s + 1][1]; + + VectorCopy(s_skyPoints[t + 1][s + 1], tess.xyz[ndx + 3]); + tess.svars.texcoords[0][ndx + 3][0] = s_skyTexCoords[t + 1][s + 1][0]; + tess.svars.texcoords[0][ndx + 3][1] = s_skyTexCoords[t + 1][s + 1][1]; + + tess.numVertexes += 4; + } + } + + memset( tess.svars.colors, tr.identityLightByte, tess.numVertexes * 4 ); + { + float skybox_translate[16] QALIGN(16) = { + 1, 0, 0, 0, + 0, 1, 0, 0, + 0, 0, 1, 0, + backEnd.viewParms.or.origin[0], backEnd.viewParms.or.origin[1], backEnd.viewParms.or.origin[2], 1 + }; + + float tmp[16] QALIGN(16); + MatrixMultiply4x4_SSE(skybox_translate, getptr_modelview_matrix(), tmp); + updateMVP(backEnd.viewParms.isPortal, backEnd.projection2D, tmp); + } + vk_UploadXYZI(tess.xyz, tess.numVertexes, tess.indexes, tess.numIndexes); + + vk_shade_geometry(g_stdPipelines.skybox_pipeline, VK_FALSE, r_showsky->integer ? 
DEPTH_RANGE_ZERO : DEPTH_RANGE_ONE, VK_TRUE); + + } + + } + + // generate the vertexes for all the clouds, which will be drawn + // by the generic shader routine + R_BuildCloudData( &tess ); + + RB_StageIteratorGeneric(); + + // draw the inner skybox +} diff --git a/code/renderer_vulkan/tr_surface.c b/code/renderer_vulkan/tr_surface.c new file mode 100644 index 0000000000..ab7fee211e --- /dev/null +++ b/code/renderer_vulkan/tr_surface.c @@ -0,0 +1,1069 @@ +/* +=========================================================================== +Copyright (C) 1999-2005 Id Software, Inc. + +This file is part of Quake III Arena source code. + +Quake III Arena source code is free software; you can redistribute it +and/or modify it under the terms of the GNU General Public License as +published by the Free Software Foundation; either version 2 of the License, +or (at your option) any later version. + +Quake III Arena source code is distributed in the hope that it will be +useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with Quake III Arena source code; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +=========================================================================== +*/ +// tr_surf.c +#include "tr_local.h" +#include "tr_flares.h" +#include "tr_globals.h" +#include "vk_image.h" +#include "tr_cvar.h" +#include "tr_backend.h" +#include "ref_import.h" +#include "matrix_multiplication.h" +#include "RB_SurfaceAnim.h" +/* + + THIS ENTIRE FILE IS BACK END + +backEnd.currentEntity will be valid. + +Tess_Begin has already been called for the surface's shader. + +The modelview matrix will be set. + +It is safe to actually issue drawing commands here if you don't want to +use the shader system. 
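+
+The individual RB_Surface* functions below are reached through the
+rb_surfaceTable[] function-pointer table at the bottom of this file,
+indexed by the surfaceType_t of each draw surface.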
+*/ + + +//============================================================================ +void RB_CheckOverflow( int verts, int indexes ) +{ + if ( (tess.numVertexes + verts < SHADER_MAX_VERTEXES) && + (tess.numIndexes + indexes < SHADER_MAX_INDEXES) ) + { + return; + } + + RB_EndSurface(); + + if ( verts >= SHADER_MAX_VERTEXES ) { + ri.Error(ERR_DROP, "RB_CheckOverflow: verts > MAX (%d > %d)", verts, SHADER_MAX_VERTEXES ); + } + if ( indexes >= SHADER_MAX_INDEXES ) { + ri.Error(ERR_DROP, "RB_CheckOverflow: indices > MAX (%d > %d)", indexes, SHADER_MAX_INDEXES ); + } + + RB_BeginSurface(tess.shader, tess.fogNum ); +} + + +void RB_AddQuadStampExt( vec3_t origin, vec3_t left, vec3_t up, byte *color, float s1, float t1, float s2, float t2 ) +{ + + RB_CHECKOVERFLOW( 4, 6 ); + + const uint32_t ndx0 = tess.numVertexes; + const uint32_t ndx1 = ndx0 + 1; + const uint32_t ndx2 = ndx0 + 2; + const uint32_t ndx3 = ndx0 + 3; + + + // triangle indexes for a simple quad + tess.indexes[ tess.numIndexes ] = ndx0; + tess.indexes[ tess.numIndexes + 1 ] = ndx1; + tess.indexes[ tess.numIndexes + 2 ] = ndx3; + + tess.indexes[ tess.numIndexes + 3 ] = ndx3; + tess.indexes[ tess.numIndexes + 4 ] = ndx1; + tess.indexes[ tess.numIndexes + 5 ] = ndx2; + + tess.xyz[ndx0][0] = origin[0] + left[0] + up[0]; + tess.xyz[ndx0][1] = origin[1] + left[1] + up[1]; + tess.xyz[ndx0][2] = origin[2] + left[2] + up[2]; + + tess.xyz[ndx1][0] = origin[0] - left[0] + up[0]; + tess.xyz[ndx1][1] = origin[1] - left[1] + up[1]; + tess.xyz[ndx1][2] = origin[2] - left[2] + up[2]; + + tess.xyz[ndx2][0] = origin[0] - left[0] - up[0]; + tess.xyz[ndx2][1] = origin[1] - left[1] - up[1]; + tess.xyz[ndx2][2] = origin[2] - left[2] - up[2]; + + tess.xyz[ndx3][0] = origin[0] + left[0] - up[0]; + tess.xyz[ndx3][1] = origin[1] + left[1] - up[1]; + tess.xyz[ndx3][2] = origin[2] + left[2] - up[2]; + + + // constant normal all the way around + + vec3_t normal; + VectorSubtract( vec3_origin, backEnd.viewParms.or.axis[0], normal ); + tess.normal[ndx0][0] = tess.normal[ndx1][0] = tess.normal[ndx2][0] = tess.normal[ndx3][0] = normal[0]; + tess.normal[ndx0][1] = tess.normal[ndx1][1] = tess.normal[ndx2][1] = tess.normal[ndx3][1] = normal[1]; + tess.normal[ndx0][2] = tess.normal[ndx1][2] = tess.normal[ndx2][2] = tess.normal[ndx3][2] = normal[2]; + + // standard square texture coordinates + tess.texCoords[ndx0][0][0] = tess.texCoords[ndx0][1][0] = s1; + tess.texCoords[ndx0][0][1] = tess.texCoords[ndx0][1][1] = t1; + + tess.texCoords[ndx1][0][0] = tess.texCoords[ndx1][1][0] = s2; + tess.texCoords[ndx1][0][1] = tess.texCoords[ndx1][1][1] = t1; + + tess.texCoords[ndx2][0][0] = tess.texCoords[ndx2][1][0] = s2; + tess.texCoords[ndx2][0][1] = tess.texCoords[ndx2][1][1] = t2; + + tess.texCoords[ndx3][0][0] = tess.texCoords[ndx3][1][0] = s1; + tess.texCoords[ndx3][0][1] = tess.texCoords[ndx3][1][1] = t2; + + // constant color all the way around + // should this be identity and let the shader specify from entity? 
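+	// (the memcpy() below replaces the old "*(unsigned int *)" casts,
+	// kept commented out for reference; the byte-wise copy avoids
+	// unaligned access and strict-aliasing problems)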
+ // * ( unsigned int * ) &tess.vertexColors[ndx0] = + // * ( unsigned int * ) &tess.vertexColors[ndx1] = + // * ( unsigned int * ) &tess.vertexColors[ndx2] = + // * ( unsigned int * ) &tess.vertexColors[ndx3] = + // * ( unsigned int * )color; + + /* + tess.vertexColors[ndx0][0] = color[0]; + tess.vertexColors[ndx0][1] = color[1]; + tess.vertexColors[ndx0][2] = color[2]; + tess.vertexColors[ndx0][3] = color[3]; + + tess.vertexColors[ndx1][0] = color[0]; + tess.vertexColors[ndx1][1] = color[1]; + tess.vertexColors[ndx1][2] = color[2]; + tess.vertexColors[ndx1][3] = color[3]; + + tess.vertexColors[ndx2][0] = color[0]; + tess.vertexColors[ndx2][1] = color[1]; + tess.vertexColors[ndx2][2] = color[2]; + tess.vertexColors[ndx2][3] = color[3]; + + tess.vertexColors[ndx3][0] = color[0]; + tess.vertexColors[ndx3][1] = color[1]; + tess.vertexColors[ndx3][2] = color[2]; + tess.vertexColors[ndx3][3] = color[3]; + */ + memcpy(tess.vertexColors[ndx0], color, 4); + memcpy(tess.vertexColors[ndx1], color, 4); + memcpy(tess.vertexColors[ndx2], color, 4); + memcpy(tess.vertexColors[ndx3], color, 4); + + + tess.numVertexes += 4; + tess.numIndexes += 6; +} + + +void RB_AddQuadStamp( vec3_t origin, vec3_t left, vec3_t up, unsigned char *color ) +{ + RB_AddQuadStampExt( origin, left, up, color, 0, 0, 1, 1 ); +} + +static void RB_SurfaceSprite( void ) +{ + vec3_t left, up; + + // calculate the xyz locations for the four corners + float radius = backEnd.currentEntity->e.radius; + if ( backEnd.currentEntity->e.rotation == 0 ) + { + VectorScale( backEnd.viewParms.or.axis[1], radius, left ); + VectorScale( backEnd.viewParms.or.axis[2], radius, up ); + } + else + { + float ang = (M_PI / 180)* backEnd.currentEntity->e.rotation ; + float s = sin( ang ); + float c = cos( ang ); + + VectorScale( backEnd.viewParms.or.axis[1], c * radius, left ); + VectorMA( left, -s * radius, backEnd.viewParms.or.axis[2], left ); + + VectorScale( backEnd.viewParms.or.axis[2], c * radius, up ); + VectorMA( up, s * radius, backEnd.viewParms.or.axis[1], up ); + } + if ( backEnd.viewParms.isMirror ) { + VectorSubtract( vec3_origin, left, left ); + } + + RB_AddQuadStamp( backEnd.currentEntity->e.origin, left, up, backEnd.currentEntity->e.shaderRGBA ); +} + + +/* +============= +RB_SurfacePolychain +============= +*/ +void RB_SurfacePolychain( srfPoly_t *p ) { + int i; + + RB_CHECKOVERFLOW( p->numVerts, 3*(p->numVerts - 2) ); + + // fan triangles into the tess array + uint32_t numv = tess.numVertexes; + for ( i = 0; i < p->numVerts; i++ ) + { + VectorCopy( p->verts[i].xyz, tess.xyz[numv] ); + tess.texCoords[numv][0][0] = p->verts[i].st[0]; + tess.texCoords[numv][0][1] = p->verts[i].st[1]; + // *(int *)&tess.vertexColors[numv] = *(int *)p->verts[ i ].modulate; + memcpy(tess.vertexColors[numv], p->verts[ i ].modulate, 4); + + numv++; + } + + // generate fan indexes into the tess array + for ( i = 0; i < p->numVerts-2; i++ ) + { + tess.indexes[tess.numIndexes + 0] = tess.numVertexes; + tess.indexes[tess.numIndexes + 1] = tess.numVertexes + i + 1; + tess.indexes[tess.numIndexes + 2] = tess.numVertexes + i + 2; + tess.numIndexes += 3; + } + + tess.numVertexes = numv; +} + + +/* +============= +RB_SurfaceTriangles +============= +*/ +void RB_SurfaceTriangles( srfTriangles_t *srf ) { + int i; + drawVert_t *dv; + float *xyz, *normal, *texCoords; + byte *color; + int dlightBits; + qboolean needsNormal; + + dlightBits = srf->dlightBits; + tess.dlightBits |= dlightBits; + + RB_CHECKOVERFLOW( srf->numVerts, srf->numIndexes ); + + for ( i = 0 ; i < 
srf->numIndexes ; i += 3 ) { + tess.indexes[ tess.numIndexes + i + 0 ] = tess.numVertexes + srf->indexes[ i + 0 ]; + tess.indexes[ tess.numIndexes + i + 1 ] = tess.numVertexes + srf->indexes[ i + 1 ]; + tess.indexes[ tess.numIndexes + i + 2 ] = tess.numVertexes + srf->indexes[ i + 2 ]; + } + tess.numIndexes += srf->numIndexes; + + dv = srf->verts; + xyz = tess.xyz[ tess.numVertexes ]; + normal = tess.normal[ tess.numVertexes ]; + texCoords = tess.texCoords[ tess.numVertexes ][0]; + color = tess.vertexColors[ tess.numVertexes ]; + needsNormal = tess.shader->needsNormal; + + for ( i = 0 ; i < srf->numVerts ; i++, dv++, xyz += 4, normal += 4, texCoords += 4, color += 4 ) { + xyz[0] = dv->xyz[0]; + xyz[1] = dv->xyz[1]; + xyz[2] = dv->xyz[2]; + + if ( needsNormal ) { + normal[0] = dv->normal[0]; + normal[1] = dv->normal[1]; + normal[2] = dv->normal[2]; + } + + texCoords[0] = dv->st[0]; + texCoords[1] = dv->st[1]; + + texCoords[2] = dv->lightmap[0]; + texCoords[3] = dv->lightmap[1]; + + // *(int *)color = *(int *)dv->color; + memcpy(color, dv->color, 4); + } + + for ( i = 0 ; i < srf->numVerts ; i++ ) { + tess.vertexDlightBits[ tess.numVertexes + i] = dlightBits; + } + + tess.numVertexes += srf->numVerts; +} + + +void RB_SurfaceBeam( void ) +{ +#define NUM_BEAM_SEGS 6 + refEntity_t *e; + vec3_t direction, normalized_direction; + vec3_t oldorigin, origin; + + e = &backEnd.currentEntity->e; + + oldorigin[0] = e->oldorigin[0]; + oldorigin[1] = e->oldorigin[1]; + oldorigin[2] = e->oldorigin[2]; + + origin[0] = e->origin[0]; + origin[1] = e->origin[1]; + origin[2] = e->origin[2]; + + normalized_direction[0] = direction[0] = oldorigin[0] - origin[0]; + normalized_direction[1] = direction[1] = oldorigin[1] - origin[1]; + normalized_direction[2] = direction[2] = oldorigin[2] - origin[2]; + + if ( VectorNormalize( normalized_direction ) == 0 ) + return; + + + ri.Printf(PRINT_ALL, "RB_SurfaceBeam()? "); +} + +//================================================================================ + +static void DoRailCore( const vec3_t start, const vec3_t end, const vec3_t up, float len, float spanWidth ) +{ + float spanWidth2; + int vbase; + float t = len / 256.0f; + + vbase = tess.numVertexes; + + spanWidth2 = -spanWidth; + + // FIXME: use quad stamp? 
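+	//
+	// emit the four corners of the rail-core quad: two vertices at
+	// 'start' offset +/- spanWidth along 'up', then two at 'end';
+	// t = len / 256 stretches the texture with the beam length, and
+	// the first corner is dimmed to 25% of shaderRGBA
+	//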
+ VectorMA( start, spanWidth, up, tess.xyz[tess.numVertexes] ); + tess.texCoords[tess.numVertexes][0][0] = 0; + tess.texCoords[tess.numVertexes][0][1] = 0; + tess.vertexColors[tess.numVertexes][0] = backEnd.currentEntity->e.shaderRGBA[0] * 0.25; + tess.vertexColors[tess.numVertexes][1] = backEnd.currentEntity->e.shaderRGBA[1] * 0.25; + tess.vertexColors[tess.numVertexes][2] = backEnd.currentEntity->e.shaderRGBA[2] * 0.25; + tess.numVertexes++; + + VectorMA( start, spanWidth2, up, tess.xyz[tess.numVertexes] ); + tess.texCoords[tess.numVertexes][0][0] = 0; + tess.texCoords[tess.numVertexes][0][1] = 1; + tess.vertexColors[tess.numVertexes][0] = backEnd.currentEntity->e.shaderRGBA[0]; + tess.vertexColors[tess.numVertexes][1] = backEnd.currentEntity->e.shaderRGBA[1]; + tess.vertexColors[tess.numVertexes][2] = backEnd.currentEntity->e.shaderRGBA[2]; + tess.numVertexes++; + + VectorMA( end, spanWidth, up, tess.xyz[tess.numVertexes] ); + + tess.texCoords[tess.numVertexes][0][0] = t; + tess.texCoords[tess.numVertexes][0][1] = 0; + tess.vertexColors[tess.numVertexes][0] = backEnd.currentEntity->e.shaderRGBA[0]; + tess.vertexColors[tess.numVertexes][1] = backEnd.currentEntity->e.shaderRGBA[1]; + tess.vertexColors[tess.numVertexes][2] = backEnd.currentEntity->e.shaderRGBA[2]; + tess.numVertexes++; + + VectorMA( end, spanWidth2, up, tess.xyz[tess.numVertexes] ); + tess.texCoords[tess.numVertexes][0][0] = t; + tess.texCoords[tess.numVertexes][0][1] = 1; + tess.vertexColors[tess.numVertexes][0] = backEnd.currentEntity->e.shaderRGBA[0]; + tess.vertexColors[tess.numVertexes][1] = backEnd.currentEntity->e.shaderRGBA[1]; + tess.vertexColors[tess.numVertexes][2] = backEnd.currentEntity->e.shaderRGBA[2]; + tess.numVertexes++; + + tess.indexes[tess.numIndexes++] = vbase; + tess.indexes[tess.numIndexes++] = vbase + 1; + tess.indexes[tess.numIndexes++] = vbase + 2; + + tess.indexes[tess.numIndexes++] = vbase + 2; + tess.indexes[tess.numIndexes++] = vbase + 1; + tess.indexes[tess.numIndexes++] = vbase + 3; +} + +static void DoRailDiscs( int numSegs, const vec3_t start, const vec3_t dir, const vec3_t right, const vec3_t up ) +{ + int i; + vec3_t pos[4]; + vec3_t v; + int spanWidth = r_railWidth->integer; + float c, s; + float scale; + + if ( numSegs > 1 ) + numSegs--; + if ( !numSegs ) + return; + + scale = 0.25; + + for ( i = 0; i < 4; i++ ) + { + c = cos( DEG2RAD( 45 + i * 90 ) ); + s = sin( DEG2RAD( 45 + i * 90 ) ); + v[0] = ( right[0] * c + up[0] * s ) * scale * spanWidth; + v[1] = ( right[1] * c + up[1] * s ) * scale * spanWidth; + v[2] = ( right[2] * c + up[2] * s ) * scale * spanWidth; + VectorAdd( start, v, pos[i] ); + + if ( numSegs > 1 ) + { + // offset by 1 segment if we're doing a long distance shot + VectorAdd( pos[i], dir, pos[i] ); + } + } + + for ( i = 0; i < numSegs; i++ ) + { + int j; + + RB_CHECKOVERFLOW( 4, 6 ); + + for ( j = 0; j < 4; j++ ) + { + VectorCopy( pos[j], tess.xyz[tess.numVertexes] ); + tess.texCoords[tess.numVertexes][0][0] = ( j < 2 ); + tess.texCoords[tess.numVertexes][0][1] = ( j && j != 3 ); + tess.vertexColors[tess.numVertexes][0] = backEnd.currentEntity->e.shaderRGBA[0]; + tess.vertexColors[tess.numVertexes][1] = backEnd.currentEntity->e.shaderRGBA[1]; + tess.vertexColors[tess.numVertexes][2] = backEnd.currentEntity->e.shaderRGBA[2]; + tess.numVertexes++; + + VectorAdd( pos[j], dir, pos[j] ); + } + + tess.indexes[tess.numIndexes++] = tess.numVertexes - 4 + 0; + tess.indexes[tess.numIndexes++] = tess.numVertexes - 4 + 1; + tess.indexes[tess.numIndexes++] = tess.numVertexes - 4 
+ 3; + tess.indexes[tess.numIndexes++] = tess.numVertexes - 4 + 3; + tess.indexes[tess.numIndexes++] = tess.numVertexes - 4 + 1; + tess.indexes[tess.numIndexes++] = tess.numVertexes - 4 + 2; + } +} + +/* +** RB_SurfaceRailRinges +*/ +void RB_SurfaceRailRings( void ) { + refEntity_t *e; + int numSegs; + int len; + vec3_t vec; + vec3_t right, up; + vec3_t start, end; + + e = &backEnd.currentEntity->e; + + VectorCopy( e->oldorigin, start ); + VectorCopy( e->origin, end ); + + // compute variables + VectorSubtract( end, start, vec ); + len = VectorNormalize( vec ); + MakeTwoPerpVectors( vec, right, up ); + numSegs = ( len ) / r_railSegmentLength->value; + if ( numSegs <= 0 ) { + numSegs = 1; + } + + VectorScale( vec, r_railSegmentLength->value, vec ); + + DoRailDiscs( numSegs, start, vec, right, up ); +} + +/* +** RB_SurfaceRailCore +*/ +void RB_SurfaceRailCore( void ) { + refEntity_t *e; + int len; + vec3_t right; + vec3_t vec; + vec3_t start, end; + vec3_t v1, v2; + + e = &backEnd.currentEntity->e; + + VectorCopy( e->oldorigin, start ); + VectorCopy( e->origin, end ); + + VectorSubtract( end, start, vec ); + len = VectorNormalize( vec ); + + // compute side vector + VectorSubtract( start, backEnd.viewParms.or.origin, v1 ); + VectorNormalize( v1 ); + VectorSubtract( end, backEnd.viewParms.or.origin, v2 ); + VectorNormalize( v2 ); + CrossProduct( v1, v2, right ); + VectorNormalize( right ); + + DoRailCore( start, end, right, len, r_railCoreWidth->integer ); +} + +/* +** RB_SurfaceLightningBolt +*/ +void RB_SurfaceLightningBolt( void ) { + refEntity_t *e; + int len; + vec3_t right; + vec3_t vec; + vec3_t start, end; + vec3_t v1, v2; + int i; + + e = &backEnd.currentEntity->e; + + VectorCopy( e->oldorigin, end ); + VectorCopy( e->origin, start ); + + // compute variables + VectorSubtract( end, start, vec ); + len = VectorNormalize( vec ); + + // compute side vector + VectorSubtract( start, backEnd.viewParms.or.origin, v1 ); + VectorNormalize( v1 ); + VectorSubtract( end, backEnd.viewParms.or.origin, v2 ); + VectorNormalize( v2 ); + CrossProduct( v1, v2, right ); + VectorNormalize( right ); + + for ( i = 0 ; i < 4 ; i++ ) { + vec3_t temp; + + DoRailCore( start, end, right, len, 8 ); + RotatePointAroundVector( temp, vec, right, 45 ); + VectorCopy( temp, right ); + } +} + +/* +** VectorArrayNormalize +* +* The inputs to this routing seem to always be close to length = 1.0 (about 0.6 to 2.0) +* This means that we don't have to worry about zero length or enormously long vectors. +*/ +static void VectorArrayNormalize(vec4_t *normals, unsigned int count) +{ +// assert(count); + +#if idppc + { + register float half = 0.5; + register float one = 1.0; + float *components = (float *)normals; + + // Vanilla PPC code, but since PPC has a reciprocal square root estimate instruction, + // runs *much* faster than calling sqrt(). We'll use a single Newton-Raphson + // refinement step to get a little more precision. This seems to yeild results + // that are correct to 3 decimal places and usually correct to at least 4 (sometimes 5). + // (That is, for the given input range of about 0.6 to 2.0). 
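+	// One Newton-Raphson refinement of the frsqrte estimate
+	// y0 ~ 1/sqrt(B):  y1 = y0 + 0.5*y0*(1 - B*y0*y0),
+	// computed below with 'half' and 'one' held in registers.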
+ do { + float x, y, z; + float B, y0, y1; + + x = components[0]; + y = components[1]; + z = components[2]; + components += 4; + B = x*x + y*y + z*z; + +#ifdef __GNUC__ + asm("frsqrte %0,%1" : "=f" (y0) : "f" (B)); +#else + y0 = __frsqrte(B); +#endif + y1 = y0 + half*y0*(one - B*y0*y0); + + x = x * y1; + y = y * y1; + components[-4] = x; + z = z * y1; + components[-3] = y; + components[-2] = z; + } while(count--); + } +#else // No assembly version for this architecture, or C_ONLY defined + // given the input, it's safe to call VectorNormalizeFast + while (count--) { + VectorNorm(normals[0]); + normals++; + } +#endif + +} + + +static void LerpMeshVertexes (md3Surface_t *surf, float backlerp) +{ + short *oldXyz, *newXyz, *oldNormals, *newNormals; + float *outXyz, *outNormal; + float oldXyzScale, newXyzScale; + float oldNormalScale, newNormalScale; + int vertNum; + unsigned lat, lng; + int numVerts; + + outXyz = tess.xyz[tess.numVertexes]; + outNormal = tess.normal[tess.numVertexes]; + + newXyz = (short *)((byte *)surf + surf->ofsXyzNormals) + + (backEnd.currentEntity->e.frame * surf->numVerts * 4); + newNormals = newXyz + 3; + + newXyzScale = MD3_XYZ_SCALE * (1.0 - backlerp); + newNormalScale = 1.0 - backlerp; + + numVerts = surf->numVerts; + + if ( backlerp == 0 ) { + // + // just copy the vertexes + // + for (vertNum=0 ; vertNum < numVerts ; vertNum++, + newXyz += 4, newNormals += 4, + outXyz += 4, outNormal += 4) + { + + outXyz[0] = newXyz[0] * newXyzScale; + outXyz[1] = newXyz[1] * newXyzScale; + outXyz[2] = newXyz[2] * newXyzScale; + + lat = ( newNormals[0] >> 8 ) & 0xff; + lng = ( newNormals[0] & 0xff ); + lat *= (FUNCTABLE_SIZE/256); + lng *= (FUNCTABLE_SIZE/256); + + // decode X as cos( lat ) * sin( long ) + // decode Y as sin( lat ) * sin( long ) + // decode Z as cos( long ) + + outNormal[0] = tr.sinTable[(lat+(FUNCTABLE_SIZE/4))&FUNCTABLE_MASK] * tr.sinTable[lng]; + outNormal[1] = tr.sinTable[lat] * tr.sinTable[lng]; + outNormal[2] = tr.sinTable[(lng+(FUNCTABLE_SIZE/4))&FUNCTABLE_MASK]; + } + } else { + // + // interpolate and copy the vertex and normal + // + oldXyz = (short *)((byte *)surf + surf->ofsXyzNormals) + + (backEnd.currentEntity->e.oldframe * surf->numVerts * 4); + oldNormals = oldXyz + 3; + + oldXyzScale = MD3_XYZ_SCALE * backlerp; + oldNormalScale = backlerp; + + for (vertNum=0 ; vertNum < numVerts ; vertNum++, + oldXyz += 4, newXyz += 4, oldNormals += 4, newNormals += 4, + outXyz += 4, outNormal += 4) + { + vec3_t uncompressedOldNormal, uncompressedNewNormal; + + // interpolate the xyz + outXyz[0] = oldXyz[0] * oldXyzScale + newXyz[0] * newXyzScale; + outXyz[1] = oldXyz[1] * oldXyzScale + newXyz[1] * newXyzScale; + outXyz[2] = oldXyz[2] * oldXyzScale + newXyz[2] * newXyzScale; + + // FIXME: interpolate lat/long instead? 
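+			// decode the packed lat/lng normals just like the
+			// backlerp == 0 branch above; the "*= 4" assumes
+			// FUNCTABLE_SIZE == 1024, i.e. it equals FUNCTABLE_SIZE/256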
+ lat = ( newNormals[0] >> 8 ) & 0xff; + lng = ( newNormals[0] & 0xff ); + lat *= 4; + lng *= 4; + uncompressedNewNormal[0] = tr.sinTable[(lat+(FUNCTABLE_SIZE/4))&FUNCTABLE_MASK] * tr.sinTable[lng]; + uncompressedNewNormal[1] = tr.sinTable[lat] * tr.sinTable[lng]; + uncompressedNewNormal[2] = tr.sinTable[(lng+(FUNCTABLE_SIZE/4))&FUNCTABLE_MASK]; + + lat = ( oldNormals[0] >> 8 ) & 0xff; + lng = ( oldNormals[0] & 0xff ); + lat *= 4; + lng *= 4; + + uncompressedOldNormal[0] = tr.sinTable[(lat+(FUNCTABLE_SIZE/4))&FUNCTABLE_MASK] * tr.sinTable[lng]; + uncompressedOldNormal[1] = tr.sinTable[lat] * tr.sinTable[lng]; + uncompressedOldNormal[2] = tr.sinTable[(lng+(FUNCTABLE_SIZE/4))&FUNCTABLE_MASK]; + + outNormal[0] = uncompressedOldNormal[0] * oldNormalScale + uncompressedNewNormal[0] * newNormalScale; + outNormal[1] = uncompressedOldNormal[1] * oldNormalScale + uncompressedNewNormal[1] * newNormalScale; + outNormal[2] = uncompressedOldNormal[2] * oldNormalScale + uncompressedNewNormal[2] * newNormalScale; + + } + VectorArrayNormalize((vec4_t *)tess.normal[tess.numVertexes], numVerts); + } +} + +/* +============= +RB_SurfaceMesh +============= +*/ +void RB_SurfaceMesh(md3Surface_t *surface) { + int j; + float backlerp; + int *triangles; + float *texCoords; + int indexes; + int Bob, Doug; + int numVerts; + + if ( backEnd.currentEntity->e.oldframe == backEnd.currentEntity->e.frame ) { + backlerp = 0; + } else { + backlerp = backEnd.currentEntity->e.backlerp; + } + + RB_CHECKOVERFLOW( surface->numVerts, surface->numTriangles*3 ); + + LerpMeshVertexes (surface, backlerp); + + triangles = (int *) ((byte *)surface + surface->ofsTriangles); + indexes = surface->numTriangles * 3; + Bob = tess.numIndexes; + Doug = tess.numVertexes; + for (j = 0 ; j < indexes ; j++) { + tess.indexes[Bob + j] = Doug + triangles[j]; + } + tess.numIndexes += indexes; + + texCoords = (float *) ((byte *)surface + surface->ofsSt); + + numVerts = surface->numVerts; + for ( j = 0; j < numVerts; j++ ) { + tess.texCoords[Doug + j][0][0] = texCoords[j*2+0]; + tess.texCoords[Doug + j][0][1] = texCoords[j*2+1]; + // FIXME: fill in lightmapST for completeness? 
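+		// (MD3 surfaces are vertex-lit and carry no lightmap, so only
+		// the first texture-coordinate set is filled in here)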
+ } + + tess.numVertexes += surface->numVerts; + +} + + +/* +============== +RB_SurfaceFace +============== +*/ +void RB_SurfaceFace( srfSurfaceFace_t *surf ) { + int i; + unsigned *indices, *tessIndexes; + float *v; + float *normal; + int ndx; + int Bob; + int numPoints; + int dlightBits; + + RB_CHECKOVERFLOW( surf->numPoints, surf->numIndices ); + + dlightBits = surf->dlightBits; + tess.dlightBits |= dlightBits; + + indices = ( unsigned * ) ( ( ( char * ) surf ) + surf->ofsIndices ); + + Bob = tess.numVertexes; + tessIndexes = tess.indexes + tess.numIndexes; + for ( i = surf->numIndices-1 ; i >= 0 ; i-- ) { + tessIndexes[i] = indices[i] + Bob; + } + + tess.numIndexes += surf->numIndices; + + v = surf->points[0]; + + ndx = tess.numVertexes; + + numPoints = surf->numPoints; + + if ( tess.shader->needsNormal ) { + normal = surf->plane.normal; + for ( i = 0, ndx = tess.numVertexes; i < numPoints; i++, ndx++ ) { + VectorCopy( normal, tess.normal[ndx] ); + } + } + + for ( i = 0, v = surf->points[0], ndx = tess.numVertexes; i < numPoints; i++, v += VERTEXSIZE, ndx++ ) { + VectorCopy( v, tess.xyz[ndx]); + tess.texCoords[ndx][0][0] = v[3]; + tess.texCoords[ndx][0][1] = v[4]; + tess.texCoords[ndx][1][0] = v[5]; + tess.texCoords[ndx][1][1] = v[6]; + //* ( unsigned int * ) &tess.vertexColors[ndx] = * ( unsigned int * ) &v[7]; + memcpy(tess.vertexColors[ndx], &v[7], 4); + tess.vertexDlightBits[ndx] = dlightBits; + } + + + tess.numVertexes += surf->numPoints; +} + + +static float LodErrorForVolume( vec3_t local, float radius ) { + vec3_t world; + float d; + + // never let it go negative + if ( r_lodCurveError->value < 0 ) { + return 0; + } + + world[0] = local[0] * backEnd.or.axis[0][0] + local[1] * backEnd.or.axis[1][0] + + local[2] * backEnd.or.axis[2][0] + backEnd.or.origin[0]; + world[1] = local[0] * backEnd.or.axis[0][1] + local[1] * backEnd.or.axis[1][1] + + local[2] * backEnd.or.axis[2][1] + backEnd.or.origin[1]; + world[2] = local[0] * backEnd.or.axis[0][2] + local[1] * backEnd.or.axis[1][2] + + local[2] * backEnd.or.axis[2][2] + backEnd.or.origin[2]; + + VectorSubtract( world, backEnd.viewParms.or.origin, world ); + d = DotProduct( world, backEnd.viewParms.or.axis[0] ); + + if ( d < 0 ) { + d = -d; + } + d -= radius; + if ( d < 1 ) { + d = 1; + } + + return r_lodCurveError->value / d; +} + +/* +============= +RB_SurfaceGrid + +Just copy the grid of points and triangulate +============= +*/ +void RB_SurfaceGrid( srfGridMesh_t *cv ) { + int i, j; + float *xyz; + float *texCoords; + float *normal; + unsigned char *color; + drawVert_t *dv; + int rows, irows, vrows; + int used; + int widthTable[MAX_GRID_SIZE]; + int heightTable[MAX_GRID_SIZE]; + float lodError; + int lodWidth, lodHeight; + int numVertexes; + int dlightBits; + int *vDlightBits; + qboolean needsNormal; + + dlightBits = cv->dlightBits; + tess.dlightBits |= dlightBits; + + // determine the allowable discrepance + lodError = LodErrorForVolume( cv->lodOrigin, cv->lodRadius ); + + // determine which rows and columns of the subdivision + // we are actually going to use + widthTable[0] = 0; + lodWidth = 1; + for ( i = 1 ; i < cv->width-1 ; i++ ) { + if ( cv->widthLodError[i] <= lodError ) { + widthTable[lodWidth] = i; + lodWidth++; + } + } + widthTable[lodWidth] = cv->width-1; + lodWidth++; + + heightTable[0] = 0; + lodHeight = 1; + for ( i = 1 ; i < cv->height-1 ; i++ ) { + if ( cv->heightLodError[i] <= lodError ) { + heightTable[lodHeight] = i; + lodHeight++; + } + } + heightTable[lodHeight] = cv->height-1; + lodHeight++; + + + // very 
large grids may have more points or indexes than can be fit + // in the tess structure, so we may have to issue it in multiple passes + + used = 0; + rows = 0; + while ( used < lodHeight - 1 ) { + // see how many rows of both verts and indexes we can add without overflowing + do { + vrows = ( SHADER_MAX_VERTEXES - tess.numVertexes ) / lodWidth; + irows = ( SHADER_MAX_INDEXES - tess.numIndexes ) / ( lodWidth * 6 ); + + // if we don't have enough space for at least one strip, flush the buffer + if ( vrows < 2 || irows < 1 ) { + RB_EndSurface(); + RB_BeginSurface(tess.shader, tess.fogNum ); + } else { + break; + } + } while ( 1 ); + + rows = irows; + if ( vrows < irows + 1 ) { + rows = vrows - 1; + } + if ( used + rows > lodHeight ) { + rows = lodHeight - used; + } + + numVertexes = tess.numVertexes; + + xyz = tess.xyz[numVertexes]; + normal = tess.normal[numVertexes]; + texCoords = tess.texCoords[numVertexes][0]; + color = ( unsigned char * ) &tess.vertexColors[numVertexes]; + vDlightBits = &tess.vertexDlightBits[numVertexes]; + needsNormal = tess.shader->needsNormal; + + for ( i = 0 ; i < rows ; i++ ) { + for ( j = 0 ; j < lodWidth ; j++ ) { + dv = cv->verts + heightTable[ used + i ] * cv->width + + widthTable[ j ]; + + xyz[0] = dv->xyz[0]; + xyz[1] = dv->xyz[1]; + xyz[2] = dv->xyz[2]; + texCoords[0] = dv->st[0]; + texCoords[1] = dv->st[1]; + texCoords[2] = dv->lightmap[0]; + texCoords[3] = dv->lightmap[1]; + if ( needsNormal ) { + normal[0] = dv->normal[0]; + normal[1] = dv->normal[1]; + normal[2] = dv->normal[2]; + } + //* ( unsigned int * ) color = * ( unsigned int * ) dv->color; + memcpy(color, dv->color, 4); + *vDlightBits++ = dlightBits; + xyz += 4; + normal += 4; + texCoords += 4; + color += 4; + } + } + + + // add the indexes + { + int numIndexes; + int w, h; + + h = rows - 1; + w = lodWidth - 1; + numIndexes = tess.numIndexes; + for (i = 0 ; i < h ; i++) { + for (j = 0 ; j < w ; j++) { + int v1, v2, v3, v4; + + // vertex order to be reckognized as tristrips + v1 = numVertexes + i*lodWidth + j + 1; + v2 = v1 - 1; + v3 = v2 + lodWidth; + v4 = v3 + 1; + + tess.indexes[numIndexes] = v2; + tess.indexes[numIndexes+1] = v3; + tess.indexes[numIndexes+2] = v1; + + tess.indexes[numIndexes+3] = v1; + tess.indexes[numIndexes+4] = v3; + tess.indexes[numIndexes+5] = v4; + numIndexes += 6; + } + } + + tess.numIndexes = numIndexes; + } + + tess.numVertexes += rows * lodWidth; + + used += rows - 1; + } +} + + +/* +=========================================================================== + +NULL MODEL + +=========================================================================== +*/ + +/* +=================== +RB_SurfaceAxis + +Draws x/y/z lines from the origin for orientation debugging +=================== +*/ +void RB_SurfaceAxis( void ) +{ + // FIXME: implement this + // VK_Bind( tr.whiteImage ); + ri.Printf( PRINT_ALL, "RB_SurfaceAxis() haven't been implemented. 
\n" );
+
+}
+
+//===========================================================================
+
+/*
+====================
+RB_SurfaceEntity
+
+Entities that have a single procedurally generated surface
+====================
+*/
+void RB_SurfaceEntity( surfaceType_t *surfType )
+{
+	switch( backEnd.currentEntity->e.reType ) {
+	case RT_SPRITE:
+		RB_SurfaceSprite();
+		break;
+	case RT_BEAM:
+		RB_SurfaceBeam();
+		break;
+	case RT_RAIL_CORE:
+		RB_SurfaceRailCore();
+		break;
+	case RT_RAIL_RINGS:
+		RB_SurfaceRailRings();
+		break;
+	case RT_LIGHTNING:
+		RB_SurfaceLightningBolt();
+		break;
+	default:
+		RB_SurfaceAxis();
+		break;
+	}
+	return;
+}
+
+void RB_SurfaceBad( surfaceType_t *surfType ) {
+	ri.Printf( PRINT_ALL, "Bad surface tessellated.\n" );
+}
+
+void RB_SurfaceSkip( void *surf ) {
+}
+
+
+
+void (*rb_surfaceTable[SF_NUM_SURFACE_TYPES])(void * ) = {
+	(void (* )(void* ))RB_SurfaceBad,			// SF_BAD,
+	(void (* )(void* ))RB_SurfaceSkip,			// SF_SKIP,
+	(void (* )(void* ))RB_SurfaceFace,			// SF_FACE,
+	(void (* )(void* ))RB_SurfaceGrid,			// SF_GRID,
+	(void (* )(void* ))RB_SurfaceTriangles,		// SF_TRIANGLES,
+	(void (* )(void* ))RB_SurfacePolychain,		// SF_POLY,
+	(void (* )(void* ))RB_SurfaceMesh,			// SF_MD3,
+	(void (* )(void* ))RB_MDRSurfaceAnim,		// SF_MDR,
+	(void (* )(void* ))RB_IQMSurfaceAnim,		// SF_IQM,
+	(void (* )(void* ))RB_SurfaceFlare,			// SF_FLARE,
+	(void (* )(void* ))RB_SurfaceEntity,		// SF_ENTITY
+};
diff --git a/code/renderer_vulkan/tr_world.c b/code/renderer_vulkan/tr_world.c
new file mode 100644
index 0000000000..1af8144890
--- /dev/null
+++ b/code/renderer_vulkan/tr_world.c
@@ -0,0 +1,665 @@
+/*
+===========================================================================
+Copyright (C) 1999-2005 Id Software, Inc.
+
+This file is part of Quake III Arena source code.
+
+Quake III Arena source code is free software; you can redistribute it
+and/or modify it under the terms of the GNU General Public License as
+published by the Free Software Foundation; either version 2 of the License,
+or (at your option) any later version.
+
+Quake III Arena source code is distributed in the hope that it will be
+useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Quake III Arena source code; if not, write to the Free Software
+Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+===========================================================================
+*/
+#include "tr_local.h"
+#include "tr_globals.h"
+#include "tr_cvar.h"
+#include "ref_import.h"
+#include "tr_light.h"
+
+/*
+=================
+R_CullTriSurf
+
+Returns true if the grid is completely culled away.
+Also sets the clipped hint bit in tess
+=================
+*/
+static qboolean R_CullTriSurf( srfTriangles_t *cv )
+{
+	int boxCull = R_CullLocalBox( cv->bounds );
+
+	if ( boxCull == CULL_OUT ) {
+		return qtrue;
+	}
+	return qfalse;
+}
+
+/*
+=================
+R_CullGrid
+
+Returns true if the grid is completely culled away. 
+Also sets the clipped hint bit in tess +================= +*/ +static qboolean R_CullGrid( srfGridMesh_t *cv ) { + int boxCull; + int sphereCull; + + if ( r_nocurves->integer ) { + return qtrue; + } + + if ( tr.currentEntityNum != REFENTITYNUM_WORLD ) { + sphereCull = R_CullLocalPointAndRadius( cv->localOrigin, cv->meshRadius ); + } else { + sphereCull = R_CullPointAndRadius( cv->localOrigin, cv->meshRadius ); + } + boxCull = CULL_OUT; + + // check for trivial reject + if ( sphereCull == CULL_OUT ) + { + tr.pc.c_sphere_cull_patch_out++; + return qtrue; + } + // check bounding box if necessary + else if ( sphereCull == CULL_CLIP ) + { + tr.pc.c_sphere_cull_patch_clip++; + + boxCull = R_CullLocalBox( cv->meshBounds ); + + if ( boxCull == CULL_OUT ) + { + tr.pc.c_box_cull_patch_out++; + return qtrue; + } + else if ( boxCull == CULL_IN ) + { + tr.pc.c_box_cull_patch_in++; + } + else + { + tr.pc.c_box_cull_patch_clip++; + } + } + else + { + tr.pc.c_sphere_cull_patch_in++; + } + + return qfalse; +} + + +/* +================ +R_CullSurface + +Tries to back face cull surfaces before they are lighted or +added to the sorting list. + +This will also allow mirrors on both sides of a model without recursion. +================ +*/ +static qboolean R_CullSurface( surfaceType_t *surface, shader_t *shader ) { + srfSurfaceFace_t *sface; + if ( r_nocull->integer ) { + return qfalse; + } + + if ( *surface == SF_GRID ) { + return R_CullGrid( (srfGridMesh_t *)surface ); + } + + if ( *surface == SF_TRIANGLES ) { + return R_CullTriSurf( (srfTriangles_t *)surface ); + } + + if ( *surface != SF_FACE ) { + return qfalse; + } + + if ( shader->cullType == CT_TWO_SIDED ) { + return qfalse; + } + + // face culling + if ( !r_facePlaneCull->integer ) { + return qfalse; + } + + sface = ( srfSurfaceFace_t * ) surface; + float d = DotProduct (tr.or.viewOrigin, sface->plane.normal); + + // don't cull exactly on the plane, because there are levels of rounding + // through the BSP, ICD, and hardware that may cause pixel gaps if an + // epsilon isn't allowed here + if ( shader->cullType == CT_FRONT_SIDED ) { + if ( d < sface->plane.dist - 8 ) { + return qtrue; + } + } else { + if ( d > sface->plane.dist + 8 ) { + return qtrue; + } + } + + return qfalse; +} + + +static int R_DlightFace( srfSurfaceFace_t *face, int dlightBits ) { + float d; + int i; + dlight_t *dl; + + for ( i = 0 ; i < tr.refdef.num_dlights ; i++ ) { + if ( ! ( dlightBits & ( 1 << i ) ) ) { + continue; + } + dl = &tr.refdef.dlights[i]; + d = DotProduct( dl->origin, face->plane.normal ) - face->plane.dist; + if ( d < -dl->radius || d > dl->radius ) { + // dlight doesn't reach the plane + dlightBits &= ~( 1 << i ); + } + } + + if ( !dlightBits ) { + tr.pc.c_dlightSurfacesCulled++; + } + + face->dlightBits = dlightBits; + return dlightBits; +} + +static int R_DlightGrid( srfGridMesh_t *grid, int dlightBits ) { + int i; + dlight_t *dl; + + for ( i = 0 ; i < tr.refdef.num_dlights ; i++ ) { + if ( ! 
( dlightBits & ( 1 << i ) ) ) { + continue; + } + dl = &tr.refdef.dlights[i]; + if ( dl->origin[0] - dl->radius > grid->meshBounds[1][0] + || dl->origin[0] + dl->radius < grid->meshBounds[0][0] + || dl->origin[1] - dl->radius > grid->meshBounds[1][1] + || dl->origin[1] + dl->radius < grid->meshBounds[0][1] + || dl->origin[2] - dl->radius > grid->meshBounds[1][2] + || dl->origin[2] + dl->radius < grid->meshBounds[0][2] ) { + // dlight doesn't reach the bounds + dlightBits &= ~( 1 << i ); + } + } + + if ( !dlightBits ) { + tr.pc.c_dlightSurfacesCulled++; + } + + grid->dlightBits = dlightBits; + return dlightBits; +} + + +static int R_DlightTrisurf( srfTriangles_t *surf, int dlightBits ) { + // FIXME: more dlight culling to trisurfs... + surf->dlightBits = dlightBits; + return dlightBits; +#if 0 + int i; + dlight_t *dl; + + for ( i = 0 ; i < tr.refdef.num_dlights ; i++ ) { + if ( ! ( dlightBits & ( 1 << i ) ) ) { + continue; + } + dl = &tr.refdef.dlights[i]; + if ( dl->origin[0] - dl->radius > grid->meshBounds[1][0] + || dl->origin[0] + dl->radius < grid->meshBounds[0][0] + || dl->origin[1] - dl->radius > grid->meshBounds[1][1] + || dl->origin[1] + dl->radius < grid->meshBounds[0][1] + || dl->origin[2] - dl->radius > grid->meshBounds[1][2] + || dl->origin[2] + dl->radius < grid->meshBounds[0][2] ) { + // dlight doesn't reach the bounds + dlightBits &= ~( 1 << i ); + } + } + + if ( !dlightBits ) { + tr.pc.c_dlightSurfacesCulled++; + } + + grid->dlightBits = dlightBits; + return dlightBits; +#endif +} + +/* +==================== +R_DlightSurface + +The given surface is going to be drawn, and it touches a leaf +that is touched by one or more dlights, so try to throw out +more dlights if possible. +==================== +*/ +static int R_DlightSurface( msurface_t *surf, int dlightBits ) { + if ( *surf->data == SF_FACE ) { + dlightBits = R_DlightFace( (srfSurfaceFace_t *)surf->data, dlightBits ); + } else if ( *surf->data == SF_GRID ) { + dlightBits = R_DlightGrid( (srfGridMesh_t *)surf->data, dlightBits ); + } else if ( *surf->data == SF_TRIANGLES ) { + dlightBits = R_DlightTrisurf( (srfTriangles_t *)surf->data, dlightBits ); + } else { + dlightBits = 0; + } + + if ( dlightBits ) { + tr.pc.c_dlightSurfaces++; + } + + return dlightBits; +} + + + +/* +====================== +R_AddWorldSurface +====================== +*/ +static void R_AddWorldSurface( msurface_t *surf, int dlightBits ) +{ + if ( surf->viewCount == tr.viewCount ) { + return; // already in this view + } + + surf->viewCount = tr.viewCount; + // FIXME: bmodel fog? 
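+	// note: if any dlights survive the culling below, dlightBits is
+	// collapsed to a single 0/1 flag before R_AddDrawSurf, which only
+	// keeps an "any dlight" bit in the draw-surface sort key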
+ + // try to cull before dlighting or adding + if ( R_CullSurface( surf->data, surf->shader ) ) { + return; + } + + // check for dlighting + if ( dlightBits ) { + dlightBits = R_DlightSurface( surf, dlightBits ); + dlightBits = ( dlightBits != 0 ); + } + + R_AddDrawSurf( surf->data, surf->shader, surf->fogIndex, dlightBits ); +} + +/* +============================================================= + + BRUSH MODELS + +============================================================= +*/ + +/* +================= +R_AddBrushModelSurfaces +================= +*/ +void R_AddBrushModelSurfaces ( trRefEntity_t *ent ) { + bmodel_t *bmodel; + int clip; + model_t *pModel; + int i; + + pModel = R_GetModelByHandle( ent->e.hModel ); + + bmodel = pModel->bmodel; + + clip = R_CullLocalBox( bmodel->bounds ); + if ( clip == CULL_OUT ) { + return; + } + + R_DlightBmodel( bmodel ); + + for ( i = 0 ; i < bmodel->numSurfaces ; i++ ) { + R_AddWorldSurface( bmodel->firstSurface + i, tr.currentEntity->needDlights ); + } +} + + +/* +============================================================= + + WORLD MODEL + +============================================================= +*/ + + +/* +================ +R_RecursiveWorldNode +================ +*/ +static void R_RecursiveWorldNode( mnode_t *node, int planeBits, int dlightBits ) +{ + + do { + int newDlights[2]; + + // if the node wasn't marked as potentially visible, exit + if (node->visframe != tr.visCount) { + return; + } + + // if the bounding volume is outside the frustum, nothing + // inside can be visible OPTIMIZE: don't do this all the way to leafs? + + if ( !r_nocull->integer ) { + int r; + + if ( planeBits & 1 ) { + r = BoxOnPlaneSide(node->mins, node->maxs, &tr.viewParms.frustum[0]); + if (r == 2) { + return; // culled + } + if ( r == 1 ) { + planeBits &= ~1; // all descendants will also be in front + } + } + + if ( planeBits & 2 ) { + r = BoxOnPlaneSide(node->mins, node->maxs, &tr.viewParms.frustum[1]); + if (r == 2) { + return; // culled + } + if ( r == 1 ) { + planeBits &= ~2; // all descendants will also be in front + } + } + + if ( planeBits & 4 ) { + r = BoxOnPlaneSide(node->mins, node->maxs, &tr.viewParms.frustum[2]); + if (r == 2) { + return; // culled + } + if ( r == 1 ) { + planeBits &= ~4; // all descendants will also be in front + } + } + + if ( planeBits & 8 ) { + r = BoxOnPlaneSide(node->mins, node->maxs, &tr.viewParms.frustum[3]); + if (r == 2) { + return; // culled + } + if ( r == 1 ) { + planeBits &= ~8; // all descendants will also be in front + } + } + + } + + if ( node->contents != -1 ) { + break; + } + + // node is just a decision point, so go down both sides + // since we don't care about sort orders, just go positive to negative + + // determine which dlights are needed + newDlights[0] = 0; + newDlights[1] = 0; + if ( dlightBits ) { + int i; + + for ( i = 0 ; i < tr.refdef.num_dlights ; i++ ) { + dlight_t *dl; + float dist; + + if ( dlightBits & ( 1 << i ) ) { + dl = &tr.refdef.dlights[i]; + dist = DotProduct( dl->origin, node->plane->normal ) - node->plane->dist; + + if ( dist > -dl->radius ) { + newDlights[0] |= ( 1 << i ); + } + if ( dist < dl->radius ) { + newDlights[1] |= ( 1 << i ); + } + } + } + } + + // recurse down the children, front side first + R_RecursiveWorldNode (node->children[0], planeBits, newDlights[0] ); + + // tail recurse + node = node->children[1]; + dlightBits = newDlights[1]; + } while ( 1 ); + + { + // leaf node, so add mark surfaces + int c; + msurface_t *surf, **mark; + + tr.pc.c_leafs++; + + // add to z buffer 
bounds
+		if ( node->mins[0] < tr.viewParms.visBounds[0][0] ) {
+			tr.viewParms.visBounds[0][0] = node->mins[0];
+		}
+		if ( node->mins[1] < tr.viewParms.visBounds[0][1] ) {
+			tr.viewParms.visBounds[0][1] = node->mins[1];
+		}
+		if ( node->mins[2] < tr.viewParms.visBounds[0][2] ) {
+			tr.viewParms.visBounds[0][2] = node->mins[2];
+		}
+
+		if ( node->maxs[0] > tr.viewParms.visBounds[1][0] ) {
+			tr.viewParms.visBounds[1][0] = node->maxs[0];
+		}
+		if ( node->maxs[1] > tr.viewParms.visBounds[1][1] ) {
+			tr.viewParms.visBounds[1][1] = node->maxs[1];
+		}
+		if ( node->maxs[2] > tr.viewParms.visBounds[1][2] ) {
+			tr.viewParms.visBounds[1][2] = node->maxs[2];
+		}
+
+		// add the individual surfaces
+		mark = node->firstmarksurface;
+		c = node->nummarksurfaces;
+		while (c--) {
+			// the surface may have already been added if it
+			// spans multiple leafs
+			surf = *mark;
+			R_AddWorldSurface( surf, dlightBits );
+			mark++;
+		}
+	}
+
+}
+
+
+/*
+===============
+R_PointInLeaf
+===============
+*/
+static mnode_t *R_PointInLeaf( const vec3_t p ) {
+	mnode_t		*node;
+	float		d;
+	cplane_t	*plane;
+
+	if ( !tr.world ) {
+		ri.Error (ERR_DROP, "R_PointInLeaf: bad model");
+	}
+
+	node = tr.world->nodes;
+	while( 1 ) {
+		if (node->contents != -1) {
+			break;
+		}
+		plane = node->plane;
+		d = DotProduct (p,plane->normal) - plane->dist;
+		if (d > 0) {
+			node = node->children[0];
+		} else {
+			node = node->children[1];
+		}
+	}
+
+	return node;
+}
+
+/*
+==============
+R_ClusterPVS
+==============
+*/
+static const byte *R_ClusterPVS (int cluster) {
+	if (!tr.world || !tr.world->vis || cluster < 0 || cluster >= tr.world->numClusters ) {
+		return tr.world->novis;
+	}
+
+	return tr.world->vis + cluster * tr.world->clusterBytes;
+}
+
+
+qboolean RE_inPVS( const vec3_t p1, const vec3_t p2 )
+{
+	mnode_t *leaf = R_PointInLeaf( p1 );
+	unsigned char* vis = ri.CM_ClusterPVS( leaf->cluster );
+	leaf = R_PointInLeaf( p2 );
+
+	if ( !(vis[leaf->cluster>>3] & (1<<(leaf->cluster&7))) ) {
+		return qfalse;
+	}
+	return qtrue;
+}
+
+/*
+===============
+R_MarkLeaves
+
+Mark the leaves and nodes that are in the PVS for the current
+cluster
+===============
+*/
+static void R_MarkLeaves (void)
+{
+	const byte	*vis;
+	mnode_t	*leaf, *parent;
+	int		i;
+	int		cluster;
+
+	// lockpvs lets designers walk around to determine the
+	// extent of the current pvs
+	if ( r_lockpvs->integer ) {
+		return;
+	}
+
+	// current viewcluster
+	leaf = R_PointInLeaf( tr.viewParms.pvsOrigin );
+	cluster = leaf->cluster;
+
+	// if the cluster is the same and the area visibility matrix
+	// hasn't changed, we don't need to mark everything again
+
+	// if r_showcluster was just turned on, remark everything
+	if ( tr.viewCluster == cluster && !tr.refdef.AreamaskModified && !r_showcluster->modified )
+	{
+		return;
+	}
+
+	if ( r_showcluster->modified || r_showcluster->integer ) {
+		r_showcluster->modified = qfalse;
+		if ( r_showcluster->integer ) {
+			ri.Printf( PRINT_ALL, "cluster:%i  area:%i\n", cluster, leaf->area );
+		}
+	}
+
+	tr.visCount++;
+	tr.viewCluster = cluster;
+
+	if ( r_novis->integer || tr.viewCluster == -1 ) {
+		for (i=0 ; i<tr.world->numnodes ; i++) {
+			if (tr.world->nodes[i].contents != CONTENTS_SOLID) {
+				tr.world->nodes[i].visframe = tr.visCount;
+			}
+		}
+		return;
+	}
+
+	vis = R_ClusterPVS (tr.viewCluster);
+
+	for (i=0,leaf=tr.world->nodes ; i<tr.world->numnodes ; i++, leaf++) {
+		cluster = leaf->cluster;
+		if ( cluster < 0 || cluster >= tr.world->numClusters ) {
+			continue;
+		}
+
+		// check general pvs
+		if ( !(vis[cluster>>3] & (1<<(cluster&7))) ) {
+			continue;
+		}
+
+		
// check for door connection + if ( (tr.refdef.rd.areamask[leaf->area>>3] & (1<<(leaf->area&7)) ) ) { + continue; // not visible + } + + parent = leaf; + do { + if (parent->visframe == tr.visCount) + break; + parent->visframe = tr.visCount; + parent = parent->parent; + } while (parent); + } +} + + +/* +============= +R_AddWorldSurfaces +============= +*/ +void R_AddWorldSurfaces (void) +{ + if ( !r_drawworld->integer ) { + return; + } + + if ( tr.refdef.rd.rdflags & RDF_NOWORLDMODEL ) { + return; + } + + tr.currentEntityNum = REFENTITYNUM_WORLD; + tr.shiftedEntityNum = tr.currentEntityNum << QSORT_ENTITYNUM_SHIFT; + + // determine which leaves are in the PVS / areamask + R_MarkLeaves (); + + // clear out the visible min/max + ClearBounds( tr.viewParms.visBounds[0], tr.viewParms.visBounds[1] ); + + // perform frustum culling and add all the potentially visible surfaces + if ( tr.refdef.num_dlights > 32 ) { + tr.refdef.num_dlights = 32 ; + } + R_RecursiveWorldNode( tr.world->nodes, 15, ( 1 << tr.refdef.num_dlights ) - 1 ); +} diff --git a/code/renderer_vulkan/vk_cmd.c b/code/renderer_vulkan/vk_cmd.c new file mode 100644 index 0000000000..689e6a646e --- /dev/null +++ b/code/renderer_vulkan/vk_cmd.c @@ -0,0 +1,193 @@ +#include "VKimpl.h" +#include "vk_instance.h" +// =============== Command Buffer Lifecycle ==================== +// Each command buffer is always in one of the following states: +// +// Initial +// +// When a command buffer is allocated, it is in the initial state. Some commands are able to +// reset a command buffer, or a set of command buffers, back to this state from any of the +// executable, recording or invalid state. Command buffers in the initial state can only be +// moved to the recording state, or freed. +// +// Recording +// +// vkBeginCommandBuffer changes the state of a command buffer from the initial state to the +// recording state. Once a command buffer is in the recording state, vkCmd* commands can be +// used to record to the command buffer. +// +// Executable +// +// vkEndCommandBuffer ends the recording of a command buffer, and moves it from the recording +// state to the executable state. Executable command buffers can be submitted, reset, or +// recorded to another command buffer. +// +// Pending +// +// Queue submission of a command buffer changes the state of a command buffer from the +// executable state to the pending state. Whilst in the pending state, applications must +// not attempt to modify the command buffer in any way - as the device may be processing +// the commands recorded to it. Once execution of a command buffer completes, the command +// buffer reverts back to either the executable state, or the invalid state if it was +// recorded with VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT. A synchronization command +// should be used to detect when this occurs. +// +// Invalid +// +// Some operations, such as modifying or deleting a resource that was used in a command +// recorded to a command buffer, will transition the state of that command buffer into +// the invalid state. Command buffers in the invalid state can only be reset or freed. +// +// Any given command that operates on a command buffer has its own requirements on what +// state a command buffer must be in, which are detailed in the valid usage constraints +// for that command. +// +// Resetting a command buffer is an operation that discards any previously recorded +// commands and puts a command buffer in the initial state. 
Resetting occurs as a +// result of vkResetCommandBuffer or vkResetCommandPool, or as part of +// vkBeginCommandBuffer (which additionally puts the command buffer in the recording state). +// +// Secondary command buffers can be recorded to a primary command buffer via +// vkCmdExecuteCommands. This partially ties the lifecycle of the two command +// buffers together - if the primary is submitted to a queue, both the primary +// and any secondaries recorded to it move to the pending state. Once execution +// of the primary completes, so does any secondary recorded within it, and once all +// executions of each command buffer complete, they move to the executable state. +// If a secondary moves to any other state whilst it is recorded to another command buffer, +// the primary moves to the invalid state. A primary moving to any other state does +// not affect the state of the secondary. Resetting or freeing a primary command buffer +// removes the linkage to any secondary command buffers that were recorded to it. +// +// + +void vk_create_command_pool(VkCommandPool* pPool) +{ + // Command pools are opaque objects that command buffer memory is allocated from, + // and which allow the implementation to amortize the cost of resource creation + // across multiple command buffers. Command pools are externally synchronized, + // meaning that a command pool must not be used concurrently in multiple threads. + // That includes use via recording commands on any command buffers allocated from + // the pool, as well as operations that allocate, free, and reset command buffers + // or the pool itself. + + + VkCommandPoolCreateInfo desc; + desc.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; + desc.pNext = NULL; + // VK_COMMAND_POOL_CREATE_TRANSIENT_BIT specifies that command buffers + // allocated from the pool will be short-lived, meaning that they will + // be reset or freed in a relatively short timeframe. This flag may be + // used by the implementation to control memory allocation behavior + // within the pool. + // + // VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT allows any command + // buffer allocated from a pool to be individually reset to the initial + // state; either by calling vkResetCommandBuffer, or via the implicit + // reset when calling vkBeginCommandBuffer. If this flag is not set on + // a pool, then vkResetCommandBuffer must not be called for any command + // buffer allocated from that pool. + desc.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT | VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; + desc.queueFamilyIndex = vk.queue_family_index; + + VK_CHECK(qvkCreateCommandPool(vk.device, &desc, NULL, pPool)); +} + + +void vk_create_command_buffer(VkCommandPool pool, VkCommandBuffer* pBuf) +{ + // Command buffers are objects used to record commands which can be + // subsequently submitted to a device queue for execution. There are + // two levels of command buffers: + // - primary command buffers, which can execute secondary command buffers, + // and which are submitted to queues. + // - secondary command buffers, which can be executed by primary command buffers, + // and which are not directly submitted to queues. 
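A minimal sketch of how these two helpers fit together at renderer start-up (the wrapper function name here is hypothetical; `vk.command_pool` and `vk.command_buffer` are the fields this file later releases in `vk_destroy_commands`):

    // Sketch only - assumes vk.device and vk.queue_family_index are
    // already set up by the instance/device creation code.
    static void vk_init_commands_sketch(void)
    {
        vk_create_command_pool(&vk.command_pool);
        vk_create_command_buffer(vk.command_pool, &vk.command_buffer);
    }
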
+    //
+    // Recorded commands include commands to bind pipelines and descriptor sets
+    // to the command buffer, commands to modify dynamic state, commands to draw
+    // (for graphics rendering), commands to dispatch (for compute), commands to
+    // execute secondary command buffers (for primary command buffers only),
+    // commands to copy buffers and images, and other commands.
+    //
+    // Each command buffer manages state independently of other command buffers.
+    // There is no inheritance of state across primary and secondary command
+    // buffers, or between secondary command buffers.
+    //
+    // When a command buffer begins recording, all state in that command buffer is undefined.
+    // When secondary command buffer(s) are recorded to execute on a primary command buffer,
+    // the secondary command buffer inherits no state from the primary command buffer,
+    // and all state of the primary command buffer is undefined after an execute secondary
+    // command buffer command is recorded. There is one exception to this rule - if the primary
+    // command buffer is inside a render pass instance, then the render pass and subpass state
+    // is not disturbed by executing secondary command buffers. Whenever the state of a command
+    // buffer is undefined, the application must set all relevant state on the command buffer
+    // before any state dependent commands such as draws and dispatches are recorded, otherwise
+    // the behavior of executing that command buffer is undefined.
+    //
+    // Unless otherwise specified, and without explicit synchronization, the various commands
+    // submitted to a queue via command buffers may execute in arbitrary order relative to
+    // each other, and/or concurrently. Also, the memory side-effects of those commands may
+    // not be directly visible to other commands without explicit memory dependencies.
+    // This is true within a command buffer, and across command buffers submitted to a given
+    // queue. See the synchronization chapter for information on implicit and explicit
+    // synchronization between commands.
+
+
+    VkCommandBufferAllocateInfo alloc_info;
+    alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
+    alloc_info.pNext = NULL;
+    alloc_info.commandPool = pool;
+    // Can be submitted to a queue for execution,
+    // but cannot be called from other command buffers.
+    alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
+    alloc_info.commandBufferCount = 1;
+    VK_CHECK(qvkAllocateCommandBuffers(vk.device, &alloc_info, pBuf));
+}
+
+void vk_destroy_commands(void)
+{
+    // Command buffers will be automatically freed when their
+    // command pool is destroyed, so they don't need an explicit
+    // cleanup.
+    ri.Printf( PRINT_ALL, " Free command buffers: vk.command_buffer. \n" );
+    qvkFreeCommandBuffers(vk.device, vk.command_pool, 1, &vk.command_buffer);
+    ri.Printf( PRINT_ALL, " Destroy command pool: vk.command_pool. 
\n" ); + qvkDestroyCommandPool(vk.device, vk.command_pool, NULL); +} + +void record_image_layout_transition( + VkCommandBuffer cmdBuf, + VkImage image, + VkImageAspectFlags image_aspect_flags, + VkAccessFlags src_access_flags, + VkImageLayout old_layout, + VkAccessFlags dst_access_flags, + VkImageLayout new_layout ) +{ + + VkImageMemoryBarrier barrier = {0}; + barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; + barrier.pNext = NULL; + barrier.srcAccessMask = src_access_flags; + barrier.dstAccessMask = dst_access_flags; + barrier.oldLayout = old_layout; + barrier.newLayout = new_layout; + barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + barrier.image = image; + barrier.subresourceRange.aspectMask = image_aspect_flags; + barrier.subresourceRange.baseMipLevel = 0; + barrier.subresourceRange.levelCount = VK_REMAINING_MIP_LEVELS; + barrier.subresourceRange.baseArrayLayer = 0; + barrier.subresourceRange.layerCount = VK_REMAINING_ARRAY_LAYERS; + +// vkCmdPipelineBarrier is a synchronization command that inserts a dependency between +// commands submitted to the same queue, or between commands in the same subpass. +// When vkCmdPipelineBarrier is submitted to a queue, it defines a memory dependency +// between commands that were submitted before it, and those submitted after it. + + // cmdBuf is the command buffer into which the command is recorded. + qvkCmdPipelineBarrier(cmdBuf, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, + VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, NULL, 0, NULL, 1, &barrier); +} + diff --git a/code/renderer_vulkan/vk_cmd.h b/code/renderer_vulkan/vk_cmd.h new file mode 100644 index 0000000000..457ba80b1e --- /dev/null +++ b/code/renderer_vulkan/vk_cmd.h @@ -0,0 +1,13 @@ +#ifndef VK_CMD_H_ +#define VK_CMD_H_ + + +void vk_create_command_pool(VkCommandPool* pPool); +void vk_create_command_buffer(VkCommandPool pool, VkCommandBuffer* pBuf); +void vk_destroy_commands(void); + +void record_image_layout_transition(VkCommandBuffer command_buffer, VkImage image, VkImageAspectFlags image_aspect_flags, + VkAccessFlags src_access_flags, VkImageLayout old_layout, VkAccessFlags dst_access_flags, VkImageLayout new_layout); + + +#endif diff --git a/code/renderer_vulkan/vk_create_window_SDL.c b/code/renderer_vulkan/vk_create_window_SDL.c new file mode 100644 index 0000000000..f4bf006a0f --- /dev/null +++ b/code/renderer_vulkan/vk_create_window_SDL.c @@ -0,0 +1,428 @@ +/* +=========================================================================== +Copyright (C) 1999-2005 Id Software, Inc. + +This file is part of Quake III Arena source code. + +Quake III Arena source code is free software; you can redistribute it +and/or modify it under the terms of the GNU General Public License as +published by the Free Software Foundation; either version 2 of the License, +or (at your option) any later version. + +Quake III Arena source code is distributed in the hope that it will be +useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+
+You should have received a copy of the GNU General Public License
+along with Quake III Arena source code; if not, write to the Free Software
+Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+===========================================================================
+*/
+
+
+// the window surface needs to be created right after the instance creation
+// because it can actually influence the physical device selection
+
+#include "VKimpl.h"
+#include "vk_instance.h"
+
+#include "tr_cvar.h"
+#include "icon_oa.h"
+#include "glConfig.h"
+
+
+#ifdef USE_INTERNAL_SDL_HEADERS
+#    include "SDL.h"
+#    include "SDL_vulkan.h"
+#else
+#    include <SDL.h>
+#    include <SDL_vulkan.h>
+#endif
+
+
+static SDL_Window* window_sdl = NULL;
+
+
+// TODO: multi display support
+static cvar_t* r_displayIndex;
+
+
+static void VKimp_DetectAvailableModes(void)
+{
+    int i, j;
+    char buf[ MAX_STRING_CHARS ] = { 0 };
+
+    SDL_DisplayMode windowMode;
+
+    // If a window exists, note its display index
+    if( window_sdl != NULL )
+    {
+        r_displayIndex->integer = SDL_GetWindowDisplayIndex( window_sdl );
+        if( r_displayIndex->integer < 0 )
+        {
+            ri.Printf(PRINT_ALL, "SDL_GetWindowDisplayIndex() failed: %s\n", SDL_GetError() );
+            return;
+        }
+    }
+
+    int numSDLModes = SDL_GetNumDisplayModes( r_displayIndex->integer );
+
+    if( SDL_GetWindowDisplayMode( window_sdl, &windowMode ) < 0 || numSDLModes <= 0 )
+    {
+        ri.Printf(PRINT_ALL, "Couldn't get window display mode, no resolutions detected: %s\n", SDL_GetError() );
+        return;
+    }
+
+    int numModes = 0;
+    SDL_Rect* modes = SDL_calloc(numSDLModes, sizeof( SDL_Rect ));
+    if ( !modes )
+    {
+        ////////////////////////////////////
+        ri.Error(ERR_FATAL, "Out of memory" );
+        ////////////////////////////////////
+    }
+
+    for( i = 0; i < numSDLModes; i++ )
+    {
+        SDL_DisplayMode mode;
+
+        if( SDL_GetDisplayMode( r_displayIndex->integer, i, &mode ) < 0 )
+            continue;
+
+        if( !mode.w || !mode.h )
+        {
+            ri.Printf(PRINT_ALL, " Display supports any resolution\n" );
+            SDL_free( modes );
+            return;
+        }
+
+        if( windowMode.format != mode.format )
+            continue;
+
+        // SDL can give the same resolution with different refresh rates.
+        // Only list resolution once.
+        for( j = 0; j < numModes; j++ )
+        {
+            if( (mode.w == modes[ j ].w) && (mode.h == modes[ j ].h) )
+                break;
+        }
+
+        if( j != numModes )
+            continue;
+
+        modes[ numModes ].w = mode.w;
+        modes[ numModes ].h = mode.h;
+        numModes++;
+    }
+
+    for( i = 0; i < numModes; i++ )
+    {
+        const char *newModeString = va( "%ux%u ", modes[ i ].w, modes[ i ].h );
+
+        if( strlen( newModeString ) < (int)sizeof( buf ) - strlen( buf ) )
+            Q_strcat( buf, sizeof( buf ), newModeString );
+        else
+            ri.Printf(PRINT_ALL, " Skipping mode %ux%u, buffer too small\n", modes[ i ].w, modes[ i ].h );
+    }
+
+    if( *buf )
+    {
+        buf[ strlen( buf ) - 1 ] = 0;
+        ri.Printf(PRINT_ALL, " Available modes: '%s'\n", buf );
+        ri.Cvar_Set( "r_availableModes", buf );
+    }
+    SDL_free( modes );
+}
+
+
+static int VKimp_SetMode(int mode, qboolean fullscreen)
+{
+    SDL_DisplayMode desktopMode;
+
+    Uint32 flags = SDL_WINDOW_SHOWN | SDL_WINDOW_VULKAN;
+
+    if ( r_allowResize->integer )
+        flags |= SDL_WINDOW_RESIZABLE;
+
+
+    ri.Printf(PRINT_ALL, "\n...VKimp_SetMode()...\n");
+
+    SDL_GetNumVideoDisplays();
+
+    int display_mode_count = SDL_GetNumDisplayModes(r_displayIndex->integer);
+    if (display_mode_count < 1)
+    {
+        ri.Printf(PRINT_ALL, " SDL_GetNumDisplayModes failed: %s\n", SDL_GetError());
+    }
+
+
+    int tmp = SDL_GetDesktopDisplayMode(r_displayIndex->integer, &desktopMode);
+    if( (tmp == 0) && (desktopMode.h > 0) )
+    {
+        Uint32 f = desktopMode.format;
+        ri.Printf(PRINT_ALL, " bpp %i\t%s\t%i x %i, refresh_rate: %dHz\n", SDL_BITSPERPIXEL(f), SDL_GetPixelFormatName(f), desktopMode.w, desktopMode.h, desktopMode.refresh_rate);
+    }
+    else if (SDL_GetDisplayMode(r_displayIndex->integer, 0, &desktopMode) != 0)
+    {
+        // mode = 0: use the first display mode SDL returns;
+        ri.Printf(PRINT_ALL," SDL_GetDisplayMode failed: %s\n", SDL_GetError());
+        mode = 3;
+        desktopMode.w = 640;
+        desktopMode.h = 480;
+        desktopMode.refresh_rate = 60;
+        fullscreen = 0;
+    }
+
+    if(fullscreen)
+    {
+        // prevent crashing the OS
+        r_mode->integer = mode = -2;
+
+        flags |= SDL_WINDOW_FULLSCREEN;
+        flags |= SDL_WINDOW_BORDERLESS;
+    }
+
+    R_SetWinMode( mode, desktopMode.w, desktopMode.h, desktopMode.refresh_rate );
+
+
+
+    if( window_sdl != NULL )
+    {
+        // SDL_GetWindowPosition( window_sdl, &x, &y );
+        SDL_DestroyWindow( window_sdl );
+        window_sdl = NULL;
+        ri.Printf(PRINT_ALL, "Existing window being destroyed\n");
+    }
+
+    int width = 640;
+    int height = 480;
+
+    R_GetWinResolution(&width, &height);
+
+    window_sdl = SDL_CreateWindow(
+            CLIENT_WINDOW_TITLE,
+            SDL_WINDOWPOS_CENTERED,
+            SDL_WINDOWPOS_CENTERED,
+            width,
+            height,
+            flags );
+
+
+    if( window_sdl )
+    {
+        VKimp_DetectAvailableModes();
+        return 0;
+    }
+
+    ri.Printf(PRINT_WARNING, " Couldn't create a window: %s\n", SDL_GetError() );
+    return -1;
+}
+
+
+
+/*
+ * This routine is responsible for initializing the OS specific portions of Vulkan
+ */
+void vk_createWindow(void)
+{
+    ri.Printf(PRINT_ALL, "\n...Creating window (using SDL2)...\n");
+
+    // Print SDL2 Version ....
+    SDL_version v;
+    SDL_version *sdl_version = &v;
+    SDL_GetVersion(&v);
+    ri.Printf(PRINT_ALL, " Found SDL version %i.%i.%i\n",
+            sdl_version->major, sdl_version->minor, sdl_version->patch);
+
+    r_displayIndex = ri.Cvar_Get( "r_displayIndex", "0", CVAR_ARCHIVE | CVAR_LATCH );
+
+    SDL_Surface* icon = SDL_CreateRGBSurfaceFrom(
+            (void *)CLIENT_WINDOW_ICON.pixel_data,
+            CLIENT_WINDOW_ICON.width,
+            CLIENT_WINDOW_ICON.height,
+            CLIENT_WINDOW_ICON.bytes_per_pixel * 8,
+            CLIENT_WINDOW_ICON.bytes_per_pixel * CLIENT_WINDOW_ICON.width,
+#ifdef Q3_LITTLE_ENDIAN
+            0x000000FF, 0x0000FF00, 0x00FF0000, 0xFF000000
+#else
+            0xFF000000, 0x00FF0000, 0x0000FF00, 0x000000FF
+#endif
+            );
+
+    if (icon == NULL)
+    {
+        ri.Printf(PRINT_ALL, " SDL_CreateRGBSurfaceFrom failed. \n" );
+    }
+
+    if (ri.Cvar_VariableIntegerValue( "com_abnormalExit" ) )
+    {
+        ri.Cvar_Set( "r_fullscreen", "0" );
+        ri.Cvar_Set( "r_mode", "3" );
+        ri.Cvar_Set( "com_abnormalExit", "0" );
+    }
+
+    // Use this function to get a mask of the specified
+    // subsystems which have previously been initialized.
+    // If flags is 0 it returns a mask of all initialized subsystems,
+    // otherwise it returns the initialization status of the specified subsystems.
+    if (0 == SDL_WasInit(SDL_INIT_VIDEO))
+    {
+        ri.Printf(PRINT_ALL, " Video is not initialized yet, so initializing it.\n");
+
+        if (SDL_Init(SDL_INIT_VIDEO) != 0)
+        {
+            ri.Printf(PRINT_ALL, " SDL_Init( SDL_INIT_VIDEO ) FAILED (%s)\n", SDL_GetError());
+        }
+        else
+        {
+            ri.Printf(PRINT_ALL, " SDL using driver \"%s\"\n", SDL_GetCurrentVideoDriver( ));
+        }
+    }
+    else
+    {
+        ri.Printf(PRINT_ALL, " Video is already initialized.\n");
+    }
+
+    if( 0 == VKimp_SetMode(r_mode->integer, r_fullscreen->integer) )
+    {
+        goto success;
+    }
+    else
+    {
+        ri.Printf(PRINT_ALL, " Setting r_mode=%d, r_fullscreen=%d failed, falling back on r_mode=%d\n",
+                r_mode->integer, r_fullscreen->integer, 3 );
+
+        if( 0 == VKimp_SetMode(3, qfalse) )
+        {
+            goto success;
+        }
+        else
+        {
+            ri.Error(ERR_FATAL, "VKimp_Init() - could not load Vulkan subsystem: %s", SDL_GetError());
+        }
+    }
+
+
+success:
+
+    SDL_SetWindowIcon( window_sdl, icon );
+
+    SDL_FreeSurface( icon );
+
+    // This depends on SDL_INIT_VIDEO, hence having it here
+    ri.IN_Init(window_sdl);
+}
+
+
+void vk_getInstanceProcAddrImpl(void)
+{
+    ri.Printf(PRINT_ALL, " *** Vulkan Initialization ***\n");
+    int code = SDL_Vulkan_LoadLibrary(NULL);
+    if (code) {
+        ri.Error(ERR_FATAL, "Failed to load Vulkan library (code %d): %s", code, SDL_GetError());
+    }
+
+    qvkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr) SDL_Vulkan_GetVkGetInstanceProcAddr();
+    if( qvkGetInstanceProcAddr == NULL)
+    {
+        ri.Error(ERR_FATAL, "Failed to find entrypoint vkGetInstanceProcAddr\n");
+    }
+
+    ri.Printf(PRINT_ALL, " Get instance proc address. (using SDL2)\n");
+}
+
+
+void vk_destroyWindow( void )
+{
+    ri.Printf(PRINT_ALL, " Destroy Window Subsystem.\n");
+
+    ri.IN_Shutdown();
+    SDL_QuitSubSystem( SDL_INIT_VIDEO );
+
+    SDL_DestroyWindow( window_sdl );
+    window_sdl = NULL;
+}
+
+
+void vk_createSurfaceImpl(void)
+{
+    ri.Printf(PRINT_ALL, " Create Surface: vk.surface.\n");
+
+    if(!SDL_Vulkan_CreateSurface(window_sdl, vk.instance, &vk.surface))
+    {
+        vk.surface = VK_NULL_HANDLE;
+        ri.Error(ERR_FATAL, "SDL_Vulkan_CreateSurface(): %s\n", SDL_GetError());
+    }
+}
+
+
+
+
+/*
+===============
+Minimize the game so that user is back at the desktop
+===============
+*/
+void vk_minimizeWindow( void )
+{
+    VkBool32 toggleWorked = 1;
+    ri.Printf( PRINT_ALL, " Minimizing Window (SDL). 
\n");
+
+    VkBool32 isWinFullscreen = ( SDL_GetWindowFlags( window_sdl ) & SDL_WINDOW_FULLSCREEN );
+
+
+    if( isWinFullscreen )
+    {
+        toggleWorked = (SDL_SetWindowFullscreen( window_sdl, 0 ) >= 0);
+    }
+
+    // if we were never fullscreen, or leaving fullscreen worked, just minimize
+    if( toggleWorked )
+    {
+        // ri.IN_Shutdown( );
+        SDL_MinimizeWindow( window_sdl );
+        // SDL_HideWindow( window_sdl );
+    }
+    else
+    {
+        ri.Printf( PRINT_ALL, " SDL_SetWindowFullscreen didn't work, so do it the slow way \n");
+
+        ri.Cmd_ExecuteText(EXEC_APPEND, "vid_restart\n");
+    }
+}
+
+/*
+    if( r_fullscreen->modified )
+    {
+        qboolean needToToggle;
+        qboolean sdlToggled = qfalse;
+
+        // Find out the current state
+        int fullscreen = !!( SDL_GetWindowFlags( window_sdl ) & SDL_WINDOW_FULLSCREEN );
+
+        if( r_fullscreen->integer && ri.Cvar_VariableIntegerValue( "in_nograb" ) )
+        {
+            ri.Printf( PRINT_ALL, "Fullscreen not allowed with in_nograb 1\n");
+            ri.Cvar_Set( "r_fullscreen", "0" );
+            r_fullscreen->modified = qfalse;
+        }
+
+        // Is the state we want different from the current state?
+        needToToggle = !!r_fullscreen->integer != fullscreen;
+
+        if( needToToggle )
+        {
+            sdlToggled = SDL_SetWindowFullscreen( window_sdl, r_fullscreen->integer ) >= 0;
+
+            // SDL_WM_ToggleFullScreen didn't work, so do it the slow way
+            if( !sdlToggled )
+                ri.Cmd_ExecuteText(EXEC_APPEND, "vid_restart\n");
+
+            ri.IN_Restart( );
+        }
+
+        r_fullscreen->modified = qfalse;
+    }
+*/
diff --git a/code/renderer_vulkan/vk_depth_attachment.c b/code/renderer_vulkan/vk_depth_attachment.c
new file mode 100644
index 0000000000..e6fc0fbd03
--- /dev/null
+++ b/code/renderer_vulkan/vk_depth_attachment.c
@@ -0,0 +1,127 @@
+#include "tr_local.h"
+#include "vk_instance.h"
+#include "vk_image.h"
+#include "vk_cmd.h"
+#include "vk_depth_attachment.h"
+
+
+void vk_createDepthAttachment(int Width, int Height)
+{
+    // A depth attachment is based on an image, just like the color attachment.
+    // The difference is that the swap chain will not automatically create a
+    // depth image for us. We need only a single depth image, because only
+    // one draw operation is running at once.
+    ri.Printf(PRINT_ALL, " Create depth image: vk.depth_image, %d x %d. \n", Width, Height);
+    {
+        VkImageCreateInfo desc;
+        desc.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+        desc.pNext = NULL;
+        desc.flags = 0;
+        desc.imageType = VK_IMAGE_TYPE_2D;
+        desc.format = vk.fmt_DepthStencil;
+        desc.extent.width = Width;
+        desc.extent.height = Height;
+        desc.extent.depth = 1;
+        desc.mipLevels = 1;
+        desc.arrayLayers = 1;
+        desc.samples = VK_SAMPLE_COUNT_1_BIT;
+        desc.tiling = VK_IMAGE_TILING_OPTIMAL;
+        desc.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+        desc.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+        desc.queueFamilyIndexCount = 0;
+        desc.pQueueFamilyIndices = NULL;
+        desc.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+        VK_CHECK(qvkCreateImage(vk.device, &desc, NULL, &vk.depth_image));
+    }
+
+    ri.Printf(PRINT_ALL, " Allocate device local memory for depth image: vk.depth_image_memory. 
\n"); + { + VkMemoryRequirements memory_requirements; + qvkGetImageMemoryRequirements(vk.device, vk.depth_image, &memory_requirements); + + VkMemoryAllocateInfo alloc_info; + alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; + alloc_info.pNext = NULL; + alloc_info.allocationSize = memory_requirements.size; + alloc_info.memoryTypeIndex = find_memory_type( memory_requirements.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); + // = vk.idx_depthImgMem; + VK_CHECK(qvkAllocateMemory(vk.device, &alloc_info, NULL, &vk.depth_image_memory)); + VK_CHECK(qvkBindImageMemory(vk.device, vk.depth_image, vk.depth_image_memory, 0)); + } + + + ri.Printf(PRINT_ALL, " Create image view for depth image: vk.depth_image_view. \n"); + { + VkImageViewCreateInfo desc; + desc.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; + desc.pNext = NULL; + desc.flags = 0; + desc.image = vk.depth_image; + desc.viewType = VK_IMAGE_VIEW_TYPE_2D; + desc.format = vk.fmt_DepthStencil; + desc.components.r = VK_COMPONENT_SWIZZLE_IDENTITY; + desc.components.g = VK_COMPONENT_SWIZZLE_IDENTITY; + desc.components.b = VK_COMPONENT_SWIZZLE_IDENTITY; + desc.components.a = VK_COMPONENT_SWIZZLE_IDENTITY; + desc.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; + desc.subresourceRange.baseMipLevel = 0; + desc.subresourceRange.levelCount = 1; + desc.subresourceRange.baseArrayLayer = 0; + desc.subresourceRange.layerCount = 1; + VK_CHECK(qvkCreateImageView(vk.device, &desc, NULL, &vk.depth_image_view)); + } + + VkImageAspectFlags image_aspect_flags = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; + + + VkCommandBufferAllocateInfo alloc_info; + alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; + alloc_info.pNext = NULL; + alloc_info.commandPool = vk.command_pool; + alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; + alloc_info.commandBufferCount = 1; + + VkCommandBuffer pCB; + VK_CHECK(qvkAllocateCommandBuffers(vk.device, &alloc_info, &pCB)); + + VkCommandBufferBeginInfo begin_info; + begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; + begin_info.pNext = NULL; + begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT; + begin_info.pInheritanceInfo = NULL; + + VK_CHECK(qvkBeginCommandBuffer(pCB, &begin_info)); + + record_image_layout_transition(pCB, vk.depth_image, + image_aspect_flags, 0, VK_IMAGE_LAYOUT_UNDEFINED, + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, + VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL); + + + VK_CHECK(qvkEndCommandBuffer(pCB)); + + VkSubmitInfo submit_info; + submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; + submit_info.pNext = NULL; + submit_info.waitSemaphoreCount = 0; + submit_info.pWaitSemaphores = NULL; + submit_info.pWaitDstStageMask = NULL; + submit_info.commandBufferCount = 1; + submit_info.pCommandBuffers = &pCB; + submit_info.signalSemaphoreCount = 0; + submit_info.pSignalSemaphores = NULL; + + VK_CHECK(qvkQueueSubmit(vk.queue, 1, &submit_info, VK_NULL_HANDLE)); + VK_CHECK(qvkQueueWaitIdle(vk.queue)); + qvkFreeCommandBuffers(vk.device, vk.command_pool, 1, &pCB); +} + + + +void vk_destroyDepthAttachment(void) +{ + qvkDestroyImageView(vk.device, vk.depth_image_view, NULL); + qvkDestroyImage(vk.device, vk.depth_image, NULL); + qvkFreeMemory(vk.device, vk.depth_image_memory, NULL); +} diff --git a/code/renderer_vulkan/vk_depth_attachment.h b/code/renderer_vulkan/vk_depth_attachment.h new file mode 100644 index 0000000000..dc670d7bca --- /dev/null +++ b/code/renderer_vulkan/vk_depth_attachment.h 
@@ -0,0 +1,8 @@
+#ifndef VK_DEPTH_IMAGE_H_
+#define VK_DEPTH_IMAGE_H_
+
+void vk_createDepthAttachment(int Width, int Height);
+void vk_destroyDepthAttachment(void);
+
+
+#endif
diff --git a/code/renderer_vulkan/vk_frame.c b/code/renderer_vulkan/vk_frame.c
new file mode 100644
index 0000000000..25ecf1fda0
--- /dev/null
+++ b/code/renderer_vulkan/vk_frame.c
@@ -0,0 +1,671 @@
+#include "tr_local.h"
+#include "tr_cvar.h"
+#include "vk_instance.h"
+#include "vk_shade_geometry.h"
+#include "vk_frame.h"
+#include "vk_swapchain.h"
+
+// Synchronization of access to resources is primarily the responsibility
+// of the application in Vulkan. The order of execution of commands with
+// respect to the host and other commands on the device has few implicit
+// guarantees, and needs to be explicitly specified. Memory caches and
+// other optimizations are also explicitly managed, requiring that the
+// flow of data through the system is largely under application control.
+// Whilst some implicit guarantees exist between commands, five explicit
+// synchronization mechanisms are exposed by Vulkan:
+//
+//
+// Fences
+//
+// Fences can be used to communicate to the host that execution of some
+// task on the device has completed.
+//
+// Fences are a synchronization primitive that can be used to insert a dependency
+// from a queue to the host. Fences have two states - signaled and unsignaled.
+// A fence can be signaled as part of the execution of a queue submission command.
+// Fences can be unsignaled on the host with vkResetFences. Fences can be waited
+// on by the host with the vkWaitForFences command, and the current state can be
+// queried with vkGetFenceStatus.
+//
+//
+// Semaphores
+//
+// Semaphores can be used to control resource access across multiple queues.
+//
+// Events
+//
+// Events provide a fine-grained synchronization primitive which can be
+// signaled either within a command buffer or by the host, and can be
+// waited upon within a command buffer or queried on the host.
+//
+// Pipeline Barriers
+//
+// Pipeline barriers also provide synchronization control within a command buffer,
+// but at a single point, rather than with separate signal and wait operations.
+//
+// Render Passes
+//
+// Render passes provide a useful synchronization framework for most rendering tasks,
+// built upon the concepts in this chapter. Many cases that would otherwise need an
+// application to use other synchronization primitives can be expressed more
+// efficiently as part of a render pass.
+//
+//
+
+VkSemaphore sema_imageAvailable;
+VkSemaphore sema_renderFinished;
+VkFence fence_renderFinished;
+
+/*
+    Use of a presentable image must occur only after the image is
+    returned by vkAcquireNextImageKHR, and before it is presented by
+    vkQueuePresentKHR. This includes transitioning the image layout
+    and rendering commands.
+
+
+    The presentation engine is an abstraction for the platform's compositor
+    or display engine. The presentation engine controls the order in which
+    presentable images are acquired for use by the application.
+
+    This allows the platform to handle situations which require out-of-order
+    return of images after presentation. At the same time, it allows the
+    application to generate command buffers referencing all of the images in
+    the swapchain at initialization time, rather than in its main loop.
+
+    Host access to fence must be externally synchronized.
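A minimal sketch of the fence round trip described above, using the `fence_renderFinished` handle declared in this file (illustrative only; `vk_begin_frame` below does the same with a finite timeout):

    // Block until the GPU signals the fence, then re-arm it so the
    // next queue submission can signal it again.
    VK_CHECK(qvkWaitForFences(vk.device, 1, &fence_renderFinished, VK_TRUE, UINT64_MAX));
    VK_CHECK(qvkResetFences(vk.device, 1, &fence_renderFinished));
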
+
+    When a fence is submitted to a queue as part of a queue submission command,
+    it defines a memory dependency on the batches that were submitted as part
+    of that command, and defines a fence signal operation which sets the fence
+    to the signaled state.
+*/
+
+void vk_create_sync_primitives(void)
+{
+    VkSemaphoreCreateInfo desc;
+    desc.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+    desc.pNext = NULL;
+    desc.flags = 0;
+
+    // We need one semaphore to signal that an image has been acquired and
+    // is ready for rendering; and another one to signal that rendering has
+    // finished and presentation can happen.
+
+    // vk.device is the logical device that creates the semaphore.
+    // &desc is a pointer to an instance of the VkSemaphoreCreateInfo structure
+    // which contains information about how the semaphore is to be created.
+    // When created, the semaphore is in the unsignaled state.
+    VK_CHECK(qvkCreateSemaphore(vk.device, &desc, NULL, &sema_imageAvailable));
+    VK_CHECK(qvkCreateSemaphore(vk.device, &desc, NULL, &sema_renderFinished));
+
+
+    VkFenceCreateInfo fence_desc;
+    fence_desc.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
+    fence_desc.pNext = NULL;
+
+    // VK_FENCE_CREATE_SIGNALED_BIT specifies that the fence object
+    // is created in the signaled state. Otherwise, it is created
+    // in the unsignaled state.
+    fence_desc.flags = VK_FENCE_CREATE_SIGNALED_BIT;
+
+    // vk.device is the logical device that creates the fence.
+    // fence_desc is an instance of the VkFenceCreateInfo structure
+    // which contains information about how the fence is to be created.
+    // pAllocator controls host memory allocation as described
+    // in the Memory Allocation chapter.
+    // "fence_renderFinished" is a handle in which the resulting
+    // fence object is returned.
+
+    VK_CHECK(qvkCreateFence(vk.device, &fence_desc, NULL, &fence_renderFinished));
+}
+
+
+void vk_destroy_sync_primitives(void)
+{
+    ri.Printf(PRINT_ALL, " Destroy sema_imageAvailable sema_renderFinished fence_renderFinished\n");
+
+    qvkDestroySemaphore(vk.device, sema_imageAvailable, NULL);
+    qvkDestroySemaphore(vk.device, sema_renderFinished, NULL);
+
+    // To destroy a fence,
+    qvkDestroyFence(vk.device, fence_renderFinished, NULL);
+}
+
+
+
+// NOTE: Render Pass Compatibility
+// Framebuffers and graphics pipelines are created based on
+// a specific render pass object. They must only be used with
+// that render pass object, or one compatible with it.
+//
+// Two attachment references are compatible if they have matching
+// format and sample count, or are both VK_ATTACHMENT_UNUSED
+// or the pointer that would contain the reference is NULL.
+//
+// Two arrays of attachment references are compatible if all
+// corresponding pairs of attachments are compatible. If the arrays
+// are of different lengths, attachment references not present in
+// the smaller array are treated as VK_ATTACHMENT_UNUSED.
+//
+// Two render passes are compatible if their corresponding color,
+// input, resolve, and depth/stencil attachment references are
+// compatible and if they are otherwise identical except for:
+// 1) Initial and final image layout in attachment descriptions
+// 2) Load and store operations in attachment descriptions
+// 3) Image layout in attachment references
+//
+// A framebuffer is compatible with a render pass if it was created
+// using the same render pass or a compatible render pass.
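To make the compatibility rules above concrete, a small sketch (the format value is hypothetical, not taken from this patch): two attachment descriptions that differ only in load op and layouts remain compatible, because format and sample count match.

    VkAttachmentDescription a = {0};
    a.format        = VK_FORMAT_B8G8R8A8_UNORM;    // hypothetical format
    a.samples       = VK_SAMPLE_COUNT_1_BIT;
    a.loadOp        = VK_ATTACHMENT_LOAD_OP_CLEAR;
    a.storeOp       = VK_ATTACHMENT_STORE_OP_STORE;
    a.finalLayout   = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

    VkAttachmentDescription b = a;                        // start from a copy
    b.loadOp        = VK_ATTACHMENT_LOAD_OP_DONT_CARE;    // load op may differ
    b.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; // layout may differ
    // b.format or b.samples may NOT differ - that would make render
    // passes built from these attachments incompatible.
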
+static void vk_createRenderPass(VkDevice device)
+{
+
+// Before we can finish creating the pipeline, we need to tell vulkan
+// about the framebuffer attachment that will be used while rendering.
+// We need to specify how many color and depth buffers there will be,
+// how many samples to use for each of them and how their contents
+// should be handled throughout the rendering operations.
+
+    VkAttachmentDescription attachments[2];
+    attachments[0].flags = 0;
+
+// The format of the color attachment should match the format of the
+// swap chain images.
+    attachments[0].format = vk.surface_format.format;
+// related to multisampling: a single sample per pixel is used here
+    attachments[0].samples = VK_SAMPLE_COUNT_1_BIT;
+    attachments[0].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+    attachments[0].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+    attachments[0].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+    attachments[0].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+    attachments[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+    attachments[0].finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
+
+    attachments[1].flags = 0;
+    attachments[1].format = vk.fmt_DepthStencil;
+    attachments[1].samples = VK_SAMPLE_COUNT_1_BIT;
+
+// Attachments can also be cleared at the beginning of a render pass
+// instance by setting loadOp/stencilLoadOp of VkAttachmentDescription
+// to VK_ATTACHMENT_LOAD_OP_CLEAR, as described for vkCreateRenderPass.
+// loadOp and stencilLoadOp specify how the contents of the attachment
+// are treated before rendering and after rendering.
+    attachments[1].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+    attachments[1].storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+
+// specifies that the contents within the render area will be cleared to
+// a uniform value, which is specified when a render pass instance is begun
+    attachments[1].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+    attachments[1].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+    attachments[1].initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+    attachments[1].finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+
+    // Textures and framebuffers in Vulkan are represented by VkImage
+    // objects with a certain pixel format. However, the layout of the
+    // pixels in memory can change based on what you're trying to do
+    // with an image.
+
+    // Images used as color attachment
+    VkAttachmentReference color_attachment_ref;
+    color_attachment_ref.attachment = 0;
+    color_attachment_ref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+
+    VkAttachmentReference depth_attachment_ref;
+    depth_attachment_ref.attachment = 1;
+    depth_attachment_ref.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+
+    VkSubpassDescription subpass;
+    subpass.flags = 0;
+    subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+    subpass.inputAttachmentCount = 0;
+    subpass.pInputAttachments = NULL;
+    subpass.colorAttachmentCount = 1;
+    subpass.pColorAttachments = &color_attachment_ref;
+    subpass.pResolveAttachments = NULL;
+    subpass.pDepthStencilAttachment = &depth_attachment_ref;
+    subpass.preserveAttachmentCount = 0;
+    subpass.pPreserveAttachments = NULL;
+
+    VkRenderPassCreateInfo desc;
+    desc.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
+    desc.pNext = NULL;
+    desc.flags = 0;
+
+    desc.attachmentCount = 2;
+    desc.pAttachments = attachments;
+
+    desc.subpassCount = 1;
+    desc.pSubpasses = &subpass;
+
+    // Subpass dependencies
+    // Remember that the subpasses in a render pass automatically take care of
+    // image layout transitions. These transitions are controlled by subpass
+    // dependencies, which specify memory and execution dependencies between
+    // subpasses. Operations right before and right after this subpass also
+    // count as implicit "subpasses".
+
+    desc.dependencyCount = 0;
+    desc.pDependencies = NULL;
+
+    VK_CHECK(qvkCreateRenderPass(device, &desc, NULL, &vk.render_pass));
+}
+
+
+
+void vk_createFrameBuffers(uint32_t w, uint32_t h)
+{
+    ri.Printf(PRINT_ALL, " Create RenderPass: vk.render_pass \n");
+    //
+    // Renderpass.
+    // A render pass represents a collection of attachments, subpasses,
+    // and dependencies between the subpasses, and describes how the
+    // attachments are used over the course of the subpasses. The use
+    // of a render pass in a command buffer is a render pass instance.
+    //
+    // An attachment description describes the properties of an attachment
+    // including its format, sample count, and how its contents are treated
+    // at the beginning and end of each render pass instance.
+    //
+    // A subpass represents a phase of rendering that reads and writes
+    // a subset of the attachments in a render pass. Rendering commands
+    // are recorded into a particular subpass of a render pass instance.
+    //
+    // A subpass description describes the subset of attachments that
+    // is involved in the execution of a subpass. Each subpass can read
+    // from some attachments as input attachments, write to some as
+    // color attachments or depth/stencil attachments, and perform
+    // multisample resolve operations to resolve attachments. A subpass
+    // description can also include a set of preserve attachments,
+    // which are attachments that are not read or written by the subpass
+    // but whose contents must be preserved throughout the subpass.
+    //
+    // A subpass uses an attachment if the attachment is a color,
+    // depth/stencil, resolve, or input attachment for that subpass
+    // (as determined by the pColorAttachments, pDepthStencilAttachment,
+    // pResolveAttachments, and pInputAttachments members of
+    // VkSubpassDescription, respectively). A subpass does not use an
+    // attachment if that attachment is preserved by the subpass.
+    // The first use of an attachment is in the lowest numbered subpass
+    // that uses that attachment. Similarly, the last use of an attachment
+    // is in the highest numbered subpass that uses that attachment.
+    //
+    // The subpasses in a render pass all render to the same dimensions,
+    // and fragments for pixel (x,y,layer) in one subpass can only read
+    // attachment contents written by previous subpasses at that same
+    // (x,y,layer) location.
+    //
+    // By describing a complete set of subpasses in advance, render passes
+    // provide the implementation an opportunity to optimize the storage
+    // and transfer of attachment data between subpasses. In practice,
+    // this means that subpasses with a simple framebuffer-space dependency
+    // may be merged into a single tiled rendering pass, keeping the
+    // attachment data on-chip for the duration of a render pass instance.
+    // However, it is also quite common for a render pass to only contain
+    // a single subpass.
+    //
+    // Subpass dependencies describe execution and memory dependencies
+    // between subpasses. A subpass dependency chain is a sequence of
+    // subpass dependencies in a render pass, where the source subpass
+    // of each subpass dependency (after the first) equals the destination
+    // subpass of the previous dependency.
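The patch itself leaves `dependencyCount` at 0 (see `vk_createRenderPass` above) and relies on the implicit external subpass dependencies. For illustration only, an explicit dependency protecting the color attachment write would look roughly like this:

    VkSubpassDependency dep;
    dep.srcSubpass      = VK_SUBPASS_EXTERNAL;   // commands before the render pass
    dep.dstSubpass      = 0;                     // the single subpass above
    dep.srcStageMask    = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    dep.dstStageMask    = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    dep.srcAccessMask   = 0;
    dep.dstAccessMask   = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    dep.dependencyFlags = 0;
    // then: desc.dependencyCount = 1; desc.pDependencies = &dep;
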
+    //
+    // Execution of subpasses may overlap or execute out of order with
+    // regards to other subpasses, unless otherwise enforced by an
+    // execution dependency. Each subpass only respects submission order
+    // for commands recorded in the same subpass, and the vkCmdBeginRenderPass
+    // and vkCmdEndRenderPass commands that delimit the render pass -
+    // commands within other subpasses are not included. This affects
+    // most other implicit ordering guarantees.
+    //
+    // A render pass describes the structure of subpasses and attachments
+    // independent of any specific image views for the attachments.
+    // The specific image views that will be used for the attachments,
+    // and their dimensions, are specified in VkFramebuffer objects.
+    // Framebuffers are created with respect to a specific render pass
+    // that the framebuffer is compatible with (see Render Pass Compatibility).
+    // Collectively, a render pass and a framebuffer define the complete
+    // render target state for one or more subpasses as well as the
+    // algorithmic dependencies between the subpasses.
+    //
+    // The various pipeline stages of the drawing commands for a given
+    // subpass may execute concurrently and/or out of order, both within
+    // and across drawing commands, whilst still respecting pipeline order.
+    // However for a given (x,y,layer,sample) sample location, certain
+    // per-sample operations are performed in rasterization order.
+
+    vk_createRenderPass(vk.device);
+
+
+    // Framebuffers for each swapchain image.
+    // The attachments specified during render pass creation are bound
+    // by wrapping them into a VkFramebuffer object. A framebuffer object
+    // references all of the VkImageView objects that represent the attachments.
+    // The image that we have to use as attachment depends on which image
+    // the swap chain returns when we retrieve one for presentation;
+    // this means that we have to create a framebuffer for all of the images
+    // in the swap chain and use the one that corresponds to the retrieved
+    // image at draw time.
+
+    ri.Printf(PRINT_ALL, " Create vk.framebuffers \n");
+
+    // Render passes operate in conjunction with framebuffers.
+    // Framebuffers represent a collection of specific memory
+    // attachments that a render pass instance uses.
+
+    uint32_t i;
+    for (i = 0; i < vk.swapchain_image_count; i++)
+    {
+        // set color and depth attachment
+        VkImageView attachments[2] = {
+            vk.swapchain_image_views[i], vk.depth_image_view };
+
+        VkFramebufferCreateInfo desc;
+        desc.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
+        desc.pNext = NULL;
+        desc.flags = 0;
+
+        // renderPass is a render pass that defines what render
+        // passes the framebuffer will be compatible with. See
+        // Render Pass Compatibility for details.
+        desc.renderPass = vk.render_pass;
+        desc.attachmentCount = 2;
+        // pAttachments is an array of VkImageView handles, each
+        // of which will be used as the corresponding attachment
+        // in a render pass instance.
+        desc.pAttachments = attachments;
+        desc.width = w;
+        desc.height = h;
+        desc.layers = 1;
+
+        VK_CHECK(qvkCreateFramebuffer(vk.device, &desc, NULL, &vk.framebuffers[i]));
+
+        // Applications must ensure that all accesses to memory that backs
+        // image subresources used as attachments in a given renderpass instance
+        // either happen-before the load operations for those attachments,
+        // or happen-after the store operations for those attachments.
+ // + // For depth/stencil attachments, each aspect can be used separately + // as attachments and non-attachments as long as the non-attachment + // accesses are also via an image subresource in either the + // VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL layout + // or the VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL + // layout, and the attachment resource uses whichever of those two + // layouts the image accesses do not. Use of non-attachment aspects + // in this case is only well defined if the attachment is used in the + // subpass where the non-attachment access is being made, or the layout + // of the image subresource is constant throughout the entire render + // pass instance, including the initialLayout and finalLayout. + // + // These restrictions mean that the render pass has full knowledge of + // all uses of all of the attachments, so that the implementation is + // able to make correct decisions about when and how to perform layout + // transitions, when to overlap execution of subpasses, etc. + } +} + + +void vk_destroyFrameBuffers(void) +{ + // we should delete the framebuffers before the image views + // and the render pass that they are based on. + ri.Printf(PRINT_ALL, " Destroy vk.framebuffers vk.swapchain_image_views vk.swapchain\n"); + + uint32_t i; + for (i = 0; i < vk.swapchain_image_count; i++) + { + qvkDestroyFramebuffer(vk.device, vk.framebuffers[i], NULL); + qvkDestroyImageView(vk.device, vk.swapchain_image_views[i], NULL); + } + + qvkDestroySwapchainKHR(vk.device, vk.swapchain, NULL); + + qvkDestroyRenderPass(vk.device, vk.render_pass, NULL); +} + + +void vk_begin_frame(void) +{ + + // An application can acquire use of a presentable image with vkAcquireNextImageKHR. + // After acquiring a presentable image and before modifying it, the application must + // use a synchronization primitive to ensure that the presentation engine has + // finished reading from the image. The application can then transition the image's + // layout, queue rendering commands to it, etc. Finally, the application presents + // the image with vkQueuePresentKHR, which releases the acquisition of the image. + + // To acquire an available presentable image to use, and retrieve the index of + // that image If timeout is UINT64_MAX, the timeout period is treated as infinite, + // and vkAcquireNextImageKHR will block until an image is acquired or an error occurs. + + // An application must wait until either the semaphore or fence is signaled + // before accessing the image's data. + // + // VK_SUBOPTIMAL_KHR is a success code, meaning the swapchain can still be + // used to present but the surface properties no longer match exactly. + // This commonly happens on Wayland when the window is resized. + { + VkResult result = qvkAcquireNextImageKHR(vk.device, vk.swapchain, UINT64_MAX, + sema_imageAvailable, VK_NULL_HANDLE, &vk.idx_swapchain_image); + if (result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_ERROR_SURFACE_LOST_KHR) + { + qvkDeviceWaitIdle(vk.device); + vk_recreateSwapChain(); + result = qvkAcquireNextImageKHR(vk.device, vk.swapchain, UINT64_MAX, + sema_imageAvailable, VK_NULL_HANDLE, &vk.idx_swapchain_image); + } + // VK_SUBOPTIMAL_KHR is acceptable - swapchain is still usable + if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR) + { + ri.Printf(PRINT_ALL, "Vulkan: error %s returned by qvkAcquireNextImageKHR\n", + cvtResToStr(result)); + } + } + + + // User could call method vkWaitForFences to wait for completion. 
A fence is a + // very heavyweight synchronization primitive as it requires the GPU to flush + // all caches at least, and potentially some additional synchronization. Due to + // those costs, fences should be used sparingly. In particular, try to group + // per-frame resources and track them together. To wait for one or more fences + // to enter the signaled state on the host, call qvkWaitForFences. + + // If the condition is satisfied when vkWaitForFences is called, then + // vkWaitForFences returns immediately. If the condition is not satisfied at + // the time vkWaitForFences is called, then vkWaitForFences will block and + // wait up to timeout nanoseconds for the condition to become satisfied. + + VK_CHECK(qvkWaitForFences(vk.device, 1, &fence_renderFinished, VK_FALSE, 1e9)); + + // To set the state of fences to unsignaled from the host + // "1" is the number of fences to reset. + // "fence_renderFinished" is the fence handle to reset. + VK_CHECK(qvkResetFences(vk.device, 1, &fence_renderFinished)); + + // commandBuffer must not be in the recording or pending state. + + // begin_info is an instance of the VkCommandBufferBeginInfo structure, + // which defines additional information about how the command buffer + // begins recording. + VkCommandBufferBeginInfo begin_info; + begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; + begin_info.pNext = NULL; + // VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT specifies that + // each recording of the command buffer will only be submitted + // once, and the command buffer will be reset and recorded again + // between each submission. + begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT; + begin_info.pInheritanceInfo = NULL; + + // To begin recording a command buffer + VK_CHECK(qvkBeginCommandBuffer(vk.command_buffer, &begin_info)); + + // Ensure visibility of geometry buffers writes. + + +{ + + // vkCmdPipelineBarrier is a synchronization command that inserts + // a dependency between commands submitted to the same queue, or + // between commands in the same subpass. When vkCmdPipelineBarrier + // is submitted to a queue, it defines a memory dependency between + // commands that were submitted before it, and those submitted + // after it. + VkBufferMemoryBarrier barrier; + barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER; + barrier.pNext = NULL; + barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; + barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + barrier.buffer = vk_getIndexBuffer(); + barrier.offset = 0; + barrier.size = VK_WHOLE_SIZE; + + // If vkCmdPipelineBarrier was recorded outside a render pass instance, + // the first synchronization scope includes all commands that occur earlier + // in submission order. The second synchronization scope includes all + // commands that occur later in submission order. + // + + // VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT specifies read access + // to a vertex buffer as part of a drawing command, bound by + // vkCmdBindVertexBuffers. + barrier.dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT; + + // To record a pipeline barrier + qvkCmdPipelineBarrier(vk.command_buffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, 0, NULL, 1, &barrier, 0, NULL); + + // VK_ACCESS_INDEX_READ_BIT specifies read access to an index buffer + // as part of an indexed drawing command, bound by vkCmdBindIndexBuffer. 
+    barrier.dstAccessMask = VK_ACCESS_INDEX_READ_BIT;
+    qvkCmdPipelineBarrier(vk.command_buffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, 0, NULL, 1, &barrier, 0, NULL);
+
+}
+
+
+    // Begin render pass.
+    VkClearValue clear_values[2];
+    /// ignore clear_values[0] which corresponds to color attachment
+    clear_values[1].depthStencil.depth = 1.0;
+    clear_values[1].depthStencil.stencil = 0;
+
+    VkRenderPassBeginInfo renderPass_beginInfo;
+    renderPass_beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+    renderPass_beginInfo.pNext = NULL;
+    renderPass_beginInfo.renderPass = vk.render_pass;
+    renderPass_beginInfo.framebuffer = vk.framebuffers[vk.idx_swapchain_image];
+
+    renderPass_beginInfo.renderArea = get_scissor_rect();
+
+    renderPass_beginInfo.clearValueCount = 2;
+    renderPass_beginInfo.pClearValues = clear_values;
+
+    qvkCmdBeginRenderPass(vk.command_buffer, &renderPass_beginInfo, VK_SUBPASS_CONTENTS_INLINE);
+
+}
+
+
+void vk_end_frame(void)
+{
+    qvkCmdEndRenderPass(vk.command_buffer);
+
+    VK_CHECK(qvkEndCommandBuffer(vk.command_buffer));
+
+
+    VkPipelineStageFlags wait_dst_stage_mask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+
+    // Queue submission and synchronization
+    VkSubmitInfo submit_info;
+    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+    submit_info.pNext = NULL;
+    submit_info.waitSemaphoreCount = 1;
+    submit_info.pWaitSemaphores = &sema_imageAvailable;
+    submit_info.pWaitDstStageMask = &wait_dst_stage_mask;
+    submit_info.commandBufferCount = 1;
+    submit_info.pCommandBuffers = &vk.command_buffer;
+    submit_info.signalSemaphoreCount = 1;
+    // specify which semaphores to signal once the command buffers
+    // have finished execution
+    submit_info.pSignalSemaphores = &sema_renderFinished;
+
+
+    // queue is the queue that the command buffers will be submitted to.
+    // 1 is the number of elements in the pSubmits array.
+    // pSubmits is a pointer to an array of VkSubmitInfo structures,
+    // each specifying a command buffer submission batch.
+    //
+    // fence_renderFinished is an optional handle to a fence to be signaled
+    // once all submitted command buffers have completed execution.
+    // If fence is not VK_NULL_HANDLE, it defines a fence signal operation.
+    //
+    // Submission can be a high overhead operation, and applications should
+    // attempt to batch work together into as few calls to vkQueueSubmit as possible.
+    //
+    // vkQueueSubmit is a queue submission command, with each batch defined
+    // by an element of pSubmits as an instance of the VkSubmitInfo structure.
+    // Batches begin execution in the order they appear in pSubmits, but may
+    // complete out of order.
+    //
+    // Fence and semaphore operations submitted with vkQueueSubmit
+    // have additional ordering constraints compared to other
+    // submission commands, with dependencies involving previous and
+    // subsequent queue operations.
+    //
+    // The order that batches appear in pSubmits is used to determine
+    // submission order, and thus all the implicit ordering guarantees
+    // that respect it. Other than these implicit ordering guarantees
+    // and any explicit synchronization primitives, these batches may
+    // overlap or otherwise execute out of order. If any command buffer
+    // submitted to this queue is in the executable state, it is moved
+    // to the pending state. Once execution of all submissions of a
+    // command buffer complete, it moves from the pending state,
+    // back to the executable state.
+ // + // If a command buffer was recorded with the + // VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT flag, + // it instead moves back to the invalid state. + + // To submit command buffers to a queue + + VK_CHECK(qvkQueueSubmit(vk.queue, 1, &submit_info, fence_renderFinished)); + + VkPresentInfoKHR present_info; + present_info.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR; + present_info.pNext = NULL; + present_info.waitSemaphoreCount = 1; + present_info.pWaitSemaphores = &sema_renderFinished; + + // specify the swap chains to present images to + present_info.swapchainCount = 1; + present_info.pSwapchains = &vk.swapchain; + // specify the index of the image for each swap chain + present_info.pImageIndices = &vk.idx_swapchain_image; + present_info.pResults = NULL; + + // Each element of pSwapchains member of pPresentInfo must be a + // swapchain that is created for a surface for which presentation + // is supported from queue as determined using a call to + // vkGetPhysicalDeviceSurfaceSupportKHR + + + // After queueing all rendering commands and transitioning the + // image to the correct layout, to queue an image for presentation. + // queue is a queue that is capable of presentation to the target + // surface's platform on the same device as the image's swapchain. + VkResult result = qvkQueuePresentKHR(vk.queue, &present_info); + // VK_SUCCESS and VK_SUBOPTIMAL_KHR are both acceptable results. + // VK_SUBOPTIMAL_KHR means the swapchain can still be used but doesn't + // match the surface properties exactly (common on Wayland). + if(result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) + { + return; + } + else if( (result == VK_ERROR_OUT_OF_DATE_KHR) || (result == VK_ERROR_SURFACE_LOST_KHR)) + { + // we first call vkDeviceWaitIdle because we + // shouldn't touch resources that still be in use + qvkDeviceWaitIdle(vk.device); + // recreate the objects that depend on the swap chain and the window size + + vk_recreateSwapChain(); + } +} + diff --git a/code/renderer_vulkan/vk_frame.h b/code/renderer_vulkan/vk_frame.h new file mode 100644 index 0000000000..3591610205 --- /dev/null +++ b/code/renderer_vulkan/vk_frame.h @@ -0,0 +1,16 @@ +#ifndef VK_FRAME_H_ +#define VK_FRAME_H_ + +void vk_begin_frame(void); +void vk_end_frame(void); + + +void vk_createFrameBuffers(uint32_t w, uint32_t h); +void vk_destroyFrameBuffers(void); + +void vk_createSwapChain(VkDevice device, VkSurfaceKHR surface, VkSurfaceFormatKHR surface_format); + +void vk_create_sync_primitives(void); +void vk_destroy_sync_primitives(void); + +#endif diff --git a/code/renderer_vulkan/vk_image.c b/code/renderer_vulkan/vk_image.c new file mode 100644 index 0000000000..d0cd19baa6 --- /dev/null +++ b/code/renderer_vulkan/vk_image.c @@ -0,0 +1,1051 @@ +#include "tr_local.h" +#include "vk_image_sampler.h" +#include "R_ImageProcess.h" +#include "vk_image.h" +#include "vk_cmd.h" +#include "vk_instance.h" +#include "tr_globals.h" +#include "tr_cvar.h" +#include "tr_fog.h" + + +#define IMAGE_CHUNK_SIZE (64 * 1024 * 1024) + + + + +struct StagingBuffer_t +{ + // Vulkan supports two primary resource types: buffers and images. + // Resources are views of memory with associated formatting and dimensionality. + // Buffers are essentially unformatted arrays of bytes whereas images contain + // format information, can be multidimensional and may have associated metadata. 
+
+
+struct StagingBuffer_t
+{
+ // Vulkan has two primary resource types: buffers and images. Buffers
+ // are essentially unformatted arrays of bytes, bound to a graphics or
+ // compute pipeline via descriptor sets or via certain commands, or
+ // specified directly as parameters to certain commands. Images contain
+ // format information, can be multidimensional and may have associated
+ // metadata.
+ VkBuffer buff;
+ // Host visible memory used to copy image data to device local memory.
+ VkDeviceMemory mappableMem;
+};
+
+struct ImageChunk_t {
+ VkDeviceMemory block;
+ uint32_t Used;
+ // uint32_t typeIndex;
+};
+
+
+struct deviceLocalMemory_t {
+ // One large device-local memory allocation, shared by multiple images.
+ struct ImageChunk_t Chunks[8];
+ uint32_t Index; // number of chunks used
+};
+
+static struct StagingBuffer_t StagBuf;
+static struct deviceLocalMemory_t devMemImg;
+
+void gpuMemUsageInfo_f(void)
+{
+ // approximate usage, for debug info
+ ri.Printf(PRINT_ALL, "Number of images: %d, chunk memory (device local) used: %d MB \n",
+ tr.numImages, devMemImg.Index * (IMAGE_CHUNK_SIZE>>20) );
+}
+
+
+////////////////////////////////////////
+
+uint32_t find_memory_type(uint32_t memory_type_bits, VkMemoryPropertyFlags properties)
+{
+ uint32_t i;
+ for (i = 0; i < vk.devMemProperties.memoryTypeCount; i++)
+ {
+ // the resource must allow this type (bit i set in memoryTypeBits)
+ // and the type must have all of the requested property flags
+ if ( ((memory_type_bits & (1 << i)) != 0) && (vk.devMemProperties.memoryTypes[i].propertyFlags & properties) == properties)
+ {
+ return i;
+ }
+ }
+
+ ri.Error(ERR_FATAL, "Vulkan: failed to find matching memory type with requested properties");
+ return -1;
+}
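+
+// On a typical discrete GPU the memory types roughly split into a
+// DEVICE_LOCAL heap (VRAM, fast to sample from) and a HOST_VISIBLE |
+// HOST_COHERENT heap (system RAM, mappable). This file requests
+// HOST_VISIBLE | HOST_COHERENT for the staging buffer below and
+// DEVICE_LOCAL for the image chunks; an integrated GPU may expose a
+// single heap whose types satisfy both requests.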
+
+
+static void vk_createStagingBuffer(uint32_t size)
+{
+ memset(&StagBuf, 0, sizeof(StagBuf));
+
+ ri.Printf(PRINT_ALL, " Create staging buffer: %d bytes\n", size);
+
+ {
+ VkBufferCreateInfo buffer_desc;
+ buffer_desc.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ buffer_desc.pNext = NULL;
+ // flags is a bitmask of VkBufferCreateFlagBits specifying additional parameters of the buffer.
+ buffer_desc.flags = 0;
+ buffer_desc.size = size;
+ // VK_BUFFER_USAGE_TRANSFER_SRC_BIT specifies that the buffer
+ // can be used as the source of a transfer command
+ buffer_desc.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+ // sharingMode specifies how the buffer behaves when accessed by
+ // multiple queue families; we only ever use one queue.
+ buffer_desc.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ // queueFamilyIndexCount is the number of entries in the pQueueFamilyIndices array.
+ buffer_desc.queueFamilyIndexCount = 0;
+ // pQueueFamilyIndices is a list of queue families that will access this buffer
+ // (ignored if sharingMode is not VK_SHARING_MODE_CONCURRENT).
+ buffer_desc.pQueueFamilyIndices = NULL;
+
+ VK_CHECK(qvkCreateBuffer(vk.device, &buffer_desc, NULL, &StagBuf.buff));
+ }
+
+ // Determine the memory requirements for the buffer resource:
+ //
+ // typedef struct VkMemoryRequirements {
+ // VkDeviceSize size;
+ // VkDeviceSize alignment;
+ // uint32_t memoryTypeBits;
+ // } VkMemoryRequirements;
+
+ {
+ VkMemoryRequirements memory_requirements;
+ qvkGetBufferMemoryRequirements(vk.device, StagBuf.buff, &memory_requirements);
+
+ VkMemoryAllocateInfo alloc_info;
+ alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+ alloc_info.pNext = NULL;
+ alloc_info.allocationSize = memory_requirements.size;
+ alloc_info.memoryTypeIndex = find_memory_type(memory_requirements.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
+
+ VK_CHECK(qvkAllocateMemory(vk.device, &alloc_info, NULL, &StagBuf.mappableMem));
+
+ VK_CHECK(qvkBindBufferMemory(vk.device, StagBuf.buff, StagBuf.mappableMem, 0));
+
+ ri.Printf(PRINT_ALL, " Staging buffer alignment: %llu, memoryTypeBits: 0x%x, Type Index: %d. \n",
+ (unsigned long long)memory_requirements.alignment, memory_requirements.memoryTypeBits, alloc_info.memoryTypeIndex);
+ }
+}
+
+
+static void vk_destroy_staging_buffer(void)
+{
+ ri.Printf(PRINT_ALL, " Destroy staging buffer. \n");
+
+ if (StagBuf.buff != VK_NULL_HANDLE)
+ {
+ qvkDestroyBuffer(vk.device, StagBuf.buff, NULL);
+ StagBuf.buff = VK_NULL_HANDLE;
+ }
+
+ if (StagBuf.mappableMem != VK_NULL_HANDLE)
+ {
+ qvkFreeMemory(vk.device, StagBuf.mappableMem, NULL);
+ StagBuf.mappableMem = VK_NULL_HANDLE;
+ }
+
+ memset(&StagBuf, 0, sizeof(StagBuf));
+}
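+
+
+// One-shot upload recorded by the next function, as a sketch:
+//
+//   vkBeginCommandBuffer (ONE_TIME_SUBMIT)
+//     buffer barrier: HOST write    -> TRANSFER read
+//     image barrier:  UNDEFINED     -> TRANSFER_DST_OPTIMAL
+//     vkCmdCopyBufferToImage (num_region regions, one per mip level)
+//     image barrier:  TRANSFER_DST  -> SHADER_READ_ONLY_OPTIMAL
+//   vkEndCommandBuffer, vkQueueSubmit, vkQueueWaitIdle
+//
+// vkQueueWaitIdle makes this a blocking upload; fine at load time, but it
+// would stall the pipeline if used per-frame.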
+
+static void vk_stagBufferToDeviceLocalMem(VkImage image, VkBufferImageCopy* pRegion, uint32_t num_region)
+{
+ // Data can be copied between buffers with vkCmdCopyBuffer and between
+ // images with vkCmdCopyImage; vkCmdCopyBufferToImage and
+ // vkCmdCopyImageToBuffer move data between the two kinds of resource.
+ // Here we only need the buffer-to-image direction.
+ VkCommandBuffer cmd_buf;
+ {
+ VkCommandBufferAllocateInfo alloc_info;
+ alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
+ alloc_info.pNext = NULL;
+ alloc_info.commandPool = vk.command_pool;
+ alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
+ alloc_info.commandBufferCount = 1;
+ VK_CHECK(qvkAllocateCommandBuffers(vk.device, &alloc_info, &cmd_buf));
+
+ VkCommandBufferBeginInfo begin_info;
+ begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ begin_info.pNext = NULL;
+ begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+ begin_info.pInheritanceInfo = NULL;
+ VK_CHECK(qvkBeginCommandBuffer(cmd_buf, &begin_info));
+ }
+
+
+ VkBufferMemoryBarrier barrier;
+ barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
+ barrier.pNext = NULL;
+ barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
+ barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
+ barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ barrier.buffer = StagBuf.buff;
+ barrier.offset = 0;
+ barrier.size = VK_WHOLE_SIZE;
+
+ qvkCmdPipelineBarrier(cmd_buf, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, NULL, 1, &barrier, 0, NULL);
+
+ record_image_layout_transition(cmd_buf, image, VK_IMAGE_ASPECT_COLOR_BIT,
+ 0, VK_IMAGE_LAYOUT_UNDEFINED, VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+
+
+ // Copy data from the staging buffer to the image:
+ // cmd_buf is the command buffer into which the command is recorded,
+ // StagBuf.buff is the source buffer, image is the destination image
+ // (in TRANSFER_DST_OPTIMAL layout), num_region is the number of regions
+ // to copy, and pRegion points to an array of VkBufferImageCopy
+ // structures specifying those regions.
+ qvkCmdCopyBufferToImage(cmd_buf, StagBuf.buff, image,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, num_region, pRegion);
+
+ record_image_layout_transition(cmd_buf, image,
+ VK_IMAGE_ASPECT_COLOR_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_ACCESS_SHADER_READ_BIT,
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
+
+
+ VK_CHECK(qvkEndCommandBuffer(cmd_buf));
+
+ VkSubmitInfo submit_info;
+ submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+ submit_info.pNext = NULL;
+ submit_info.waitSemaphoreCount = 0;
+ submit_info.pWaitSemaphores = NULL;
+ submit_info.pWaitDstStageMask = NULL;
+ submit_info.commandBufferCount = 1;
+ submit_info.pCommandBuffers = &cmd_buf;
+ submit_info.signalSemaphoreCount = 0;
+ submit_info.pSignalSemaphores = NULL;
+
+ VK_CHECK(qvkQueueSubmit(vk.queue, 1, &submit_info, VK_NULL_HANDLE));
+ VK_CHECK(qvkQueueWaitIdle(vk.queue));
+
+ qvkFreeCommandBuffers(vk.device, vk.command_pool, 1, &cmd_buf);
+}
+
+
+#define FILE_HASH_SIZE 1024
+static image_t* hashTable[FILE_HASH_SIZE];
+
+static int generateHashValue( const char *fname )
+{
+ uint32_t i = 0;
+ int hash = 0;
+
+ while (fname[i] != '\0') {
+ char letter = tolower(fname[i]);
+ if (letter =='.') break; // don't include the extension
+ if (letter =='\\') letter = '/'; // normalize path separators
+ hash += (long)(letter)*(i+119);
+ i++;
+ }
+ hash &= (FILE_HASH_SIZE-1);
+ return hash;
+}
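+
+// Worked example: "textures/base_wall/metal.tga" and
+// "textures\base_wall\metal.jpg" produce the same hash value, because the
+// extension is dropped and '\\' is normalized to '/'. They therefore land
+// in the same bucket; the exact strcmp in R_FindImageFile still tells
+// them apart.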
+
+
+/*
+================
+This is the only place where image_t objects are created.
+================
+*/
+static void vk_createImageAndBindWithMemory(image_t* pImg)
+{
+ VkImageCreateInfo desc;
+ desc.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+ desc.pNext = NULL;
+ desc.flags = 0;
+ desc.imageType = VK_IMAGE_TYPE_2D;
+ desc.format = VK_FORMAT_R8G8B8A8_UNORM;
+ desc.extent.width = pImg->uploadWidth;
+ desc.extent.height = pImg->uploadHeight;
+ desc.extent.depth = 1;
+ desc.mipLevels = pImg->mipLevels;
+ desc.arrayLayers = 1;
+ desc.samples = VK_SAMPLE_COUNT_1_BIT;
+ desc.tiling = VK_IMAGE_TILING_OPTIMAL;
+ desc.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
+ desc.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ desc.queueFamilyIndexCount = 0;
+ desc.pQueueFamilyIndices = NULL;
+ desc.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+ VK_CHECK(qvkCreateImage(vk.device, &desc, NULL, &pImg->handle));
+
+ // =======================================================
+ // Bind it to device local memory
+ // =======================================================
+ VkMemoryRequirements memory_requirements;
+ qvkGetImageMemoryRequirements(vk.device, pImg->handle, &memory_requirements);
+
+ // alignment is guaranteed to be a power of two; keep the mask as
+ // VkDeviceSize so it is not truncated to 32 bits.
+ VkDeviceSize mask = memory_requirements.alignment - 1;
+
+ uint32_t i = 0;
+ for (i = 0; i < devMemImg.Index; i++)
+ {
+ // round the in-chunk offset up to the required alignment
+ VkDeviceSize offset_aligned = (devMemImg.Chunks[i].Used + mask) & (~mask);
+ VkDeviceSize end = offset_aligned + memory_requirements.size;
+ if (end <= IMAGE_CHUNK_SIZE)
+ {
+ VK_CHECK(qvkBindImageMemory(vk.device, pImg->handle,
+ devMemImg.Chunks[i].block, offset_aligned));
+
+ devMemImg.Chunks[i].Used = end;
+ return;
+ }
+ }
+
+ // No existing chunk has enough room; allocate a new one.
+ if (devMemImg.Index >= 8)
+ {
+ ri.Error(ERR_FATAL, "vk_createImageAndBindWithMemory: out of image memory chunks");
+ }
+
+ VkMemoryAllocateInfo alloc_info;
+ alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+ alloc_info.pNext = NULL;
+ alloc_info.allocationSize = IMAGE_CHUNK_SIZE;
+ alloc_info.memoryTypeIndex = find_memory_type(memory_requirements.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
+
+ VkDeviceMemory memory;
+ VK_CHECK(qvkAllocateMemory(vk.device, &alloc_info, NULL, &memory));
+ VK_CHECK(qvkBindImageMemory(vk.device, pImg->handle, memory, 0));
+
+ devMemImg.Chunks[devMemImg.Index].block = memory;
+ devMemImg.Chunks[devMemImg.Index].Used = memory_requirements.size;
+ ++devMemImg.Index;
+
+
+ ri.Printf(PRINT_ALL, " --- Device memory allocation --- \n");
+
+ ri.Printf(PRINT_ALL, "alignment: %llu, Type Index: %d. \n",
+ (unsigned long long)memory_requirements.alignment, alloc_info.memoryTypeIndex);
+
+ ri.Printf(PRINT_ALL, "Image chunk memory consumed: %d MB \n",
+ devMemImg.Index * (IMAGE_CHUNK_SIZE >> 20) );
+
+ ri.Printf(PRINT_ALL, " --- ------------------------ --- \n");
+}
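+
+
+// Worked example of the align-up above: with alignment = 0x100 the mask is
+// 0xFF; a chunk with Used = 0x1234 yields
+//   offset_aligned = (0x1234 + 0xFF) & ~0xFF = 0x1300,
+// the next 256-byte boundary. Vulkan guarantees that
+// VkMemoryRequirements::alignment is a power of two, so the idiom is safe.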
+
+
+static void vk_createImageViewAndDescriptorSet(image_t* pImage)
+{
+ VkImageView imageView;
+
+ VkImageViewCreateInfo desc;
+ desc.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+ desc.pNext = NULL;
+ desc.flags = 0;
+ desc.image = pImage->handle;
+ desc.viewType = VK_IMAGE_VIEW_TYPE_2D;
+ // format describes how data elements in the image are interpreted.
+ desc.format = VK_FORMAT_R8G8B8A8_UNORM;
+
+ // the components field allows swizzling the color channels around
+ desc.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
+ desc.components.g = VK_COMPONENT_SWIZZLE_IDENTITY;
+ desc.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
+ desc.components.a = VK_COMPONENT_SWIZZLE_IDENTITY;
+
+ // subresourceRange selects the set of mipmap levels and array layers
+ // to be accessible to the view.
+ desc.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ desc.subresourceRange.baseMipLevel = 0;
+ desc.subresourceRange.levelCount = VK_REMAINING_MIP_LEVELS;
+ desc.subresourceRange.baseArrayLayer = 0;
+ desc.subresourceRange.layerCount = 1;
+ // Pipeline shaders do not access images directly; they access image
+ // views, which represent contiguous ranges of image subresources plus
+ // additional metadata. A view inherits the usage flags of its image
+ // unless overridden by chaining a VkImageViewUsageCreateInfo structure
+ // through pNext.
+ VK_CHECK(qvkCreateImageView(vk.device, &desc, NULL, &imageView));
+
+ // keep the handle so the view can be destroyed later
+ pImage->view = imageView;
+
+
+ ///////////////////////////////////////////////////////
+ // create the associated descriptor set
+ ///////////////////////////////////////////////////////
+ // Allocate a descriptor set from the pool, using the set layout that
+ // was defined at pipeline-layout creation time; the layout describes
+ // how the descriptor set is to be allocated.
+
+ VkDescriptorSet desSet;
+
+ VkDescriptorSetAllocateInfo descSetAllocInfo;
+ descSetAllocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
+ descSetAllocInfo.pNext = NULL;
+ descSetAllocInfo.descriptorPool = vk.descriptor_pool;
+ descSetAllocInfo.descriptorSetCount = 1;
+ descSetAllocInfo.pSetLayouts = &vk.set_layout;
+
+ VK_CHECK(qvkAllocateDescriptorSets(vk.device, &descSetAllocInfo, &desSet));
+
+ // keep the handle for destruction and for descriptor binding
+ pImage->descriptor_set = desSet;
+
+ VkWriteDescriptorSet descriptor_write;
+
+ VkDescriptorImageInfo image_info;
+ image_info.sampler = vk_find_sampler(pImage->mipmap, pImage->wrapClampMode == GL_REPEAT);
+ image_info.imageView = imageView;
+ image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+
+ descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ descriptor_write.dstSet = desSet;
+ descriptor_write.dstBinding = 0;
+ descriptor_write.dstArrayElement = 0;
+ descriptor_write.descriptorCount = 1;
+ descriptor_write.pNext = NULL;
+ descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ descriptor_write.pImageInfo = &image_info;
+ descriptor_write.pBufferInfo = NULL;
+ descriptor_write.pTexelBufferView = NULL;
+
+ // This copies the VkDescriptorImageInfo (combined image sampler:
+ // sampler handle, image view and expected layout) into the descriptor
+ // set, which likely lives in device memory.
+ qvkUpdateDescriptorSets(vk.device, 1, &descriptor_write, 0, NULL);
+}
+
+
+
+image_t* R_CreateImage( const char *name, unsigned char* pic, const uint32_t width, const uint32_t height,
+ VkBool32 isMipMap, VkBool32 allowPicmip, int glWrapClampMode)
+{
+ if (strlen(name) >= MAX_QPATH ) {
+ ri.Error (ERR_DROP, "CreateImage: \"%s\" is too long\n", name);
+ }
+
+
+ ri.Printf( PRINT_ALL, " Create Image: %s\n", name);
+
+ // Create the image_t object.
+ image_t* pImage = (image_t*) ri.Hunk_Alloc( sizeof( image_t ), h_low );
+
+ strncpy (pImage->imgName, name, sizeof(pImage->imgName));
+ pImage->index = tr.numImages;
+ pImage->mipmap = isMipMap;
+ pImage->mipLevels = 1;
+ pImage->allowPicmip = allowPicmip;
+ pImage->wrapClampMode = glWrapClampMode;
+ pImage->width = width;
+ pImage->height = height;
+ pImage->isLightmap = (strncmp(name, "*lightmap", 9) == 0);
+ // Lightmaps were historically bound on TMU 1. A texture mapping unit
+ // (TMU) samples, filters, and maps a bitmap onto geometry; on modern
+ // GPUs TMUs are part of the shader pipeline rather than separate
+ // hardware, and the distinction survives here only in how lightmap and
+ // diffuse stages are split.
+
+ // convert to exact power-of-two sizes, at most 2048 per side
+ const unsigned int max_texture_size = 2048;
+
+ unsigned int scaled_width, scaled_height;
+
+ for (scaled_width = max_texture_size; scaled_width > width; scaled_width >>= 1)
+ ;
+
+ for (scaled_height = max_texture_size; scaled_height > height; scaled_height >>= 1)
+ ;
+
+ // perform the optional picmip operation (halve once per r_picmip level)
+ if ( allowPicmip )
+ {
+ scaled_width >>= r_picmip->integer;
+ scaled_height >>= r_picmip->integer;
+ }
+
+ pImage->uploadWidth = scaled_width;
+ pImage->uploadHeight = scaled_height;
+
+ // double-sized scratch buffer: the first half holds the base level and
+ // the second half receives the mip chain generated below
+ uint32_t buffer_size = 4 * pImage->uploadWidth * pImage->uploadHeight;
+ unsigned char * const pUploadBuffer = (unsigned char*) malloc ( 2 * buffer_size);
+
+ if ((scaled_width != width) || (scaled_height != height) )
+ {
+ // resample from [width, height] down to [scaled_width, scaled_height]
+ ResampleTexture (pUploadBuffer, width, height, pic, scaled_width, scaled_height);
+ }
+ else
+ {
+ memcpy(pUploadBuffer, pic, buffer_size);
+ }
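+
+ // Worked example: a 384x400 source gives scaled_width = 256 and
+ // scaled_height = 256 (the largest powers of two not exceeding the
+ // source, capped at 2048); with r_picmip 1 both are halved again to 128.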
+
+ // Copy regions for vkCmdCopyBufferToImage: the source regions must not
+ // overlap the destination regions, and destination regions must not
+ // overlap each other. A 2048x2048 base image has at most 12 mip levels
+ // (2048, 1024, ..., 1), so 12 region entries are enough.
+ VkBufferImageCopy regions[12];
+
+ regions[0].bufferOffset = 0;
+ regions[0].bufferRowLength = 0;
+ regions[0].bufferImageHeight = 0;
+ regions[0].imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ regions[0].imageSubresource.mipLevel = 0;
+ regions[0].imageSubresource.baseArrayLayer = 0;
+ regions[0].imageSubresource.layerCount = 1;
+ regions[0].imageOffset.x = 0;
+ regions[0].imageOffset.y = 0;
+ regions[0].imageOffset.z = 0;
+ regions[0].imageExtent.width = pImage->uploadWidth;
+ regions[0].imageExtent.height = pImage->uploadHeight;
+ regions[0].imageExtent.depth = 1;
+
+
+ if(isMipMap)
+ {
+ uint32_t curMipMapLevel = 1;
+ uint32_t base_width = pImage->uploadWidth;
+ uint32_t base_height = pImage->uploadHeight;
+
+ unsigned char* in_ptr = pUploadBuffer;
+ unsigned char* dst_ptr = in_ptr + buffer_size;
+
+ R_LightScaleTexture(pUploadBuffer, pUploadBuffer, buffer_size);
+
+ // Normal mip-mapping: walk down from [scaled_width, scaled_height]
+ // to [1, 1].
+ while (1)
+ {
+ if ( r_simpleMipMaps->integer )
+ {
+ R_MipMap(in_ptr, base_width, base_height, dst_ptr);
+ }
+ else
+ {
+ R_MipMap2(in_ptr, base_width, base_height, dst_ptr);
+ }
+
+
+ if ((base_width == 1) && (base_height == 1))
+ break;
+
+ base_width >>= 1;
+ if (base_width == 0)
+ base_width = 1;
+
+ base_height >>= 1;
+ if (base_height == 0)
+ base_height = 1;
+
+ regions[curMipMapLevel].bufferOffset = buffer_size;
+ regions[curMipMapLevel].bufferRowLength = 0;
+ regions[curMipMapLevel].bufferImageHeight = 0;
+ regions[curMipMapLevel].imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ regions[curMipMapLevel].imageSubresource.mipLevel = curMipMapLevel;
+ regions[curMipMapLevel].imageSubresource.baseArrayLayer = 0;
+ regions[curMipMapLevel].imageSubresource.layerCount = 1;
+ regions[curMipMapLevel].imageOffset.x = 0;
+ regions[curMipMapLevel].imageOffset.y = 0;
+ regions[curMipMapLevel].imageOffset.z = 0;
+
+ regions[curMipMapLevel].imageExtent.width = base_width;
+ regions[curMipMapLevel].imageExtent.height = base_height;
+ regions[curMipMapLevel].imageExtent.depth = 1;
+
+
+ uint32_t curLevelSize = base_width * base_height * 4;
+
+ buffer_size += curLevelSize;
+
+ // Regions must stay within the bounds of the buffer and of the
+ // image level (compressed images may round up to a whole texel
+ // block, but these images are uncompressed).
+
+ assert(buffer_size <= IMAGE_CHUNK_SIZE);
+
+ if ( r_colorMipLevels->integer ) {
+ R_BlendOverTexture( in_ptr, base_width * base_height, curMipMapLevel );
+ }
+
+
+ ++curMipMapLevel;
+
+ in_ptr = dst_ptr;
+ dst_ptr += curLevelSize;
+ }
+ pImage->mipLevels = curMipMapLevel;
+ }
+
+
+ vk_createImageAndBindWithMemory(pImage);
+ vk_createImageViewAndDescriptorSet(pImage);
+
+
+ void* data;
+ VK_CHECK(qvkMapMemory(vk.device, StagBuf.mappableMem, 0, VK_WHOLE_SIZE, 0, &data));
+ memcpy(data, pUploadBuffer, buffer_size);
+ qvkUnmapMemory(vk.device, StagBuf.mappableMem);
+
+ free(pUploadBuffer);
+
+ vk_stagBufferToDeviceLocalMem(pImage->handle, regions, pImage->mipLevels);
+
+ const int hash = generateHashValue(name);
+ pImage->next = hashTable[hash];
+ hashTable[hash] = pImage;
+
+ tr.images[tr.numImages] = pImage;
+ if ( ++tr.numImages == MAX_DRAWIMAGES )
+ {
+ ri.Error( ERR_DROP, "CreateImage: MAX_DRAWIMAGES hit\n");
+ }
+
+ return pImage;
+}
+
+
+
+image_t* R_FindImageFile(const char *name, VkBool32 mipmap, VkBool32 allowPicmip, int glWrapClampMode)
+{
+ image_t* image;
+
+ if (name == NULL)
+ {
+ ri.Printf( PRINT_WARNING, "Find Image File: NULL\n");
+ return NULL;
+ }
+
+ int hash = generateHashValue(name);
+
+ // see if the image is already loaded
+ for (image = hashTable[hash]; image; image = image->next)
+ {
+ if ( !strcmp( name, image->imgName ) )
+ {
+ // the white image can be used with any set of parms,
+ // but other mismatches are errors
+ if ( strcmp( name, "*white" ) )
+ {
+ if ( image->mipmap != mipmap ) {
+ ri.Printf( PRINT_WARNING, "WARNING: reused image %s with mixed mipmap parm\n", name );
+ }
+ if ( image->allowPicmip != allowPicmip ) {
+ ri.Printf( PRINT_WARNING, "WARNING: reused image %s with mixed allowPicmip parm\n", name );
+ }
+ if ( image->wrapClampMode != glWrapClampMode ) {
+ ri.Printf( PRINT_WARNING, "WARNING: reused image %s with mixed glWrapClampMode parm\n", name );
+ }
+ }
+ return image;
+ }
+ }
+
+ //
+ // load the pic from disk
+ //
+ uint32_t width = 0, height = 0;
+ unsigned char* pic = NULL;
+
+ if(r_loadImgAPI->integer)
+ R_LoadImage2( name, &pic, &width, &height );
+ else
+ R_LoadImage( name, &pic, &width, &height );
+
+
+ if (pic == NULL)
+ {
+ ri.Printf( PRINT_WARNING, "R_FindImageFile: failed loading %s from disk\n", name);
+ return NULL;
+ }
+
+ image = R_CreateImage( name, pic, width, height, mipmap, allowPicmip, glWrapClampMode);
+
+ ri.Free( pic );
+
+ return image;
+}
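+
+
+// Cinematics (RoQ video) are streamed into per-client scratch textures:
+// a new frame of pixel data arrives every render frame, so the scratch
+// image is re-specified whenever its size changes and re-uploaded whenever
+// the data is dirty.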
+
+
+void RE_UploadCinematic (int w, int h, int cols, int rows, const unsigned char *data, int client, VkBool32 dirty)
+{
+ image_t* prtImage = tr.scratchImage[client];
+
+ // if the scratch image isn't in the format we want, specify it as a new texture
+ if ( (cols != prtImage->uploadWidth) || (rows != prtImage->uploadHeight) )
+ {
+ ri.Printf(PRINT_ALL, "w=%d, h=%d, cols=%d, rows=%d, client=%d, prtImage->width=%d, prtImage->height=%d\n",
+ w, h, cols, rows, client, prtImage->uploadWidth, prtImage->uploadHeight);
+
+ prtImage->uploadWidth = cols;
+ prtImage->uploadHeight = rows;
+ prtImage->mipLevels = 1;
+
+ // VULKAN: destroy and recreate the GPU resources at the new size
+ qvkDestroyImage(vk.device, prtImage->handle, NULL);
+ qvkDestroyImageView(vk.device, prtImage->view, NULL);
+ qvkFreeDescriptorSets(vk.device, vk.descriptor_pool, 1, &prtImage->descriptor_set);
+
+ vk_createImageAndBindWithMemory(prtImage);
+ vk_createImageViewAndDescriptorSet(prtImage);
+
+
+ VkBufferImageCopy region;
+ region.bufferOffset = 0;
+ region.bufferRowLength = 0;
+ region.bufferImageHeight = 0;
+ region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ region.imageSubresource.mipLevel = 0;
+ region.imageSubresource.baseArrayLayer = 0;
+ region.imageSubresource.layerCount = 1;
+ region.imageOffset.x = 0;
+ region.imageOffset.y = 0;
+ region.imageOffset.z = 0;
+ region.imageExtent.width = cols;
+ region.imageExtent.height = rows;
+ region.imageExtent.depth = 1;
+
+ const uint32_t buffer_size = cols * rows * 4;
+
+ void* pDat;
+ VK_CHECK(qvkMapMemory(vk.device, StagBuf.mappableMem, 0, VK_WHOLE_SIZE, 0, &pDat));
+ memcpy(pDat, data, buffer_size);
+ qvkUnmapMemory(vk.device, StagBuf.mappableMem);
+
+ vk_stagBufferToDeviceLocalMem(tr.scratchImage[client]->handle, &region, 1);
+ }
+ else if (dirty)
+ {
+ // otherwise, just re-upload the pixels so that drivers know the
+ // texture contents are changing and don't try to compress it
+ VkBufferImageCopy region;
+ region.bufferOffset = 0;
+ region.bufferRowLength = 0;
+ region.bufferImageHeight = 0;
+ region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ region.imageSubresource.mipLevel = 0;
+ region.imageSubresource.baseArrayLayer = 0;
+ region.imageSubresource.layerCount = 1;
+ region.imageOffset.x = 0;
+ region.imageOffset.y = 0;
+ region.imageOffset.z = 0;
+ region.imageExtent.width = cols;
+ region.imageExtent.height = rows;
+ region.imageExtent.depth = 1;
+
+ const uint32_t buffer_size = cols * rows * 4;
+
+ void* pDat;
+ VK_CHECK(qvkMapMemory(vk.device, StagBuf.mappableMem, 0, VK_WHOLE_SIZE, 0, &pDat));
+ memcpy(pDat, data, buffer_size);
+ qvkUnmapMemory(vk.device, StagBuf.mappableMem);
+
+ vk_stagBufferToDeviceLocalMem(tr.scratchImage[client]->handle, &region, 1);
+ }
+}
+
+
+static void R_CreateDefaultImage( void )
+{
+ #define DEFAULT_SIZE 16
+
+ unsigned char data[DEFAULT_SIZE][DEFAULT_SIZE][4];
+
+ // the default image will be a box, to allow you to see the mapping coordinates
+ memset( data, 32, sizeof( data ) );
+
+ uint32_t x;
+ for ( x = 0; x < DEFAULT_SIZE; x++ )
+ {
+ data[0][x][0] =
+ data[0][x][1] =
+ data[0][x][2] =
+ data[0][x][3] = 255;
+
+ data[x][0][0] =
+ data[x][0][1] =
+ data[x][0][2] =
+ data[x][0][3] = 255;
+
+ data[DEFAULT_SIZE-1][x][0] =
+ data[DEFAULT_SIZE-1][x][1] =
+ data[DEFAULT_SIZE-1][x][2] =
+ data[DEFAULT_SIZE-1][x][3] = 255;
+
+ data[x][DEFAULT_SIZE-1][0] =
+ data[x][DEFAULT_SIZE-1][1] =
+ data[x][DEFAULT_SIZE-1][2] =
+ data[x][DEFAULT_SIZE-1][3] = 255;
+ }
+ tr.defaultImage = R_CreateImage("*default", (unsigned char *)data, DEFAULT_SIZE, DEFAULT_SIZE, qtrue, qfalse, GL_REPEAT);
+}
+
+
+
+static void R_CreateWhiteImage(void)
+{
+ // we use a solid white image instead of disabling texturing
+ unsigned char data[DEFAULT_SIZE][DEFAULT_SIZE][4];
+ memset( data, 255, sizeof( data ) );
+ tr.whiteImage = R_CreateImage("*white", (unsigned char *)data, 8, 8, qfalse, qfalse, GL_REPEAT);
+}
+
+
+static void R_CreateIdentityLightImage(void)
+{
+ uint32_t x,y;
+ unsigned char data[DEFAULT_SIZE][DEFAULT_SIZE][4];
+
+ // with overbright bits active, we need an image which is some fraction of full color,
+ // for default lightmaps, etc
+ for (x = 0 ; x < DEFAULT_SIZE ; x++)
+ {
+ for (y = 0 ; y < DEFAULT_SIZE ; y++)
+ {
+ data[y][x][0] =
+ data[y][x][1] =
+ data[y][x][2] = tr.identityLightByte;
+ data[y][x][3] = 255;
+ }
+ }
+ tr.identityLightImage = R_CreateImage("*identityLight", (unsigned char *)data, 8, 8, qfalse, qfalse, GL_REPEAT);
+}
+
+
+static void R_CreateDlightImage( void )
+{
+ #define DLIGHT_SIZE 16
+
+ uint32_t x,y;
+ unsigned char data[DLIGHT_SIZE][DLIGHT_SIZE][4];
+
+ // make a centered inverse-square falloff blob for dynamic lighting
+ for (x = 0 ; x < DLIGHT_SIZE ; x++)
+ {
+ for (y = 0 ; y < DLIGHT_SIZE ; y++)
+ {
+ float d = ( DLIGHT_SIZE/2 - 0.5f - x ) * ( DLIGHT_SIZE/2 - 0.5f - x ) +
+ ( DLIGHT_SIZE/2 - 0.5f - y ) * ( DLIGHT_SIZE/2 - 0.5f - y );
+ int b = 4000 / d;
+ if (b > 255) {
+ b = 255;
+ } else if ( b < 75 ) {
+ b = 0;
+ }
+ data[y][x][0] =
+ data[y][x][1] =
+ data[y][x][2] = b;
+ data[y][x][3] = 255;
+ }
+ }
+ tr.dlightImage
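+ // Falloff example for the blob above: at the texel nearest the center
+ // (x = y = 8) d = 0.5, so b = 8000, clamped to 255 (full bright); at a
+ // corner (x = y = 0) d = 112.5, so b = 35, which is below the 75 cutoff
+ // and becomes 0. The cutoff gives the dlight a hard circular edge.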
 = R_CreateImage("*dlight", (unsigned char *)data, DLIGHT_SIZE, DLIGHT_SIZE, qfalse, qfalse, GL_CLAMP);
+}
+
+
+static void R_CreateFogImage( void )
+{
+ #define FOG_S 256
+ #define FOG_T 32
+
+ unsigned int x,y;
+
+ unsigned char* const data = (unsigned char*) malloc( FOG_S * FOG_T * 4 );
+
+ // S is distance, T is depth
+ for (x = 0 ; x < FOG_S ; x++)
+ {
+ for (y = 0 ; y < FOG_T ; y++)
+ {
+ float d = R_FogFactor( ( x + 0.5f ) / FOG_S, ( y + 0.5f ) / FOG_T );
+
+ data[(y*FOG_S+x)*4+0] =
+ data[(y*FOG_S+x)*4+1] =
+ data[(y*FOG_S+x)*4+2] = 255;
+ data[(y*FOG_S+x)*4+3] = 255 * d;
+ }
+ }
+ tr.fogImage = R_CreateImage("*fog", data, FOG_S, FOG_T, qfalse, qfalse, GL_CLAMP);
+ free( data );
+}
+
+
+static void vk_destroySingleImage( image_t* pImg )
+{
+ ri.Printf(PRINT_ALL, " Destroy Image: %s \n", pImg->imgName);
+ if(pImg->descriptor_set != VK_NULL_HANDLE)
+ {
+ // free the allocated descriptor set
+ qvkFreeDescriptorSets(vk.device, vk.descriptor_pool, 1, &pImg->descriptor_set);
+ pImg->descriptor_set = VK_NULL_HANDLE;
+ }
+
+ if (pImg->handle != VK_NULL_HANDLE)
+ {
+ qvkDestroyImageView(vk.device, pImg->view, NULL);
+ qvkDestroyImage(vk.device, pImg->handle, NULL);
+ pImg->handle = VK_NULL_HANDLE;
+ }
+}
+
+
+void vk_destroyImageRes(void)
+{
+ vk_free_sampler();
+
+ uint32_t i = 0;
+
+ for (i = 0; i < tr.numImages; i++)
+ {
+ vk_destroySingleImage(tr.images[i]);
+ }
+
+ for (i = 0; i < devMemImg.Index; i++)
+ {
+ qvkFreeMemory(vk.device, devMemImg.Chunks[i].block, NULL);
+ devMemImg.Chunks[i].Used = 0;
+ }
+
+ devMemImg.Index = 0;
+
+
+ vk_destroy_staging_buffer();
+ // Destroying a pool object implicitly frees all objects allocated from
+ // that pool: destroying a VkCommandPool frees its VkCommandBuffers, and
+ // destroying a VkDescriptorPool frees its VkDescriptorSets. Here the
+ // descriptor pool is only reset, which returns all of its sets.
+ VK_CHECK(qvkResetDescriptorPool(vk.device, vk.descriptor_pool, 0));
+
+ memset( tr.images, 0, sizeof( tr.images ) );
+ tr.numImages = 0;
+
+ memset(hashTable, 0, sizeof(hashTable));
+}
diff --git a/code/renderer_vulkan/vk_image.h b/code/renderer_vulkan/vk_image.h
new file mode 100644
index 0000000000..4cd7bca182
--- /dev/null
+++ b/code/renderer_vulkan/vk_image.h
@@ -0,0 +1,35 @@
+#ifndef VK_IMAGE_H_
+#define VK_IMAGE_H_
+
+#include "tr_image.h"
+
+// workaround; will be removed in the future
+
+#ifndef GL_REPEAT
+#define GL_REPEAT 0x2901
+#endif
+
+#ifndef GL_CLAMP
+#define GL_CLAMP 0x2900
+#endif
+
+uint32_t find_memory_type(uint32_t memory_type_bits, VkMemoryPropertyFlags properties);
+
+
+void vk_destroyImageRes(void);
+
+image_t* R_FindImageFile(const char *name, VkBool32 mipmap, VkBool32 allowPicmip, int glWrapClampMode);
+
+image_t* R_CreateImage( const char *name, unsigned char* pic, uint32_t width, uint32_t height,
+ VkBool32 mipmap, VkBool32 allowPicmip, int glWrapClampMode);
+
+
+void R_LoadImage(const char *name, unsigned char **pic, uint32_t* width, uint32_t* height );
+void R_LoadImage2(const char *name, unsigned char **pic, uint32_t* width, uint32_t* height );
+
+
+
+void gpuMemUsageInfo_f(void);
+
+
+#endif
diff --git a/code/renderer_vulkan/vk_image_sampler.c b/code/renderer_vulkan/vk_image_sampler.c
new file mode 100644
index 0000000000..1de29cbe45
--- /dev/null
+++ b/code/renderer_vulkan/vk_image_sampler.c
@@ -0,0 +1,191 @@
+#include "VKimpl.h"
+#include "vk_image_sampler.h"
+#include "vk_instance.h"
+#include "ref_import.h"
+
+
+//static cvar_t* r_textureMode;
+
+/*
+ VkSampler objects represent the state of an image sampler,
+ used by the implementation to read image data and apply
+ filtering and other transformations for the shader.
+*/
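+
+/*
+ Sampler deduplication: samplers here vary only in two booleans
+ (mipmap on/off, repeat vs clamp-to-edge), so at most four distinct
+ VkSampler objects are ever created and every image shares one of
+ them, staying far below any maxSamplerAllocationCount limit.
+*/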
+
+/*
+ The maximum number of sampler objects which can be simultaneously
+ created on a device is implementation-dependent and specified by the
+ maxSamplerAllocationCount member of the VkPhysicalDeviceLimits
+ structure (the spec requires at least 4000). If it is exceeded,
+ vkCreateSampler returns VK_ERROR_TOO_MANY_OBJECTS.
+
+ Since VkSampler is a non-dispatchable handle type, implementations may
+ return the same handle for sampler state vectors that are identical;
+ all such objects count only once against the limit.
+*/
+
+
+#define MAX_VK_SAMPLERS 8
+
+struct Vk_Sampler_Def
+{
+ VkBool32 repeat_texture; // clamp/repeat texture addressing mode
+ VkBool32 mipmap;
+
+ VkSampler ImgSampler;
+};
+
+static struct Vk_Sampler_Def s_SamplerDefs[MAX_VK_SAMPLERS] = {0};
+static int s_NumSamplers = 0;
+
+
+
+void vk_free_sampler(void)
+{
+ int i = 0;
+ for (i = 0; i < s_NumSamplers; i++)
+ {
+ if(s_SamplerDefs[i].ImgSampler != VK_NULL_HANDLE)
+ {
+ qvkDestroySampler(vk.device, s_SamplerDefs[i].ImgSampler, NULL);
+ }
+
+ memset(&s_SamplerDefs[i], 0, sizeof(struct Vk_Sampler_Def));
+ }
+
+ s_NumSamplers = 0;
+}
+
+
+
+VkSampler vk_find_sampler( VkBool32 isMipmap, VkBool32 isRepeatTexture )
+{
+ // Look for the sampler among existing samplers.
+ int i;
+ for (i = 0; i < s_NumSamplers; i++)
+ {
+ if( ( s_SamplerDefs[i].repeat_texture == isRepeatTexture ) &&
+ ( s_SamplerDefs[i].mipmap == isMipmap ) )
+ {
+ return s_SamplerDefs[i].ImgSampler;
+ }
+ }
+
+
+ // Not found; create one.
+ //
+ // The address mode specifies the behavior of sampling with coordinates
+ // outside the range [0,1] for the respective u, v or w coordinate:
+ // VK_SAMPLER_ADDRESS_MODE_REPEAT tiles the texture, while
+ // VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE clamps to the edge texel.
+ VkSamplerAddressMode address_mode = isRepeatTexture ?
+ VK_SAMPLER_ADDRESS_MODE_REPEAT : VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
+
+ // magFilter / minFilter specify the magnification and minification
+ // filters to apply to lookups.
+ VkFilter mag_filter = VK_FILTER_LINEAR;
+ VkFilter min_filter = VK_FILTER_LINEAR;
+
+ // used to emulate OpenGL's non-mipmapped GL_LINEAR/GL_NEAREST
+ // minification filters by clamping maxLod
+ VkBool32 max_lod_0_25 = 0;
+
+ // mipmapMode specifies the mipmap filter to apply to lookups.
+ VkSamplerMipmapMode mipmap_mode;
+ if (isMipmap)
+ {
+ mipmap_mode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
+ max_lod_0_25 = 0;
+ }
+ else
+ {
+ mipmap_mode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
+ max_lod_0_25 = 1;
+ }
+
+
+ VkSamplerCreateInfo desc;
+ desc.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
+ desc.pNext = NULL;
+ desc.flags = 0;
+ desc.magFilter = mag_filter;
+ desc.minFilter = min_filter;
+ desc.mipmapMode = mipmap_mode;
+
+ desc.addressModeU = address_mode;
+ desc.addressModeV = address_mode;
+ desc.addressModeW = address_mode;
+
+ // mipLodBias is the bias added to the mipmap LOD calculation.
+ desc.mipLodBias = 0.0f;
+
+ // Note: anisotropyEnable = VK_TRUE is only valid if the samplerAnisotropy
+ // device feature was enabled at device creation; maxAnisotropy is the
+ // clamp value used when it is enabled, and is ignored otherwise.
+ desc.anisotropyEnable = VK_TRUE;
+ desc.maxAnisotropy = 16;
+
+ // compareEnable/compareOp control depth-compare lookups; not used here.
+ desc.compareEnable = VK_FALSE;
+ desc.compareOp = VK_COMPARE_OP_ALWAYS;
+
+ // minLod and maxLod clamp the computed LOD value.
+ desc.minLod = 0.0f;
+ desc.maxLod = max_lod_0_25 ? 0.25f : 12.0f;
+
+ // borderColor is the predefined border color to use.
+ desc.borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK;
+
+ // unnormalizedCoordinates selects between unnormalized texel coordinates
+ // (zero to image dimensions) and normalized ones (zero to one); we use
+ // normalized coordinates.
+ desc.unnormalizedCoordinates = VK_FALSE;
+
+
+ // Create the sampler object and remember it for reuse; check the bounds
+ // before writing the slot.
+ if (s_NumSamplers >= MAX_VK_SAMPLERS)
+ {
+ ri.Error(ERR_DROP, "MAX_VK_SAMPLERS hit\n");
+ }
+
+ VkSampler sampler;
+ VK_CHECK(qvkCreateSampler(vk.device, &desc, NULL, &sampler));
+
+ s_SamplerDefs[s_NumSamplers].repeat_texture = isRepeatTexture;
+ s_SamplerDefs[s_NumSamplers].mipmap = isMipmap;
+ s_SamplerDefs[s_NumSamplers].ImgSampler = sampler;
+ s_NumSamplers++;
+
+ return sampler;
+}
diff --git a/code/renderer_vulkan/vk_image_sampler.h b/code/renderer_vulkan/vk_image_sampler.h
new file mode 100644
index 0000000000..600600e31b
--- /dev/null
+++ b/code/renderer_vulkan/vk_image_sampler.h
@@ -0,0 +1,9 @@
+#ifndef IMAGE_SAMPLER_H_
+#define IMAGE_SAMPLER_H_
+
+void vk_free_sampler(void);
+VkSampler vk_find_sampler( VkBool32 isMipmap, VkBool32 isRepeatTexture );
+
+//void vk_set_sampler(int m);
+
+#endif
diff --git a/code/renderer_vulkan/vk_image_sampler2.c b/code/renderer_vulkan/vk_image_sampler2.c
new file mode 100644
index 0000000000..b5c4cc2ccd
--- /dev/null
+++ b/code/renderer_vulkan/vk_image_sampler2.c
@@ -0,0 +1,216 @@
+#include "VKimpl.h"
+#include "vk_image_sampler.h"
+#include "vk_instance.h"
+#include "ref_import.h"
+
+typedef struct {
+ char *name;
+ int minimize, maximize;
+} textureMode_t;
+
+struct Vk_Sampler_Def
+{
+ VkBool32 repeat_texture; // clamp/repeat texture addressing mode
+ int gl_mag_filter; // GL_XXX mag filter
+ int gl_min_filter; // GL_XXX min filter
+};
+
+#ifndef GL_NEAREST
+#define GL_NEAREST 0x2600
+#endif
+
+#ifndef GL_LINEAR
+#define GL_LINEAR 0x2601
+#endif
+
+#ifndef GL_NEAREST_MIPMAP_NEAREST
+#define GL_NEAREST_MIPMAP_NEAREST 0x2700
+#endif
+
+#ifndef GL_NEAREST_MIPMAP_LINEAR
+#define GL_NEAREST_MIPMAP_LINEAR 0x2702
+#endif
+
+#ifndef GL_LINEAR_MIPMAP_NEAREST
+#define GL_LINEAR_MIPMAP_NEAREST 0x2701
+#endif
+
+#ifndef GL_LINEAR_MIPMAP_LINEAR
+#define GL_LINEAR_MIPMAP_LINEAR 0x2703
+#endif
+
+static const textureMode_t texModes[] = {
+ {"GL_NEAREST", GL_NEAREST, GL_NEAREST},
+ {"GL_LINEAR", GL_LINEAR, GL_LINEAR},
+ {"GL_NEAREST_MIPMAP_NEAREST", GL_NEAREST_MIPMAP_NEAREST, GL_NEAREST},
+ {"GL_LINEAR_MIPMAP_NEAREST", GL_LINEAR_MIPMAP_NEAREST, GL_LINEAR},
+ {"GL_NEAREST_MIPMAP_LINEAR", GL_NEAREST_MIPMAP_LINEAR, GL_NEAREST},
+ {"GL_LINEAR_MIPMAP_LINEAR", GL_LINEAR_MIPMAP_LINEAR, GL_LINEAR}
+};
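+
+// GL-to-Vulkan filter mapping used below, as a sketch:
+//   GL_LINEAR_MIPMAP_NEAREST -> min VK_FILTER_LINEAR, mip mode NEAREST
+//   GL_LINEAR_MIPMAP_LINEAR  -> min VK_FILTER_LINEAR, mip mode LINEAR (trilinear)
+//   GL_NEAREST / GL_LINEAR (no mipmaps) -> maxLod clamped to 0.25 so only
+//   the base level is ever sampled.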
+
+/*
+ The maximum number of sampler objects which can be simultaneously
+ created on a device is implementation-dependent and specified by the
+ maxSamplerAllocationCount member of the VkPhysicalDeviceLimits
+ structure. If maxSamplerAllocationCount is exceeded, vkCreateSampler
+ will return VK_ERROR_TOO_MANY_OBJECTS.
+*/
+
+
+#define MAX_VK_SAMPLERS 32
+static struct Vk_Sampler_Def s_SamplerDefs[MAX_VK_SAMPLERS] = {0};
+
+static uint32_t s_NumSamplers = 0;
+static VkSampler s_ImgSamplers[MAX_VK_SAMPLERS] = {0};
+
+
+
+void vk_free_sampler(void)
+{
+ uint32_t i = 0;
+ for (i = 0; i < s_NumSamplers; i++)
+ {
+ if(s_ImgSamplers[i] != VK_NULL_HANDLE)
+ {
+ qvkDestroySampler(vk.device, s_ImgSamplers[i], NULL);
+ s_ImgSamplers[i] = VK_NULL_HANDLE;
+ }
+
+ memset(&s_SamplerDefs[i], 0, sizeof(struct Vk_Sampler_Def));
+ }
+
+ s_NumSamplers = 0;
+}
+
+
+void vk_set_sampler(int m)
+{
+ if ( m >= (int)( sizeof(texModes) / sizeof(texModes[0]) ) ) {
+ ri.Printf (PRINT_ALL, "bad filter name\n");
+ return;
+ }
+
+ ri.Cvar_Set( "r_textureMode", texModes[m].name);
+}
+
+
+VkSampler vk_find_sampler(VkBool32 mipmap, VkBool32 repeat_texture)
+{
+ struct Vk_Sampler_Def sampler_def;
+ memset(&sampler_def, 0, sizeof(sampler_def));
+
+ sampler_def.repeat_texture = repeat_texture;
+ if (mipmap) {
+ sampler_def.gl_mag_filter = GL_LINEAR;
+ sampler_def.gl_min_filter = GL_LINEAR_MIPMAP_NEAREST;
+ } else {
+ sampler_def.gl_mag_filter = GL_LINEAR;
+ sampler_def.gl_min_filter = GL_LINEAR;
+ }
+
+ // Look for the sampler among existing samplers.
+ uint32_t i;
+ for (i = 0; i < s_NumSamplers; i++)
+ {
+ if (( s_SamplerDefs[i].repeat_texture == sampler_def.repeat_texture) &&
+ ( s_SamplerDefs[i].gl_mag_filter == sampler_def.gl_mag_filter) &&
+ ( s_SamplerDefs[i].gl_min_filter == sampler_def.gl_min_filter) )
+ {
+ return s_ImgSamplers[i];
+ }
+ }
+
+ // Create a new sampler; check the bounds before writing the slot.
+ if (s_NumSamplers >= MAX_VK_SAMPLERS)
+ {
+ ri.Error(ERR_DROP, "vk_find_sampler: MAX_VK_SAMPLERS hit\n");
+ }
+ s_SamplerDefs[s_NumSamplers] = sampler_def;
+
+ VkSamplerAddressMode address_mode = repeat_texture ?
+ VK_SAMPLER_ADDRESS_MODE_REPEAT : VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
+
+ VkFilter mag_filter;
+ if (sampler_def.gl_mag_filter == GL_NEAREST)
+ {
+ mag_filter = VK_FILTER_NEAREST;
+ }
+ else if(sampler_def.gl_mag_filter == GL_LINEAR)
+ {
+ mag_filter = VK_FILTER_LINEAR;
+ }
+ else
+ {
+ ri.Error(ERR_FATAL, "vk_find_sampler: invalid gl_mag_filter");
+ }
+
+ VkFilter min_filter;
+ VkSamplerMipmapMode mipmap_mode;
+ qboolean max_lod_0_25 = qfalse; // emulates OpenGL's non-mipmapped GL_LINEAR/GL_NEAREST minification filters
+ if (sampler_def.gl_min_filter == GL_NEAREST) {
+ min_filter = VK_FILTER_NEAREST;
+ mipmap_mode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
+ max_lod_0_25 = qtrue;
+ }
+ else if (sampler_def.gl_min_filter == GL_LINEAR)
+ {
+ min_filter = VK_FILTER_LINEAR;
+ mipmap_mode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
+ max_lod_0_25 = qtrue;
+ }
+ else if (sampler_def.gl_min_filter == GL_NEAREST_MIPMAP_NEAREST)
+ {
+ min_filter = VK_FILTER_NEAREST;
+ mipmap_mode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
+ }
+ else if (sampler_def.gl_min_filter == GL_LINEAR_MIPMAP_NEAREST)
+ {
+ min_filter = VK_FILTER_LINEAR;
+ mipmap_mode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
+ }
+ else if (sampler_def.gl_min_filter == GL_NEAREST_MIPMAP_LINEAR)
+ {
+ min_filter = VK_FILTER_NEAREST;
+ mipmap_mode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
+ }
+ else if (sampler_def.gl_min_filter == GL_LINEAR_MIPMAP_LINEAR)
+ {
+ min_filter = VK_FILTER_LINEAR;
+ mipmap_mode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
+ }
+ else {
+ ri.Error(ERR_FATAL, "vk_find_sampler: invalid gl_min_filter");
+ }
+
+ VkSamplerCreateInfo desc;
+ desc.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
+ desc.pNext = NULL;
+ desc.flags = 0;
+ desc.magFilter = mag_filter;
+ desc.minFilter = min_filter;
+ desc.mipmapMode = mipmap_mode;
+ desc.addressModeU = address_mode;
+ desc.addressModeV = address_mode;
+ desc.addressModeW = address_mode;
+ desc.mipLodBias = 0.0f;
+ desc.anisotropyEnable = VK_FALSE;
+ desc.maxAnisotropy = 1;
+ desc.compareEnable = VK_FALSE;
+ desc.compareOp = VK_COMPARE_OP_ALWAYS;
+ desc.minLod = 0.0f;
+ desc.maxLod = max_lod_0_25 ? 0.25f : 12.0f;
+ desc.borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK;
+ desc.unnormalizedCoordinates = VK_FALSE;
+
+ VkSampler sampler;
+ VK_CHECK(qvkCreateSampler(vk.device, &desc, NULL, &sampler));
+
+ // the slot was bounds-checked above
+ s_ImgSamplers[s_NumSamplers++] = sampler;
+ return sampler;
+}
+
diff --git a/code/renderer_vulkan/vk_init.c b/code/renderer_vulkan/vk_init.c
new file mode 100644
index 0000000000..a85b248d23
--- /dev/null
+++ b/code/renderer_vulkan/vk_init.c
@@ -0,0 +1,112 @@
+#include "VKimpl.h"
+#include "vk_instance.h"
+#include "vk_frame.h"
+#include "vk_cmd.h"
+#include "vk_depth_attachment.h"
+#include "vk_pipelines.h"
+#include "vk_shade_geometry.h"
+#include "vk_shaders.h"
+#include "glConfig.h"
+#include "tr_backend.h"
+
+// vk_init has nothing to do with tr_init, and vk_instance should stay small.
+//
+// After calling this function we have a fully functional Vulkan subsystem.
+void vk_initialize(void)
+{
+ vk_createWindow();
+
+ vk_getProcAddress();
+
+ // Swapchain; requires vk.physical_device to be initialized.
+ vk_createSwapChain(vk.device, vk.surface, vk.surface_format);
+
+ // Sync primitives.
+ vk_create_sync_primitives();
+
+ // We have to create a command pool before we can create command buffers:
+ // command pools manage the memory that backs command buffers, and
+ // command buffers are allocated from a pool.
+ ri.Printf(PRINT_ALL, " Create command pool: vk.command_pool \n");
+ vk_create_command_pool(&vk.command_pool);
+
+ ri.Printf(PRINT_ALL, " Create command buffer: vk.command_buffer \n");
+ vk_create_command_buffer(vk.command_pool, &vk.command_buffer);
+
+
+ int width;
+ int height;
+
+ R_GetWinResolution(&width, &height);
+
+ backEnd.viewParms.viewportWidth = width;
+ backEnd.viewParms.viewportHeight = height;
+
+ // Depth attachment image.
+ vk_createDepthAttachment(width, height);
+
+ vk_createFrameBuffers(width, height);
+
+ // Pipeline layout.
+ // Uniform values in shaders are globals, similar to dynamic state
+ // variables, that can be changed at draw time to alter the behavior of
+ // the shaders without recreating them. They are commonly used for
+ // texture samplers in the fragment shader, and must be specified during
+ // pipeline creation by creating a VkPipelineLayout object.
+ vk_createPipelineLayout();
+
+ vk_createVertexBuffer();
+ vk_createIndexBuffer();
+
+ //
+ // Shader modules.
+ //
+ vk_loadShaderModules();
+
+ //
+ // Standard pipelines.
+ //
+ create_standard_pipelines();
+
+ vk.isInitialized = VK_TRUE;
+}
+
+
+VkBool32 isVKinitialied(void)
+{
+ return vk.isInitialized;
+}
+
+// Shut down the vulkan subsystem by releasing resources acquired by Vk_Instance.
+void vk_shutdown(void)
+{
+ ri.Printf( PRINT_ALL, "vk_shutdown()\n" );
+
+ vk_destroyDepthAttachment();
+
+ vk_destroyFrameBuffers();
+
+ vk_destroy_shading_data();
+
+ vk_destroy_sync_primitives();
+
+ vk_destroyShaderModules();
+
+ vk_destroyGlobalStagePipeline();
+
+ vk_destroy_commands();
+
+ vk_clearProcAddress();
+
+ ri.Printf( PRINT_ALL, " clear vk struct: vk \n" );
+ memset(&vk, 0, sizeof(vk));
+
+ vk.isInitialized = VK_FALSE;
+}
+
diff --git a/code/renderer_vulkan/vk_init.h b/code/renderer_vulkan/vk_init.h
new file mode 100644
index 0000000000..b98e0c7b93
--- /dev/null
+++ b/code/renderer_vulkan/vk_init.h
@@ -0,0 +1,10 @@
+#ifndef VK_INIT_H_
+#define VK_INIT_H_
+
+
+void vk_shutdown(void);
+VkBool32 isVKinitialied(void);
+void vk_initialize(void);
+
+
+#endif
diff --git a/code/renderer_vulkan/vk_instance.c b/code/renderer_vulkan/vk_instance.c
new file mode 100644
index 0000000000..59c17cd6e1
--- /dev/null
+++ b/code/renderer_vulkan/vk_instance.c
@@ -0,0 +1,970 @@
+#include <stdlib.h>
+#include <string.h>
+
+#include "VKimpl.h"
+#include "vk_instance.h"
+#include "tr_globals.h"
+#include "vk_image.h"
+#include "vk_shade_geometry.h"
+#include "vk_pipelines.h"
+#include "vk_frame.h"
+#include "vk_shaders.h"
+#include "vk_depth_attachment.h"
+
+
+struct Vk_Instance vk;
+
+//
+// Vulkan API functions used by the renderer.
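+// Each entry point is resolved at runtime into a q-prefixed function
+// pointer (mirroring the qgl prefix used by the GL renderers), e.g., as
+// a sketch:
+//   qvkCreateImage = (PFN_vkCreateImage)
+//       qvkGetDeviceProcAddr(vk.device, "vkCreateImage");
+// so the renderer never links against the Vulkan loader directly.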
+// +PFN_vkGetInstanceProcAddr qvkGetInstanceProcAddr; + +PFN_vkCreateInstance qvkCreateInstance; +PFN_vkEnumerateInstanceExtensionProperties qvkEnumerateInstanceExtensionProperties; + +PFN_vkCreateDevice qvkCreateDevice; +PFN_vkDestroyInstance qvkDestroyInstance; +PFN_vkEnumerateDeviceExtensionProperties qvkEnumerateDeviceExtensionProperties; +PFN_vkEnumeratePhysicalDevices qvkEnumeratePhysicalDevices; +PFN_vkGetDeviceProcAddr qvkGetDeviceProcAddr; +PFN_vkGetPhysicalDeviceFeatures qvkGetPhysicalDeviceFeatures; +PFN_vkGetPhysicalDeviceFormatProperties qvkGetPhysicalDeviceFormatProperties; +PFN_vkGetPhysicalDeviceMemoryProperties qvkGetPhysicalDeviceMemoryProperties; +PFN_vkGetPhysicalDeviceProperties qvkGetPhysicalDeviceProperties; +PFN_vkGetPhysicalDeviceQueueFamilyProperties qvkGetPhysicalDeviceQueueFamilyProperties; + + +PFN_vkDestroySurfaceKHR qvkDestroySurfaceKHR; +PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR qvkGetPhysicalDeviceSurfaceCapabilitiesKHR; +PFN_vkGetPhysicalDeviceSurfaceFormatsKHR qvkGetPhysicalDeviceSurfaceFormatsKHR; +PFN_vkGetPhysicalDeviceSurfacePresentModesKHR qvkGetPhysicalDeviceSurfacePresentModesKHR; +PFN_vkGetPhysicalDeviceSurfaceSupportKHR qvkGetPhysicalDeviceSurfaceSupportKHR; + +#ifndef NDEBUG +PFN_vkCreateDebugReportCallbackEXT qvkCreateDebugReportCallbackEXT; +PFN_vkDestroyDebugReportCallbackEXT qvkDestroyDebugReportCallbackEXT; +#endif + +PFN_vkAllocateCommandBuffers qvkAllocateCommandBuffers; +PFN_vkAllocateDescriptorSets qvkAllocateDescriptorSets; +PFN_vkAllocateMemory qvkAllocateMemory; +PFN_vkBeginCommandBuffer qvkBeginCommandBuffer; +PFN_vkBindBufferMemory qvkBindBufferMemory; +PFN_vkBindImageMemory qvkBindImageMemory; +PFN_vkCmdBeginRenderPass qvkCmdBeginRenderPass; +PFN_vkCmdBindDescriptorSets qvkCmdBindDescriptorSets; +PFN_vkCmdBindIndexBuffer qvkCmdBindIndexBuffer; +PFN_vkCmdBindPipeline qvkCmdBindPipeline; +PFN_vkCmdBindVertexBuffers qvkCmdBindVertexBuffers; +PFN_vkCmdBlitImage qvkCmdBlitImage; +PFN_vkCmdClearAttachments qvkCmdClearAttachments; +PFN_vkCmdCopyBufferToImage qvkCmdCopyBufferToImage; +PFN_vkCmdCopyImage qvkCmdCopyImage; +PFN_vkCmdCopyImageToBuffer qvkCmdCopyImageToBuffer; +PFN_vkCmdDraw qvkCmdDraw; +PFN_vkCmdDrawIndexed qvkCmdDrawIndexed; +PFN_vkCmdEndRenderPass qvkCmdEndRenderPass; +PFN_vkCmdPipelineBarrier qvkCmdPipelineBarrier; +PFN_vkCmdPushConstants qvkCmdPushConstants; +PFN_vkCmdSetDepthBias qvkCmdSetDepthBias; +PFN_vkCmdSetScissor qvkCmdSetScissor; +PFN_vkCmdSetViewport qvkCmdSetViewport; +PFN_vkCreateBuffer qvkCreateBuffer; +PFN_vkCreateCommandPool qvkCreateCommandPool; +PFN_vkCreateDescriptorPool qvkCreateDescriptorPool; +PFN_vkCreateDescriptorSetLayout qvkCreateDescriptorSetLayout; +PFN_vkCreateFence qvkCreateFence; +PFN_vkCreateFramebuffer qvkCreateFramebuffer; +PFN_vkCreateGraphicsPipelines qvkCreateGraphicsPipelines; +PFN_vkCreateImage qvkCreateImage; +PFN_vkCreateImageView qvkCreateImageView; +PFN_vkCreatePipelineLayout qvkCreatePipelineLayout; +PFN_vkCreateRenderPass qvkCreateRenderPass; +PFN_vkCreateSampler qvkCreateSampler; +PFN_vkCreateSemaphore qvkCreateSemaphore; +PFN_vkCreateShaderModule qvkCreateShaderModule; +PFN_vkDestroyBuffer qvkDestroyBuffer; +PFN_vkDestroyCommandPool qvkDestroyCommandPool; +PFN_vkDestroyDescriptorPool qvkDestroyDescriptorPool; +PFN_vkDestroyDescriptorSetLayout qvkDestroyDescriptorSetLayout; +PFN_vkDestroyDevice qvkDestroyDevice; +PFN_vkDestroyFence qvkDestroyFence; +PFN_vkDestroyFramebuffer qvkDestroyFramebuffer; +PFN_vkDestroyImage qvkDestroyImage; +PFN_vkDestroyImageView 
qvkDestroyImageView; +PFN_vkDestroyPipeline qvkDestroyPipeline; +PFN_vkDestroyPipelineLayout qvkDestroyPipelineLayout; +PFN_vkDestroyRenderPass qvkDestroyRenderPass; +PFN_vkDestroySampler qvkDestroySampler; +PFN_vkDestroySemaphore qvkDestroySemaphore; +PFN_vkDestroyShaderModule qvkDestroyShaderModule; +PFN_vkDeviceWaitIdle qvkDeviceWaitIdle; +PFN_vkEndCommandBuffer qvkEndCommandBuffer; +PFN_vkFreeCommandBuffers qvkFreeCommandBuffers; +PFN_vkFreeDescriptorSets qvkFreeDescriptorSets; +PFN_vkFreeMemory qvkFreeMemory; +PFN_vkGetBufferMemoryRequirements qvkGetBufferMemoryRequirements; +PFN_vkGetDeviceQueue qvkGetDeviceQueue; +PFN_vkGetImageMemoryRequirements qvkGetImageMemoryRequirements; +PFN_vkGetImageSubresourceLayout qvkGetImageSubresourceLayout; +PFN_vkMapMemory qvkMapMemory; +PFN_vkUnmapMemory qvkUnmapMemory; +PFN_vkQueueSubmit qvkQueueSubmit; +PFN_vkQueueWaitIdle qvkQueueWaitIdle; +PFN_vkResetDescriptorPool qvkResetDescriptorPool; +PFN_vkResetFences qvkResetFences; +PFN_vkUpdateDescriptorSets qvkUpdateDescriptorSets; +PFN_vkWaitForFences qvkWaitForFences; +PFN_vkAcquireNextImageKHR qvkAcquireNextImageKHR; +PFN_vkCreateSwapchainKHR qvkCreateSwapchainKHR; +PFN_vkDestroySwapchainKHR qvkDestroySwapchainKHR; +PFN_vkGetSwapchainImagesKHR qvkGetSwapchainImagesKHR; +PFN_vkQueuePresentKHR qvkQueuePresentKHR; + + + +#ifndef NDEBUG + +VKAPI_ATTR VkBool32 VKAPI_CALL vk_DebugCallback( + VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT object_type, + uint64_t object, size_t location, int32_t message_code, + const char* layer_prefix, const char* message, void* user_data ) +{ + ri.Printf(PRINT_WARNING, "%s\n", message); + return VK_FALSE; +} + + +static void vk_createDebugCallback( PFN_vkDebugReportCallbackEXT qvkDebugCB) +{ + ri.Printf( PRINT_ALL, " vk_createDebugCallback() \n" ); + + VkDebugReportCallbackCreateInfoEXT desc; + desc.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT; + desc.pNext = NULL; + desc.flags = VK_DEBUG_REPORT_WARNING_BIT_EXT | + VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT | + VK_DEBUG_REPORT_ERROR_BIT_EXT; + desc.pfnCallback = qvkDebugCB; + desc.pUserData = NULL; + + VK_CHECK(qvkCreateDebugReportCallbackEXT(vk.instance, &desc, NULL, &vk.h_debugCB)); +} + +#endif + + +static void vk_createInstance(void) +{ + // There is no global state in Vulkan and all per-application state + // is stored in a VkInstance object. Creating a VkInstance object + // initializes the Vulkan library and allows the application to pass + // information about itself to the implementation. + ri.Printf(PRINT_ALL, " Creating instance: vk.instance\n"); + + // The version of Vulkan that is supported by an instance may be + // different than the version of Vulkan supported by a device or + // physical device. Because Vulkan 1.0 implementations may fail + // with VK_ERROR_INCOMPATIBLE_DRIVER, applications should determine + // the version of Vulkan available before calling vkCreateInstance. + // If the vkGetInstanceProcAddr returns NULL for vkEnumerateInstanceVersion, + // it is a Vulkan 1.0 implementation. Otherwise, the application can + // call vkEnumerateInstanceVersion to determine the version of Vulkan. 
+
+ VkApplicationInfo appInfo;
+ appInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
+ appInfo.pNext = NULL;
+ appInfo.pApplicationName = "OpenArena";
+ appInfo.applicationVersion = VK_MAKE_VERSION(1, 0, 0);
+ appInfo.pEngineName = "VulkanArena";
+ appInfo.engineVersion = VK_MAKE_VERSION(1, 0, 0);
+ // apiVersion is the highest version of Vulkan the application is
+ // designed to use. The patch version is ignored when creating an
+ // instance; only the major and minor versions must match.
+ appInfo.apiVersion = VK_MAKE_VERSION(1, 0, 0);
+
+
+ VkInstanceCreateInfo instanceCreateInfo;
+
+ instanceCreateInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
+ // pNext is NULL or a pointer to an extension-specific structure.
+ instanceCreateInfo.pNext = NULL;
+ // flags is set below after scanning instance extensions for
+ // VK_KHR_portability_enumeration (needed for MoltenVK on macOS).
+ // pApplicationInfo, if not NULL, helps implementations recognize
+ // behavior inherent to classes of applications.
+ instanceCreateInfo.pApplicationInfo = &appInfo;
+
+
+ // Check extension availability. Extensions may define new Vulkan
+ // commands, structures, and enumerants; commands defined by extensions
+ // may not be available for static linking, in which case function
+ // pointers to them should be queried at runtime. Extensions may be
+ // provided by layers as well as by a Vulkan implementation.
+ uint32_t nInsExt = 0;
+ // To retrieve a list of supported extensions before creating an instance
+ VK_CHECK( qvkEnumerateInstanceExtensionProperties( NULL, &nInsExt, NULL) );
+
+ assert(nInsExt > 0);
+
+ ri.Printf(PRINT_ALL, "--- Total %d instance extensions. --- \n", nInsExt);
+
+ VkExtensionProperties *pInsExt = (VkExtensionProperties *) malloc(sizeof(VkExtensionProperties) * nInsExt);
+
+ const char** ppInstanceExt = malloc( sizeof(char *) * (nInsExt) );
+
+ VK_CHECK(qvkEnumerateInstanceExtensionProperties( NULL, &nInsExt, pInsExt));
+
+ uint32_t i = 0;
+
+ // Each platform-specific extension is an instance extension; the
+ // application must enable instance extensions with vkCreateInstance
+ // before using them.
+
+ VkInstanceCreateFlags flags = 0;
+
+ for (i = 0; i < nInsExt; i++)
+ {
+ ppInstanceExt[i] = pInsExt[i].extensionName;
+
+ // Required for MoltenVK (macOS/iOS) to enumerate portability
+ // subset devices. Without this flag, vkEnumeratePhysicalDevices()
+ // may return zero devices on these platforms.
+        if (!strcmp(pInsExt[i].extensionName, VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME))
+        {
+            flags |= VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR;
+        }
+    }
+
+    instanceCreateInfo.flags = flags;
+    instanceCreateInfo.enabledExtensionCount = nInsExt;
+    instanceCreateInfo.ppEnabledExtensionNames = ppInstanceExt;
+
+#ifndef NDEBUG
+    ri.Printf(PRINT_ALL, "Using VK_LAYER_KHRONOS_validation\n");
+
+    const char* const validation_layer_name = "VK_LAYER_KHRONOS_validation";
+    instanceCreateInfo.enabledLayerCount = 1;
+    instanceCreateInfo.ppEnabledLayerNames = &validation_layer_name;
+#else
+    instanceCreateInfo.enabledLayerCount = 0;
+    instanceCreateInfo.ppEnabledLayerNames = NULL;
+#endif
+
+
+    VkResult e = qvkCreateInstance(&instanceCreateInfo, NULL, &vk.instance);
+    if (e == VK_SUCCESS)
+    {
+        ri.Printf(PRINT_ALL, "--- Vulkan create instance success! ---\n\n");
+    }
+    else if (e == VK_ERROR_INCOMPATIBLE_DRIVER)
+    {
+        // The requested version of Vulkan is not supported by the driver
+        // or is otherwise incompatible for implementation-specific reasons.
+        ri.Error(ERR_FATAL,
+            "The requested version of Vulkan is not supported by the driver." );
+    }
+    else if (e == VK_ERROR_EXTENSION_NOT_PRESENT)
+    {
+        ri.Error(ERR_FATAL, "Cannot find a specified extension library.");
+    }
+    else
+    {
+        ri.Error(ERR_FATAL, "%s returned by qvkCreateInstance.", cvtResToStr(e));
+    }
+
+    free(ppInstanceExt);
+
+    free(pInsExt);
+}
+
+
+
+static void vk_loadGlobalFunctions(void)
+{
+    ri.Printf(PRINT_ALL, " Loading Vulkan instance functions \n");
+
+    vk_getInstanceProcAddrImpl();
+
+    #define INIT_INSTANCE_FUNCTION(func) \
+    q##func = (PFN_ ## func)qvkGetInstanceProcAddr(vk.instance, #func); \
+    if (q##func == NULL) { \
+        ri.Error(ERR_FATAL, "Failed to find entrypoint %s", #func); \
+    }
+
+    INIT_INSTANCE_FUNCTION(vkCreateInstance)
+    INIT_INSTANCE_FUNCTION(vkEnumerateInstanceExtensionProperties)
+
+    //
+    // Create the instance, then get the instance level functions.
+    //
+    vk_createInstance();
+
+
+    INIT_INSTANCE_FUNCTION(vkCreateDevice)
+    INIT_INSTANCE_FUNCTION(vkDestroyInstance)
+    INIT_INSTANCE_FUNCTION(vkDestroySurfaceKHR)
+    INIT_INSTANCE_FUNCTION(vkEnumerateDeviceExtensionProperties)
+    INIT_INSTANCE_FUNCTION(vkEnumeratePhysicalDevices)
+    INIT_INSTANCE_FUNCTION(vkGetDeviceProcAddr)
+
+    INIT_INSTANCE_FUNCTION(vkGetPhysicalDeviceFeatures)
+    INIT_INSTANCE_FUNCTION(vkGetPhysicalDeviceProperties)
+    INIT_INSTANCE_FUNCTION(vkGetPhysicalDeviceFormatProperties)
+    INIT_INSTANCE_FUNCTION(vkGetPhysicalDeviceMemoryProperties)
+    INIT_INSTANCE_FUNCTION(vkGetPhysicalDeviceQueueFamilyProperties)
+
+    INIT_INSTANCE_FUNCTION(vkGetPhysicalDeviceSurfaceCapabilitiesKHR)
+    INIT_INSTANCE_FUNCTION(vkGetPhysicalDeviceSurfaceFormatsKHR)
+    INIT_INSTANCE_FUNCTION(vkGetPhysicalDeviceSurfacePresentModesKHR)
+    INIT_INSTANCE_FUNCTION(vkGetPhysicalDeviceSurfaceSupportKHR)
+
+
+#ifndef NDEBUG
+    INIT_INSTANCE_FUNCTION(vkCreateDebugReportCallbackEXT)
+    INIT_INSTANCE_FUNCTION(vkDestroyDebugReportCallbackEXT)
+#endif
+
+    #undef INIT_INSTANCE_FUNCTION
+
+    ri.Printf(PRINT_ALL, " Loading Vulkan instance functions done. \n");
+}
+
+////////////////////////////////
+
+
+static void vk_selectPhysicalDevice(void)
+{
+    // After initializing the Vulkan library through a VkInstance
+    // we need to look for and select a graphics card in the system
+    // that supports the features we need. In fact we can select any
+    // number of graphics cards and use them simultaneously.
+    uint32_t gpu_count = 0;
+
+    // Initial call to query gpu_count, then second call for gpu info.
+    qvkEnumeratePhysicalDevices(vk.instance, &gpu_count, NULL);
+
+    if (gpu_count == 0)
+        ri.Error(ERR_FATAL, "Vulkan: no physical device found");
+
+    VkPhysicalDevice *pPhyDev = (VkPhysicalDevice *) malloc (sizeof(VkPhysicalDevice) * gpu_count);
+
+    // TODO: multiple graphics card selection support
+    VK_CHECK(qvkEnumeratePhysicalDevices(vk.instance, &gpu_count, pPhyDev));
+    // For now we just grab the first physical device
+    vk.physical_device = pPhyDev[0];
+
+    free(pPhyDev);
+
+    ri.Printf(PRINT_ALL, " Total %d graphics cards, the first one is chosen. \n", gpu_count);
+
+    ri.Printf(PRINT_ALL, " Get physical device memory properties: vk.devMemProperties \n");
+    qvkGetPhysicalDeviceMemoryProperties(vk.physical_device, &vk.devMemProperties);
+}
+
+
+
+static void vk_selectSurfaceFormat(void)
+{
+    uint32_t nSurfmt;
+
+    ri.Printf(PRINT_ALL, "\n -------- vk_selectSurfaceFormat() -------- \n");
+
+
+    // Get the number of VkFormats that are supported.
+    // "vk.surface" is the surface that will be associated with the swapchain;
+    // it must be a valid VkSurfaceKHR handle.
+    VK_CHECK(qvkGetPhysicalDeviceSurfaceFormatsKHR(vk.physical_device, vk.surface, &nSurfmt, NULL));
+    assert(nSurfmt > 0);
+
+    VkSurfaceFormatKHR *pSurfFmts =
+        (VkSurfaceFormatKHR *) malloc ( nSurfmt * sizeof(VkSurfaceFormatKHR) );
+
+    // To query the supported swapchain format-color space pairs for a surface
+    VK_CHECK(qvkGetPhysicalDeviceSurfaceFormatsKHR(vk.physical_device, vk.surface, &nSurfmt, pSurfFmts));
+
+    // If the format list includes just one entry of VK_FORMAT_UNDEFINED, the surface
+    // has no preferred format. Otherwise, at least one supported format will be returned.
+    if ( (nSurfmt == 1) && (pSurfFmts[0].format == VK_FORMAT_UNDEFINED) )
+    {
+        // special case that means we can choose any format
+        vk.surface_format.format = VK_FORMAT_B8G8R8A8_UNORM;
+        vk.surface_format.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
+        ri.Printf(PRINT_ALL, "VK_FORMAT_B8G8R8A8_UNORM\n");
+        ri.Printf(PRINT_ALL, "VK_COLORSPACE_SRGB_NONLINEAR_KHR\n");
+    }
+    else
+    {
+        uint32_t i;
+        ri.Printf(PRINT_ALL, " Total %d surface formats supported, we choose: \n", nSurfmt);
+
+        for( i = 0; i < nSurfmt; i++)
+        {
+            if( ( pSurfFmts[i].format == VK_FORMAT_B8G8R8A8_UNORM) &&
+                ( pSurfFmts[i].colorSpace == VK_COLORSPACE_SRGB_NONLINEAR_KHR) )
+            {
+                ri.Printf(PRINT_ALL, " format = VK_FORMAT_B8G8R8A8_UNORM \n");
+                ri.Printf(PRINT_ALL, " colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR \n");
+
+                vk.surface_format = pSurfFmts[i];
+                break;
+            }
+        }
+
+        if (i == nSurfmt)
+            vk.surface_format = pSurfFmts[0];
+    }
+
+    free(pSurfFmts);
+
+
+    // To query the basic capabilities of a surface, needed in order to create a swapchain
+    VK_CHECK(qvkGetPhysicalDeviceSurfaceCapabilitiesKHR(vk.physical_device, vk.surface, &vk.surface_caps));
+
+    // VK_IMAGE_USAGE_TRANSFER_DST_BIT is required by image clear operations.
+    if ((vk.surface_caps.supportedUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == 0)
+        ri.Error(ERR_FATAL, "VK_IMAGE_USAGE_TRANSFER_DST_BIT is not supported by your GPU.");
+
+    // VK_IMAGE_USAGE_TRANSFER_SRC_BIT is required in order to take screenshots.
+    if ((vk.surface_caps.supportedUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) == 0)
+        ri.Error(ERR_FATAL, "VK_IMAGE_USAGE_TRANSFER_SRC_BIT is not supported by your GPU.");
+
+
+    // To query supported format features which are properties of the physical device
+
+    VkFormatProperties props;
+
+
+    // To determine the set of valid usage bits for a given format,
+    // call vkGetPhysicalDeviceFormatProperties.
+
+    // ========================= color =========================
+    qvkGetPhysicalDeviceFormatProperties(vk.physical_device, vk.surface_format.format, &props);
+
+    // Check which tiling/blit features the surface format supports
+    if ( props.linearTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT )
+        ri.Printf(PRINT_ALL, "--- Linear tiling sampled-image feature supported. ---\n");
+
+    if ( props.linearTilingFeatures & VK_FORMAT_FEATURE_BLIT_DST_BIT )
+    {
+        ri.Printf(PRINT_ALL, "--- Blitting to linear tiled images supported. ---\n");
+    }
+
+    if ( props.optimalTilingFeatures & VK_FORMAT_FEATURE_BLIT_SRC_BIT )
+    {
+        ri.Printf(PRINT_ALL, "--- Blitting from optimal tiled images supported. ---\n");
+        vk.isBlitSupported = VK_TRUE;
+    }
+
+
+    //=========================== depth =====================================
+    qvkGetPhysicalDeviceFormatProperties(vk.physical_device, VK_FORMAT_D24_UNORM_S8_UINT, &props);
+    if ( props.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT )
+    {
+        ri.Printf(PRINT_ALL, " VK_FORMAT_D24_UNORM_S8_UINT optimal tiling feature supported.\n");
+        vk.fmt_DepthStencil = VK_FORMAT_D24_UNORM_S8_UINT;
+    }
+    else
+    {
+        qvkGetPhysicalDeviceFormatProperties(vk.physical_device, VK_FORMAT_D32_SFLOAT_S8_UINT, &props);
+
+        if ( props.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT )
+        {
+            ri.Printf(PRINT_ALL, " VK_FORMAT_D32_SFLOAT_S8_UINT optimal tiling feature supported.\n");
+            vk.fmt_DepthStencil = VK_FORMAT_D32_SFLOAT_S8_UINT;
+        }
+        else
+        {
+            //formats[0] = VK_FORMAT_X8_D24_UNORM_PACK32;
+            //formats[1] = VK_FORMAT_D32_SFLOAT;
+            // should never get here.
+            ri.Error(ERR_FATAL, " Failed to find depth attachment format.");
+        }
+    }
+
+    ri.Printf(PRINT_ALL, " -------- --------------------------- --------\n");
+}
+
+
+static void vk_selectQueueFamilyForPresentation(void)
+{
+    // Almost every operation in Vulkan, from drawing to uploading textures,
+    // requires commands to be submitted to a queue. There are different
+    // types of queues that originate from different queue families and
+    // each family of queues allows only a subset of commands.
+    // For example, there could be a queue family that only allows processing
+    // of compute commands or one that only allows memory transfer related
+    // commands. We need to check which queue families are supported by
+    // the device and which one of these supports the commands that we use.
+
+
+    uint32_t nQueueFamilies;
+    qvkGetPhysicalDeviceQueueFamilyProperties(vk.physical_device, &nQueueFamilies, NULL);
+
+    assert(nQueueFamilies > 0);
+
+    VkQueueFamilyProperties* pQueueFamilies = (VkQueueFamilyProperties *) malloc (
+            nQueueFamilies * sizeof(VkQueueFamilyProperties) );
+
+    // To query properties of queues available on a physical device
+    qvkGetPhysicalDeviceQueueFamilyProperties(vk.physical_device, &nQueueFamilies, pQueueFamilies);
+
+    // Select a queue family with presentation and graphics support.
+    // Iterate over each queue to learn whether it supports presenting:
+    vk.queue_family_index = -1;
+
+    uint32_t i;
+    for (i = 0; i < nQueueFamilies; ++i)
+    {
+        // Look for a queue family that has the capability of presenting
+        // to our window surface
+
+        VkBool32 presentation_supported = VK_FALSE;
+        VK_CHECK(qvkGetPhysicalDeviceSurfaceSupportKHR(
+            vk.physical_device, i, vk.surface, &presentation_supported));
+
+        if (presentation_supported &&
+            (pQueueFamilies[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) != 0)
+        {
+            vk.queue_family_index = i;
+
+            ri.Printf(PRINT_ALL, " Queue family for presentation selected: %d\n",
+                vk.queue_family_index);
+
+            break;
+        }
+    }
+
+    free(pQueueFamilies);
+
+    if (vk.queue_family_index == -1)
+        ri.Error(ERR_FATAL, "Vulkan: failed to find queue family");
+}
+
+
+static void vk_createLogicalDevice(void)
+{
+    const char* device_extensions[2];
+    uint32_t device_extension_count = 0;
+
+    device_extensions[device_extension_count++] = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
+
+    // Not all graphics cards are capable of presenting images directly
+    // to a screen for various reasons, for example because they are
+    // designed for servers and don't have any display outputs.
+    // Secondly, since image presentation is heavily tied into the
+    // window system and the surfaces associated with windows, it is
+    // not actually part of the Vulkan core. You have to enable the
+    // VK_KHR_swapchain device extension after querying for its support.
+    uint32_t nDevExts = 0;
+    VkBool32 swapchainExtFound = 0;
+
+    // To query the number of extensions available to a given physical device
+    ri.Printf( PRINT_ALL, " Check for VK_KHR_swapchain extension. \n" );
+
+    qvkEnumerateDeviceExtensionProperties( vk.physical_device, NULL, &nDevExts, NULL);
+
+    VkExtensionProperties* pDeviceExt =
+        (VkExtensionProperties *) malloc(sizeof(VkExtensionProperties) * nDevExts);
+
+    qvkEnumerateDeviceExtensionProperties( vk.physical_device, NULL, &nDevExts, pDeviceExt);
+
+
+    uint32_t j;
+    for (j = 0; j < nDevExts; j++)
+    {
+        if (!strcmp(VK_KHR_SWAPCHAIN_EXTENSION_NAME, pDeviceExt[j].extensionName))
+        {
+            swapchainExtFound = VK_TRUE;
+        }
+        // VK_KHR_portability_subset must be enabled on devices that
+        // report it (required by the Vulkan spec). This is the case
+        // for MoltenVK on macOS/iOS.
+        if (!strcmp("VK_KHR_portability_subset", pDeviceExt[j].extensionName))
+        {
+            device_extensions[device_extension_count++] = "VK_KHR_portability_subset";
+            ri.Printf( PRINT_ALL, " Enabling %s device extension.\n", "VK_KHR_portability_subset" );
+        }
+    }
+    if (VK_FALSE == swapchainExtFound)
+        ri.Error(ERR_FATAL, "VK_KHR_SWAPCHAIN_EXTENSION_NAME is not available");
+
+    free(pDeviceExt);
+
+
+    const float priority = 1.0;
+    VkDeviceQueueCreateInfo queue_desc;
+    queue_desc.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+    queue_desc.pNext = NULL;
+    queue_desc.flags = 0;
+    queue_desc.queueFamilyIndex = vk.queue_family_index;
+    queue_desc.queueCount = 1;
+    queue_desc.pQueuePriorities = &priority;
+
+
+    // Query fine-grained feature support for this device.
+    // If the application has specific feature requirements, it should check
+    // supported features based on this query.
+    qvkGetPhysicalDeviceFeatures(vk.physical_device, &vk.features);
+
+    VkDeviceCreateInfo device_desc;
+    device_desc.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
+    device_desc.pNext = NULL;
+    device_desc.flags = 0;
+    device_desc.queueCreateInfoCount = 1;
+    device_desc.pQueueCreateInfos = &queue_desc;
+    device_desc.enabledLayerCount = 0;
+    device_desc.ppEnabledLayerNames = NULL;
+    device_desc.enabledExtensionCount = device_extension_count;
+    device_desc.ppEnabledExtensionNames = device_extensions;
+    device_desc.pEnabledFeatures = &vk.features;
+
+
+    // After selecting a physical device to use we need to set up a
+    // logical device to interface with it. The logical device
+    // creation process is similar to the instance creation process
+    // and describes the features we want to use. We also need to
+    // specify which queues to create now that we've queried which
+    // queue families are available. You can create multiple logical
+    // devices from the same physical device if you have varying requirements.
+    ri.Printf( PRINT_ALL, " Create logical device: vk.device \n" );
+    VK_CHECK(qvkCreateDevice(vk.physical_device, &device_desc, NULL, &vk.device));
+}
+
+
+static void vk_loadDeviceFunctions(void)
+{
+    ri.Printf( PRINT_ALL, " Loading device level functions. \n" );
+
+    #define INIT_DEVICE_FUNCTION(func) \
+    q##func = (PFN_ ## func)qvkGetDeviceProcAddr(vk.device, #func); \
+    if (q##func == NULL) { \
+        ri.Error(ERR_FATAL, "Failed to find entrypoint %s", #func); \
+    }
+
+    INIT_DEVICE_FUNCTION(vkAllocateCommandBuffers)
+    INIT_DEVICE_FUNCTION(vkAllocateDescriptorSets)
+    INIT_DEVICE_FUNCTION(vkAllocateMemory)
+    INIT_DEVICE_FUNCTION(vkBeginCommandBuffer)
+    INIT_DEVICE_FUNCTION(vkBindBufferMemory)
+    INIT_DEVICE_FUNCTION(vkBindImageMemory)
+    INIT_DEVICE_FUNCTION(vkCmdBeginRenderPass)
+    INIT_DEVICE_FUNCTION(vkCmdBindDescriptorSets)
+    INIT_DEVICE_FUNCTION(vkCmdBindIndexBuffer)
+    INIT_DEVICE_FUNCTION(vkCmdBindPipeline)
+    INIT_DEVICE_FUNCTION(vkCmdBindVertexBuffers)
+    INIT_DEVICE_FUNCTION(vkCmdBlitImage)
+    INIT_DEVICE_FUNCTION(vkCmdClearAttachments)
+    INIT_DEVICE_FUNCTION(vkCmdCopyBufferToImage)
+    INIT_DEVICE_FUNCTION(vkCmdCopyImage)
+    INIT_DEVICE_FUNCTION(vkCmdCopyImageToBuffer)
+    INIT_DEVICE_FUNCTION(vkCmdDraw)
+    INIT_DEVICE_FUNCTION(vkCmdDrawIndexed)
+    INIT_DEVICE_FUNCTION(vkCmdEndRenderPass)
+    INIT_DEVICE_FUNCTION(vkCmdPipelineBarrier)
+    INIT_DEVICE_FUNCTION(vkCmdPushConstants)
+    INIT_DEVICE_FUNCTION(vkCmdSetDepthBias)
+    INIT_DEVICE_FUNCTION(vkCmdSetScissor)
+    INIT_DEVICE_FUNCTION(vkCmdSetViewport)
+    INIT_DEVICE_FUNCTION(vkCreateBuffer)
+    INIT_DEVICE_FUNCTION(vkCreateCommandPool)
+    INIT_DEVICE_FUNCTION(vkCreateDescriptorPool)
+    INIT_DEVICE_FUNCTION(vkCreateDescriptorSetLayout)
+    INIT_DEVICE_FUNCTION(vkCreateFence)
+    INIT_DEVICE_FUNCTION(vkCreateFramebuffer)
+    INIT_DEVICE_FUNCTION(vkCreateGraphicsPipelines)
+    INIT_DEVICE_FUNCTION(vkCreateImage)
+    INIT_DEVICE_FUNCTION(vkCreateImageView)
+    INIT_DEVICE_FUNCTION(vkCreatePipelineLayout)
+    INIT_DEVICE_FUNCTION(vkCreateRenderPass)
+    INIT_DEVICE_FUNCTION(vkCreateSampler)
+    INIT_DEVICE_FUNCTION(vkCreateSemaphore)
+    INIT_DEVICE_FUNCTION(vkCreateShaderModule)
+    INIT_DEVICE_FUNCTION(vkDestroyBuffer)
+    INIT_DEVICE_FUNCTION(vkDestroyCommandPool)
+    INIT_DEVICE_FUNCTION(vkDestroyDescriptorPool)
+    INIT_DEVICE_FUNCTION(vkDestroyDescriptorSetLayout)
+    INIT_DEVICE_FUNCTION(vkDestroyDevice)
+    INIT_DEVICE_FUNCTION(vkDestroyFence)
+
INIT_DEVICE_FUNCTION(vkDestroyFramebuffer) + INIT_DEVICE_FUNCTION(vkDestroyImage) + INIT_DEVICE_FUNCTION(vkDestroyImageView) + INIT_DEVICE_FUNCTION(vkDestroyPipeline) + INIT_DEVICE_FUNCTION(vkDestroyPipelineLayout) + INIT_DEVICE_FUNCTION(vkDestroyRenderPass) + INIT_DEVICE_FUNCTION(vkDestroySampler) + INIT_DEVICE_FUNCTION(vkDestroySemaphore) + INIT_DEVICE_FUNCTION(vkDestroyShaderModule) + INIT_DEVICE_FUNCTION(vkDeviceWaitIdle) + INIT_DEVICE_FUNCTION(vkEndCommandBuffer) + INIT_DEVICE_FUNCTION(vkFreeCommandBuffers) + INIT_DEVICE_FUNCTION(vkFreeDescriptorSets) + INIT_DEVICE_FUNCTION(vkFreeMemory) + INIT_DEVICE_FUNCTION(vkGetBufferMemoryRequirements) + INIT_DEVICE_FUNCTION(vkGetDeviceQueue) + INIT_DEVICE_FUNCTION(vkGetImageMemoryRequirements) + INIT_DEVICE_FUNCTION(vkGetImageSubresourceLayout) + INIT_DEVICE_FUNCTION(vkMapMemory) + INIT_DEVICE_FUNCTION(vkUnmapMemory) + INIT_DEVICE_FUNCTION(vkQueueSubmit) + INIT_DEVICE_FUNCTION(vkQueueWaitIdle) + INIT_DEVICE_FUNCTION(vkResetDescriptorPool) + INIT_DEVICE_FUNCTION(vkResetFences) + INIT_DEVICE_FUNCTION(vkUpdateDescriptorSets) + INIT_DEVICE_FUNCTION(vkWaitForFences) + + INIT_DEVICE_FUNCTION(vkCreateSwapchainKHR) + INIT_DEVICE_FUNCTION(vkDestroySwapchainKHR) + INIT_DEVICE_FUNCTION(vkGetSwapchainImagesKHR) + INIT_DEVICE_FUNCTION(vkAcquireNextImageKHR) + INIT_DEVICE_FUNCTION(vkQueuePresentKHR) + + #undef INIT_DEVICE_FUNCTION +} + + + +void vk_getProcAddress(void) +{ + vk_loadGlobalFunctions(); + +#ifndef NDEBUG + // Create debug callback. + vk_createDebugCallback(vk_DebugCallback); +#endif + + // The window surface needs to be created right after the instance creation, + // because it can actually influence the presentation mode selection. + vk_createSurfaceImpl(); + + // select physical device + vk_selectPhysicalDevice(); + + vk_selectSurfaceFormat(); + + vk_selectQueueFamilyForPresentation(); + + vk_createLogicalDevice(); + + // Get device level functions. + vk_loadDeviceFunctions(); + + // a call to retrieve queue handle + qvkGetDeviceQueue(vk.device, vk.queue_family_index, 0, &vk.queue); +} + + +void vk_clearProcAddress(void) +{ + + ri.Printf( PRINT_ALL, " Destroy logical device: vk.device. \n" ); + // Device queues are implicitly cleaned up when the device is destroyed + // so we don't need to do anything in clean up + qvkDestroyDevice(vk.device, NULL); + + ri.Printf( PRINT_ALL, " Destroy surface: vk.surface. \n" ); + // make sure that the surface is destroyed before the instance + qvkDestroySurfaceKHR(vk.instance, vk.surface, NULL); + +#ifndef NDEBUG + ri.Printf( PRINT_ALL, " Destroy callback function: vk.h_debugCB. \n" ); + + qvkDestroyDebugReportCallbackEXT(vk.instance, vk.h_debugCB, NULL); +#endif + + ri.Printf( PRINT_ALL, " Destroy instance: vk.instance. 
\n" ); + qvkDestroyInstance(vk.instance, NULL); + +// =========================================================== + ri.Printf( PRINT_ALL, " clear all proc address \n" ); + + qvkCreateInstance = NULL; + qvkEnumerateInstanceExtensionProperties = NULL; + + qvkCreateDevice = NULL; + qvkDestroyInstance = NULL; + qvkEnumerateDeviceExtensionProperties = NULL; + qvkEnumeratePhysicalDevices = NULL; + qvkGetDeviceProcAddr = NULL; + qvkGetPhysicalDeviceFeatures = NULL; + qvkGetPhysicalDeviceFormatProperties = NULL; + qvkGetPhysicalDeviceMemoryProperties = NULL; + qvkGetPhysicalDeviceProperties = NULL; + qvkGetPhysicalDeviceQueueFamilyProperties = NULL; + + qvkDestroySurfaceKHR = NULL; + qvkGetPhysicalDeviceSurfaceCapabilitiesKHR = NULL; + qvkGetPhysicalDeviceSurfaceFormatsKHR = NULL; + qvkGetPhysicalDeviceSurfacePresentModesKHR = NULL; + qvkGetPhysicalDeviceSurfaceSupportKHR = NULL; +#ifndef NDEBUG + qvkCreateDebugReportCallbackEXT = NULL; + qvkDestroyDebugReportCallbackEXT = NULL; +#endif + + qvkAllocateCommandBuffers = NULL; + qvkAllocateDescriptorSets = NULL; + qvkAllocateMemory = NULL; + qvkBeginCommandBuffer = NULL; + qvkBindBufferMemory = NULL; + qvkBindImageMemory = NULL; + qvkCmdBeginRenderPass = NULL; + qvkCmdBindDescriptorSets = NULL; + qvkCmdBindIndexBuffer = NULL; + qvkCmdBindPipeline = NULL; + qvkCmdBindVertexBuffers = NULL; + qvkCmdBlitImage = NULL; + qvkCmdClearAttachments = NULL; + qvkCmdCopyBufferToImage = NULL; + qvkCmdCopyImage = NULL; + qvkCmdCopyImageToBuffer = NULL; + qvkCmdDraw = NULL; + qvkCmdDrawIndexed = NULL; + qvkCmdEndRenderPass = NULL; + qvkCmdPipelineBarrier = NULL; + qvkCmdPushConstants = NULL; + qvkCmdSetDepthBias = NULL; + qvkCmdSetScissor = NULL; + qvkCmdSetViewport = NULL; + qvkCreateBuffer = NULL; + qvkCreateCommandPool = NULL; + qvkCreateDescriptorPool = NULL; + qvkCreateDescriptorSetLayout = NULL; + qvkCreateFence = NULL; + qvkCreateFramebuffer = NULL; + qvkCreateGraphicsPipelines = NULL; + qvkCreateImage = NULL; + qvkCreateImageView = NULL; + qvkCreatePipelineLayout = NULL; + qvkCreateRenderPass = NULL; + qvkCreateSampler = NULL; + qvkCreateSemaphore = NULL; + qvkCreateShaderModule = NULL; + qvkDestroyBuffer = NULL; + qvkDestroyCommandPool = NULL; + qvkDestroyDescriptorPool = NULL; + qvkDestroyDescriptorSetLayout = NULL; + qvkDestroyDevice = NULL; + qvkDestroyFence = NULL; + qvkDestroyFramebuffer = NULL; + qvkDestroyImage = NULL; + qvkDestroyImageView = NULL; + qvkDestroyPipeline = NULL; + qvkDestroyPipelineLayout = NULL; + qvkDestroyRenderPass = NULL; + qvkDestroySampler = NULL; + qvkDestroySemaphore = NULL; + qvkDestroyShaderModule = NULL; + qvkDeviceWaitIdle = NULL; + qvkEndCommandBuffer = NULL; + qvkFreeCommandBuffers = NULL; + qvkFreeDescriptorSets = NULL; + qvkFreeMemory = NULL; + qvkGetBufferMemoryRequirements = NULL; + qvkGetDeviceQueue = NULL; + qvkGetImageMemoryRequirements = NULL; + qvkGetImageSubresourceLayout = NULL; + qvkMapMemory = NULL; + qvkUnmapMemory = NULL; + qvkQueueSubmit = NULL; + qvkQueueWaitIdle = NULL; + qvkResetDescriptorPool = NULL; + qvkResetFences = NULL; + qvkUpdateDescriptorSets = NULL; + qvkWaitForFences = NULL; + qvkAcquireNextImageKHR = NULL; + qvkCreateSwapchainKHR = NULL; + qvkDestroySwapchainKHR = NULL; + qvkGetSwapchainImagesKHR = NULL; + qvkQueuePresentKHR = NULL; +} + + +const char * cvtResToStr(VkResult result) +{ + switch(result) + { + case VK_SUCCESS: + return "VK_SUCCESS"; + case VK_NOT_READY: + return "VK_NOT_READY"; + case VK_TIMEOUT: + return "VK_TIMEOUT"; + case VK_EVENT_SET: + return "VK_EVENT_SET"; + case 
VK_EVENT_RESET: + return "VK_EVENT_RESET"; + case VK_INCOMPLETE: + return "VK_INCOMPLETE"; + case VK_ERROR_OUT_OF_HOST_MEMORY: + return "VK_ERROR_OUT_OF_HOST_MEMORY"; + case VK_ERROR_OUT_OF_DEVICE_MEMORY: + return "VK_ERROR_OUT_OF_DEVICE_MEMORY"; + case VK_ERROR_INITIALIZATION_FAILED: + return "VK_ERROR_INITIALIZATION_FAILED"; + case VK_ERROR_DEVICE_LOST: + return "VK_ERROR_DEVICE_LOST"; + case VK_ERROR_MEMORY_MAP_FAILED: + return "VK_ERROR_MEMORY_MAP_FAILED"; + case VK_ERROR_LAYER_NOT_PRESENT: + return "VK_ERROR_LAYER_NOT_PRESENT"; + case VK_ERROR_EXTENSION_NOT_PRESENT: + return "VK_ERROR_EXTENSION_NOT_PRESENT"; + case VK_ERROR_FEATURE_NOT_PRESENT: + return "VK_ERROR_FEATURE_NOT_PRESENT"; + case VK_ERROR_INCOMPATIBLE_DRIVER: + return "VK_ERROR_INCOMPATIBLE_DRIVER"; + case VK_ERROR_TOO_MANY_OBJECTS: + return "VK_ERROR_TOO_MANY_OBJECTS"; + case VK_ERROR_FORMAT_NOT_SUPPORTED: + return "VK_ERROR_FORMAT_NOT_SUPPORTED"; + case VK_ERROR_FRAGMENTED_POOL: + return "VK_ERROR_FRAGMENTED_POOL"; + case VK_ERROR_SURFACE_LOST_KHR: + return "VK_ERROR_SURFACE_LOST_KHR"; + case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR: + return "VK_ERROR_NATIVE_WINDOW_IN_USE_KHR"; + case VK_SUBOPTIMAL_KHR: + return "VK_SUBOPTIMAL_KHR"; + case VK_ERROR_OUT_OF_DATE_KHR: + return "VK_ERROR_OUT_OF_DATE_KHR"; + case VK_ERROR_INCOMPATIBLE_DISPLAY_KHR: + return "VK_ERROR_INCOMPATIBLE_DISPLAY_KHR"; + case VK_ERROR_VALIDATION_FAILED_EXT: + return "VK_ERROR_VALIDATION_FAILED_EXT"; + case VK_ERROR_OUT_OF_POOL_MEMORY_KHR: + return "VK_ERROR_OUT_OF_POOL_MEMORY_KHR"; + case VK_ERROR_INVALID_SHADER_NV: + return "VK_ERROR_INVALID_SHADER_NV"; +// + case VK_ERROR_INVALID_EXTERNAL_HANDLE: + return "VK_ERROR_INVALID_EXTERNAL_HANDLE"; + case VK_ERROR_NOT_PERMITTED_EXT: + return "VK_ERROR_NOT_PERMITTED_EXT"; +// + case VK_RESULT_MAX_ENUM: + return "VK_RESULT_MAX_ENUM"; + case VK_ERROR_FRAGMENTATION_EXT: + return "VK_ERROR_FRAGMENTATION_EXT"; + default: + break; + } + + return "UNKNOWN_ERROR"; +} diff --git a/code/renderer_vulkan/vk_instance.h b/code/renderer_vulkan/vk_instance.h new file mode 100644 index 0000000000..d8b819415f --- /dev/null +++ b/code/renderer_vulkan/vk_instance.h @@ -0,0 +1,210 @@ +#ifndef VK_INSTANCE_H_ +#define VK_INSTANCE_H_ + + +#include "VKimpl.h" +#include "ref_import.h" + + +extern PFN_vkGetInstanceProcAddr qvkGetInstanceProcAddr; + +extern PFN_vkCreateInstance qvkCreateInstance; +extern PFN_vkEnumerateInstanceExtensionProperties qvkEnumerateInstanceExtensionProperties; + +extern PFN_vkCreateDevice qvkCreateDevice; +extern PFN_vkDestroyInstance qvkDestroyInstance; +extern PFN_vkEnumerateDeviceExtensionProperties qvkEnumerateDeviceExtensionProperties; +extern PFN_vkEnumeratePhysicalDevices qvkEnumeratePhysicalDevices; +extern PFN_vkGetDeviceProcAddr qvkGetDeviceProcAddr; +extern PFN_vkGetPhysicalDeviceFeatures qvkGetPhysicalDeviceFeatures; +extern PFN_vkGetPhysicalDeviceFormatProperties qvkGetPhysicalDeviceFormatProperties; +extern PFN_vkGetPhysicalDeviceMemoryProperties qvkGetPhysicalDeviceMemoryProperties; +extern PFN_vkGetPhysicalDeviceProperties qvkGetPhysicalDeviceProperties; +extern PFN_vkGetPhysicalDeviceQueueFamilyProperties qvkGetPhysicalDeviceQueueFamilyProperties; +extern PFN_vkDestroySurfaceKHR qvkDestroySurfaceKHR; +extern PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR qvkGetPhysicalDeviceSurfaceCapabilitiesKHR; +extern PFN_vkGetPhysicalDeviceSurfaceFormatsKHR qvkGetPhysicalDeviceSurfaceFormatsKHR; +extern PFN_vkGetPhysicalDeviceSurfacePresentModesKHR qvkGetPhysicalDeviceSurfacePresentModesKHR; +extern 
PFN_vkGetPhysicalDeviceSurfaceSupportKHR qvkGetPhysicalDeviceSurfaceSupportKHR; + +#ifndef NDEBUG +extern PFN_vkCreateDebugReportCallbackEXT qvkCreateDebugReportCallbackEXT; +extern PFN_vkDestroyDebugReportCallbackEXT qvkDestroyDebugReportCallbackEXT; +#endif + +extern PFN_vkAllocateCommandBuffers qvkAllocateCommandBuffers; +extern PFN_vkAllocateDescriptorSets qvkAllocateDescriptorSets; +extern PFN_vkAllocateMemory qvkAllocateMemory; +extern PFN_vkBeginCommandBuffer qvkBeginCommandBuffer; +extern PFN_vkBindBufferMemory qvkBindBufferMemory; +extern PFN_vkBindImageMemory qvkBindImageMemory; +extern PFN_vkCmdBeginRenderPass qvkCmdBeginRenderPass; +extern PFN_vkCmdBindDescriptorSets qvkCmdBindDescriptorSets; +extern PFN_vkCmdBindIndexBuffer qvkCmdBindIndexBuffer; +extern PFN_vkCmdBindPipeline qvkCmdBindPipeline; +extern PFN_vkCmdBindVertexBuffers qvkCmdBindVertexBuffers; +extern PFN_vkCmdBlitImage qvkCmdBlitImage; +extern PFN_vkCmdClearAttachments qvkCmdClearAttachments; +extern PFN_vkCmdCopyBufferToImage qvkCmdCopyBufferToImage; +extern PFN_vkCmdCopyImage qvkCmdCopyImage; +extern PFN_vkCmdCopyImageToBuffer qvkCmdCopyImageToBuffer; +extern PFN_vkCmdDraw qvkCmdDraw; +extern PFN_vkCmdDrawIndexed qvkCmdDrawIndexed; +extern PFN_vkCmdEndRenderPass qvkCmdEndRenderPass; +extern PFN_vkCmdPipelineBarrier qvkCmdPipelineBarrier; +extern PFN_vkCmdPushConstants qvkCmdPushConstants; +extern PFN_vkCmdSetDepthBias qvkCmdSetDepthBias; +extern PFN_vkCmdSetScissor qvkCmdSetScissor; +extern PFN_vkCmdSetViewport qvkCmdSetViewport; +extern PFN_vkCreateBuffer qvkCreateBuffer; +extern PFN_vkCreateCommandPool qvkCreateCommandPool; +extern PFN_vkCreateDescriptorPool qvkCreateDescriptorPool; +extern PFN_vkCreateDescriptorSetLayout qvkCreateDescriptorSetLayout; +extern PFN_vkCreateFence qvkCreateFence; +extern PFN_vkCreateFramebuffer qvkCreateFramebuffer; +extern PFN_vkCreateGraphicsPipelines qvkCreateGraphicsPipelines; +extern PFN_vkCreateImage qvkCreateImage; +extern PFN_vkCreateImageView qvkCreateImageView; +extern PFN_vkCreatePipelineLayout qvkCreatePipelineLayout; +extern PFN_vkCreateRenderPass qvkCreateRenderPass; +extern PFN_vkCreateSampler qvkCreateSampler; +extern PFN_vkCreateSemaphore qvkCreateSemaphore; +extern PFN_vkCreateShaderModule qvkCreateShaderModule; +extern PFN_vkDestroyBuffer qvkDestroyBuffer; +extern PFN_vkDestroyCommandPool qvkDestroyCommandPool; +extern PFN_vkDestroyDescriptorPool qvkDestroyDescriptorPool; +extern PFN_vkDestroyDescriptorSetLayout qvkDestroyDescriptorSetLayout; +extern PFN_vkDestroyDevice qvkDestroyDevice; +extern PFN_vkDestroyFence qvkDestroyFence; +extern PFN_vkDestroyFramebuffer qvkDestroyFramebuffer; +extern PFN_vkDestroyImage qvkDestroyImage; +extern PFN_vkDestroyImageView qvkDestroyImageView; +extern PFN_vkDestroyPipeline qvkDestroyPipeline; +extern PFN_vkDestroyPipelineLayout qvkDestroyPipelineLayout; +extern PFN_vkDestroyRenderPass qvkDestroyRenderPass; +extern PFN_vkDestroySampler qvkDestroySampler; +extern PFN_vkDestroySemaphore qvkDestroySemaphore; +extern PFN_vkDestroyShaderModule qvkDestroyShaderModule; +extern PFN_vkDeviceWaitIdle qvkDeviceWaitIdle; +extern PFN_vkEndCommandBuffer qvkEndCommandBuffer; +extern PFN_vkFreeCommandBuffers qvkFreeCommandBuffers; +extern PFN_vkFreeDescriptorSets qvkFreeDescriptorSets; +extern PFN_vkFreeMemory qvkFreeMemory; +extern PFN_vkGetBufferMemoryRequirements qvkGetBufferMemoryRequirements; +extern PFN_vkGetDeviceQueue qvkGetDeviceQueue; +extern PFN_vkGetImageMemoryRequirements qvkGetImageMemoryRequirements; +extern 
PFN_vkGetImageSubresourceLayout qvkGetImageSubresourceLayout;
+extern PFN_vkMapMemory qvkMapMemory;
+extern PFN_vkUnmapMemory qvkUnmapMemory;
+extern PFN_vkQueueSubmit qvkQueueSubmit;
+extern PFN_vkQueueWaitIdle qvkQueueWaitIdle;
+extern PFN_vkResetDescriptorPool qvkResetDescriptorPool;
+extern PFN_vkResetFences qvkResetFences;
+extern PFN_vkUpdateDescriptorSets qvkUpdateDescriptorSets;
+extern PFN_vkWaitForFences qvkWaitForFences;
+extern PFN_vkAcquireNextImageKHR qvkAcquireNextImageKHR;
+extern PFN_vkCreateSwapchainKHR qvkCreateSwapchainKHR;
+extern PFN_vkDestroySwapchainKHR qvkDestroySwapchainKHR;
+extern PFN_vkGetSwapchainImagesKHR qvkGetSwapchainImagesKHR;
+extern PFN_vkQueuePresentKHR qvkQueuePresentKHR;
+
+
+
+// Initializes the Vk_Instance structure.
+void vk_getProcAddress(void);
+void vk_clearProcAddress(void);
+
+const char * cvtResToStr(VkResult result);
+
+#ifndef NDEBUG
+#define VK_CHECK(function_call) { \
+    VkResult result = function_call; \
+    if (result != VK_SUCCESS) \
+        ri.Printf(PRINT_ALL, \
+            "Vulkan: error %s returned by %s \n", cvtResToStr(result), #function_call); \
+}
+#else
+#define VK_CHECK(function_call) \
+    function_call;
+#endif
+
+
+#define MAX_SWAPCHAIN_IMAGES 8
+
+// Vk_Instance contains engine-specific Vulkan resources that persist for the
+// entire renderer lifetime. This structure is initialized/deinitialized by the
+// vk_initialize/vk_shutdown functions correspondingly.
+struct Vk_Instance {
+    VkInstance instance;
+    VkPhysicalDevice physical_device;
+    VkPhysicalDeviceFeatures features;
+
+    // Native platform surface or window objects are abstracted by surface objects,
+    // which are represented by VkSurfaceKHR handles. The VK_KHR_surface extension
+    // declares the VkSurfaceKHR object, and provides a function for destroying
+    // VkSurfaceKHR objects. Separate platform-specific extensions each provide a
+    // function for creating a VkSurfaceKHR object for the respective platform.
+    VkSurfaceKHR surface;
+    VkSurfaceFormatKHR surface_format;
+    VkSurfaceCapabilitiesKHR surface_caps;
+
+    // Depth/stencil formats are considered opaque and need not be stored
+    // in the exact number of bits per texel or component ordering indicated
+    // by the format enum. However, implementations must not substitute a
+    // different depth or stencil precision than that described in the
+    // format (e.g. D16 must not be implemented as D24 or D32).
+
+    // The features for the set of formats (VkFormat) supported by the
+    // implementation are queried individually using the
+    // vkGetPhysicalDeviceFormatProperties command.
+
+    // Depth and stencil aspects of a given image subresource must always be in the same layout.
+
+    VkFormat fmt_DepthStencil;
+    VkPhysicalDeviceMemoryProperties devMemProperties;
+
+    uint32_t queue_family_index;
+    VkDevice device;
+    VkQueue queue;
+
+    VkSwapchainKHR swapchain;
+    uint32_t swapchain_image_count;
+    VkImage swapchain_images_array[MAX_SWAPCHAIN_IMAGES];
+    VkImageView swapchain_image_views[MAX_SWAPCHAIN_IMAGES];
+    uint32_t idx_swapchain_image;
+
+
+    VkCommandPool command_pool;
+    VkCommandBuffer command_buffer;
+
+    VkImage depth_image;
+    VkDeviceMemory depth_image_memory;
+    VkImageView depth_image_view;
+
+    VkRenderPass render_pass;
+    VkFramebuffer framebuffers[MAX_SWAPCHAIN_IMAGES];
+
+    VkDescriptorPool descriptor_pool;
+    VkDescriptorSetLayout set_layout;
+
+    // Pipeline layout: the uniform and push values referenced by
+    // the shader that can be updated at draw time
+    VkPipelineLayout pipeline_layout;
+
+    VkBool32 isBlitSupported;
+
+    VkBool32 isInitialized;
+
+#ifndef NDEBUG
+    VkDebugReportCallbackEXT h_debugCB;
+#endif
+};
+
+
+
+extern struct Vk_Instance vk;
+
+
+
+#endif
diff --git a/code/renderer_vulkan/vk_pipelines.c b/code/renderer_vulkan/vk_pipelines.c
new file mode 100644
index 0000000000..ce6d4f0c6a
--- /dev/null
+++ b/code/renderer_vulkan/vk_pipelines.c
@@ -0,0 +1,954 @@
+#include "tr_local.h"
+#include "vk_instance.h"
+#include "vk_shaders.h"
+#include "vk_pipelines.h"
+#include "tr_shader.h"
+// The graphics pipeline is the sequence of operations that take the vertices
+// and textures of your meshes all the way to the pixels in the render targets.
+//
+// The input assembler collects the raw vertex data from the buffers you specify
+// and may also use an index buffer to repeat certain elements without having to
+// duplicate the vertex data itself.
+//
+// The vertex shader is run for every vertex and generally applies transformations
+// to turn vertex positions from model space to screen space. It also passes
+// per-vertex data down the pipeline.
+//
+// The tessellation shaders allow you to subdivide geometry based on certain rules
+// to increase the mesh quality. This often makes surfaces like brick walls
+// and staircases look less flat when they are nearby.
+//
+// The geometry shader is run on every primitive (triangle, line, point) and can
+// discard it or output more primitives than came in. This is similar to the
+// tessellation shader, but much more flexible.
+//
+// The rasterization stage discretizes the primitives into fragments. These are
+// the pixel elements that they fill on the framebuffer. Any fragments that fall
+// outside the screen are discarded, and the attributes output by the vertex
+// shader are interpolated across the fragments.
+//
+// The fragment shader is invoked for every fragment that survives and determines
+// which framebuffer(s) the fragments are written to and with which color and depth
+// values. It can do this using the interpolated data from the vertex shader,
+// which can include things like texture coordinates and normals for lighting.
+//
+// The color blending stage applies operations to mix different fragments that
+// map to the same pixel in the framebuffer. Fragments can simply overwrite
+// each other, add up or be mixed based upon transparency.
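+//
+// For the common transparency case the mix is the familiar alpha-blend
+// equation (a worked example, not renderer-specific code):
+//
+//   finalColor.rgb = srcAlpha * srcColor.rgb + (1 - srcAlpha) * dstColor.rgb
+//
+// which corresponds to VK_BLEND_FACTOR_SRC_ALPHA /
+// VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA with VK_BLEND_OP_ADD.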
+//
+
+
+
+// used with cg_shadows == 2
+enum Vk_Shadow_Phase {
+    SHADOWS_RENDERING_DISABLED,
+    SHADOWS_RENDERING_EDGES,
+    SHADOWS_RENDERING_FULLSCREEN_QUAD
+};
+
+
+struct Vk_Pipeline_Def {
+    VkPipeline pipeline;
+    uint32_t state_bits;    // GLS_XXX flags
+    cullType_t face_culling;
+    VkBool32 polygon_offset;
+    VkBool32 clipping_plane;
+    VkBool32 mirror;
+    VkBool32 line_primitives;
+    enum Vk_Shader_Type shader_type;
+    enum Vk_Shadow_Phase shadow_phase;
+};
+
+
+struct GlobalPipelineManager g_stdPipelines;
+
+#define MAX_VK_PIPELINES 1024
+static struct Vk_Pipeline_Def s_pipeline_defs[MAX_VK_PIPELINES];
+static uint32_t s_numPipelines = 0;
+
+
+void R_PipelineList_f(void)
+{
+    ri.Printf(PRINT_ALL, " Total pipelines created: %d\n", s_numPipelines);
+}
+
+
+
+// Uniform values in the shaders need to be specified during pipeline creation,
+// e.g. the transformation matrix for the vertex shader, or texture samplers
+// for the fragment shader.
+
+
+void vk_createPipelineLayout(void)
+{
+    ri.Printf(PRINT_ALL, " Create: vk.descriptor_pool, vk.set_layout, vk.pipeline_layout\n");
+
+    // Like command buffers, descriptor sets are allocated from a pool,
+    // so we must first create the descriptor pool.
+    {
+        VkDescriptorPoolSize pool_size;
+        pool_size.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+        pool_size.descriptorCount = MAX_DRAWIMAGES;
+
+        VkDescriptorPoolCreateInfo desc;
+        desc.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
+        desc.pNext = NULL;
+        desc.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; // used by the cinematic images
+        desc.maxSets = MAX_DRAWIMAGES;
+        desc.poolSizeCount = 1;
+        // pPoolSizes is a pointer to an array of VkDescriptorPoolSize structures,
+        // each containing a descriptor type and number of descriptors of
+        // that type to be allocated in the pool.
+        desc.pPoolSizes = &pool_size;
+
+        VK_CHECK(qvkCreateDescriptorPool(vk.device, &desc, NULL, &vk.descriptor_pool));
+    }
+
+
+    //
+    // Descriptor set layout.
+    //
+    {
+        VkDescriptorSetLayoutBinding descriptor_binding;
+        // binding is the binding number of this entry and corresponds to
+        // a resource of the same binding number in the shader stages
+        descriptor_binding.binding = 0;
+        // descriptorType is a VkDescriptorType specifying which type of
+        // resource descriptors are used for this binding.
+        descriptor_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+        // descriptorCount is the number of descriptors contained in the binding,
+        // accessed in a shader as an array
+        descriptor_binding.descriptorCount = 1;
+        // stageFlags is a bitmask of VkShaderStageFlagBits specifying
+        // which pipeline shader stages can access a resource for this binding
+        descriptor_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
+        // pImmutableSamplers affects initialization of samplers. If descriptorType
+        // specifies a VK_DESCRIPTOR_TYPE_SAMPLER or
+        // VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER type descriptor, then
+        // pImmutableSamplers can be used to initialize a set of immutable samplers.
+        // Immutable samplers are permanently bound into the set layout;
+        // later binding a sampler into an immutable sampler slot in a descriptor
+        // set is not allowed. If pImmutableSamplers is not NULL, then it is
+        // considered to be a pointer to an array of sampler handles that
+        // will be consumed by the set layout and used for the corresponding binding.
+        // If pImmutableSamplers is NULL, then the sampler slots are dynamic
+        // and sampler handles must be bound into descriptor sets using this layout.
+        // If descriptorType is not one of these descriptor types,
+        // then pImmutableSamplers is ignored.
+        descriptor_binding.pImmutableSamplers = NULL;
+
+        VkDescriptorSetLayoutCreateInfo desc;
+        desc.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+        desc.pNext = NULL;
+        desc.flags = 0;
+        desc.bindingCount = 1;
+        desc.pBindings = &descriptor_binding;
+
+        // To create descriptor set layout objects
+        VK_CHECK(qvkCreateDescriptorSetLayout(vk.device, &desc, NULL, &vk.set_layout));
+    }
+
+
+    VkPushConstantRange push_range;
+    push_range.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
+    push_range.offset = 0;
+    push_range.size = 128; // 128 bytes: 16 floats for the MVP matrix + 16 more floats
+
+    VkDescriptorSetLayout set_layouts[2] = {vk.set_layout, vk.set_layout};
+
+    VkPipelineLayoutCreateInfo desc;
+    desc.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
+    desc.pNext = NULL;
+    desc.flags = 0;
+
+    // setLayoutCount: the number of descriptor sets included in the pipeline layout.
+    // pSetLayouts: a pointer to an array of VkDescriptorSetLayout objects.
+    desc.setLayoutCount = 2;
+    desc.pSetLayouts = set_layouts;
+
+    // pushConstantRangeCount is the number of push constant ranges
+    // included in the pipeline layout.
+    //
+    // pPushConstantRanges is a pointer to an array of VkPushConstantRange
+    // structures defining a set of push constant ranges for use in
+    // a single pipeline layout.
+    //
+    // In addition to descriptor set layouts, a pipeline layout also
+    // describes how many push constants can be accessed by each stage
+    // of the pipeline.
+
+    desc.pushConstantRangeCount = 1;
+    desc.pPushConstantRanges = &push_range;
+
+    // Access to descriptor sets from a pipeline is accomplished through
+    // a pipeline layout. Zero or more descriptor set layouts and zero or
+    // more push constant ranges are combined to form a pipeline layout
+    // object which describes the complete set of resources that can be
+    // accessed by a pipeline. The pipeline layout represents a sequence
+    // of descriptor sets with each having a specific layout.
+    // This sequence of layouts is used to determine the interface between
+    // shader stages and shader resources.
+    //
+    // Each pipeline is created using a pipeline layout.
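+    //
+    // At draw time the 128-byte range declared above is filled with
+    // qvkCmdPushConstants. A minimal sketch (hypothetical call site; the
+    // real one lives elsewhere in this renderer):
+    //
+    //   float push[32];                  // MVP matrix + 16 auxiliary floats
+    //   qvkCmdPushConstants(vk.command_buffer, vk.pipeline_layout,
+    //           VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(push), push);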
+    VK_CHECK(qvkCreatePipelineLayout(vk.device, &desc, NULL, &vk.pipeline_layout));
+}
+
+
+static void vk_create_pipeline(const struct Vk_Pipeline_Def* def, VkPipeline* pPipeLine)
+{
+    struct Specialization_Data {
+        int32_t alpha_test_func;
+        int32_t color_op;
+        int32_t clipping_plane;
+    } specialization_data;
+
+    if ((def->state_bits & GLS_ATEST_BITS) == 0)
+        specialization_data.alpha_test_func = 0;
+    else if (def->state_bits & GLS_ATEST_GT_0)
+        specialization_data.alpha_test_func = 1;
+    else if (def->state_bits & GLS_ATEST_LT_80)
+        specialization_data.alpha_test_func = 2;
+    else if (def->state_bits & GLS_ATEST_GE_80)
+        specialization_data.alpha_test_func = 3;
+    else
+        ri.Error(ERR_DROP, "create_pipeline: invalid alpha test state bits\n");
+
+    specialization_data.color_op = def->shader_type == ST_MULTI_TEXURE_ADD;
+
+    specialization_data.clipping_plane = def->clipping_plane && !vk.features.shaderClipDistance;
+
+    VkSpecializationMapEntry specialization_entries[3];
+    specialization_entries[0].constantID = 0;
+    specialization_entries[0].offset = offsetof(struct Specialization_Data, alpha_test_func);
+    specialization_entries[0].size = sizeof(int32_t);
+
+    specialization_entries[1].constantID = 1;
+    specialization_entries[1].offset = offsetof(struct Specialization_Data, color_op);
+    specialization_entries[1].size = sizeof(int32_t);
+
+    specialization_entries[2].constantID = 2;
+    specialization_entries[2].offset = offsetof(struct Specialization_Data, clipping_plane);
+    specialization_entries[2].size = sizeof(int32_t);
+
+    VkSpecializationInfo specialization_info;
+    specialization_info.mapEntryCount = 3;
+    specialization_info.pMapEntries = specialization_entries;
+    specialization_info.dataSize = sizeof(struct Specialization_Data);
+    specialization_info.pData = &specialization_data;
+
+
+    // Two stages: vs and fs. The shader modules are filled in below
+    // by vk_specifyShaderModule().
+    VkPipelineShaderStageCreateInfo shaderStages[2];
+
+    shaderStages[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+    shaderStages[0].stage = VK_SHADER_STAGE_VERTEX_BIT;
+    shaderStages[0].pName = "main";
+    shaderStages[0].pNext = NULL;
+    shaderStages[0].flags = 0;
+
+    shaderStages[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+    shaderStages[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT;
+    shaderStages[1].pName = "main";
+    shaderStages[1].pNext = NULL;
+    shaderStages[1].flags = 0;
+
+    // pSpecializationInfo allows you to specify values for shader constants.
+    // You can use a single shader module whose behavior can be configured
+    // at pipeline creation by specifying different values for the constants
+    // used in it. This is more efficient than configuring the shader using
+    // variables at render time, because the compiler can do optimizations.
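+    //
+    // On the shader side these map to specialization constants declared with
+    // matching IDs; a sketch of what the GLSL declarations would look like
+    // (illustrative only; the actual shader sources live elsewhere in this
+    // renderer):
+    //
+    //   layout(constant_id = 0) const int alpha_test_func = 0;
+    //   layout(constant_id = 1) const int color_op = 0;
+    //   layout(constant_id = 2) const int clipping_plane = 0;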
+ + shaderStages[0].pSpecializationInfo = &specialization_info; + shaderStages[1].pSpecializationInfo = &specialization_info; + + vk_specifyShaderModule(def->shader_type, def->clipping_plane, &shaderStages[0].module, &shaderStages[1].module); + + // ============== Vertex Input Description ================= + // Applications specify vertex input attribute and vertex input binding + // descriptions as part of graphics pipeline creation + // A vertex binding describes at which rate to load data + // from memory throughout the vertices + + VkVertexInputBindingDescription bindings[4]; + { + // xyz array + bindings[0].binding = 0; + // The stride parameter specifies the number of bytes from one entry to the next + bindings[0].stride = sizeof(vec4_t); + // move to the next data entry after each vertex + bindings[0].inputRate = VK_VERTEX_INPUT_RATE_VERTEX; + // color array + bindings[1].binding = 1; + bindings[1].stride = sizeof(color4ub_t); + bindings[1].inputRate = VK_VERTEX_INPUT_RATE_VERTEX; + // st0 array + bindings[2].binding = 2; + bindings[2].stride = sizeof(vec2_t); + bindings[2].inputRate = VK_VERTEX_INPUT_RATE_VERTEX; + // st1 array + bindings[3].binding = 3; + bindings[3].stride = sizeof(vec2_t); + bindings[3].inputRate = VK_VERTEX_INPUT_RATE_VERTEX; + } + + // Describes how to handle vertex input + VkVertexInputAttributeDescription attribs[4]; + { + // xyz + attribs[0].location = 0; + attribs[0].binding = 0; + attribs[0].format = VK_FORMAT_R32G32B32A32_SFLOAT; + attribs[0].offset = 0; + // color + attribs[1].location = 1; + attribs[1].binding = 1; + attribs[1].format = VK_FORMAT_R8G8B8A8_UNORM; + attribs[1].offset = 0; + // st0 + attribs[2].location = 2; + attribs[2].binding = 2; + attribs[2].format = VK_FORMAT_R32G32_SFLOAT; + attribs[2].offset = 0; + // st1 + attribs[3].location = 3; + attribs[3].binding = 3; + attribs[3].format = VK_FORMAT_R32G32_SFLOAT; + attribs[3].offset = 0; + } + + VkPipelineVertexInputStateCreateInfo vertex_input_state; + vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; + vertex_input_state.pNext = NULL; + vertex_input_state.flags = 0; + vertex_input_state.vertexBindingDescriptionCount = (def->shader_type == ST_SINGLE_TEXTURE) ? 3 : 4; + vertex_input_state.pVertexBindingDescriptions = bindings; + vertex_input_state.vertexAttributeDescriptionCount = (def->shader_type == ST_SINGLE_TEXTURE) ? 3 : 4; + vertex_input_state.pVertexAttributeDescriptions = attribs; + + // + // Primitive assembly. + // + VkPipelineInputAssemblyStateCreateInfo input_assembly_state; + input_assembly_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; + input_assembly_state.pNext = NULL; + input_assembly_state.flags = 0; + input_assembly_state.topology = def->line_primitives ? VK_PRIMITIVE_TOPOLOGY_LINE_LIST : VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; + input_assembly_state.primitiveRestartEnable = VK_FALSE; + + // + // Viewport. + // + VkPipelineViewportStateCreateInfo viewport_state; + viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; + viewport_state.pNext = NULL; + viewport_state.flags = 0; + viewport_state.viewportCount = 1; + viewport_state.pViewports = NULL; // dynamic viewport state + viewport_state.scissorCount = 1; + viewport_state.pScissors = NULL; // dynamic scissor state + + // + // Rasterization. + // The rasterizer takes the geometry that is shaped by the vertices + // from the vertex shader and turns it into fragments to be colored + // by the fragment shader. 
It also performs depth testing, face culling + // and the scissor test. + VkPipelineRasterizationStateCreateInfo rasterization_state; + rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; + rasterization_state.pNext = NULL; + rasterization_state.flags = 0; + rasterization_state.depthClampEnable = VK_FALSE; + rasterization_state.rasterizerDiscardEnable = VK_FALSE; + rasterization_state.polygonMode = (def->state_bits & GLS_POLYMODE_LINE) ? VK_POLYGON_MODE_LINE : VK_POLYGON_MODE_FILL; + + switch ( def->face_culling ) + { + case CT_TWO_SIDED: + rasterization_state.cullMode = VK_CULL_MODE_NONE; + break; + case CT_FRONT_SIDED: + rasterization_state.cullMode = + (def->mirror ? VK_CULL_MODE_FRONT_BIT : VK_CULL_MODE_BACK_BIT); + break; + case CT_BACK_SIDED: + rasterization_state.cullMode = + (def->mirror ? VK_CULL_MODE_BACK_BIT : VK_CULL_MODE_FRONT_BIT); + break; + } + + + // how fragments are generated for geometry. + rasterization_state.frontFace = VK_FRONT_FACE_CLOCKWISE; // Q3 defaults to clockwise vertex order + + rasterization_state.depthBiasEnable = def->polygon_offset ? VK_TRUE : VK_FALSE; + rasterization_state.depthBiasConstantFactor = 0.0f; // dynamic depth bias state + rasterization_state.depthBiasClamp = 0.0f; // dynamic depth bias state + rasterization_state.depthBiasSlopeFactor = 0.0f; // dynamic depth bias state + rasterization_state.lineWidth = 1.0f; + + VkPipelineMultisampleStateCreateInfo multisample_state; + multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; + multisample_state.pNext = NULL; + multisample_state.flags = 0; + multisample_state.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; + multisample_state.sampleShadingEnable = VK_FALSE; + multisample_state.minSampleShading = 1.0f; + multisample_state.pSampleMask = NULL; + multisample_state.alphaToCoverageEnable = VK_FALSE; + multisample_state.alphaToOneEnable = VK_FALSE; + + // If you are using a depth and/or stencil buffer, then you also need to configure + // the depth and stencil tests. + VkPipelineDepthStencilStateCreateInfo depth_stencil_state; + depth_stencil_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO; + depth_stencil_state.pNext = NULL; + depth_stencil_state.flags = 0; + depth_stencil_state.depthTestEnable = (def->state_bits & GLS_DEPTHTEST_DISABLE) ? VK_FALSE : VK_TRUE; + depth_stencil_state.depthWriteEnable = (def->state_bits & GLS_DEPTHMASK_TRUE) ? VK_TRUE : VK_FALSE; + depth_stencil_state.depthCompareOp = (def->state_bits & GLS_DEPTHFUNC_EQUAL) ? VK_COMPARE_OP_EQUAL : VK_COMPARE_OP_LESS_OR_EQUAL; + depth_stencil_state.depthBoundsTestEnable = VK_FALSE; + depth_stencil_state.stencilTestEnable = (def->shadow_phase != SHADOWS_RENDERING_DISABLED) ? VK_TRUE : VK_FALSE; + + if (def->shadow_phase == SHADOWS_RENDERING_EDGES) + { + depth_stencil_state.front.failOp = VK_STENCIL_OP_KEEP; + depth_stencil_state.front.passOp = (def->face_culling == CT_FRONT_SIDED) ? 
VK_STENCIL_OP_INCREMENT_AND_CLAMP : VK_STENCIL_OP_DECREMENT_AND_CLAMP;
+        depth_stencil_state.front.depthFailOp = VK_STENCIL_OP_KEEP;
+        depth_stencil_state.front.compareOp = VK_COMPARE_OP_ALWAYS;
+        depth_stencil_state.front.compareMask = 255;
+        depth_stencil_state.front.writeMask = 255;
+        depth_stencil_state.front.reference = 0;
+
+        depth_stencil_state.back = depth_stencil_state.front;
+    }
+    else if (def->shadow_phase == SHADOWS_RENDERING_FULLSCREEN_QUAD)
+    {
+        depth_stencil_state.front.failOp = VK_STENCIL_OP_KEEP;
+        depth_stencil_state.front.passOp = VK_STENCIL_OP_KEEP;
+        depth_stencil_state.front.depthFailOp = VK_STENCIL_OP_KEEP;
+        depth_stencil_state.front.compareOp = VK_COMPARE_OP_NOT_EQUAL;
+        depth_stencil_state.front.compareMask = 255;
+        depth_stencil_state.front.writeMask = 255;
+        depth_stencil_state.front.reference = 0;
+
+        depth_stencil_state.back = depth_stencil_state.front;
+    }
+    else
+    {
+        memset(&depth_stencil_state.front, 0, sizeof(depth_stencil_state.front));
+        memset(&depth_stencil_state.back, 0, sizeof(depth_stencil_state.back));
+    }
+
+    depth_stencil_state.minDepthBounds = 0.0;
+    depth_stencil_state.maxDepthBounds = 0.0;
+
+
+    // After a fragment shader has returned a color, it needs to be combined
+    // with the color that is already in the framebuffer. This transformation
+    // is known as color blending and there are two ways to do it:
+    //
+    // 1) Mix the old and new value to produce a final color.
+    // 2) Combine the old and the new value using a bitwise operation.
+
+    // Contains the configuration per attached framebuffer
+
+    VkPipelineColorBlendAttachmentState attachment_blend_state = { 0 };
+    attachment_blend_state.blendEnable = (def->state_bits & (GLS_SRCBLEND_BITS | GLS_DSTBLEND_BITS)) ? VK_TRUE : VK_FALSE;
+
+    if (def->shadow_phase == SHADOWS_RENDERING_EDGES)
+        attachment_blend_state.colorWriteMask = 0;
+    else
+        attachment_blend_state.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
+
+    if (attachment_blend_state.blendEnable)
+    {
+        switch (def->state_bits & GLS_SRCBLEND_BITS)
+        {
+            case GLS_SRCBLEND_ZERO:
+                attachment_blend_state.srcColorBlendFactor = VK_BLEND_FACTOR_ZERO;
+                break;
+            case GLS_SRCBLEND_ONE:
+                attachment_blend_state.srcColorBlendFactor = VK_BLEND_FACTOR_ONE;
+                break;
+            case GLS_SRCBLEND_DST_COLOR:
+                attachment_blend_state.srcColorBlendFactor = VK_BLEND_FACTOR_DST_COLOR;
+                break;
+            case GLS_SRCBLEND_ONE_MINUS_DST_COLOR:
+                attachment_blend_state.srcColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
+                break;
+            case GLS_SRCBLEND_SRC_ALPHA:
+                attachment_blend_state.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA;
+                break;
+            case GLS_SRCBLEND_ONE_MINUS_SRC_ALPHA:
+                attachment_blend_state.srcColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
+                break;
+            case GLS_SRCBLEND_DST_ALPHA:
+                attachment_blend_state.srcColorBlendFactor = VK_BLEND_FACTOR_DST_ALPHA;
+                break;
+            case GLS_SRCBLEND_ONE_MINUS_DST_ALPHA:
+                attachment_blend_state.srcColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
+                break;
+            case GLS_SRCBLEND_ALPHA_SATURATE:
+                attachment_blend_state.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA_SATURATE;
+                break;
+            default:
+                ri.Error( ERR_DROP, "create_pipeline: invalid src blend state bits\n" );
+                break;
+        }
+        switch (def->state_bits & GLS_DSTBLEND_BITS)
+        {
+            case GLS_DSTBLEND_ZERO:
+                attachment_blend_state.dstColorBlendFactor = VK_BLEND_FACTOR_ZERO;
+                break;
+            case GLS_DSTBLEND_ONE:
+                attachment_blend_state.dstColorBlendFactor = VK_BLEND_FACTOR_ONE;
+                break;
+            case
GLS_DSTBLEND_SRC_COLOR: + attachment_blend_state.dstColorBlendFactor = VK_BLEND_FACTOR_SRC_COLOR; + break; + case GLS_DSTBLEND_ONE_MINUS_SRC_COLOR: + attachment_blend_state.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR; + break; + case GLS_DSTBLEND_SRC_ALPHA: + attachment_blend_state.dstColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA; + break; + case GLS_DSTBLEND_ONE_MINUS_SRC_ALPHA: + attachment_blend_state.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; + break; + case GLS_DSTBLEND_DST_ALPHA: + attachment_blend_state.dstColorBlendFactor = VK_BLEND_FACTOR_DST_ALPHA; + break; + case GLS_DSTBLEND_ONE_MINUS_DST_ALPHA: + attachment_blend_state.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA; + break; + default: + ri.Error( ERR_DROP, "create_pipeline: invalid dst blend state bits\n" ); + break; + } + + attachment_blend_state.srcAlphaBlendFactor = attachment_blend_state.srcColorBlendFactor; + attachment_blend_state.dstAlphaBlendFactor = attachment_blend_state.dstColorBlendFactor; + attachment_blend_state.colorBlendOp = VK_BLEND_OP_ADD; + attachment_blend_state.alphaBlendOp = VK_BLEND_OP_ADD; + } + + // Contains the global color blending settings + VkPipelineColorBlendStateCreateInfo blend_state; + blend_state.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; + blend_state.pNext = NULL; + blend_state.flags = 0; + blend_state.logicOpEnable = VK_FALSE; + blend_state.logicOp = VK_LOGIC_OP_COPY; + blend_state.attachmentCount = 1; + blend_state.pAttachments = &attachment_blend_state; + blend_state.blendConstants[0] = 0.0f; + blend_state.blendConstants[1] = 0.0f; + blend_state.blendConstants[2] = 0.0f; + blend_state.blendConstants[3] = 0.0f; + + + // A limited amount of the state that we've specified in the previous + // structs can actually be changed without recreating the pipeline. + // Examples are the size of the viewport, line width and blend constants + // If we want to do that, we have to fill in a VkPipelineDynamicStateCreateInfo + // structure like this. + VkPipelineDynamicStateCreateInfo dynamic_state; + dynamic_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; + dynamic_state.pNext = NULL; + dynamic_state.flags = 0; + dynamic_state.dynamicStateCount = 3; + VkDynamicState dynamic_state_array[3] = { VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR, VK_DYNAMIC_STATE_DEPTH_BIAS }; + dynamic_state.pDynamicStates = dynamic_state_array; + + + VkGraphicsPipelineCreateInfo create_info; + create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; + // pNext is NULL or a pointer to an extension-specific structure. + create_info.pNext = NULL; + // flags is a bitmask of VkPipelineCreateFlagBits + // specifying how the pipeline will be generated. + create_info.flags = 0; + // stageCount is the number of entries in the pStages array. + create_info.stageCount = 2; + // pStages is an array of size stageCount structures of type + // VkPipelineShaderStageCreateInfo describing the set of the + // shader stages to be included in the graphics pipeline. + create_info.pStages = shaderStages; + // pVertexInputState is a pointer to an instance of the + // VkPipelineVertexInputStateCreateInfo structure. + create_info.pVertexInputState = &vertex_input_state; + // pInputAssemblyState is a pointer to an instance of the + // VkPipelineInputAssemblyStateCreateInfo structure which + // determines input assembly behavior, as described in Drawing Commands. 
+    create_info.pInputAssemblyState = &input_assembly_state;
+    // pTessellationState is a pointer to an instance of the
+    // VkPipelineTessellationStateCreateInfo structure, and is ignored
+    // if the pipeline does not include a tessellation control shader
+    // stage and tessellation evaluation shader stage.
+    create_info.pTessellationState = NULL;
+    // pViewportState is a pointer to an instance of the
+    // VkPipelineViewportStateCreateInfo structure, and
+    // is ignored if the pipeline has rasterization disabled.
+    create_info.pViewportState = &viewport_state;
+    // pRasterizationState is a pointer to an instance of the
+    // VkPipelineRasterizationStateCreateInfo structure.
+    create_info.pRasterizationState = &rasterization_state;
+    // pMultisampleState is a pointer to an instance of the
+    // VkPipelineMultisampleStateCreateInfo structure, and is ignored
+    // if the pipeline has rasterization disabled.
+    create_info.pMultisampleState = &multisample_state;
+
+    // pDepthStencilState is a pointer to an instance of the
+    // VkPipelineDepthStencilStateCreateInfo structure, and is ignored
+    // if the pipeline has rasterization disabled or
+    // if the subpass of the render pass the pipeline is created
+    // against does not use a depth/stencil attachment.
+    create_info.pDepthStencilState = &depth_stencil_state;
+
+    // pColorBlendState is a pointer to an instance of the
+    // VkPipelineColorBlendStateCreateInfo structure, and is ignored
+    // if the pipeline has rasterization disabled or if the subpass of
+    // the render pass the pipeline is created against does not use
+    // any color attachments.
+    create_info.pColorBlendState = &blend_state;
+
+    // pDynamicState is a pointer to a VkPipelineDynamicStateCreateInfo and
+    // is used to indicate which properties of the pipeline state object
+    // are dynamic and can be changed independently of the pipeline state.
+    // This can be NULL, which means no state in the pipeline is considered dynamic.
+    create_info.pDynamicState = &dynamic_state;
+    // layout is the description of binding locations used
+    // by both the pipeline and descriptor sets used with the pipeline.
+    create_info.layout = vk.pipeline_layout;
+    // renderPass is a handle to a render pass object describing the environment
+    // in which the pipeline will be used; the pipeline must only be used with
+    // an instance of any render pass compatible with the one provided.
+    // See Render Pass Compatibility for more information.
+    create_info.renderPass = vk.render_pass;
+
+    // A pipeline derivative is a child pipeline created from a parent pipeline,
+    // where the child and parent are expected to have much commonality.
+    // The goal of derivative pipelines is that they be cheaper to create
+    // using the parent as a starting point, and that it be more efficient
+    // on either host or device to switch/bind between children of the same
+    // parent.
+    // subpass is the index of the subpass in the render pass
+    // where this pipeline will be used.
+    create_info.subpass = 0;
+    // basePipelineHandle is a pipeline to derive from.
+    create_info.basePipelineHandle = VK_NULL_HANDLE;
+    create_info.basePipelineIndex = -1;
+
+    // Graphics pipelines consist of multiple shader stages, multiple
+    // fixed-function pipeline stages, and a pipeline layout. In the call
+    // below, VK_NULL_HANDLE indicates that pipeline caching is disabled
+    // (TODO: provide the handle of a valid pipeline cache object), and
+    // 1 is the length of the pCreateInfos and pPipelines arrays.
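+    // Illustrative sketch (not part of this patch): if the TODO above is taken
+    // up, a pipeline cache could be created once at renderer startup and passed
+    // below in place of VK_NULL_HANDLE. The names vk.pipeline_cache and
+    // vk_createPipelineCache are hypothetical, and this assumes a
+    // qvkCreatePipelineCache function pointer is loaded like the other qvk* ones.
+    //
+    //   static void vk_createPipelineCache(void)
+    //   {
+    //       VkPipelineCacheCreateInfo ci;
+    //       ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
+    //       ci.pNext = NULL;
+    //       ci.flags = 0;
+    //       ci.initialDataSize = 0;  // optionally, a blob saved from a previous run
+    //       ci.pInitialData = NULL;
+    //       VK_CHECK(qvkCreatePipelineCache(vk.device, &ci, NULL, &vk.pipeline_cache));
+    //   }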
+    VK_CHECK(qvkCreateGraphicsPipelines(vk.device, VK_NULL_HANDLE, 1, &create_info, NULL, pPipeLine));
+}
+
+
+
+static VkPipeline vk_find_pipeline(struct Vk_Pipeline_Def* def)
+{
+    uint32_t i = 0;
+    for (i = 0; i < s_numPipelines; i++)
+    {
+        if (s_pipeline_defs[i].shader_type == def->shader_type &&
+            s_pipeline_defs[i].state_bits == def->state_bits &&
+            s_pipeline_defs[i].face_culling == def->face_culling &&
+            s_pipeline_defs[i].polygon_offset == def->polygon_offset &&
+            s_pipeline_defs[i].clipping_plane == def->clipping_plane &&
+            s_pipeline_defs[i].mirror == def->mirror
+            // && s_pipeline_defs[i].line_primitives == def->line_primitives
+            // && s_pipeline_defs[i].shadow_phase == def->shadow_phase
+            )
+        {
+            return s_pipeline_defs[i].pipeline;
+        }
+    }
+
+    // not cached yet: check capacity before storing the new definition
+    if (s_numPipelines >= MAX_VK_PIPELINES)
+    {
+        ri.Error(ERR_DROP, "vk_find_pipeline: MAX_VK_PIPELINES hit\n");
+    }
+
+    vk_create_pipeline(def, &def->pipeline);
+
+    s_pipeline_defs[s_numPipelines] = *def;
+    ++s_numPipelines;
+
+    return def->pipeline;
+}
+
+
+
+void create_pipelines_for_each_stage(shaderStage_t *pStage, shader_t* pShader)
+{
+    struct Vk_Pipeline_Def def;
+
+    def.line_primitives = 0;
+    def.shadow_phase = 0;
+    def.face_culling = pShader->cullType;
+    def.polygon_offset = pShader->polygonOffset;
+    def.state_bits = pStage->stateBits;
+
+    if (pStage->bundle[1].image[0] == NULL)
+        def.shader_type = ST_SINGLE_TEXTURE;
+    else if (pShader->multitextureEnv == GL_MODULATE)
+        def.shader_type = ST_MULTI_TEXURE_MUL;
+    else if (pShader->multitextureEnv == GL_ADD)
+        def.shader_type = ST_MULTI_TEXURE_ADD;
+    else
+        ri.Error(ERR_FATAL, "Vulkan: could not create pipelines for q3 shader '%s'\n", pShader->name);
+
+    def.clipping_plane = VK_FALSE;
+    def.mirror = VK_FALSE;
+    pStage->vk_pipeline = vk_find_pipeline(&def);
+
+    def.clipping_plane = VK_TRUE;
+    def.mirror = VK_FALSE;
+    pStage->vk_portal_pipeline = vk_find_pipeline(&def);
+
+    def.clipping_plane = VK_TRUE;
+    def.mirror = VK_TRUE;
+    pStage->vk_mirror_pipeline = vk_find_pipeline(&def);
+}
+
+
+
+void create_standard_pipelines(void)
+{
+    ri.Printf(PRINT_ALL, " Create skybox pipeline \n");
+    {
+        struct Vk_Pipeline_Def def;
+        memset(&def, 0, sizeof(def));
+
+        def.shader_type = ST_SINGLE_TEXTURE;
+        def.state_bits = 0;
+        def.face_culling = CT_FRONT_SIDED;
+        def.polygon_offset = VK_FALSE;
+        def.clipping_plane = VK_FALSE;
+        def.mirror = VK_FALSE;
+
+        vk_create_pipeline(&def, &g_stdPipelines.skybox_pipeline);
+    }
+
+    ri.Printf(PRINT_ALL, " Create Q3 stencil shadows pipeline \n");
+    {
+        {
+            struct Vk_Pipeline_Def def;
+            memset(&def, 0, sizeof(def));
+
+            def.polygon_offset = VK_FALSE;
+            def.state_bits = 0;
+            def.shader_type = ST_SINGLE_TEXTURE;
+            def.clipping_plane = VK_FALSE;
+            def.shadow_phase = SHADOWS_RENDERING_EDGES;
+
+            cullType_t cull_types[2] = {CT_FRONT_SIDED, CT_BACK_SIDED};
+            VkBool32 mirror_flags[2] = {VK_TRUE, VK_FALSE};
+
+            int i = 0;
+            int j = 0;
+
+            for (i = 0; i < 2; i++)
+            {
+                def.face_culling = cull_types[i];
+                for (j = 0; j < 2; j++)
+                {
+                    def.mirror = mirror_flags[j];
+
+                    vk_create_pipeline(&def, &g_stdPipelines.shadow_volume_pipelines[i][j]);
+                }
+            }
+        }
+
+        {
+            struct Vk_Pipeline_Def def;
+            memset(&def, 0, sizeof(def));
+
+            def.face_culling = CT_FRONT_SIDED;
+            def.polygon_offset = VK_FALSE;
+            def.state_bits = GLS_DEPTHMASK_TRUE | GLS_SRCBLEND_DST_COLOR | GLS_DSTBLEND_ZERO;
+            def.shader_type = ST_SINGLE_TEXTURE;
+            def.clipping_plane = VK_FALSE;
+            def.mirror = VK_FALSE;
+            def.shadow_phase =
SHADOWS_RENDERING_FULLSCREEN_QUAD; + + vk_create_pipeline(&def, &g_stdPipelines.shadow_finish_pipeline); + } + } + + + ri.Printf(PRINT_ALL, " Create fog and dlights pipeline \n"); + { + struct Vk_Pipeline_Def def; + memset(&def, 0, sizeof(def)); + + + def.shader_type = ST_SINGLE_TEXTURE; + def.clipping_plane = VK_FALSE; + def.mirror = VK_FALSE; + + unsigned int fog_state_bits[2] = { + GLS_SRCBLEND_SRC_ALPHA | GLS_DSTBLEND_ONE_MINUS_SRC_ALPHA | GLS_DEPTHFUNC_EQUAL, + GLS_SRCBLEND_SRC_ALPHA | GLS_DSTBLEND_ONE_MINUS_SRC_ALPHA + }; + unsigned int dlight_state_bits[2] = { + GLS_SRCBLEND_DST_COLOR | GLS_DSTBLEND_ONE | GLS_DEPTHFUNC_EQUAL, + GLS_SRCBLEND_ONE | GLS_DSTBLEND_ONE | GLS_DEPTHFUNC_EQUAL + }; + + VkBool32 polygon_offset[2] = {VK_FALSE, VK_TRUE}; + + int i = 0, j = 0, k = 0; + + for (i = 0; i < 2; i++) + { + unsigned fog_state = fog_state_bits[i]; + unsigned dlight_state = dlight_state_bits[i]; + + for (j = 0; j < 3; j++) + { + def.face_culling = j; // cullType_t value + + for ( k = 0; k < 2; k++) + { + def.polygon_offset = polygon_offset[k]; + + def.state_bits = fog_state; + vk_create_pipeline(&def, &g_stdPipelines.fog_pipelines[i][j][k]); + + def.state_bits = dlight_state; + vk_create_pipeline(&def, &g_stdPipelines.dlight_pipelines[i][j][k]); + } + } + } + } + + + // debug pipelines + ri.Printf(PRINT_ALL, " Create tris debug pipeline \n"); + { + struct Vk_Pipeline_Def def; + memset(&def, 0, sizeof(def)); + + def.state_bits = GLS_POLYMODE_LINE | GLS_DEPTHMASK_TRUE; + vk_create_pipeline(&def, &g_stdPipelines.tris_debug_pipeline); + } + + + ri.Printf(PRINT_ALL, " Create tris mirror debug pipeline \n"); + { + struct Vk_Pipeline_Def def; + memset(&def, 0, sizeof(def)); + + def.state_bits = GLS_POLYMODE_LINE | GLS_DEPTHMASK_TRUE; + def.face_culling = CT_BACK_SIDED; + vk_create_pipeline(&def, &g_stdPipelines.tris_mirror_debug_pipeline); + } + + ri.Printf(PRINT_ALL, " Create normals debug pipeline \n"); + { + struct Vk_Pipeline_Def def; + memset(&def, 0, sizeof(def)); + + def.state_bits = GLS_DEPTHMASK_TRUE; + def.line_primitives = VK_TRUE; + vk_create_pipeline(&def, &g_stdPipelines.normals_debug_pipeline); + } + + + ri.Printf(PRINT_ALL, " Create surface debug pipeline \n"); + { + struct Vk_Pipeline_Def def; + memset(&def, 0, sizeof(def)); + + def.state_bits = GLS_DEPTHMASK_TRUE | GLS_SRCBLEND_ONE | GLS_DSTBLEND_ONE; + vk_create_pipeline(&def, &g_stdPipelines.surface_debug_pipeline_solid); + } + + ri.Printf(PRINT_ALL, " Create surface debug outline pipeline \n"); + { + struct Vk_Pipeline_Def def; + memset(&def, 0, sizeof(def)); + + def.state_bits = GLS_POLYMODE_LINE | GLS_DEPTHMASK_TRUE | GLS_SRCBLEND_ONE | GLS_DSTBLEND_ONE; + def.line_primitives = VK_TRUE; + vk_create_pipeline(&def, &g_stdPipelines.surface_debug_pipeline_outline); + } + + ri.Printf(PRINT_ALL, " Create images debug pipeline \n"); + { + struct Vk_Pipeline_Def def; + memset(&def, 0, sizeof(def)); + + def.state_bits = GLS_DEPTHTEST_DISABLE | GLS_SRCBLEND_SRC_ALPHA | GLS_DSTBLEND_ONE_MINUS_SRC_ALPHA; + vk_create_pipeline(&def, &g_stdPipelines.images_debug_pipeline); + } +} + + +void vk_destroyShaderStagePipeline(void) +{ + // shader stage + qvkDeviceWaitIdle(vk.device); + uint32_t i; + for (i = 0; i < s_numPipelines; i++) + { + qvkDestroyPipeline(vk.device, s_pipeline_defs[i].pipeline, NULL); + memset(&s_pipeline_defs[i], 0, sizeof(struct Vk_Pipeline_Def)); + } + s_numPipelines = 0; +} + + +void vk_destroyGlobalStagePipeline(void) +{ + int i, j, k; + + qvkDestroyDescriptorSetLayout(vk.device, vk.set_layout, NULL); + 
qvkDestroyPipelineLayout(vk.device, vk.pipeline_layout, NULL);
+    // Descriptor sets do not need to be cleaned up explicitly,
+    // because they are automatically freed when the descriptor pool
+    // is destroyed.
+    qvkDestroyDescriptorPool(vk.device, vk.descriptor_pool, NULL);
+
+    qvkDestroyPipeline(vk.device, g_stdPipelines.skybox_pipeline, NULL);
+    for (i = 0; i < 2; i++)
+        for (j = 0; j < 2; j++)
+        {
+            qvkDestroyPipeline(vk.device, g_stdPipelines.shadow_volume_pipelines[i][j], NULL);
+        }
+
+    qvkDestroyPipeline(vk.device, g_stdPipelines.shadow_finish_pipeline, NULL);
+
+    for (i = 0; i < 2; i++)
+        for (j = 0; j < 3; j++)
+            for (k = 0; k < 2; k++)
+            {
+                qvkDestroyPipeline(vk.device, g_stdPipelines.fog_pipelines[i][j][k], NULL);
+                qvkDestroyPipeline(vk.device, g_stdPipelines.dlight_pipelines[i][j][k], NULL);
+            }
+
+    qvkDestroyPipeline(vk.device, g_stdPipelines.tris_debug_pipeline, NULL);
+    qvkDestroyPipeline(vk.device, g_stdPipelines.tris_mirror_debug_pipeline, NULL);
+    qvkDestroyPipeline(vk.device, g_stdPipelines.normals_debug_pipeline, NULL);
+    qvkDestroyPipeline(vk.device, g_stdPipelines.surface_debug_pipeline_solid, NULL);
+    qvkDestroyPipeline(vk.device, g_stdPipelines.surface_debug_pipeline_outline, NULL);
+    qvkDestroyPipeline(vk.device, g_stdPipelines.images_debug_pipeline, NULL);
+}
diff --git a/code/renderer_vulkan/vk_pipelines.h b/code/renderer_vulkan/vk_pipelines.h
new file mode 100644
index 0000000000..7b3abd4ea5
--- /dev/null
+++ b/code/renderer_vulkan/vk_pipelines.h
@@ -0,0 +1,48 @@
+#ifndef VK_PIPELINES_H_
+#define VK_PIPELINES_H_
+
+#include "tr_local.h"
+
+void create_standard_pipelines(void);
+void create_pipelines_for_each_stage(shaderStage_t *pStage, shader_t* pShader);
+void vk_createPipelineLayout(void);
+
+void vk_destroyShaderStagePipeline(void);
+void vk_destroyGlobalStagePipeline(void);
+
+void R_PipelineList_f(void);
+
+struct GlobalPipelineManager {
+    //
+    // Standard pipelines.
+    //
+    VkPipeline skybox_pipeline;
+
+    // dim 0: 0 - front side, 1 - back side
+    // dim 1: 0 - normal view, 1 - mirror view
+    VkPipeline shadow_volume_pipelines[2][2];
+    VkPipeline shadow_finish_pipeline;
+
+    // dim 0 is based on fogPass_t: 0 - corresponds to FP_EQUAL, 1 - corresponds to FP_LE.
+    // dim 1 is directly a cullType_t enum value.
+    // dim 2 is a polygon offset value (0 - off, 1 - on).
+    VkPipeline fog_pipelines[2][3][2];
+
+    // dim 0 is based on the dlight additive flag: 0 - not additive, 1 - additive.
+    // dim 1 is directly a cullType_t enum value.
+    // dim 2 is a polygon offset value (0 - off, 1 - on).
+    VkPipeline dlight_pipelines[2][3][2];
+
+    // debug visualization pipelines
+    VkPipeline tris_debug_pipeline;
+    VkPipeline tris_mirror_debug_pipeline;
+    VkPipeline normals_debug_pipeline;
+    VkPipeline surface_debug_pipeline_solid;
+    VkPipeline surface_debug_pipeline_outline;
+    VkPipeline images_debug_pipeline;
+};
+
+extern struct GlobalPipelineManager g_stdPipelines;
+
+
+#endif
diff --git a/code/renderer_vulkan/vk_screenshot.c b/code/renderer_vulkan/vk_screenshot.c
new file mode 100644
index 0000000000..e3f4ea6578
--- /dev/null
+++ b/code/renderer_vulkan/vk_screenshot.c
@@ -0,0 +1,663 @@
+#include "tr_globals.h"
+#include "vk_instance.h"
+#include "vk_image.h"
+#include "vk_cmd.h"
+#include "vk_screenshot.h"
+
+#include "R_ImageProcess.h"
+#include "R_ImageJPG.h"
+#include "ref_import.h"
+#include "glConfig.h"
+/*
+==============================================================================
+
+                              SCREEN SHOTS
+
+NOTE TTimo
+some thoughts about the screenshots system:
+screenshots get written in fs_homepath + fs_gamedir
+vanilla q3 .. baseq3/screenshots/ *.tga
+team arena .. missionpack/screenshots/ *.tga
+
+two commands: "screenshot" and "screenshotJPEG"
+we use statics to store a count and start writing at the first available
+screenshots/shot????.tga (.jpg), found with FS_FileExists / FS_FOpenFileWrite calls
+FIXME: the statics don't get a reinit between fs_game changes
+
+
+Images created with tiling equal to VK_IMAGE_TILING_LINEAR have further restrictions on their
+limits and capabilities compared to images created with tiling equal to VK_IMAGE_TILING_OPTIMAL.
+Creation of images with tiling VK_IMAGE_TILING_LINEAR may not be supported unless other parameters
+meet all of the constraints:
+* imageType is VK_IMAGE_TYPE_2D
+* format is not a depth/stencil format
+* mipLevels is 1
+* arrayLayers is 1
+* samples is VK_SAMPLE_COUNT_1_BIT
+* usage only includes VK_IMAGE_USAGE_TRANSFER_SRC_BIT and/or VK_IMAGE_USAGE_TRANSFER_DST_BIT
+Implementations may support additional limits and capabilities beyond those listed above.
+
+==============================================================================
+*/
+
+
+static void imgFlipY(unsigned char * pBuf, const uint32_t w, const uint32_t h)
+{
+    const uint32_t a_row = w * 4;
+    const uint32_t nLines = h / 2;
+
+    unsigned char* pTmp = (unsigned char*) malloc( a_row );
+    unsigned char* pSrc = pBuf;
+    unsigned char* pDst = pBuf + w * (h - 1) * 4;
+
+    uint32_t j = 0;
+    for (j = 0; j < nLines; j++)
+    {
+        memcpy(pTmp, pSrc, a_row );
+        memcpy(pSrc, pDst, a_row );
+        memcpy(pDst, pTmp, a_row );
+
+        pSrc += a_row;
+        pDst -= a_row;
+    }
+
+    free(pTmp);
+}
+
+
+// Just read the pixels back from GPU memory; swizzling is not handled here.
+static void vk_read_pixels(unsigned char* pBuf, uint32_t W, uint32_t H)
+{
+    qvkDeviceWaitIdle(vk.device);
+
+    // Create a buffer in host-visible memory to serve as a destination for the framebuffer pixels.
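+    // Illustrative sketch: find_memory_type, used below when allocating the
+    // readback buffer, is this renderer's own helper defined elsewhere. It is
+    // expected to scan VkPhysicalDeviceMemoryProperties for a memory type whose
+    // index bit is set in the given mask and whose propertyFlags contain the
+    // requested flags, roughly:
+    //
+    //   for (i = 0; i < props.memoryTypeCount; ++i)
+    //       if ((memory_type_bits & (1u << i)) &&
+    //           (props.memoryTypes[i].propertyFlags & required) == required)
+    //           return i;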
+ + const uint32_t sizeFB = W * H * 4; + + + VkBuffer buffer; + VkDeviceMemory memory; + { + VkBufferCreateInfo buffer_create_info; + memset(&buffer_create_info, 0, sizeof(buffer_create_info)); + buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; + buffer_create_info.size = sizeFB; + buffer_create_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT; + VK_CHECK( qvkCreateBuffer(vk.device, &buffer_create_info, NULL, &buffer) ); + + VkMemoryRequirements memory_requirements; + qvkGetBufferMemoryRequirements(vk.device, buffer, &memory_requirements); + + VkMemoryAllocateInfo memory_allocate_info; + memset(&memory_allocate_info, 0, sizeof(memory_allocate_info)); + memory_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; + memory_allocate_info.allocationSize = memory_requirements.size; + // + memory_allocate_info.memoryTypeIndex = find_memory_type(memory_requirements.memoryTypeBits, + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); + + VK_CHECK( qvkAllocateMemory(vk.device, &memory_allocate_info, NULL, &memory) ); + VK_CHECK( qvkBindBufferMemory(vk.device, buffer, memory, 0) ); + } + + + ////////////////////////////////////////////////////////// + + VkBufferImageCopy image_copy; + { + image_copy.bufferOffset = 0; + image_copy.bufferRowLength = W; + image_copy.bufferImageHeight = H; + + image_copy.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; + image_copy.imageSubresource.layerCount = 1; + image_copy.imageSubresource.mipLevel = 0; + image_copy.imageSubresource.baseArrayLayer = 0; + image_copy.imageOffset.x = 0; + image_copy.imageOffset.y = 0; + image_copy.imageOffset.z = 0; + image_copy.imageExtent.width = W; + image_copy.imageExtent.height = H; + image_copy.imageExtent.depth = 1; + } + + // Memory barriers are used to explicitly control access to buffer and image subresource ranges. + // Memory barriers are used to transfer ownership between queue families, change image layouts, + // and define availability and visibility operations. They explicitly define the access types + // and buffer and image subresource ranges that are included in the access scopes of a memory + // dependency that is created by a synchronization command that includes them. + // + // Image memory barriers only apply to memory accesses involving a specific image subresource + // range. That is, a memory dependency formed from an image memory barrier is scoped to access + // via the specified image subresource range. Image memory barriers can also be used to define + // image layout transitions or a queue family ownership transfer for the specified image + // subresource range. 
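+    // The barrier below transitions the just-presented swapchain image from
+    // VK_IMAGE_LAYOUT_PRESENT_SRC_KHR to VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL so
+    // that vkCmdCopyImageToBuffer can read from it. Note that nothing in this
+    // function transitions the image back to PRESENT_SRC_KHR afterwards; this
+    // appears to rely on the next frame's render pass to put the image back
+    // into a presentable layout, which is worth verifying.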
+ + VkImageMemoryBarrier image_barrier; + { + image_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; + image_barrier.pNext = NULL; + image_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT; + image_barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT; + image_barrier.oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; + image_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; + image_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + image_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; + image_barrier.image = vk.swapchain_images_array[vk.idx_swapchain_image]; + image_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; + image_barrier.subresourceRange.baseMipLevel = 0; + image_barrier.subresourceRange.levelCount = 1; + image_barrier.subresourceRange.baseArrayLayer = 0; + image_barrier.subresourceRange.layerCount = 1; + } + + + // read pixel with command buffer + VkCommandBuffer cmdBuf; + + VkCommandBufferAllocateInfo alloc_info; + alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; + alloc_info.pNext = NULL; + alloc_info.commandPool = vk.command_pool; + alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; + alloc_info.commandBufferCount = 1; + VK_CHECK(qvkAllocateCommandBuffers(vk.device, &alloc_info, &cmdBuf)); + + VkCommandBufferBeginInfo begin_info; + begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; + begin_info.pNext = NULL; + begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT; + begin_info.pInheritanceInfo = NULL; + VK_CHECK(qvkBeginCommandBuffer(cmdBuf, &begin_info)); + + qvkCmdPipelineBarrier(cmdBuf, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, NULL, 0, NULL, 1, &image_barrier); + qvkCmdCopyImageToBuffer(cmdBuf, vk.swapchain_images_array[vk.idx_swapchain_image], VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer, 1, &image_copy); + VK_CHECK(qvkEndCommandBuffer(cmdBuf)); + + VkSubmitInfo submit_info; + submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; + submit_info.pNext = NULL; + submit_info.waitSemaphoreCount = 0; + submit_info.pWaitSemaphores = NULL; + submit_info.pWaitDstStageMask = NULL; + submit_info.commandBufferCount = 1; + submit_info.pCommandBuffers = &cmdBuf; + submit_info.signalSemaphoreCount = 0; + submit_info.pSignalSemaphores = NULL; + VK_CHECK(qvkQueueSubmit(vk.queue, 1, &submit_info, VK_NULL_HANDLE)); + + VK_CHECK(qvkQueueWaitIdle(vk.queue)); + + qvkFreeCommandBuffers(vk.device, vk.command_pool, 1, &cmdBuf); + + + // Memory objects created with the memory property VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT + // are considered mappable. Memory objects must be mappable in order to be successfully + // mapped on the host. 
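+    // Because the buffer memory was allocated with HOST_COHERENT, no explicit
+    // vkInvalidateMappedMemoryRanges call is needed before reading through the
+    // mapped pointer. With non-coherent memory the read below would have to be
+    // preceded by something like this (sketch; assumes a
+    // qvkInvalidateMappedMemoryRanges pointer is loaded like the other qvk* ones):
+    //
+    //   VkMappedMemoryRange range = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
+    //                                 NULL, memory, 0, VK_WHOLE_SIZE };
+    //   VK_CHECK(qvkInvalidateMappedMemoryRanges(vk.device, 1, &range));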
+    //
+    // To retrieve a host virtual address pointer to a region of a mappable
+    // memory object:
+    unsigned char* data;
+    VK_CHECK(qvkMapMemory(vk.device, memory, 0, VK_WHOLE_SIZE, 0, (void**)&data));
+    memcpy(pBuf, data, sizeFB);
+    qvkUnmapMemory(vk.device, memory);
+    qvkFreeMemory(vk.device, memory, NULL);
+    qvkDestroyBuffer(vk.device, buffer, NULL);
+}
+
+extern void RE_SaveJPG(char * filename, int quality, int image_width, int image_height, unsigned char *image_buffer, int padding);
+
+
+
+void RB_TakeScreenshot( int width, int height, char *fileName, VkBool32 isJpeg)
+{
+    ri.Printf(PRINT_ALL, "read %dx%d pixels from GPU\n", width, height);
+    const uint32_t cnPixels = width * height;
+
+    if(isJpeg)
+    {
+        unsigned char* const pImg = (unsigned char*) malloc ( cnPixels * 4);
+
+        vk_read_pixels(pImg, width, height);
+
+        // The image read back from the swapchain is flipped about Y, so flip it back.
+        imgFlipY(pImg, width, height);
+
+        // Remove the alpha channel and swap bgr <-> rgb, in place.
+        {
+            unsigned char* pSrc = pImg;
+            unsigned char* pDst = pImg;
+
+            uint32_t i;
+            for (i = 0; i < cnPixels; i++)
+            {
+                pSrc[0] = pDst[2];
+                pSrc[1] = pDst[1];
+                pSrc[2] = pDst[0];
+                pSrc += 3;
+                pDst += 4;
+            }
+        }
+
+        RE_SaveJPG(fileName, 90, width, height, pImg, 0);
+
+        free( pImg );
+    }
+    else
+    {
+        const uint32_t imgSize = 18 + cnPixels * 3;
+
+        unsigned char* const pBuffer = (unsigned char*) malloc ( imgSize + cnPixels * 4 );
+        unsigned char* const buffer_ptr = pBuffer + 18;
+        unsigned char* const pImg = pBuffer + imgSize;
+
+        vk_read_pixels(pImg, width, height);
+
+        // The image read back from the swapchain is flipped about Y, so flip it back.
+        imgFlipY(pImg, width, height);
+
+        memset (pBuffer, 0, 18);
+        pBuffer[2] = 2;   // uncompressed type
+        pBuffer[12] = width & 255;
+        pBuffer[13] = width >> 8;
+        pBuffer[14] = height & 255;
+        pBuffer[15] = height >> 8;
+        pBuffer[16] = 24; // pixel size
+
+        // VkBool32 need_swizzle = (
+        //     vk.surface_format.format == VK_FORMAT_B8G8R8A8_SRGB ||
+        //     vk.surface_format.format == VK_FORMAT_B8G8R8A8_UNORM ||
+        //     vk.surface_format.format == VK_FORMAT_B8G8R8A8_SNORM );
+
+        uint32_t i;
+        if (0) // swizzling path, currently disabled (see need_swizzle above)
+        {
+            for (i = 0; i < cnPixels; i++)
+            {
+                buffer_ptr[i*3]   = *(pImg + i*4 + 2);
+                buffer_ptr[i*3+1] = *(pImg + i*4 + 1);
+                buffer_ptr[i*3+2] = *(pImg + i*4 );
+            }
+        }
+        else
+        {
+            for (i = 0; i < cnPixels; i++)
+            {
+                buffer_ptr[i*3]   = *(pImg + i*4 );
+                buffer_ptr[i*3+1] = *(pImg + i*4 + 1);
+                buffer_ptr[i*3+2] = *(pImg + i*4 + 2);
+            }
+        }
+        ri.FS_WriteFile( fileName, pBuffer, imgSize);
+
+        free( pBuffer );
+    }
+}
+
+
+static void R_TakeScreenshot( int x, int y, int width, int height, char *name, qboolean jpeg )
+{
+    static char fileName[MAX_OSPATH] = {0}; // bad things if two screenshots per frame?
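+    // Why fileName is static: the screenshot command is only consumed later,
+    // when the backend executes RC_SCREENSHOT, so cmd->fileName must outlive
+    // this function. The worry above is real: a second screenshot issued in
+    // the same frame would overwrite the buffer before the first command runs.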
+    screenshotCommand_t *cmd = (screenshotCommand_t*) R_GetCommandBuffer(sizeof(*cmd));
+    if ( !cmd ) {
+        return;
+    }
+    cmd->commandId = RC_SCREENSHOT;
+
+    cmd->x = x;
+    cmd->y = y;
+    cmd->width = width;
+    cmd->height = height;
+
+    Q_strncpyz( fileName, name, sizeof(fileName) );
+
+    cmd->fileName = fileName;
+    cmd->jpeg = jpeg;
+}
+
+
+
+/*
+====================
+R_LevelShot
+
+levelshots are specialized 128*128 thumbnails for the
+menu system, sampled down from full screen distorted images
+====================
+*/
+static void R_LevelShot( int W, int H )
+{
+    char checkname[MAX_OSPATH];
+    unsigned char* buffer;
+    unsigned char* source;
+    unsigned char* src;
+    unsigned char* dst;
+    int x, y;
+    int r, g, b;
+    float xScale, yScale;
+    int xx, yy;
+    int i = 0;
+
+    snprintf( checkname, sizeof(checkname), "levelshots/%s.tga", tr.world->baseName );
+
+    source = (unsigned char*) ri.Hunk_AllocateTempMemory( W * H * 3 );
+
+    buffer = (unsigned char*) ri.Hunk_AllocateTempMemory( 128 * 128*3 + 18);
+    memset (buffer, 0, 18);
+    buffer[2] = 2;    // uncompressed type
+    buffer[12] = 128;
+    buffer[14] = 128;
+    buffer[16] = 24;  // pixel size
+
+    {
+        unsigned char* buffer2 = (unsigned char*) malloc (W * H * 4);
+        vk_read_pixels(buffer2, W, H);
+
+        unsigned char* buffer_ptr = source;
+        unsigned char* buffer2_ptr = buffer2;
+        for (i = 0; i < W * H; i++)
+        {
+            buffer_ptr[0] = buffer2_ptr[0];
+            buffer_ptr[1] = buffer2_ptr[1];
+            buffer_ptr[2] = buffer2_ptr[2];
+            buffer_ptr += 3;
+            buffer2_ptr += 4;
+        }
+        free(buffer2);
+    }
+
+    // resample from source
+    xScale = W / 512.0f;
+    yScale = H / 384.0f;
+    for ( y = 0 ; y < 128 ; y++ ) {
+        for ( x = 0 ; x < 128 ; x++ ) {
+            r = g = b = 0;
+            for ( yy = 0 ; yy < 3 ; yy++ ) {
+                for ( xx = 0 ; xx < 4 ; xx++ ) {
+                    src = source + 3 * ( W * (int)( (y*3+yy)*yScale ) + (int)( (x*4+xx)*xScale ) );
+                    r += src[0];
+                    g += src[1];
+                    b += src[2];
+                }
+            }
+            dst = buffer + 18 + 3 * ( y * 128 + x );
+            dst[0] = b / 12;
+            dst[1] = g / 12;
+            dst[2] = r / 12;
+        }
+    }
+
+    ri.FS_WriteFile( checkname, buffer, 128 * 128*3 + 18 );
+
+    ri.Hunk_FreeTempMemory( buffer );
+    ri.Hunk_FreeTempMemory( source );
+
+    ri.Printf( PRINT_ALL, "Wrote %s\n", checkname );
+}
+
+/*
+==================
+R_ScreenShot_f
+
+screenshot
+screenshot [silent]
+screenshot [levelshot]
+screenshot [filename]
+
+Doesn't print the pacifier message if there is a second arg
+==================
+*/
+void R_ScreenShot_f (void)
+{
+    char checkname[MAX_OSPATH];
+    static int lastNumber = -1;
+    qboolean silent;
+
+    int W;
+    int H;
+
+    R_GetWinResolution(&W, &H);
+
+    if ( !strcmp( ri.Cmd_Argv(1), "levelshot" ) )
+    {
+        R_LevelShot(W, H);
+        return;
+    }
+
+    if ( !strcmp( ri.Cmd_Argv(1), "silent" ) )
+    {
+        silent = qtrue;
+    }
+    else
+    {
+        silent = qfalse;
+    }
+
+    if ( ri.Cmd_Argc() == 2 && !silent )
+    {
+        // explicit filename
+        snprintf( checkname, sizeof(checkname), "screenshots/%s.tga", ri.Cmd_Argv( 1 ) );
+    }
+    else
+    {
+        // scan for a free filename
+
+        // if we have saved a previous screenshot, don't scan again,
+        // because recording demo avis can involve thousands of shots
+        if ( lastNumber == -1 ) {
+            lastNumber = 0;
+        }
+        // scan for a free number
+        for ( ; lastNumber <= 9999 ; lastNumber++ )
+        {
+            int a,b,c,d;
+
+            a = lastNumber / 1000;
+            b = lastNumber % 1000 / 100;
+            c = lastNumber % 100 / 10;
+            d = lastNumber % 10;
+
+            snprintf( checkname, sizeof(checkname), "screenshots/shot%i%i%i%i.tga", a, b, c, d );
+
+            if (!ri.FS_FileExists( checkname ))
+            {
+                break; // file doesn't exist
+            }
+        }
+
+        if ( lastNumber >= 9999 )
+        {
+            ri.Printf (PRINT_ALL, "ScreenShot: Couldn't create a file\n");
+            return;
+        }
+
+        lastNumber++;
+    }
+
+    R_TakeScreenshot( 0, 0, W, H, checkname, qfalse );
+
+    if ( !silent ) {
+        ri.Printf (PRINT_ALL, "Wrote %s\n", checkname);
+    }
+}
+
+
+void R_ScreenShotJPEG_f(void)
+{
+    char checkname[MAX_OSPATH];
+    static int lastNumber = -1;
+    qboolean silent;
+
+    int W;
+    int H;
+
+    R_GetWinResolution(&W, &H);
+
+    if ( !strcmp( ri.Cmd_Argv(1), "levelshot" ) ) {
+        R_LevelShot(W, H);
+        return;
+    }
+
+    if ( !strcmp( ri.Cmd_Argv(1), "silent" ) ) {
+        silent = qtrue;
+    } else {
+        silent = qfalse;
+    }
+
+    if ( ri.Cmd_Argc() == 2 && !silent ) {
+        // explicit filename
+        snprintf( checkname, sizeof(checkname), "screenshots/%s.jpg", ri.Cmd_Argv( 1 ) );
+    } else {
+        // scan for a free filename
+
+        // if we have saved a previous screenshot, don't scan
+        // again, because recording demo avis can involve
+        // thousands of shots
+        if ( lastNumber == -1 ) {
+            lastNumber = 0;
+        }
+        // scan for a free number; extract the digits without
+        // modifying lastNumber itself, since it is the loop counter
+        for ( ; lastNumber <= 9999 ; lastNumber++ )
+        {
+            int a,b,c,d;
+
+            a = lastNumber / 1000;
+            b = lastNumber % 1000 / 100;
+            c = lastNumber % 100 / 10;
+            d = lastNumber % 10;
+
+            snprintf( checkname, sizeof(checkname), "screenshots/shot%i%i%i%i.jpg"
+                    , a, b, c, d );
+
+            if (!ri.FS_FileExists( checkname ))
+            {
+                break; // file doesn't exist
+            }
+        }
+
+        if ( lastNumber >= 9999 )
+        {
+            ri.Printf (PRINT_ALL, "ScreenShot: Couldn't create a file\n");
+            return;
+        }
+
+        lastNumber++;
+    }
+
+    R_TakeScreenshot( 0, 0, W, H, checkname, qtrue );
+
+    if ( !silent ) {
+        ri.Printf (PRINT_ALL, "Wrote %s\n", checkname);
+    }
+}
+
+
+
+
+void RB_TakeVideoFrameCmd( const videoFrameCommand_t * const cmd )
+{
+    size_t memcount, linelen;
+    int padwidth, avipadwidth, padlen;
+
+    linelen = cmd->width * 3;
+
+    // AVI rows (like glReadPixels rows in the GL renderers) are padded
+    // to a 4-byte boundary.
+    padwidth = PAD(linelen, 4);
+    padlen = padwidth - linelen;
+    avipadwidth = PAD(linelen, 4);
+
+    unsigned char* const pImg = (unsigned char*) malloc ( cmd->width * cmd->height * 4);
+
+    vk_read_pixels(pImg, cmd->width, cmd->height);
+
+    imgFlipY(pImg, cmd->width, cmd->height);
+
+    if(cmd->motionJpeg)
+    {
+        // convert RGBA to tightly packed BGR in place
+        const uint32_t cnPixels = cmd->width * cmd->height;
+        unsigned char* pOut = pImg;
+        const unsigned char* pIn = pImg;
+
+        uint32_t i;
+        for (i = 0; i < cnPixels; i++)
+        {
+            pOut[0] = pIn[2];
+            pOut[1] = pIn[1];
+            pOut[2] = pIn[0];
+            pOut += 3;
+            pIn += 4;
+        }
+
+        // pImg is now tightly packed, so there is no row padding on the input
+        memcount = RE_SaveJPGToBuffer(cmd->encodeBuffer, linelen * cmd->height,
+                                      90, cmd->width, cmd->height, pImg, 0);
+
+        ri.CL_WriteAVIVideoFrame(cmd->encodeBuffer, memcount);
+    }
+    else
+    {
+        unsigned char* buffer_ptr = cmd->encodeBuffer;
+        const unsigned char* buffer2_ptr = pImg;
+
+        int row, col;
+        for (row = 0; row < cmd->height; row++)
+        {
+            for (col = 0; col < cmd->width; col++)
+            {
+                buffer_ptr[0] = buffer2_ptr[0];
+                buffer_ptr[1] = buffer2_ptr[1];
+                buffer_ptr[2] = buffer2_ptr[2];
+                buffer_ptr += 3;
+                buffer2_ptr += 4;
+            }
+            buffer_ptr += padlen; // keep each output row avipadwidth bytes wide
+        }
+
+        ri.CL_WriteAVIVideoFrame(cmd->encodeBuffer, avipadwidth * cmd->height);
+    }
+
+    free(pImg);
+}
+
+
+void RE_TakeVideoFrame( int width, int height, unsigned char *captureBuffer, unsigned char *encodeBuffer, qboolean motionJpeg )
+{
+    if( !tr.registered ) {
+        return;
+    }
+
+    videoFrameCommand_t * cmd = R_GetCommandBuffer( sizeof( *cmd ) );
+    if( !cmd ) {
+        return;
+    }
+
+    cmd->commandId = RC_VIDEOFRAME;
+
+    cmd->width = width;
+    cmd->height = height;
+    cmd->captureBuffer = captureBuffer;
+    cmd->encodeBuffer = encodeBuffer;
+    cmd->motionJpeg = motionJpeg;
+}
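+// Worked example for the padding in RB_TakeVideoFrameCmd above: PAD(x, 4)
+// rounds x up to a multiple of 4. With width = 101, linelen = 303, so
+// padwidth = avipadwidth = 304 and padlen = 1: each AVI row carries 303 bytes
+// of BGR data plus one pad byte.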
diff --git a/code/renderer_vulkan/vk_screenshot.h b/code/renderer_vulkan/vk_screenshot.h
new file mode 100644
index 0000000000..54c50ef959
--- /dev/null
+++ b/code/renderer_vulkan/vk_screenshot.h
@@ -0,0 +1,30 @@
+#ifndef VK_SCREENSHOT_H_
+#define VK_SCREENSHOT_H_
+
+void R_ScreenShotJPEG_f(void);
+void R_ScreenShot_f( void );
+
+
+typedef struct {
+    int commandId;
+    int x;
+    int y;
+    int width;
+    int height;
+    char *fileName;
+    qboolean jpeg;
+} screenshotCommand_t;
+
+typedef struct {
+    int commandId;
+    int width;
+    int height;
+    unsigned char* captureBuffer;
+    unsigned char* encodeBuffer;
+    VkBool32 motionJpeg;
+} videoFrameCommand_t;
+
+void RB_TakeVideoFrameCmd( const videoFrameCommand_t * const cmd );
+void RB_TakeScreenshot( int width, int height, char *fileName, VkBool32 isJpeg);
+
+#endif
diff --git a/code/renderer_vulkan/vk_shade_geometry.c b/code/renderer_vulkan/vk_shade_geometry.c
new file mode 100644
index 0000000000..601cb01c7d
--- /dev/null
+++ b/code/renderer_vulkan/vk_shade_geometry.c
@@ -0,0 +1,1299 @@
+#include "vk_shade_geometry.h"
+#include "vk_instance.h"
+#include "tr_globals.h"
+#include "tr_cvar.h"
+#include "vk_image.h"
+#include "vk_pipelines.h"
+#include "matrix_multiplication.h"
+#include "tr_backend.h"
+#include "glConfig.h"
+#include "R_PortalPlane.h"
+#include "tr_light.h"
+#include "tr_shader.h"
+
+#define VERTEX_CHUNK_SIZE   (768 * 1024)
+#define INDEX_BUFFER_SIZE   (2 * 1024 * 1024)
+
+#define XYZ_SIZE            (4 * VERTEX_CHUNK_SIZE)
+#define COLOR_SIZE          (1 * VERTEX_CHUNK_SIZE)
+#define ST0_SIZE            (2 * VERTEX_CHUNK_SIZE)
+#define ST1_SIZE            (2 * VERTEX_CHUNK_SIZE)
+
+#define XYZ_OFFSET          0
+#define COLOR_OFFSET        (XYZ_OFFSET + XYZ_SIZE)
+#define ST0_OFFSET          (COLOR_OFFSET + COLOR_SIZE)
+#define ST1_OFFSET          (ST0_OFFSET + ST0_SIZE)
+
+struct ShadingData_t
+{
+    // Buffers represent linear arrays of data which are used for various purposes
+    // by binding them to a graphics or compute pipeline via descriptor sets or
+    // via certain commands, or by directly specifying them as parameters to
+    // certain commands. Buffers are represented by VkBuffer handles:
+    VkBuffer vertex_buffer;
+    unsigned char* vertex_buffer_ptr;   // pointer to mapped vertex buffer
+    uint32_t xyz_elements;
+    uint32_t color_st_elements;
+
+    VkBuffer index_buffer;
+    unsigned char* index_buffer_ptr;    // pointer to mapped index buffer
+    uint32_t index_buffer_offset;
+
+    // host visible memory that holds both vertex and index data
+    VkDeviceMemory vertex_buffer_memory;
+    VkDeviceMemory index_buffer_memory;
+    VkDescriptorSet curDescriptorSets[2];
+
+    // This flag decides whether the framebuffer's depth attachment should be
+    // cleared with vkCmdClearAttachments (s_depth_attachment_dirty == VK_TRUE),
+    // or whether it has just been cleared by the render pass instance's clear
+    // op (s_depth_attachment_dirty == VK_FALSE).
+    VkBool32 s_depth_attachment_dirty;
+};
+
+struct ShadingData_t shadingDat;
+
+
+VkBuffer vk_getIndexBuffer(void)
+{
+    return shadingDat.index_buffer;
+}
+
+
+static float s_modelview_matrix[16] QALIGN(16);
+
+
+void set_modelview_matrix(const float mv[16])
+{
+    memcpy(s_modelview_matrix, mv, 64); // 16 floats, 4 bytes each
+}
+
+
+const float * getptr_modelview_matrix(void)
+{
+    return s_modelview_matrix;
+}
+
+
+// TODO: work out the exact viewport/scissor rules this renderer needs.
+static void vk_setViewportScissor(VkBool32 is2D, enum Vk_Depth_Range dR,
+        VkViewport* const vp, VkRect2D* const pRect)
+{
+    int width, height;
+    R_GetWinResolution(&width, &height);
+
+    if (is2D)
+    {
+        pRect->offset.x = vp->x = 0;
+        pRect->offset.y = vp->y = 0;
+
+        pRect->extent.width = vp->width = width;
+        pRect->extent.height = vp->height = height;
+    }
+    else
+    {
+        int X = backEnd.viewParms.viewportX;
+        int Y = backEnd.viewParms.viewportY;
+        int W = backEnd.viewParms.viewportWidth;
+        int H = backEnd.viewParms.viewportHeight;
+
+        // clamp the viewport to the window
+        if (X < 0)
+            X = 0;
+        if (Y < 0)
+            Y = 0;
+        if (X + W > width)
+            W = width - X;
+        if (Y + H > height)
+            H = height - Y;
+
+        pRect->offset.x = vp->x = X;
+        pRect->offset.y = vp->y = Y;
+        pRect->extent.width = vp->width = W;
+        pRect->extent.height = vp->height = H;
+    }
+
+    switch(dR)
+    {
+        case DEPTH_RANGE_NORMAL:
+        {
+            vp->minDepth = 0.0f;
+            vp->maxDepth = 1.0f;
+        }break;
+
+        case DEPTH_RANGE_ZERO:
+        {
+            vp->minDepth = 0.0f;
+            vp->maxDepth = 0.0f;
+        }break;
+
+        case DEPTH_RANGE_ONE:
+        {
+            vp->minDepth = 1.0f;
+            vp->maxDepth = 1.0f;
+        }break;
+
+        case DEPTH_RANGE_WEAPON:
+        {
+            vp->minDepth = 0.0f;
+            vp->maxDepth = 0.3f;
+        }break;
+    }
+}
+
+
+VkRect2D get_scissor_rect(void)
+{
+    VkRect2D r;
+
+    int width, height;
+    R_GetWinResolution(&width, &height);
+
+    if (backEnd.projection2D)
+    {
+        r.offset.x = 0;
+        r.offset.y = 0;
+        r.extent.width = width;
+        r.extent.height = height;
+    }
+    else
+    {
+        assert(backEnd.viewParms.viewportWidth > 0);
+        assert(backEnd.viewParms.viewportHeight > 0);
+        r.offset.x = backEnd.viewParms.viewportX;
+        r.offset.y = backEnd.viewParms.viewportY;
+        r.extent.width = backEnd.viewParms.viewportWidth;
+        r.extent.height = backEnd.viewParms.viewportHeight;
+
+        // clamp the rect so models drawn in the setup menus stay inside the window
+        if (r.offset.x < 0)
+            r.offset.x = 0;
+        if (r.offset.y < 0)
+            r.offset.y = 0;
+        if (r.offset.x + r.extent.width > width)
+            r.extent.width = width - r.offset.x;
+        if (r.offset.y + r.extent.height > height)
+            r.extent.height = height - r.offset.y;
+
+        // ri.Printf(PRINT_ALL, "(%d, %d, %d, %d)\n",
+        //     r.offset.x, r.offset.y, r.extent.width, r.extent.height);
+    }
+
+    return r;
+}
+
+
+// Vulkan memory is broken up into two categories, host memory and device memory.
+// Host memory is memory needed by the Vulkan implementation for
+// non-device-visible storage.
Allocations returned by vkAllocateMemory +// are guaranteed to meet any alignment requirement of the implementation +// +// Host access to buffer must be externally synchronized + +void vk_createVertexBuffer(void) +{ + ri.Printf(PRINT_ALL, " Create vertex buffer: shadingDat.vertex_buffer \n"); + + VkBufferCreateInfo desc; + desc.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; + desc.pNext = NULL; + desc.flags = 0; + desc.sharingMode = VK_SHARING_MODE_EXCLUSIVE; + desc.queueFamilyIndexCount = 0; + desc.pQueueFamilyIndices = NULL; + //VERTEX_BUFFER_SIZE + desc.size = XYZ_SIZE + COLOR_SIZE + ST0_SIZE + ST1_SIZE; + desc.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; + + VK_CHECK(qvkCreateBuffer(vk.device, &desc, NULL, &shadingDat.vertex_buffer)); + + + VkMemoryRequirements vb_memory_requirements; + qvkGetBufferMemoryRequirements(vk.device, shadingDat.vertex_buffer, &vb_memory_requirements); + + uint32_t memory_type_bits = vb_memory_requirements.memoryTypeBits; + + VkMemoryAllocateInfo alloc_info; + alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; + alloc_info.pNext = NULL; + alloc_info.allocationSize = vb_memory_requirements.size; + // VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT bit specifies that memory allocated with + // this type can be mapped for host access using vkMapMemory. + // + // VK_MEMORY_PROPERTY_HOST_COHERENT_BIT bit specifies that the host cache + // management commands vkFlushMappedMemoryRanges and vkInvalidateMappedMemoryRanges + // are not needed to flush host writes to the device or make device writes visible + // to the host, respectively. + alloc_info.memoryTypeIndex = find_memory_type(memory_type_bits, + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); + + ri.Printf(PRINT_ALL, " Allocate device memory for Vertex Buffer: %llu bytes. \n", + (unsigned long long)alloc_info.allocationSize); + + VK_CHECK(qvkAllocateMemory(vk.device, &alloc_info, NULL, &shadingDat.vertex_buffer_memory)); + + qvkBindBufferMemory(vk.device, shadingDat.vertex_buffer, shadingDat.vertex_buffer_memory, 0); + + void* data; + VK_CHECK(qvkMapMemory(vk.device, shadingDat.vertex_buffer_memory, 0, VK_WHOLE_SIZE, 0, &data)); + shadingDat.vertex_buffer_ptr = (unsigned char*)data; + +} + + + +void vk_createIndexBuffer(void) +{ + ri.Printf(PRINT_ALL, " Create index buffer: shadingDat.index_buffer \n"); + + VkBufferCreateInfo desc; + desc.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; + desc.pNext = NULL; + desc.flags = 0; + desc.sharingMode = VK_SHARING_MODE_EXCLUSIVE; + desc.queueFamilyIndexCount = 0; + desc.pQueueFamilyIndices = NULL; + desc.size = INDEX_BUFFER_SIZE; + desc.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT; + + VK_CHECK(qvkCreateBuffer(vk.device, &desc, NULL, &shadingDat.index_buffer)); + + + VkMemoryRequirements ib_memory_requirements; + qvkGetBufferMemoryRequirements(vk.device, shadingDat.index_buffer, &ib_memory_requirements); + + uint32_t memory_type_bits = ib_memory_requirements.memoryTypeBits; + + + VkMemoryAllocateInfo alloc_info; + alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; + alloc_info.pNext = NULL; + alloc_info.allocationSize = ib_memory_requirements.size; + // VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT bit specifies that memory allocated with + // this type can be mapped for host access using vkMapMemory. 
+    //
+    // VK_MEMORY_PROPERTY_HOST_COHERENT_BIT bit specifies that the host cache
+    // management commands vkFlushMappedMemoryRanges and vkInvalidateMappedMemoryRanges
+    // are not needed to flush host writes to the device or make device writes visible
+    // to the host, respectively.
+    alloc_info.memoryTypeIndex = find_memory_type(memory_type_bits,
+        VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
+
+    ri.Printf(PRINT_ALL, " Allocate device memory for Index Buffer: %llu bytes. \n",
+        (unsigned long long)alloc_info.allocationSize);
+
+    VK_CHECK(qvkAllocateMemory(vk.device, &alloc_info, NULL, &shadingDat.index_buffer_memory));
+    qvkBindBufferMemory(vk.device, shadingDat.index_buffer, shadingDat.index_buffer_memory, 0);
+
+    void* data;
+    VK_CHECK(qvkMapMemory(vk.device, shadingDat.index_buffer_memory, 0, VK_WHOLE_SIZE, 0, &data));
+    shadingDat.index_buffer_ptr = (unsigned char*)data;
+}
+
+
+// Descriptors and Descriptor Sets
+// A descriptor is a special opaque shader variable that shaders use to access buffer
+// and image resources in an indirect fashion. It can be thought of as a "pointer" to
+// a resource. The Vulkan API allows these variables to be changed between draw
+// operations so that the shaders can access different resources for each draw.
+
+// A descriptor set is called a "set" because it can refer to an array of homogeneous
+// resources that can be described with the same layout binding. One possible way to
+// use multiple descriptors is to construct a descriptor set with two descriptors,
+// with each descriptor referencing a separate texture. Both textures are therefore
+// available during a draw. A command in a command buffer could then select the texture
+// to use by specifying the index of the desired texture. To describe a descriptor set,
+// you use a descriptor set layout.
+
+// Descriptor sets corresponding to bound texture images.
+
+void updateCurDescriptor( VkDescriptorSet curDesSet, uint32_t tmu)
+{
+    shadingDat.curDescriptorSets[tmu] = curDesSet;
+}
+
+
+
+void vk_shade_geometry(VkPipeline pipeline, VkBool32 multitexture, enum Vk_Depth_Range depRg, VkBool32 indexed)
+{
+    // configure vertex data stream
+    VkBuffer bufs[3] = { shadingDat.vertex_buffer, shadingDat.vertex_buffer, shadingDat.vertex_buffer };
+    VkDeviceSize offs[3] = {
+        COLOR_OFFSET + shadingDat.color_st_elements * sizeof(color4ub_t),
+        ST0_OFFSET + shadingDat.color_st_elements * sizeof(vec2_t),
+        ST1_OFFSET + shadingDat.color_st_elements * sizeof(vec2_t)
+    };
+
+    // color
+    if ((shadingDat.color_st_elements + tess.numVertexes) * sizeof(color4ub_t) > COLOR_SIZE)
+        ri.Error(ERR_DROP, "vulkan: vertex buffer overflow (color) %lu \n",
+            (unsigned long)((shadingDat.color_st_elements + tess.numVertexes) * sizeof(color4ub_t)));
+
+    unsigned char* dst_color = shadingDat.vertex_buffer_ptr + offs[0];
+    memcpy(dst_color, tess.svars.colors, tess.numVertexes * sizeof(color4ub_t));
+
+    // st0
+    unsigned char* dst_st0 = shadingDat.vertex_buffer_ptr + offs[1];
+    memcpy(dst_st0, tess.svars.texcoords[0], tess.numVertexes * sizeof(vec2_t));
+
+    // st1
+    if (multitexture)
+    {
+        unsigned char* dst = shadingDat.vertex_buffer_ptr + offs[2];
+        memcpy(dst, tess.svars.texcoords[1], tess.numVertexes * sizeof(vec2_t));
+    }
+
+    qvkCmdBindVertexBuffers(vk.command_buffer, 1, multitexture ?
3 : 2, bufs, offs); + shadingDat.color_st_elements += tess.numVertexes; + + // bind descriptor sets + +// vkCmdBindDescriptorSets causes the sets numbered [firstSet.. firstSet+descriptorSetCount-1] to use +// the bindings stored in pDescriptorSets[0..descriptorSetCount-1] for subsequent rendering commands +// (either compute or graphics, according to the pipelineBindPoint). +// Any bindings that were previously applied via these sets are no longer valid. + + qvkCmdBindDescriptorSets(vk.command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, + vk.pipeline_layout, 0, (multitexture ? 2 : 1), shadingDat.curDescriptorSets, 0, NULL); + + // bind pipeline + qvkCmdBindPipeline(vk.command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline); + + // configure pipeline's dynamic state + + VkViewport viewport; + VkRect2D scissor; // = get_scissor_rect(); + + vk_setViewportScissor(backEnd.projection2D, depRg, &viewport, &scissor); + + qvkCmdSetScissor(vk.command_buffer, 0, 1, &scissor); + qvkCmdSetViewport(vk.command_buffer, 0, 1, &viewport); + + + if (tess.shader->polygonOffset) { + qvkCmdSetDepthBias(vk.command_buffer, r_offsetUnits->value, 0.0f, r_offsetFactor->value); + } + + // issue draw call + if (indexed) + qvkCmdDrawIndexed(vk.command_buffer, tess.numIndexes, 1, 0, 0, 0); + else + qvkCmdDraw(vk.command_buffer, tess.numVertexes, 1, 0, 0); + + shadingDat.s_depth_attachment_dirty = VK_TRUE; +} + + + +void updateMVP(VkBool32 isPortal, VkBool32 is2D, const float mvMat4x4[16]) +{ + if (isPortal) + { + // mvp transform + eye transform + clipping plane in eye space + float push_constants[32] QALIGN(16); + + // Eye space transform. + MatrixMultiply4x4_SSE(mvMat4x4, backEnd.viewParms.projectionMatrix, push_constants); + + // NOTE: backEnd.or.modelMatrix incorporates s_flipMatrix, + // so it should be taken into account when computing clipping plane too. + + push_constants[16] = backEnd.or.modelMatrix[0]; + push_constants[17] = backEnd.or.modelMatrix[4]; + push_constants[18] = backEnd.or.modelMatrix[8]; + push_constants[19] = backEnd.or.modelMatrix[12]; + + push_constants[20] = backEnd.or.modelMatrix[1]; + push_constants[21] = backEnd.or.modelMatrix[5]; + push_constants[22] = backEnd.or.modelMatrix[9]; + push_constants[23] = backEnd.or.modelMatrix[13]; + + push_constants[24] = backEnd.or.modelMatrix[2]; + push_constants[25] = backEnd.or.modelMatrix[6]; + push_constants[26] = backEnd.or.modelMatrix[10]; + push_constants[27] = backEnd.or.modelMatrix[14]; + + // Clipping plane in eye coordinates. + struct rplane_s eye_plane; + + R_TransformPlane(backEnd.viewParms.or.axis, backEnd.viewParms.or.origin, &eye_plane); + + // Apply s_flipMatrix to be in the same coordinate system as push_constants. + + push_constants[28] = -eye_plane.normal[1]; + push_constants[29] = eye_plane.normal[2]; + push_constants[30] = -eye_plane.normal[0]; + push_constants[31] = eye_plane.dist; + + + // As described above in section Pipeline Layouts, the pipeline layout defines shader push constants + // which are updated via Vulkan commands rather than via writes to memory or copy commands. + // Push constants represent a high speed path to modify constant data in pipelines + // that is expected to outperform memory-backed resource updates. + qvkCmdPushConstants(vk.command_buffer, vk.pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT, 0, 128, push_constants); + } + else + { + // push constants are another way of passing dynamic values to shaders + // Specify push constants. 
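+        // For the is2D case below this is a plain orthographic mapping from
+        // window coordinates to Vulkan clip space: x_ndc = 2*x/width - 1 and
+        // y_ndc = 2*y/height - 1. In column-major order that puts 2/width and
+        // 2/height on the diagonal (mvp[0], mvp[5]) and the translation
+        // (-1, -1) in mvp[12], mvp[13].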
+        float mvp[16] QALIGN(16); // just the model-view-projection transform in this branch
+
+        if (is2D)
+        {
+            float width, height;
+            R_GetWinResolutionF(&width, &height);
+
+            mvp[0] = 2.0f / width;
+            mvp[1] = 0.0f;
+            mvp[2] = 0.0f;
+            mvp[3] = 0.0f;
+
+            mvp[4] = 0.0f;
+            mvp[5] = 2.0f / height;
+            mvp[6] = 0.0f;
+            mvp[7] = 0.0f;
+
+            mvp[8] = 0.0f;
+            mvp[9] = 0.0f;
+            mvp[10] = 1.0f;
+            mvp[11] = 0.0f;
+
+            mvp[12] = -1.0f;
+            mvp[13] = -1.0f;
+            mvp[14] = 0.0f;
+            mvp[15] = 1.0f;
+        }
+        else
+        {
+            // update q3's (OpenGL) projection matrix to Vulkan conventions:
+            // z in [0, 1] instead of [-1, 1], and an inverted y direction
+            MatrixMultiply4x4_SSE(mvMat4x4, backEnd.viewParms.projectionMatrix, mvp);
+        }
+
+        // As described above in section Pipeline Layouts, the pipeline layout defines shader push constants
+        // which are updated via Vulkan commands rather than via writes to memory or copy commands.
+        // Push constants represent a high speed path to modify constant data in pipelines
+        // that is expected to outperform memory-backed resource updates.
+        qvkCmdPushConstants(vk.command_buffer, vk.pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT, 0, 64, mvp);
+    }
+}
+
+
+// =========================================================
+// Vertex fetching is controlled via configurable state,
+// as a logically distinct graphics pipeline stage.
+//
+// Vertex Attributes
+//
+// Vertex shaders can define input variables, which receive vertex attribute data
+// transferred from one or more VkBuffer(s) by drawing commands. Vertex shader
+// input variables are bound to buffers via an indirect binding where the vertex
+// shader associates a vertex input attribute number with each variable, vertex
+// input attributes are associated to vertex input bindings on a per-pipeline basis,
+// and vertex input bindings are associated with specific buffers on a per-draw basis
+// via the vkCmdBindVertexBuffers command.
+//
+// Vertex input attribute and vertex input binding descriptions also
+// contain format information controlling how data is extracted from
+// buffer memory and converted to the format expected by the vertex shader.
+//
+// There are VkPhysicalDeviceLimits::maxVertexInputAttributes number of vertex
+// input attributes and VkPhysicalDeviceLimits::maxVertexInputBindings number of
+// vertex input bindings (each referred to by zero-based indices), where there
+// are at least as many vertex input attributes as there are vertex input bindings.
+// Applications can store multiple vertex input attributes interleaved in a single
+// buffer, and use a single vertex input binding to access those attributes.
+//
+// In GLSL, vertex shaders associate input variables with a vertex input attribute
+// number using the location layout qualifier. The component layout qualifier
+// associates components of a vertex shader input variable with components of
+// a vertex input attribute.
+
+void vk_UploadXYZI(float (*pXYZ)[4], uint32_t nVertex, uint32_t* pIdx, uint32_t nIndex)
+{
+    // xyz stream
+    {
+        const VkDeviceSize xyz_offset = XYZ_OFFSET + shadingDat.xyz_elements * sizeof(vec4_t);
+
+        unsigned char* vDst = shadingDat.vertex_buffer_ptr + xyz_offset;
+
+        // Each position is a vec4: 4 floats, 16 bytes per vertex.
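+        // Layout of the single shared vertex buffer (offsets defined at the
+        // top of this file):
+        //
+        //   [ XYZ ........ | COLOR ..... | ST0 ....... | ST1 ....... ]
+        //   ^XYZ_OFFSET    ^COLOR_OFFSET ^ST0_OFFSET   ^ST1_OFFSET
+        //
+        // Positions stream into the XYZ region here; colors and texture
+        // coordinates are appended to their regions in vk_shade_geometry().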
+        memcpy(vDst, pXYZ, nVertex * 16);
+
+        qvkCmdBindVertexBuffers(vk.command_buffer, 0, 1, &shadingDat.vertex_buffer, &xyz_offset);
+
+        shadingDat.xyz_elements += nVertex;
+
+        assert (shadingDat.xyz_elements * sizeof(vec4_t) < XYZ_SIZE);
+    }
+
+    // indexes stream
+    if(nIndex != 0)
+    {
+        const uint32_t indexes_size = nIndex * sizeof(uint32_t);
+
+        unsigned char* iDst = shadingDat.index_buffer_ptr + shadingDat.index_buffer_offset;
+        memcpy(iDst, pIdx, indexes_size);
+
+        qvkCmdBindIndexBuffer(vk.command_buffer, shadingDat.index_buffer, shadingDat.index_buffer_offset, VK_INDEX_TYPE_UINT32);
+
+        shadingDat.index_buffer_offset += indexes_size;
+
+        assert (shadingDat.index_buffer_offset < INDEX_BUFFER_SIZE);
+    }
+}
+
+
+void vk_resetGeometryBuffer(void)
+{
+    // Reset geometry buffer's current offsets.
+    shadingDat.xyz_elements = 0;
+    shadingDat.color_st_elements = 0;
+    shadingDat.index_buffer_offset = 0;
+    shadingDat.s_depth_attachment_dirty = VK_FALSE;
+
+    Mat4Identity(s_modelview_matrix);
+}
+
+
+void vk_destroy_shading_data(void)
+{
+    ri.Printf(PRINT_ALL, " Destroy vertex/index buffer: shadingDat.vertex_buffer shadingDat.index_buffer. \n");
+    ri.Printf(PRINT_ALL, " Free device memory: vertex_buffer_memory index_buffer_memory. \n");
+
+    qvkUnmapMemory(vk.device, shadingDat.vertex_buffer_memory);
+    qvkFreeMemory(vk.device, shadingDat.vertex_buffer_memory, NULL);
+
+    qvkUnmapMemory(vk.device, shadingDat.index_buffer_memory);
+    qvkFreeMemory(vk.device, shadingDat.index_buffer_memory, NULL);
+
+    qvkDestroyBuffer(vk.device, shadingDat.vertex_buffer, NULL);
+    qvkDestroyBuffer(vk.device, shadingDat.index_buffer, NULL);
+
+    memset(&shadingDat, 0, sizeof(shadingDat));
+
+    VK_CHECK(qvkResetDescriptorPool(vk.device, vk.descriptor_pool, 0));
+}
+
+
+
+void vk_clearDepthStencilAttachments(void)
+{
+    if(shadingDat.s_depth_attachment_dirty)
+    {
+        VkClearAttachment attachments;
+        memset(&attachments, 0, sizeof(VkClearAttachment));
+
+        attachments.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
+        attachments.clearValue.depthStencil.depth = 1.0f;
+
+        if (r_shadows->integer == 2) {
+            attachments.aspectMask |= VK_IMAGE_ASPECT_STENCIL_BIT;
+            attachments.clearValue.depthStencil.stencil = 0;
+        }
+
+        VkClearRect clear_rect;
+        clear_rect.rect = get_scissor_rect();
+        clear_rect.baseArrayLayer = 0;
+        clear_rect.layerCount = 1;
+
+        qvkCmdClearAttachments(vk.command_buffer, 1, &attachments, 1, &clear_rect);
+    }
+}
+
+
+
+
+void vk_clearColorAttachments(const float* color)
+{
+    // ri.Printf(PRINT_ALL, "vk_clearColorAttachments\n");
+
+    VkClearAttachment attachments[1];
+    memset(attachments, 0, sizeof(VkClearAttachment));
+
+    // aspectMask is a mask selecting the color, depth and/or stencil aspects
+    // of the attachment to be cleared. aspectMask can include
+    // VK_IMAGE_ASPECT_COLOR_BIT for color attachments,
+    // VK_IMAGE_ASPECT_DEPTH_BIT for depth/stencil attachments with a depth
+    // component, and VK_IMAGE_ASPECT_STENCIL_BIT for depth/stencil attachments
+    // with a stencil component. If the subpass's depth/stencil attachment
+    // is VK_ATTACHMENT_UNUSED, then the clear has no effect.
+    attachments[0].aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+    // colorAttachment is only meaningful if VK_IMAGE_ASPECT_COLOR_BIT
+    // is set in aspectMask, in which case it is an index to
+    // the pColorAttachments array in the VkSubpassDescription structure
+    // of the current subpass which selects the color attachment to clear.
+ attachments[0].colorAttachment = 0; + attachments[0].clearValue.color.float32[0] = color[0]; + attachments[0].clearValue.color.float32[1] = color[1]; + attachments[0].clearValue.color.float32[2] = color[2]; + attachments[0].clearValue.color.float32[3] = color[3]; + +/* + VkClearRect clear_rect[2]; + clear_rect[0].rect = get_scissor_rect(); + clear_rect[0].baseArrayLayer = 0; + clear_rect[0].layerCount = 1; + uint32_t rect_count = 1; + + // Split viewport rectangle into two non-overlapping rectangles. + // It's a HACK to prevent Vulkan validation layer's performance warning: + // "vkCmdClearAttachments() issued on command buffer object XXX prior to any Draw Cmds. + // It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw." + // + // NOTE: we don't use LOAD_OP_CLEAR for color attachment when we begin renderpass + // since at that point we don't know whether we need color buffer clear (usually we don't). + uint32_t h = clear_rect[0].rect.extent.height / 2; + clear_rect[0].rect.extent.height = h; + clear_rect[1] = clear_rect[0]; + clear_rect[1].rect.offset.y = h; + rect_count = 2; +*/ + + VkClearRect clear_rect[1]; + clear_rect[0].rect = get_scissor_rect(); + clear_rect[0].baseArrayLayer = 0; + clear_rect[0].layerCount = 1; + + qvkCmdClearAttachments(vk.command_buffer, 1, attachments, 1, clear_rect); + +} + + + + +static void ComputeColors( shaderStage_t *pStage ) +{ + int i, nVerts; + // + // rgbGen + // + switch ( pStage->rgbGen ) + { + case CGEN_IDENTITY: + memset( tess.svars.colors, 0xff, tess.numVertexes * 4 ); + break; + default: + case CGEN_IDENTITY_LIGHTING: + memset( tess.svars.colors, tr.identityLightByte, tess.numVertexes * 4 ); + break; + case CGEN_LIGHTING_DIFFUSE: + RB_CalcDiffuseColor( tess.svars.colors ); + break; + case CGEN_EXACT_VERTEX: + memcpy( tess.svars.colors, tess.vertexColors, tess.numVertexes * sizeof( tess.vertexColors[0] ) ); + break; + case CGEN_CONST: + + nVerts = tess.numVertexes; + + for ( i = 0; i < nVerts; i++ ) + { + memcpy(tess.svars.colors[i], pStage->constantColor, 4); + } + break; + case CGEN_VERTEX: + if ( tr.identityLight == 1 ) + { + memcpy( tess.svars.colors, tess.vertexColors, tess.numVertexes * sizeof( tess.vertexColors[0] ) ); + } + else + { + for ( i = 0; i < tess.numVertexes; i++ ) + { + tess.svars.colors[i][0] = tess.vertexColors[i][0] * tr.identityLight; + tess.svars.colors[i][1] = tess.vertexColors[i][1] * tr.identityLight; + tess.svars.colors[i][2] = tess.vertexColors[i][2] * tr.identityLight; + tess.svars.colors[i][3] = tess.vertexColors[i][3]; + } + } + break; + case CGEN_ONE_MINUS_VERTEX: + if ( tr.identityLight == 1 ) + { + for ( i = 0; i < tess.numVertexes; i++ ) + { + tess.svars.colors[i][0] = 255 - tess.vertexColors[i][0]; + tess.svars.colors[i][1] = 255 - tess.vertexColors[i][1]; + tess.svars.colors[i][2] = 255 - tess.vertexColors[i][2]; + } + } + else + { + for ( i = 0; i < tess.numVertexes; i++ ) + { + tess.svars.colors[i][0] = ( 255 - tess.vertexColors[i][0] ) * tr.identityLight; + tess.svars.colors[i][1] = ( 255 - tess.vertexColors[i][1] ) * tr.identityLight; + tess.svars.colors[i][2] = ( 255 - tess.vertexColors[i][2] ) * tr.identityLight; + } + } + break; + case CGEN_FOG: + { + fog_t* fog = tr.world->fogs + tess.fogNum; + + nVerts = tess.numVertexes; + + for (i = 0; i < nVerts; i++) + { + tess.svars.colors[i][0] = fog->colorRGBA[0]; + tess.svars.colors[i][1] = fog->colorRGBA[1]; + tess.svars.colors[i][2] = fog->colorRGBA[2]; + tess.svars.colors[i][3] = fog->colorRGBA[3]; + } + }break; + case 
CGEN_WAVEFORM: + RB_CalcWaveColor( &pStage->rgbWave, tess.svars.colors ); + break; + case CGEN_ENTITY: + RB_CalcColorFromEntity( tess.svars.colors ); + break; + case CGEN_ONE_MINUS_ENTITY: + RB_CalcColorFromOneMinusEntity( tess.svars.colors ); + break; + } + + // + // alphaGen + // + switch ( pStage->alphaGen ) + { + case AGEN_SKIP: + break; + case AGEN_IDENTITY: + if ( pStage->rgbGen != CGEN_IDENTITY ) { + if ( ( pStage->rgbGen == CGEN_VERTEX && tr.identityLight != 1 ) || + pStage->rgbGen != CGEN_VERTEX ) { + for ( i = 0; i < tess.numVertexes; i++ ) { + tess.svars.colors[i][3] = 0xff; + } + } + } + break; + case AGEN_CONST: + if ( pStage->rgbGen != CGEN_CONST ) { + for ( i = 0; i < tess.numVertexes; i++ ) { + tess.svars.colors[i][3] = pStage->constantColor[3]; + } + } + break; + case AGEN_WAVEFORM: + RB_CalcWaveAlpha( &pStage->alphaWave, ( unsigned char * ) tess.svars.colors ); + break; + case AGEN_LIGHTING_SPECULAR: + RB_CalcSpecularAlpha( ( unsigned char * ) tess.svars.colors ); + break; + case AGEN_ENTITY: + RB_CalcAlphaFromEntity( ( unsigned char * ) tess.svars.colors ); + break; + case AGEN_ONE_MINUS_ENTITY: + RB_CalcAlphaFromOneMinusEntity( ( unsigned char * ) tess.svars.colors ); + break; + case AGEN_VERTEX: + if ( pStage->rgbGen != CGEN_VERTEX ) { + for ( i = 0; i < tess.numVertexes; i++ ) { + tess.svars.colors[i][3] = tess.vertexColors[i][3]; + } + } + break; + case AGEN_ONE_MINUS_VERTEX: + for ( i = 0; i < tess.numVertexes; i++ ) + { + tess.svars.colors[i][3] = 255 - tess.vertexColors[i][3]; + } + break; + case AGEN_PORTAL: + { + unsigned char alpha; + + for ( i = 0; i < tess.numVertexes; i++ ) + { + vec3_t v; + + VectorSubtract( tess.xyz[i], backEnd.viewParms.or.origin, v ); + float len = VectorLength( v ); + + len /= tess.shader->portalRange; + + if ( len < 0 ) + { + alpha = 0; + } + else if ( len > 1 ) + { + alpha = 0xff; + } + else + { + alpha = len * 0xff; + } + + tess.svars.colors[i][3] = alpha; + } + } + break; + } + + // + // fog adjustment for colors to fade out as fog increases + // + if ( tess.fogNum ) + { + switch ( pStage->adjustColorsForFog ) + { + case ACFF_MODULATE_RGB: + RB_CalcModulateColorsByFog( ( unsigned char * ) tess.svars.colors ); + break; + case ACFF_MODULATE_ALPHA: + RB_CalcModulateAlphasByFog( ( unsigned char * ) tess.svars.colors ); + break; + case ACFF_MODULATE_RGBA: + RB_CalcModulateRGBAsByFog( ( unsigned char * ) tess.svars.colors ); + break; + case ACFF_NONE: + break; + } + } +} + +static void ComputeTexCoords( shaderStage_t *pStage ) +{ + uint32_t i; + uint32_t b; + + for ( b = 0; b < NUM_TEXTURE_BUNDLES; b++ ) + { + int tm; + + // + // generate the texture coordinates + // + switch ( pStage->bundle[b].tcGen ) + { + case TCGEN_IDENTITY: + memset( tess.svars.texcoords[b], 0, sizeof( float ) * 2 * tess.numVertexes ); + break; + case TCGEN_TEXTURE: + for ( i = 0 ; i < tess.numVertexes ; i++ ) { + tess.svars.texcoords[b][i][0] = tess.texCoords[i][0][0]; + tess.svars.texcoords[b][i][1] = tess.texCoords[i][0][1]; + } + break; + case TCGEN_LIGHTMAP: + for ( i = 0 ; i < tess.numVertexes ; i++ ) { + tess.svars.texcoords[b][i][0] = tess.texCoords[i][1][0]; + tess.svars.texcoords[b][i][1] = tess.texCoords[i][1][1]; + } + break; + case TCGEN_VECTOR: + for ( i = 0 ; i < tess.numVertexes ; i++ ) { + tess.svars.texcoords[b][i][0] = DotProduct( tess.xyz[i], pStage->bundle[b].tcGenVectors[0] ); + tess.svars.texcoords[b][i][1] = DotProduct( tess.xyz[i], pStage->bundle[b].tcGenVectors[1] ); + } + break; + case TCGEN_FOG: + RB_CalcFogTexCoords( ( float * ) 
tess.svars.texcoords[b] );
+            break;
+        case TCGEN_ENVIRONMENT_MAPPED:
+            RB_CalcEnvironmentTexCoords( ( float * ) tess.svars.texcoords[b] );
+            break;
+        case TCGEN_BAD:
+            return;
+        }
+
+        //
+        // alter texture coordinates
+        //
+        for ( tm = 0; tm < pStage->bundle[b].numTexMods ; tm++ )
+        {
+            switch ( pStage->bundle[b].texMods[tm].type )
+            {
+            case TMOD_NONE:
+                tm = TR_MAX_TEXMODS; // break out of for loop
+                break;
+
+            case TMOD_TURBULENT:
+                RB_CalcTurbulentTexCoords( &pStage->bundle[b].texMods[tm].wave, ( float * ) tess.svars.texcoords[b] );
+                break;
+
+            case TMOD_ENTITY_TRANSLATE:
+                RB_CalcScrollTexCoords( backEnd.currentEntity->e.shaderTexCoord, ( float * ) tess.svars.texcoords[b] );
+                break;
+
+            case TMOD_SCROLL:
+                RB_CalcScrollTexCoords( pStage->bundle[b].texMods[tm].scroll, ( float * ) tess.svars.texcoords[b] );
+                break;
+
+            case TMOD_SCALE:
+                RB_CalcScaleTexCoords( pStage->bundle[b].texMods[tm].scale, ( float * ) tess.svars.texcoords[b] );
+                break;
+
+            case TMOD_STRETCH:
+                RB_CalcStretchTexCoords( &pStage->bundle[b].texMods[tm].wave, ( float * ) tess.svars.texcoords[b] );
+                break;
+
+            case TMOD_TRANSFORM:
+                RB_CalcTransformTexCoords( &pStage->bundle[b].texMods[tm], ( float * ) tess.svars.texcoords[b] );
+                break;
+
+            case TMOD_ROTATE:
+                RB_CalcRotateTexCoords( pStage->bundle[b].texMods[tm].rotateSpeed, ( float * ) tess.svars.texcoords[b] );
+                break;
+
+            default:
+                ri.Error( ERR_DROP, "ERROR: unknown texmod '%d' in shader '%s'\n", pStage->bundle[b].texMods[tm].type, tess.shader->name );
+                break;
+            }
+        }
+    }
+}
+
+
+
+/*
+===================
+ProjectDlightTexture
+Perform dynamic lighting with another rendering pass
+===================
+*/
+static void ProjectDlightTexture( void )
+{
+    byte clipBits[SHADER_MAX_VERTEXES];
+
+    if ( !backEnd.refdef.num_dlights ) {
+        return;
+    }
+
+    uint32_t l;
+    for ( l = 0 ; l < backEnd.refdef.num_dlights ; l++ )
+    {
+        //dlight_t *dl;
+
+        if ( !( tess.dlightBits & ( 1 << l ) ) ) {
+            continue; // this surface definitely doesn't have any of this light
+        }
+        float* texCoords = tess.svars.texcoords[0][0];
+        //colors = tess.svars.colors[0];
+
+        //dl = &backEnd.refdef.dlights[l];
+        vec3_t origin;
+
+        VectorCopy( backEnd.refdef.dlights[l].transformed, origin );
+
+
+        float radius = backEnd.refdef.dlights[l].radius;
+        float scale = 1.0f / radius;
+        float modulate;
+
+        float floatColor[3] = {
+            backEnd.refdef.dlights[l].color[0] * 255.0f,
+            backEnd.refdef.dlights[l].color[1] * 255.0f,
+            backEnd.refdef.dlights[l].color[2] * 255.0f
+        };
+
+        uint32_t i;
+        for ( i = 0 ; i < tess.numVertexes ; i++, texCoords += 2)
+        {
+            vec3_t dist;
+
+            backEnd.pc.c_dlightVertexes++;
+
+            VectorSubtract( origin, tess.xyz[i], dist );
+            texCoords[0] = 0.5f + dist[0] * scale;
+            texCoords[1] = 0.5f + dist[1] * scale;
+
+            uint32_t clip = 0;
+            if ( texCoords[0] < 0.0f ) {
+                clip |= 1;
+            }
+            else if ( texCoords[0] > 1.0f ) {
+                clip |= 2;
+            }
+
+            if ( texCoords[1] < 0.0f ) {
+                clip |= 4;
+            }
+            else if ( texCoords[1] > 1.0f ) {
+                clip |= 8;
+            }
+
+            // modulate the strength based on the height and color
+            if ( dist[2] > radius )
+            {
+                clip |= 16;
+                modulate = 0.0f;
+            }
+            else if ( dist[2] < -radius )
+            {
+                clip |= 32;
+                modulate = 0.0f;
+            }
+            else
+            {
+                dist[2] = fabs(dist[2]);
+                if ( dist[2] < radius * 0.5f )
+                {
+                    modulate = 1.0f;
+                }
+                else
+                {
+                    modulate = 2.0f * (radius - dist[2]) * scale;
+                }
+            }
+            clipBits[i] = clip;
+
+            // += 4
+            tess.svars.colors[i][0] = (floatColor[0] * modulate);
+            tess.svars.colors[i][1] = (floatColor[1] * modulate);
+            tess.svars.colors[i][2] = (floatColor[2] * modulate);
+ tess.svars.colors[i][3] = 255; + } + + + // build a list of triangles that need light + uint32_t numIndexes = 0; + for ( i = 0 ; i < tess.numIndexes ; i += 3 ) + { + uint32_t a, b, c; + + a = tess.indexes[i]; + b = tess.indexes[i+1]; + c = tess.indexes[i+2]; + if ( clipBits[a] & clipBits[b] & clipBits[c] ) { + continue; // not lighted + } + numIndexes += 3; + } + + if ( numIndexes == 0 ) { + continue; + } + + + updateCurDescriptor( tr.dlightImage->descriptor_set, 0 ); + // include GLS_DEPTHFUNC_EQUAL so alpha tested surfaces don't add light where they aren't rendered + backEnd.pc.c_totalIndexes += numIndexes; + backEnd.pc.c_dlightIndexes += numIndexes; + + // VULKAN + + vk_shade_geometry(g_stdPipelines.dlight_pipelines[backEnd.refdef.dlights[l].additive > 0 ? 1 : 0][tess.shader->cullType][tess.shader->polygonOffset], + VK_FALSE, DEPTH_RANGE_NORMAL, VK_TRUE); + + } +} + + + +/* +=================== +RB_FogPass +Blends a fog texture on top of everything else +=================== +*/ +static void RB_FogPass( void ) { + + unsigned int i; + + fog_t* fog = tr.world->fogs + tess.fogNum; + + const unsigned int nVerts = tess.numVertexes; + for (i = 0; i < nVerts; i++) + { + tess.svars.colors[i][0] = fog->colorRGBA[0]; + tess.svars.colors[i][1] = fog->colorRGBA[1]; + tess.svars.colors[i][2] = fog->colorRGBA[2]; + tess.svars.colors[i][3] = fog->colorRGBA[3]; + } + + RB_CalcFogTexCoords( ( float * ) tess.svars.texcoords[0] ); + + updateCurDescriptor( tr.fogImage->descriptor_set, 0); + + // VULKAN + + assert(tess.shader->fogPass > 0); + VkPipeline pipeline = g_stdPipelines.fog_pipelines[tess.shader->fogPass - 1][tess.shader->cullType][tess.shader->polygonOffset]; + vk_shade_geometry(pipeline, VK_FALSE, DEPTH_RANGE_NORMAL, VK_TRUE); +} + + +void RB_StageIteratorGeneric( void ) +{ +// shaderCommands_t *input = &tess; + + RB_DeformTessGeometry(); + + // call shader function + // + // VULKAN + + vk_UploadXYZI(tess.xyz, tess.numVertexes, tess.indexes, tess.numIndexes); + + updateMVP(backEnd.viewParms.isPortal, backEnd.projection2D, + getptr_modelview_matrix() ); + + + uint32_t stage = 0; + + for ( stage = 0; stage < MAX_SHADER_STAGES; ++stage ) + { + if ( NULL == tess.xstages[stage]) + { + break; + } + + ComputeColors( tess.xstages[stage] ); + ComputeTexCoords( tess.xstages[stage] ); + + // base + // set state + //R_BindAnimatedImage( &tess.xstages[stage]->bundle[0] ); + VkBool32 multitexture = (tess.xstages[stage]->bundle[1].image[0] != NULL); + + { + if ( tess.xstages[stage]->bundle[0].isVideoMap ) + { + ri.CIN_RunCinematic(tess.xstages[stage]->bundle[0].videoMapHandle); + ri.CIN_UploadCinematic(tess.xstages[stage]->bundle[0].videoMapHandle); + goto ENDANIMA; + } + + int numAnimaImg = tess.xstages[stage]->bundle[0].numImageAnimations; + + if ( numAnimaImg <= 1 ) + { + updateCurDescriptor( tess.xstages[stage]->bundle[0].image[0]->descriptor_set, 0); + //GL_Bind(tess.xstages[stage]->bundle[0].image[0]); + goto ENDANIMA; + } + + // it is necessary to do this messy calc to make sure animations line up + // exactly with waveforms of the same frequency + int index = (int)( tess.shaderTime * tess.xstages[stage]->bundle[0].imageAnimationSpeed * FUNCTABLE_SIZE ) >> FUNCTABLE_SIZE2; + + if ( index < 0 ) { + index = 0; // may happen with shader time offsets + } + + index %= numAnimaImg; + + updateCurDescriptor( tess.xstages[stage]->bundle[0].image[ index ]->descriptor_set, 0); + //GL_Bind(tess.xstages[stage]->bundle[0].image[ index ]); + } + +ENDANIMA: + // + // do multitexture + // + + if ( multitexture ) + { + 
// DrawMultitextured( input, stage ); + // output = t0 * t1 or t0 + t1 + + // t0 = most upstream according to spec + // t1 = most downstream according to spec + // this is an ugly hack to work around a GeForce driver + // bug with multitexture and clip planes + + + if ( tess.xstages[stage]->bundle[1].isVideoMap ) + { + ri.CIN_RunCinematic(tess.xstages[stage]->bundle[1].videoMapHandle); + ri.CIN_UploadCinematic(tess.xstages[stage]->bundle[1].videoMapHandle); + goto END_ANIMA2; + } + + if ( tess.xstages[stage]->bundle[1].numImageAnimations <= 1 ) { + updateCurDescriptor( tess.xstages[stage]->bundle[1].image[0]->descriptor_set, 1); + goto END_ANIMA2; + } + + // it is necessary to do this messy calc to make sure animations line up + // exactly with waveforms of the same frequency + int index2 = (int)( tess.shaderTime * tess.xstages[stage]->bundle[1].imageAnimationSpeed * FUNCTABLE_SIZE ) >> FUNCTABLE_SIZE2; + + if ( index2 < 0 ) { + index2 = 0; // may happen with shader time offsets + } + + index2 %= tess.xstages[stage]->bundle[1].numImageAnimations; + + updateCurDescriptor( tess.xstages[stage]->bundle[1].image[ index2 ]->descriptor_set , 1); + +END_ANIMA2: + + if (r_lightmap->integer) + updateCurDescriptor(tr.whiteImage->descriptor_set, 0); + + // replace diffuse texture with a white one thus effectively render only lightmap + } + + + enum Vk_Depth_Range depth_range = DEPTH_RANGE_NORMAL; + if (tess.shader->isSky) + { + depth_range = DEPTH_RANGE_ONE; + if (r_showsky->integer) + depth_range = DEPTH_RANGE_ZERO; + } + else if (backEnd.currentEntity->e.renderfx & RF_DEPTHHACK) + { + depth_range = DEPTH_RANGE_WEAPON; + } + + + if (backEnd.viewParms.isMirror) + { + vk_shade_geometry(tess.xstages[stage]->vk_mirror_pipeline, multitexture, depth_range, VK_TRUE); + } + else if (backEnd.viewParms.isPortal) + { + vk_shade_geometry(tess.xstages[stage]->vk_portal_pipeline, multitexture, depth_range, VK_TRUE); + } + else + { + vk_shade_geometry(tess.xstages[stage]->vk_pipeline, multitexture, depth_range, VK_TRUE); + } + + + // allow skipping out to show just lightmaps during development + if ( r_lightmap->integer && ( tess.xstages[stage]->bundle[0].isLightmap || tess.xstages[stage]->bundle[1].isLightmap ) ) + { + break; + } + } + + // + // now do any dynamic lighting needed + // + if ( tess.dlightBits && tess.shader->sort <= SS_OPAQUE + && !(tess.shader->surfaceFlags & (SURF_NODLIGHT | SURF_SKY) ) ) { + ProjectDlightTexture(); + } + + // + // now do fog + // + if ( tess.fogNum && tess.shader->fogPass ) { + RB_FogPass(); + } +} diff --git a/code/renderer_vulkan/vk_shade_geometry.h b/code/renderer_vulkan/vk_shade_geometry.h new file mode 100644 index 0000000000..b75aafbffd --- /dev/null +++ b/code/renderer_vulkan/vk_shade_geometry.h @@ -0,0 +1,38 @@ +#ifndef VK_SHADE_GEOMETRY +#define VK_SHADE_GEOMETRY + +#include "vk_instance.h" + +enum Vk_Depth_Range { + DEPTH_RANGE_NORMAL, // [0..1] + DEPTH_RANGE_ZERO, // [0..0] + DEPTH_RANGE_ONE, // [1..1] + DEPTH_RANGE_WEAPON // [0..0.3] +}; + + +const float * getptr_modelview_matrix(void); + +void set_modelview_matrix(const float mv[16]); + +void vk_shade_geometry(VkPipeline pipeline, VkBool32 multitexture, enum Vk_Depth_Range depth_range, VkBool32 indexed); +void vk_UploadXYZI(float (*pXYZ)[4], uint32_t nVertex, uint32_t* pIdx, uint32_t nIndex); + +void updateMVP(VkBool32 isPortal, VkBool32 is2D, const float mvMat4x4[16]); +void vk_resetGeometryBuffer(void); + +void vk_createVertexBuffer(void); +void vk_createIndexBuffer(void); + +VkBuffer vk_getIndexBuffer(void); +void 
vk_destroy_shading_data(void);
+
+void updateCurDescriptor( VkDescriptorSet curDesSet, uint32_t tmu);
+
+VkRect2D get_scissor_rect(void);
+
+
+void vk_clearColorAttachments(const float* color);
+void vk_clearDepthStencilAttachments(void);
+
+#endif
diff --git a/code/renderer_vulkan/vk_shaders.c b/code/renderer_vulkan/vk_shaders.c
new file mode 100644
index 0000000000..8a12a5f81f
--- /dev/null
+++ b/code/renderer_vulkan/vk_shaders.c
@@ -0,0 +1,167 @@
+#include "vk_instance.h"
+#include "vk_shaders.h"
+
+// Shader code for Vulkan has to be specified in a bytecode format called
+// SPIR-V, which is designed to work with both Vulkan and OpenCL.
+//
+// The graphics pipeline is the sequence of operations that take the
+// vertices and textures of your meshes all the way to the pixels in the
+// render targets.
+
+
+/*
+static VkPipelineShaderStageCreateInfo get_shader_stage_desc(
+    VkShaderStageFlagBits stage, VkShaderModule shader_module, const char* entry)
+{
+    VkPipelineShaderStageCreateInfo desc;
+    desc.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+    desc.pNext = NULL;
+    desc.flags = 0;
+    desc.stage = stage;
+    desc.module = shader_module;
+    desc.pName = entry;
+    desc.pSpecializationInfo = NULL;
+    return desc;
+};
+*/
+
+struct StageShaderModuleManager{
+    //
+    // Shader modules.
+    //
+    VkShaderModule single_texture_vs;
+    VkShaderModule single_texture_clipping_plane_vs;
+    VkShaderModule single_texture_fs;
+    VkShaderModule multi_texture_vs;
+    VkShaderModule multi_texture_clipping_plane_vs;
+    VkShaderModule multi_texture_fs;
+};
+
+
+
+static struct StageShaderModuleManager s_gShaderModules;
+
+
+// This function takes a buffer with the bytecode and the size of the buffer
+// as parameters and creates a VkShaderModule from it.
+
+static void create_shader_module(const unsigned char* pBytes, const int count, VkShaderModule* pVkShaderMod)
+{
+    if (count % 4 != 0) {
+        ri.Error(ERR_FATAL, "Vulkan: SPIR-V binary buffer size is not a multiple of 4");
+    }
+    VkShaderModuleCreateInfo desc;
+    desc.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
+    desc.pNext = NULL;
+    desc.flags = 0;
+    desc.codeSize = count;
+    desc.pCode = (const uint32_t*)pBytes;
+
+    VK_CHECK(qvkCreateShaderModule(vk.device, &desc, NULL, pVkShaderMod));
+}
+
+
+
+// The VkShaderModule object is just a dumb wrapper around the bytecode buffer.
+// The shaders aren't linked to each other yet and they haven't even been given
+// a purpose yet.
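+
+// A sketch of how the *_spv[] byte arrays consumed below are presumably
+// generated offline (the shader file names here are illustrative; only the
+// extern array/size symbols referenced in vk_loadShaderModules are real):
+//
+//   glslangValidator -V single_texture.vert -o single_texture_vert.spv
+//   xxd -i single_texture_vert.spv > single_texture_vert_spv.c
+//
+// i.e. each GLSL source is compiled to SPIR-V and the binary is embedded
+// as a C byte array plus a size variable, so no shader files need to be
+// read at runtime.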
+void vk_loadShaderModules(void)
+{
+
+    extern unsigned char single_texture_vert_spv[];
+    extern int single_texture_vert_spv_size;
+
+    create_shader_module(single_texture_vert_spv, single_texture_vert_spv_size,
+            &s_gShaderModules.single_texture_vs);
+
+    extern unsigned char single_texture_clipping_plane_vert_spv[];
+    extern int single_texture_clipping_plane_vert_spv_size;
+
+    create_shader_module(single_texture_clipping_plane_vert_spv, single_texture_clipping_plane_vert_spv_size,
+            &s_gShaderModules.single_texture_clipping_plane_vs);
+
+    extern unsigned char single_texture_frag_spv[];
+    extern int single_texture_frag_spv_size;
+
+    create_shader_module(single_texture_frag_spv, single_texture_frag_spv_size,
+            &s_gShaderModules.single_texture_fs);
+
+    extern unsigned char multi_texture_vert_spv[];
+    extern int multi_texture_vert_spv_size;
+
+    create_shader_module(multi_texture_vert_spv, multi_texture_vert_spv_size,
+            &s_gShaderModules.multi_texture_vs);
+
+    extern unsigned char multi_texture_clipping_plane_vert_spv[];
+    extern int multi_texture_clipping_plane_vert_spv_size;
+    create_shader_module(multi_texture_clipping_plane_vert_spv, multi_texture_clipping_plane_vert_spv_size,
+            &s_gShaderModules.multi_texture_clipping_plane_vs);
+
+    extern unsigned char multi_texture_frag_spv[];
+    extern int multi_texture_frag_spv_size;
+    create_shader_module(multi_texture_frag_spv, multi_texture_frag_spv_size,
+            &s_gShaderModules.multi_texture_fs);
+}
+
+
+
+void vk_specifyShaderModule(const enum Vk_Shader_Type shader_type, const VkBool32 isClippingPlane,
+        VkShaderModule* vs, VkShaderModule* fs)
+{
+    // Specify the shader module containing the shader code, and the function
+    // to invoke. This means that it's possible to combine multiple fragment
+    // shaders into a single shader module and use different entry points
+    // to differentiate between their behaviors.
+    // In this case we'll stick to the standard main.
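+    // For example (hypothetical, not done in this codebase), one module
+    // holding both a "mainLit" and a "mainUnlit" entry point would be
+    // selected between purely via the pName field of
+    // VkPipelineShaderStageCreateInfo at pipeline creation time.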
+ + if(isClippingPlane && vk.features.shaderClipDistance) + { + switch(shader_type) + { + case ST_MULTI_TEXURE_ADD: + case ST_MULTI_TEXURE_MUL: + { + *vs = s_gShaderModules.multi_texture_clipping_plane_vs; + *fs = s_gShaderModules.multi_texture_fs; + }break; + + case ST_SINGLE_TEXTURE: + { + *vs = s_gShaderModules.single_texture_clipping_plane_vs; + *fs = s_gShaderModules.single_texture_fs; + }break; + } + } + else + { + switch(shader_type) + { + case ST_MULTI_TEXURE_ADD: + case ST_MULTI_TEXURE_MUL: + { + *vs = s_gShaderModules.multi_texture_vs; + *fs = s_gShaderModules.multi_texture_fs; + }break; + + case ST_SINGLE_TEXTURE: + { + *vs = s_gShaderModules.single_texture_vs; + *fs = s_gShaderModules.single_texture_fs; + }break; + } + + } + +} + + + +void vk_destroyShaderModules(void) +{ + qvkDestroyShaderModule(vk.device, s_gShaderModules.single_texture_vs, NULL); + qvkDestroyShaderModule(vk.device, s_gShaderModules.single_texture_clipping_plane_vs, NULL); + qvkDestroyShaderModule(vk.device, s_gShaderModules.single_texture_fs, NULL); + qvkDestroyShaderModule(vk.device, s_gShaderModules.multi_texture_vs, NULL); + qvkDestroyShaderModule(vk.device, s_gShaderModules.multi_texture_clipping_plane_vs, NULL); + qvkDestroyShaderModule(vk.device, s_gShaderModules.multi_texture_fs, NULL); +} diff --git a/code/renderer_vulkan/vk_shaders.h b/code/renderer_vulkan/vk_shaders.h new file mode 100644 index 0000000000..224ab7f590 --- /dev/null +++ b/code/renderer_vulkan/vk_shaders.h @@ -0,0 +1,51 @@ +#ifndef VK_SHADERS_H_ +#define VK_SHADERS_H_ + + + +#define GLS_SRCBLEND_ZERO 0x00000001 +#define GLS_SRCBLEND_ONE 0x00000002 +#define GLS_SRCBLEND_DST_COLOR 0x00000003 +#define GLS_SRCBLEND_ONE_MINUS_DST_COLOR 0x00000004 +#define GLS_SRCBLEND_SRC_ALPHA 0x00000005 +#define GLS_SRCBLEND_ONE_MINUS_SRC_ALPHA 0x00000006 +#define GLS_SRCBLEND_DST_ALPHA 0x00000007 +#define GLS_SRCBLEND_ONE_MINUS_DST_ALPHA 0x00000008 +#define GLS_SRCBLEND_ALPHA_SATURATE 0x00000009 +#define GLS_SRCBLEND_BITS 0x0000000f + +#define GLS_DSTBLEND_ZERO 0x00000010 +#define GLS_DSTBLEND_ONE 0x00000020 +#define GLS_DSTBLEND_SRC_COLOR 0x00000030 +#define GLS_DSTBLEND_ONE_MINUS_SRC_COLOR 0x00000040 +#define GLS_DSTBLEND_SRC_ALPHA 0x00000050 +#define GLS_DSTBLEND_ONE_MINUS_SRC_ALPHA 0x00000060 +#define GLS_DSTBLEND_DST_ALPHA 0x00000070 +#define GLS_DSTBLEND_ONE_MINUS_DST_ALPHA 0x00000080 +#define GLS_DSTBLEND_BITS 0x000000f0 + +#define GLS_DEPTHMASK_TRUE 0x00000100 + +#define GLS_POLYMODE_LINE 0x00001000 + +#define GLS_DEPTHTEST_DISABLE 0x00010000 +#define GLS_DEPTHFUNC_EQUAL 0x00020000 + +#define GLS_ATEST_GT_0 0x10000000 +#define GLS_ATEST_LT_80 0x20000000 +#define GLS_ATEST_GE_80 0x40000000 +#define GLS_ATEST_BITS 0x70000000 + +#define GLS_DEFAULT GLS_DEPTHMASK_TRUE + +enum Vk_Shader_Type { + ST_SINGLE_TEXTURE, + ST_MULTI_TEXURE_MUL, + ST_MULTI_TEXURE_ADD +}; + +void vk_loadShaderModules(void); +void vk_specifyShaderModule(const enum Vk_Shader_Type shader_type, const VkBool32 isClippingPlane, VkShaderModule* vs, VkShaderModule* fs); +void vk_destroyShaderModules(void); + +#endif diff --git a/code/renderer_vulkan/vk_swapchain.c b/code/renderer_vulkan/vk_swapchain.c new file mode 100644 index 0000000000..3ff7f3c76a --- /dev/null +++ b/code/renderer_vulkan/vk_swapchain.c @@ -0,0 +1,311 @@ +#include "ref_import.h" +#include "tr_cvar.h" +#include "VKimpl.h" +#include "vk_instance.h" +#include "glConfig.h" + + +/* + +A surface has changed in such a way that it is no longer compatible with the swapchain, +and further presentation requests using the 
swapchain will fail. Applications must
+query the new surface properties and recreate their swapchain if they wish to continue
+presenting to the surface.
+
+VK_IMAGE_LAYOUT_PRESENT_SRC_KHR must only be used for presenting a presentable image
+for display. A swapchain's image must be transitioned to this layout before calling
+vkQueuePresentKHR, and must be transitioned away from this layout after calling
+vkAcquireNextImageKHR.
+
+*/
+
+
+// Vulkan does not have the concept of a "default framebuffer", hence it requires an
+// infrastructure that will own the buffers we will render to before we visualize them
+// on the screen. This infrastructure is known as the swap chain and must be created
+// explicitly in Vulkan. The swap chain is essentially a queue of images that are
+// waiting to be presented to the screen. The general purpose of the swap chain is to
+// synchronize the presentation of images with the refresh rate of the screen.
+
+// Things to query before creating one:
+// 1) Basic surface capabilities (min/max number of images in the swap chain,
+//    min/max width and height of images).
+// 2) Surface formats (pixel format, color space)
+// 3) Available presentation modes
+
+
+void vk_recreateSwapChain(void)
+{
+
+    ri.Printf( PRINT_ALL, " Recreate swap chain \n");
+
+    if( r_fullscreen->integer )
+    {
+        ri.Cvar_Set( "r_fullscreen", "0" );
+        r_fullscreen->modified = qtrue;
+    }
+
+    // hasty fix to prevent a crash.
+    ri.Cmd_ExecuteText (EXEC_NOW, "vid_restart\n");
+}
+
+
+// create swap chain
+void vk_createSwapChain(VkDevice device, VkSurfaceKHR surface, VkSurfaceFormatKHR surface_format)
+{
+
+    // The present mode is arguably the most important setting for the swap chain
+    // because it represents the actual conditions for showing images to the screen.
+    // There are four possible modes available in Vulkan:
+
+    // 1) VK_PRESENT_MODE_IMMEDIATE_KHR: Images submitted by your application
+    //    are transferred to the screen right away, which may result in tearing.
+    //
+    // 2) VK_PRESENT_MODE_FIFO_KHR: The swap chain is a queue where the display
+    //    takes an image from the front of the queue when the display is refreshed
+    //    and the program inserts rendered images at the back of the queue. If the
+    //    queue is full then the program has to wait. This is most similar to
+    //    vertical sync as found in modern games.
+    //
+    // 3) VK_PRESENT_MODE_FIFO_RELAXED_KHR: variation of 2)
+    //
+    // 4) VK_PRESENT_MODE_MAILBOX_KHR: another variation of 2); the images already
+    //    queued are simply replaced with the newer ones. This mode can be used
+    //    to avoid tearing with significantly fewer latency issues than standard
+    //    vertical sync that uses double buffering.
+    //
+    // We have to look for the best mode available.
+    // determine present mode and swapchain image count
+    VkPresentModeKHR present_mode;
+
+    // The number of images in the swap chain, essentially the queue length.
+    // The implementation specifies the minimum number of images it needs
+    // to function properly.
+    uint32_t image_count;
+
+    {
+        ri.Printf(PRINT_ALL, "\n-------- Determine present mode --------\n");
+
+        uint32_t nPM, i;
+        qvkGetPhysicalDeviceSurfacePresentModesKHR(vk.physical_device, surface, &nPM, NULL);
+
+        VkPresentModeKHR *pPresentModes = (VkPresentModeKHR *) malloc( nPM * sizeof(VkPresentModeKHR) );
+
+        qvkGetPhysicalDeviceSurfacePresentModesKHR(vk.physical_device, surface, &nPM, pPresentModes);
+
+        ri.Printf(PRINT_ALL, "Minimal ImageCount required: %d, Total %d present modes supported: \n",
+                vk.surface_caps.minImageCount, nPM);
+
+        VkBool32 mailbox_supported = VK_FALSE;
+        VkBool32 immediate_supported = VK_FALSE;
+
+        for ( i = 0; i < nPM; i++)
+        {
+            switch(pPresentModes[i])
+            {
+                case VK_PRESENT_MODE_IMMEDIATE_KHR:
+                    ri.Printf(PRINT_ALL, " VK_PRESENT_MODE_IMMEDIATE_KHR \n");
+                    immediate_supported = VK_TRUE;
+                    break;
+                case VK_PRESENT_MODE_MAILBOX_KHR:
+                    ri.Printf(PRINT_ALL, " VK_PRESENT_MODE_MAILBOX_KHR \n");
+                    mailbox_supported = VK_TRUE;
+                    break;
+                case VK_PRESENT_MODE_FIFO_KHR:
+                    ri.Printf(PRINT_ALL, " VK_PRESENT_MODE_FIFO_KHR \n");
+                    break;
+                case VK_PRESENT_MODE_FIFO_RELAXED_KHR:
+                    ri.Printf(PRINT_ALL, " VK_PRESENT_MODE_FIFO_RELAXED_KHR \n");
+                    break;
+                default:
+                    ri.Printf(PRINT_ALL, " Unknown present mode: %d\n", pPresentModes[i]);
+                    break;
+            }
+        }
+
+        free(pPresentModes);
+
+
+        // When r_swapInterval >= 1, force FIFO (vsync). FIFO is guaranteed
+        // by the Vulkan spec so we don't need to check for support.
+        // When r_swapInterval == 0, prefer MAILBOX > IMMEDIATE > FIFO
+        // for uncapped frame rates.
+        if (r_swapInterval->integer >= 1)
+        {
+            present_mode = VK_PRESENT_MODE_FIFO_KHR;
+            image_count = MAX(2u, vk.surface_caps.minImageCount);
+
+            ri.Printf(PRINT_ALL, "\n VK_PRESENT_MODE_FIFO_KHR (vsync) mode, minImageCount: %d. \n", image_count);
+        }
+        else if (mailbox_supported)
+        {
+            present_mode = VK_PRESENT_MODE_MAILBOX_KHR;
+            image_count = MAX(3u, vk.surface_caps.minImageCount);
+
+            ri.Printf(PRINT_ALL, "\n VK_PRESENT_MODE_MAILBOX_KHR mode, minImageCount: %d. \n", image_count);
+        }
+        else if(immediate_supported)
+        {
+            present_mode = VK_PRESENT_MODE_IMMEDIATE_KHR;
+            image_count = MAX(2u, vk.surface_caps.minImageCount);
+
+            ri.Printf(PRINT_ALL, "\n VK_PRESENT_MODE_IMMEDIATE_KHR mode, minImageCount: %d. \n", image_count);
+        }
+        else
+        {
+            // VK_PRESENT_MODE_FIFO_KHR mode is guaranteed to be available.
+            present_mode = VK_PRESENT_MODE_FIFO_KHR;
+            image_count = MAX(2u, vk.surface_caps.minImageCount);
+        }
+
+        // The spec says:
+        // image_count must be <= VkSurfaceCapabilitiesKHR.maxImageCount
+        // image_count must be >= VkSurfaceCapabilitiesKHR.minImageCount
+
+        // maxImageCount is the maximum number of images the specified device
+        // supports for a swapchain created for the surface, and will be either 0,
+        // or greater than or equal to minImageCount. A value of 0 means that
+        // there is no limit on the number of images, though there may be limits
+        // related to the total amount of memory used by presentable images.
+
+        // Formulas such as min(N, maxImageCount) are not correct,
+        // since maxImageCount may be zero.
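+
+        // Worked example of the clamping below (illustrative numbers, not
+        // taken from any particular driver): with minImageCount = 2 and
+        // maxImageCount = 0 (no limit) we fall back to MAX_SWAPCHAIN_IMAGES;
+        // with minImageCount = 2, maxImageCount = 3 and a chosen image_count
+        // of 3, the result is MIN(3 + 1, 3) = 3.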
+        if (vk.surface_caps.maxImageCount == 0)
+        {
+            image_count = MAX_SWAPCHAIN_IMAGES;
+        }
+        else
+        {
+            image_count = MIN(image_count+1, vk.surface_caps.maxImageCount);
+        }
+
+        ri.Printf(PRINT_ALL, " \n minImageCount: %d, maxImageCount: %d, set: %d\n",
+                vk.surface_caps.minImageCount, vk.surface_caps.maxImageCount, image_count);
+
+        ri.Printf(PRINT_ALL, "\n-------- ----------------------- --------\n");
+    }
+
+
+    {
+        ri.Printf(PRINT_ALL, "\n-------- Create vk.swapchain --------\n");
+
+        // Re-query surface capabilities to get fresh values.
+        // The initial query in vk_instance.c may have returned stale dimensions
+        // (e.g., on Wayland the surface may not have been fully realized yet).
+        // This matches Quake3e's approach of querying right before swapchain creation.
+        VK_CHECK(qvkGetPhysicalDeviceSurfaceCapabilitiesKHR(vk.physical_device, surface, &vk.surface_caps));
+
+        VkExtent2D image_extent;
+
+        // Use currentExtent when the compositor dictates the size (typical on X11).
+        // When currentExtent is 0xFFFFFFFF, the app must choose its own extent
+        // (typical on Wayland) - fall back to glConfig dimensions clamped to the
+        // surface's supported range.
+        if (vk.surface_caps.currentExtent.width != 0xFFFFFFFF &&
+            vk.surface_caps.currentExtent.height != 0xFFFFFFFF) {
+            image_extent = vk.surface_caps.currentExtent;
+        } else {
+            int winWidth, winHeight;
+            R_GetWinResolution(&winWidth, &winHeight);
+            image_extent.width = MIN(vk.surface_caps.maxImageExtent.width,
+                    MAX(vk.surface_caps.minImageExtent.width, (uint32_t)winWidth));
+            image_extent.height = MIN(vk.surface_caps.maxImageExtent.height,
+                    MAX(vk.surface_caps.minImageExtent.height, (uint32_t)winHeight));
+        }
+
+        ri.Printf(PRINT_ALL, " Surface capabilities, image_extent.width: %d, image_extent.height: %d\n",
+                image_extent.width, image_extent.height);
+
+
+        VkSwapchainCreateInfoKHR desc;
+        desc.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
+        desc.pNext = NULL;
+        desc.flags = 0;
+        desc.surface = surface;
+        // minImageCount is the minimum number of presentable images that the application needs.
+        // The implementation will either create the swapchain with at least that many images,
+        // or it will fail to create the swapchain.
+        //
+        // minImageCount must be less than or equal to the value returned in
+        // the maxImageCount member of the VkSurfaceCapabilitiesKHR structure returned
+        // by vkGetPhysicalDeviceSurfaceCapabilitiesKHR for the surface,
+        // if the returned maxImageCount is not zero.
+        //
+        // minImageCount must be greater than or equal to the value returned in
+        // the minImageCount member of the VkSurfaceCapabilitiesKHR structure
+        // returned by vkGetPhysicalDeviceSurfaceCapabilitiesKHR for the surface.
+        desc.minImageCount = image_count;
+        desc.imageFormat = surface_format.format;
+        desc.imageColorSpace = surface_format.colorSpace;
+        desc.imageExtent = image_extent;
+        desc.imageArrayLayers = 1;
+
+        // render to a separate image first to allow operations like post-processing
+        // and screenshots (hence the TRANSFER usage bits)
+        desc.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
+
+        // An image is owned by one queue family at a time and ownership
+        // must be explicitly transferred before using it in another
+        // queue family. This option offers the best performance.
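+        // NOTE (assumption, not exercised by this renderer): if the graphics
+        // and present queue families ever differed, the swapchain images would
+        // instead need VK_SHARING_MODE_CONCURRENT with both family indices:
+        //
+        //   uint32_t families[2] = { graphics_family, present_family }; // hypothetical names
+        //   desc.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
+        //   desc.queueFamilyIndexCount = 2;
+        //   desc.pQueueFamilyIndices = families;
+        //
+        // Here a single queue family is assumed, so EXCLUSIVE is used.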
+        desc.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
+        desc.queueFamilyIndexCount = 0;
+        desc.pQueueFamilyIndices = NULL;
+
+        // we can specify that a certain transform should be applied to
+        // images in the swap chain if it is supported, like a 90 degree
+        // clockwise rotation or a horizontal flip. To specify that you
+        // do not want any transformation, simply specify the current
+        // transformation.
+        desc.preTransform = vk.surface_caps.currentTransform;
+
+        // The compositeAlpha field specifies if the alpha channel
+        // should be used for blending with other windows in the
+        // window system.
+        desc.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
+        desc.presentMode = present_mode;
+
+        // we don't care about the color of pixels that are obscured.
+        desc.clipped = VK_TRUE;
+
+        // With Vulkan it's possible that your swap chain becomes invalid or unoptimized
+        // while your application is running, for example because the window was resized.
+        // In that case the swap chain actually needs to be recreated from scratch and a
+        // reference to the old one must be specified in this field.
+        desc.oldSwapchain = VK_NULL_HANDLE;
+
+        VK_CHECK(qvkCreateSwapchainKHR(device, &desc, NULL, &vk.swapchain));
+    }
+
+    //
+    {
+        // To obtain the number of presentable images for the swapchain
+        VK_CHECK(qvkGetSwapchainImagesKHR(device, vk.swapchain, &vk.swapchain_image_count, NULL));
+
+        ri.Printf(PRINT_ALL, " Swapchain image count: %d\n", vk.swapchain_image_count);
+
+        if( vk.swapchain_image_count > MAX_SWAPCHAIN_IMAGES )
+            vk.swapchain_image_count = MAX_SWAPCHAIN_IMAGES;
+
+        // To obtain the array of presentable images associated with the swapchain
+        VK_CHECK(qvkGetSwapchainImagesKHR(device, vk.swapchain, &vk.swapchain_image_count, vk.swapchain_images_array));
+
+        uint32_t i;
+        for (i = 0; i < vk.swapchain_image_count; i++)
+        {
+            VkImageViewCreateInfo desc;
+            desc.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+            desc.pNext = NULL;
+            desc.flags = 0;
+            desc.image = vk.swapchain_images_array[i];
+            desc.viewType = VK_IMAGE_VIEW_TYPE_2D;
+            desc.format = vk.surface_format.format;
+            desc.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
+            desc.components.g = VK_COMPONENT_SWIZZLE_IDENTITY;
+            desc.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
+            desc.components.a = VK_COMPONENT_SWIZZLE_IDENTITY;
+            desc.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+            desc.subresourceRange.baseMipLevel = 0;
+            desc.subresourceRange.levelCount = 1;
+            desc.subresourceRange.baseArrayLayer = 0;
+            desc.subresourceRange.layerCount = 1;
+            VK_CHECK(qvkCreateImageView(device, &desc, NULL, &vk.swapchain_image_views[i]));
+        }
+    }
+}
diff --git a/code/renderer_vulkan/vk_swapchain.h b/code/renderer_vulkan/vk_swapchain.h
new file mode 100644
index 0000000000..72116f165f
--- /dev/null
+++ b/code/renderer_vulkan/vk_swapchain.h
@@ -0,0 +1,10 @@
+#ifndef VK_SWAPCHAIN_H_
+#define VK_SWAPCHAIN_H_
+
+#include "VKimpl.h"
+
+void vk_recreateSwapChain(void);
+void vk_createSwapChain(VkDevice device, VkSurfaceKHR surface, VkSurfaceFormatKHR surface_format);
+
+
+#endif
diff --git a/code/renderer_vulkan/vulkan/vk_platform.h b/code/renderer_vulkan/vulkan/vk_platform.h
new file mode 100644
index 0000000000..e431c7631d
--- /dev/null
+++ b/code/renderer_vulkan/vulkan/vk_platform.h
@@ -0,0 +1,84 @@
+//
+// File: vk_platform.h
+//
+/*
+** Copyright 2014-2026 The Khronos Group Inc.
+**
+** SPDX-License-Identifier: Apache-2.0
+*/
+
+
+#ifndef VK_PLATFORM_H_
+#define VK_PLATFORM_H_
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif // __cplusplus
+
+/*
+***************************************************************************************************
+*   Platform-specific directives and type declarations
+***************************************************************************************************
+*/
+
+/* Platform-specific calling convention macros.
+ *
+ * Platforms should define these so that Vulkan clients call Vulkan commands
+ * with the same calling conventions that the Vulkan implementation expects.
+ *
+ * VKAPI_ATTR - Placed before the return type in function declarations.
+ *              Useful for C++11 and GCC/Clang-style function attribute syntax.
+ * VKAPI_CALL - Placed after the return type in function declarations.
+ *              Useful for MSVC-style calling convention syntax.
+ * VKAPI_PTR  - Placed between the '(' and '*' in function pointer types.
+ *
+ * Function declaration:  VKAPI_ATTR void VKAPI_CALL vkCommand(void);
+ * Function pointer type: typedef void (VKAPI_PTR *PFN_vkCommand)(void);
+ */
+#if defined(_WIN32)
+    // On Windows, Vulkan commands use the stdcall convention
+    #define VKAPI_ATTR
+    #define VKAPI_CALL __stdcall
+    #define VKAPI_PTR  VKAPI_CALL
+#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH < 7
+    #error "Vulkan is not supported for the 'armeabi' NDK ABI"
+#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH >= 7 && defined(__ARM_32BIT_STATE)
+    // On Android 32-bit ARM targets, Vulkan functions use the "hardfloat"
+    // calling convention, i.e. float parameters are passed in registers. This
+    // is true even if the rest of the application passes floats on the stack,
+    // as it does by default when compiling for the armeabi-v7a NDK ABI.
+    #define VKAPI_ATTR __attribute__((pcs("aapcs-vfp")))
+    #define VKAPI_CALL
+    #define VKAPI_PTR  VKAPI_ATTR
+#else
+    // On other platforms, use the default calling convention
+    #define VKAPI_ATTR
+    #define VKAPI_CALL
+    #define VKAPI_PTR
+#endif
+
+#if !defined(VK_NO_STDDEF_H)
+    #include <stddef.h>
+#endif // !defined(VK_NO_STDDEF_H)
+
+#if !defined(VK_NO_STDINT_H)
+    #if defined(_MSC_VER) && (_MSC_VER < 1600)
+        typedef signed   __int8  int8_t;
+        typedef unsigned __int8  uint8_t;
+        typedef signed   __int16 int16_t;
+        typedef unsigned __int16 uint16_t;
+        typedef signed   __int32 int32_t;
+        typedef unsigned __int32 uint32_t;
+        typedef signed   __int64 int64_t;
+        typedef unsigned __int64 uint64_t;
+    #else
+        #include <stdint.h>
+    #endif
+#endif // !defined(VK_NO_STDINT_H)
+
+#ifdef __cplusplus
+} // extern "C"
+#endif // __cplusplus
+
+#endif
diff --git a/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_av1std.h b/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_av1std.h
new file mode 100644
index 0000000000..75cebd74cb
--- /dev/null
+++ b/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_av1std.h
@@ -0,0 +1,394 @@
+#ifndef VULKAN_VIDEO_CODEC_AV1STD_H_
+#define VULKAN_VIDEO_CODEC_AV1STD_H_ 1
+
+/*
+** Copyright 2015-2026 The Khronos Group Inc.
+**
+** SPDX-License-Identifier: Apache-2.0
+*/
+
+/*
+** This header is generated from the Khronos Vulkan XML API Registry.
+**
+*/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+
+// vulkan_video_codec_av1std is a preprocessor guard. Do not pass it to API calls.
+#define vulkan_video_codec_av1std 1 +#include "vulkan_video_codecs_common.h" +#define STD_VIDEO_AV1_NUM_REF_FRAMES 8U +#define STD_VIDEO_AV1_REFS_PER_FRAME 7U +#define STD_VIDEO_AV1_TOTAL_REFS_PER_FRAME 8U +#define STD_VIDEO_AV1_MAX_TILE_COLS 64U +#define STD_VIDEO_AV1_MAX_TILE_ROWS 64U +#define STD_VIDEO_AV1_MAX_SEGMENTS 8U +#define STD_VIDEO_AV1_SEG_LVL_MAX 8U +#define STD_VIDEO_AV1_PRIMARY_REF_NONE 7U +#define STD_VIDEO_AV1_SELECT_INTEGER_MV 2U +#define STD_VIDEO_AV1_SELECT_SCREEN_CONTENT_TOOLS 2U +#define STD_VIDEO_AV1_SKIP_MODE_FRAMES 2U +#define STD_VIDEO_AV1_MAX_LOOP_FILTER_STRENGTHS 4U +#define STD_VIDEO_AV1_LOOP_FILTER_ADJUSTMENTS 2U +#define STD_VIDEO_AV1_MAX_CDEF_FILTER_STRENGTHS 8U +#define STD_VIDEO_AV1_MAX_NUM_PLANES 3U +#define STD_VIDEO_AV1_GLOBAL_MOTION_PARAMS 6U +#define STD_VIDEO_AV1_MAX_NUM_Y_POINTS 14U +#define STD_VIDEO_AV1_MAX_NUM_CB_POINTS 10U +#define STD_VIDEO_AV1_MAX_NUM_CR_POINTS 10U +#define STD_VIDEO_AV1_MAX_NUM_POS_LUMA 24U +#define STD_VIDEO_AV1_MAX_NUM_POS_CHROMA 25U + +typedef enum StdVideoAV1Profile { + STD_VIDEO_AV1_PROFILE_MAIN = 0, + STD_VIDEO_AV1_PROFILE_HIGH = 1, + STD_VIDEO_AV1_PROFILE_PROFESSIONAL = 2, + STD_VIDEO_AV1_PROFILE_INVALID = 0x7FFFFFFF, + STD_VIDEO_AV1_PROFILE_MAX_ENUM = 0x7FFFFFFF +} StdVideoAV1Profile; + +typedef enum StdVideoAV1Level { + STD_VIDEO_AV1_LEVEL_2_0 = 0, + STD_VIDEO_AV1_LEVEL_2_1 = 1, + STD_VIDEO_AV1_LEVEL_2_2 = 2, + STD_VIDEO_AV1_LEVEL_2_3 = 3, + STD_VIDEO_AV1_LEVEL_3_0 = 4, + STD_VIDEO_AV1_LEVEL_3_1 = 5, + STD_VIDEO_AV1_LEVEL_3_2 = 6, + STD_VIDEO_AV1_LEVEL_3_3 = 7, + STD_VIDEO_AV1_LEVEL_4_0 = 8, + STD_VIDEO_AV1_LEVEL_4_1 = 9, + STD_VIDEO_AV1_LEVEL_4_2 = 10, + STD_VIDEO_AV1_LEVEL_4_3 = 11, + STD_VIDEO_AV1_LEVEL_5_0 = 12, + STD_VIDEO_AV1_LEVEL_5_1 = 13, + STD_VIDEO_AV1_LEVEL_5_2 = 14, + STD_VIDEO_AV1_LEVEL_5_3 = 15, + STD_VIDEO_AV1_LEVEL_6_0 = 16, + STD_VIDEO_AV1_LEVEL_6_1 = 17, + STD_VIDEO_AV1_LEVEL_6_2 = 18, + STD_VIDEO_AV1_LEVEL_6_3 = 19, + STD_VIDEO_AV1_LEVEL_7_0 = 20, + STD_VIDEO_AV1_LEVEL_7_1 = 21, + STD_VIDEO_AV1_LEVEL_7_2 = 22, + STD_VIDEO_AV1_LEVEL_7_3 = 23, + STD_VIDEO_AV1_LEVEL_INVALID = 0x7FFFFFFF, + STD_VIDEO_AV1_LEVEL_MAX_ENUM = 0x7FFFFFFF +} StdVideoAV1Level; + +typedef enum StdVideoAV1FrameType { + STD_VIDEO_AV1_FRAME_TYPE_KEY = 0, + STD_VIDEO_AV1_FRAME_TYPE_INTER = 1, + STD_VIDEO_AV1_FRAME_TYPE_INTRA_ONLY = 2, + STD_VIDEO_AV1_FRAME_TYPE_SWITCH = 3, + STD_VIDEO_AV1_FRAME_TYPE_INVALID = 0x7FFFFFFF, + STD_VIDEO_AV1_FRAME_TYPE_MAX_ENUM = 0x7FFFFFFF +} StdVideoAV1FrameType; + +typedef enum StdVideoAV1ReferenceName { + STD_VIDEO_AV1_REFERENCE_NAME_INTRA_FRAME = 0, + STD_VIDEO_AV1_REFERENCE_NAME_LAST_FRAME = 1, + STD_VIDEO_AV1_REFERENCE_NAME_LAST2_FRAME = 2, + STD_VIDEO_AV1_REFERENCE_NAME_LAST3_FRAME = 3, + STD_VIDEO_AV1_REFERENCE_NAME_GOLDEN_FRAME = 4, + STD_VIDEO_AV1_REFERENCE_NAME_BWDREF_FRAME = 5, + STD_VIDEO_AV1_REFERENCE_NAME_ALTREF2_FRAME = 6, + STD_VIDEO_AV1_REFERENCE_NAME_ALTREF_FRAME = 7, + STD_VIDEO_AV1_REFERENCE_NAME_INVALID = 0x7FFFFFFF, + STD_VIDEO_AV1_REFERENCE_NAME_MAX_ENUM = 0x7FFFFFFF +} StdVideoAV1ReferenceName; + +typedef enum StdVideoAV1InterpolationFilter { + STD_VIDEO_AV1_INTERPOLATION_FILTER_EIGHTTAP = 0, + STD_VIDEO_AV1_INTERPOLATION_FILTER_EIGHTTAP_SMOOTH = 1, + STD_VIDEO_AV1_INTERPOLATION_FILTER_EIGHTTAP_SHARP = 2, + STD_VIDEO_AV1_INTERPOLATION_FILTER_BILINEAR = 3, + STD_VIDEO_AV1_INTERPOLATION_FILTER_SWITCHABLE = 4, + STD_VIDEO_AV1_INTERPOLATION_FILTER_INVALID = 0x7FFFFFFF, + STD_VIDEO_AV1_INTERPOLATION_FILTER_MAX_ENUM = 0x7FFFFFFF +} StdVideoAV1InterpolationFilter; + +typedef enum 
StdVideoAV1TxMode { + STD_VIDEO_AV1_TX_MODE_ONLY_4X4 = 0, + STD_VIDEO_AV1_TX_MODE_LARGEST = 1, + STD_VIDEO_AV1_TX_MODE_SELECT = 2, + STD_VIDEO_AV1_TX_MODE_INVALID = 0x7FFFFFFF, + STD_VIDEO_AV1_TX_MODE_MAX_ENUM = 0x7FFFFFFF +} StdVideoAV1TxMode; + +typedef enum StdVideoAV1FrameRestorationType { + STD_VIDEO_AV1_FRAME_RESTORATION_TYPE_NONE = 0, + STD_VIDEO_AV1_FRAME_RESTORATION_TYPE_WIENER = 1, + STD_VIDEO_AV1_FRAME_RESTORATION_TYPE_SGRPROJ = 2, + STD_VIDEO_AV1_FRAME_RESTORATION_TYPE_SWITCHABLE = 3, + STD_VIDEO_AV1_FRAME_RESTORATION_TYPE_INVALID = 0x7FFFFFFF, + STD_VIDEO_AV1_FRAME_RESTORATION_TYPE_MAX_ENUM = 0x7FFFFFFF +} StdVideoAV1FrameRestorationType; + +typedef enum StdVideoAV1ColorPrimaries { + STD_VIDEO_AV1_COLOR_PRIMARIES_BT_709 = 1, + STD_VIDEO_AV1_COLOR_PRIMARIES_UNSPECIFIED = 2, + STD_VIDEO_AV1_COLOR_PRIMARIES_BT_470_M = 4, + STD_VIDEO_AV1_COLOR_PRIMARIES_BT_470_B_G = 5, + STD_VIDEO_AV1_COLOR_PRIMARIES_BT_601 = 6, + STD_VIDEO_AV1_COLOR_PRIMARIES_SMPTE_240 = 7, + STD_VIDEO_AV1_COLOR_PRIMARIES_GENERIC_FILM = 8, + STD_VIDEO_AV1_COLOR_PRIMARIES_BT_2020 = 9, + STD_VIDEO_AV1_COLOR_PRIMARIES_XYZ = 10, + STD_VIDEO_AV1_COLOR_PRIMARIES_SMPTE_431 = 11, + STD_VIDEO_AV1_COLOR_PRIMARIES_SMPTE_432 = 12, + STD_VIDEO_AV1_COLOR_PRIMARIES_EBU_3213 = 22, + STD_VIDEO_AV1_COLOR_PRIMARIES_INVALID = 0x7FFFFFFF, + // STD_VIDEO_AV1_COLOR_PRIMARIES_BT_UNSPECIFIED is a legacy alias + STD_VIDEO_AV1_COLOR_PRIMARIES_BT_UNSPECIFIED = STD_VIDEO_AV1_COLOR_PRIMARIES_UNSPECIFIED, + STD_VIDEO_AV1_COLOR_PRIMARIES_MAX_ENUM = 0x7FFFFFFF +} StdVideoAV1ColorPrimaries; + +typedef enum StdVideoAV1TransferCharacteristics { + STD_VIDEO_AV1_TRANSFER_CHARACTERISTICS_RESERVED_0 = 0, + STD_VIDEO_AV1_TRANSFER_CHARACTERISTICS_BT_709 = 1, + STD_VIDEO_AV1_TRANSFER_CHARACTERISTICS_UNSPECIFIED = 2, + STD_VIDEO_AV1_TRANSFER_CHARACTERISTICS_RESERVED_3 = 3, + STD_VIDEO_AV1_TRANSFER_CHARACTERISTICS_BT_470_M = 4, + STD_VIDEO_AV1_TRANSFER_CHARACTERISTICS_BT_470_B_G = 5, + STD_VIDEO_AV1_TRANSFER_CHARACTERISTICS_BT_601 = 6, + STD_VIDEO_AV1_TRANSFER_CHARACTERISTICS_SMPTE_240 = 7, + STD_VIDEO_AV1_TRANSFER_CHARACTERISTICS_LINEAR = 8, + STD_VIDEO_AV1_TRANSFER_CHARACTERISTICS_LOG_100 = 9, + STD_VIDEO_AV1_TRANSFER_CHARACTERISTICS_LOG_100_SQRT10 = 10, + STD_VIDEO_AV1_TRANSFER_CHARACTERISTICS_IEC_61966 = 11, + STD_VIDEO_AV1_TRANSFER_CHARACTERISTICS_BT_1361 = 12, + STD_VIDEO_AV1_TRANSFER_CHARACTERISTICS_SRGB = 13, + STD_VIDEO_AV1_TRANSFER_CHARACTERISTICS_BT_2020_10_BIT = 14, + STD_VIDEO_AV1_TRANSFER_CHARACTERISTICS_BT_2020_12_BIT = 15, + STD_VIDEO_AV1_TRANSFER_CHARACTERISTICS_SMPTE_2084 = 16, + STD_VIDEO_AV1_TRANSFER_CHARACTERISTICS_SMPTE_428 = 17, + STD_VIDEO_AV1_TRANSFER_CHARACTERISTICS_HLG = 18, + STD_VIDEO_AV1_TRANSFER_CHARACTERISTICS_INVALID = 0x7FFFFFFF, + STD_VIDEO_AV1_TRANSFER_CHARACTERISTICS_MAX_ENUM = 0x7FFFFFFF +} StdVideoAV1TransferCharacteristics; + +typedef enum StdVideoAV1MatrixCoefficients { + STD_VIDEO_AV1_MATRIX_COEFFICIENTS_IDENTITY = 0, + STD_VIDEO_AV1_MATRIX_COEFFICIENTS_BT_709 = 1, + STD_VIDEO_AV1_MATRIX_COEFFICIENTS_UNSPECIFIED = 2, + STD_VIDEO_AV1_MATRIX_COEFFICIENTS_RESERVED_3 = 3, + STD_VIDEO_AV1_MATRIX_COEFFICIENTS_FCC = 4, + STD_VIDEO_AV1_MATRIX_COEFFICIENTS_BT_470_B_G = 5, + STD_VIDEO_AV1_MATRIX_COEFFICIENTS_BT_601 = 6, + STD_VIDEO_AV1_MATRIX_COEFFICIENTS_SMPTE_240 = 7, + STD_VIDEO_AV1_MATRIX_COEFFICIENTS_SMPTE_YCGCO = 8, + STD_VIDEO_AV1_MATRIX_COEFFICIENTS_BT_2020_NCL = 9, + STD_VIDEO_AV1_MATRIX_COEFFICIENTS_BT_2020_CL = 10, + STD_VIDEO_AV1_MATRIX_COEFFICIENTS_SMPTE_2085 = 11, + STD_VIDEO_AV1_MATRIX_COEFFICIENTS_CHROMAT_NCL 
= 12, + STD_VIDEO_AV1_MATRIX_COEFFICIENTS_CHROMAT_CL = 13, + STD_VIDEO_AV1_MATRIX_COEFFICIENTS_ICTCP = 14, + STD_VIDEO_AV1_MATRIX_COEFFICIENTS_INVALID = 0x7FFFFFFF, + STD_VIDEO_AV1_MATRIX_COEFFICIENTS_MAX_ENUM = 0x7FFFFFFF +} StdVideoAV1MatrixCoefficients; + +typedef enum StdVideoAV1ChromaSamplePosition { + STD_VIDEO_AV1_CHROMA_SAMPLE_POSITION_UNKNOWN = 0, + STD_VIDEO_AV1_CHROMA_SAMPLE_POSITION_VERTICAL = 1, + STD_VIDEO_AV1_CHROMA_SAMPLE_POSITION_COLOCATED = 2, + STD_VIDEO_AV1_CHROMA_SAMPLE_POSITION_RESERVED = 3, + STD_VIDEO_AV1_CHROMA_SAMPLE_POSITION_INVALID = 0x7FFFFFFF, + STD_VIDEO_AV1_CHROMA_SAMPLE_POSITION_MAX_ENUM = 0x7FFFFFFF +} StdVideoAV1ChromaSamplePosition; +typedef struct StdVideoAV1ColorConfigFlags { + uint32_t mono_chrome : 1; + uint32_t color_range : 1; + uint32_t separate_uv_delta_q : 1; + uint32_t color_description_present_flag : 1; + uint32_t reserved : 28; +} StdVideoAV1ColorConfigFlags; + +typedef struct StdVideoAV1ColorConfig { + StdVideoAV1ColorConfigFlags flags; + uint8_t BitDepth; + uint8_t subsampling_x; + uint8_t subsampling_y; + uint8_t reserved1; + StdVideoAV1ColorPrimaries color_primaries; + StdVideoAV1TransferCharacteristics transfer_characteristics; + StdVideoAV1MatrixCoefficients matrix_coefficients; + StdVideoAV1ChromaSamplePosition chroma_sample_position; +} StdVideoAV1ColorConfig; + +typedef struct StdVideoAV1TimingInfoFlags { + uint32_t equal_picture_interval : 1; + uint32_t reserved : 31; +} StdVideoAV1TimingInfoFlags; + +typedef struct StdVideoAV1TimingInfo { + StdVideoAV1TimingInfoFlags flags; + uint32_t num_units_in_display_tick; + uint32_t time_scale; + uint32_t num_ticks_per_picture_minus_1; +} StdVideoAV1TimingInfo; + +typedef struct StdVideoAV1LoopFilterFlags { + uint32_t loop_filter_delta_enabled : 1; + uint32_t loop_filter_delta_update : 1; + uint32_t reserved : 30; +} StdVideoAV1LoopFilterFlags; + +typedef struct StdVideoAV1LoopFilter { + StdVideoAV1LoopFilterFlags flags; + uint8_t loop_filter_level[STD_VIDEO_AV1_MAX_LOOP_FILTER_STRENGTHS]; + uint8_t loop_filter_sharpness; + uint8_t update_ref_delta; + int8_t loop_filter_ref_deltas[STD_VIDEO_AV1_TOTAL_REFS_PER_FRAME]; + uint8_t update_mode_delta; + int8_t loop_filter_mode_deltas[STD_VIDEO_AV1_LOOP_FILTER_ADJUSTMENTS]; +} StdVideoAV1LoopFilter; + +typedef struct StdVideoAV1QuantizationFlags { + uint32_t using_qmatrix : 1; + uint32_t diff_uv_delta : 1; + uint32_t reserved : 30; +} StdVideoAV1QuantizationFlags; + +typedef struct StdVideoAV1Quantization { + StdVideoAV1QuantizationFlags flags; + uint8_t base_q_idx; + int8_t DeltaQYDc; + int8_t DeltaQUDc; + int8_t DeltaQUAc; + int8_t DeltaQVDc; + int8_t DeltaQVAc; + uint8_t qm_y; + uint8_t qm_u; + uint8_t qm_v; +} StdVideoAV1Quantization; + +typedef struct StdVideoAV1Segmentation { + uint8_t FeatureEnabled[STD_VIDEO_AV1_MAX_SEGMENTS]; + int16_t FeatureData[STD_VIDEO_AV1_MAX_SEGMENTS][STD_VIDEO_AV1_SEG_LVL_MAX]; +} StdVideoAV1Segmentation; + +typedef struct StdVideoAV1TileInfoFlags { + uint32_t uniform_tile_spacing_flag : 1; + uint32_t reserved : 31; +} StdVideoAV1TileInfoFlags; + +typedef struct StdVideoAV1TileInfo { + StdVideoAV1TileInfoFlags flags; + uint8_t TileCols; + uint8_t TileRows; + uint16_t context_update_tile_id; + uint8_t tile_size_bytes_minus_1; + uint8_t reserved1[7]; + const uint16_t* pMiColStarts; + const uint16_t* pMiRowStarts; + const uint16_t* pWidthInSbsMinus1; + const uint16_t* pHeightInSbsMinus1; +} StdVideoAV1TileInfo; + +typedef struct StdVideoAV1CDEF { + uint8_t cdef_damping_minus_3; + uint8_t cdef_bits; + uint8_t 
cdef_y_pri_strength[STD_VIDEO_AV1_MAX_CDEF_FILTER_STRENGTHS]; + uint8_t cdef_y_sec_strength[STD_VIDEO_AV1_MAX_CDEF_FILTER_STRENGTHS]; + uint8_t cdef_uv_pri_strength[STD_VIDEO_AV1_MAX_CDEF_FILTER_STRENGTHS]; + uint8_t cdef_uv_sec_strength[STD_VIDEO_AV1_MAX_CDEF_FILTER_STRENGTHS]; +} StdVideoAV1CDEF; + +typedef struct StdVideoAV1LoopRestoration { + StdVideoAV1FrameRestorationType FrameRestorationType[STD_VIDEO_AV1_MAX_NUM_PLANES]; + uint16_t LoopRestorationSize[STD_VIDEO_AV1_MAX_NUM_PLANES]; +} StdVideoAV1LoopRestoration; + +typedef struct StdVideoAV1GlobalMotion { + uint8_t GmType[STD_VIDEO_AV1_NUM_REF_FRAMES]; + int32_t gm_params[STD_VIDEO_AV1_NUM_REF_FRAMES][STD_VIDEO_AV1_GLOBAL_MOTION_PARAMS]; +} StdVideoAV1GlobalMotion; + +typedef struct StdVideoAV1FilmGrainFlags { + uint32_t chroma_scaling_from_luma : 1; + uint32_t overlap_flag : 1; + uint32_t clip_to_restricted_range : 1; + uint32_t update_grain : 1; + uint32_t reserved : 28; +} StdVideoAV1FilmGrainFlags; + +typedef struct StdVideoAV1FilmGrain { + StdVideoAV1FilmGrainFlags flags; + uint8_t grain_scaling_minus_8; + uint8_t ar_coeff_lag; + uint8_t ar_coeff_shift_minus_6; + uint8_t grain_scale_shift; + uint16_t grain_seed; + uint8_t film_grain_params_ref_idx; + uint8_t num_y_points; + uint8_t point_y_value[STD_VIDEO_AV1_MAX_NUM_Y_POINTS]; + uint8_t point_y_scaling[STD_VIDEO_AV1_MAX_NUM_Y_POINTS]; + uint8_t num_cb_points; + uint8_t point_cb_value[STD_VIDEO_AV1_MAX_NUM_CB_POINTS]; + uint8_t point_cb_scaling[STD_VIDEO_AV1_MAX_NUM_CB_POINTS]; + uint8_t num_cr_points; + uint8_t point_cr_value[STD_VIDEO_AV1_MAX_NUM_CR_POINTS]; + uint8_t point_cr_scaling[STD_VIDEO_AV1_MAX_NUM_CR_POINTS]; + int8_t ar_coeffs_y_plus_128[STD_VIDEO_AV1_MAX_NUM_POS_LUMA]; + int8_t ar_coeffs_cb_plus_128[STD_VIDEO_AV1_MAX_NUM_POS_CHROMA]; + int8_t ar_coeffs_cr_plus_128[STD_VIDEO_AV1_MAX_NUM_POS_CHROMA]; + uint8_t cb_mult; + uint8_t cb_luma_mult; + uint16_t cb_offset; + uint8_t cr_mult; + uint8_t cr_luma_mult; + uint16_t cr_offset; +} StdVideoAV1FilmGrain; + +typedef struct StdVideoAV1SequenceHeaderFlags { + uint32_t still_picture : 1; + uint32_t reduced_still_picture_header : 1; + uint32_t use_128x128_superblock : 1; + uint32_t enable_filter_intra : 1; + uint32_t enable_intra_edge_filter : 1; + uint32_t enable_interintra_compound : 1; + uint32_t enable_masked_compound : 1; + uint32_t enable_warped_motion : 1; + uint32_t enable_dual_filter : 1; + uint32_t enable_order_hint : 1; + uint32_t enable_jnt_comp : 1; + uint32_t enable_ref_frame_mvs : 1; + uint32_t frame_id_numbers_present_flag : 1; + uint32_t enable_superres : 1; + uint32_t enable_cdef : 1; + uint32_t enable_restoration : 1; + uint32_t film_grain_params_present : 1; + uint32_t timing_info_present_flag : 1; + uint32_t initial_display_delay_present_flag : 1; + uint32_t reserved : 13; +} StdVideoAV1SequenceHeaderFlags; + +typedef struct StdVideoAV1SequenceHeader { + StdVideoAV1SequenceHeaderFlags flags; + StdVideoAV1Profile seq_profile; + uint8_t frame_width_bits_minus_1; + uint8_t frame_height_bits_minus_1; + uint16_t max_frame_width_minus_1; + uint16_t max_frame_height_minus_1; + uint8_t delta_frame_id_length_minus_2; + uint8_t additional_frame_id_length_minus_1; + uint8_t order_hint_bits_minus_1; + uint8_t seq_force_integer_mv; + uint8_t seq_force_screen_content_tools; + uint8_t reserved1[5]; + const StdVideoAV1ColorConfig* pColorConfig; + const StdVideoAV1TimingInfo* pTimingInfo; +} StdVideoAV1SequenceHeader; + + +#ifdef __cplusplus +} +#endif + +#endif diff --git 
a/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_av1std_decode.h b/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_av1std_decode.h new file mode 100644 index 0000000000..60bf2c0398 --- /dev/null +++ b/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_av1std_decode.h @@ -0,0 +1,109 @@ +#ifndef VULKAN_VIDEO_CODEC_AV1STD_DECODE_H_ +#define VULKAN_VIDEO_CODEC_AV1STD_DECODE_H_ 1 + +/* +** Copyright 2015-2026 The Khronos Group Inc. +** +** SPDX-License-Identifier: Apache-2.0 +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +// vulkan_video_codec_av1std_decode is a preprocessor guard. Do not pass it to API calls. +#define vulkan_video_codec_av1std_decode 1 +#include "vulkan_video_codec_av1std.h" + +#define VK_STD_VULKAN_VIDEO_CODEC_AV1_DECODE_API_VERSION_1_0_0 VK_MAKE_VIDEO_STD_VERSION(1, 0, 0) + +#define VK_STD_VULKAN_VIDEO_CODEC_AV1_DECODE_SPEC_VERSION VK_STD_VULKAN_VIDEO_CODEC_AV1_DECODE_API_VERSION_1_0_0 +#define VK_STD_VULKAN_VIDEO_CODEC_AV1_DECODE_EXTENSION_NAME "VK_STD_vulkan_video_codec_av1_decode" +typedef struct StdVideoDecodeAV1PictureInfoFlags { + uint32_t error_resilient_mode : 1; + uint32_t disable_cdf_update : 1; + uint32_t use_superres : 1; + uint32_t render_and_frame_size_different : 1; + uint32_t allow_screen_content_tools : 1; + uint32_t is_filter_switchable : 1; + uint32_t force_integer_mv : 1; + uint32_t frame_size_override_flag : 1; + uint32_t buffer_removal_time_present_flag : 1; + uint32_t allow_intrabc : 1; + uint32_t frame_refs_short_signaling : 1; + uint32_t allow_high_precision_mv : 1; + uint32_t is_motion_mode_switchable : 1; + uint32_t use_ref_frame_mvs : 1; + uint32_t disable_frame_end_update_cdf : 1; + uint32_t allow_warped_motion : 1; + uint32_t reduced_tx_set : 1; + uint32_t reference_select : 1; + uint32_t skip_mode_present : 1; + uint32_t delta_q_present : 1; + uint32_t delta_lf_present : 1; + uint32_t delta_lf_multi : 1; + uint32_t segmentation_enabled : 1; + uint32_t segmentation_update_map : 1; + uint32_t segmentation_temporal_update : 1; + uint32_t segmentation_update_data : 1; + uint32_t UsesLr : 1; + uint32_t usesChromaLr : 1; + uint32_t apply_grain : 1; + uint32_t reserved : 3; +} StdVideoDecodeAV1PictureInfoFlags; + +typedef struct StdVideoDecodeAV1PictureInfo { + StdVideoDecodeAV1PictureInfoFlags flags; + StdVideoAV1FrameType frame_type; + uint32_t current_frame_id; + uint8_t OrderHint; + uint8_t primary_ref_frame; + uint8_t refresh_frame_flags; + uint8_t reserved1; + StdVideoAV1InterpolationFilter interpolation_filter; + StdVideoAV1TxMode TxMode; + uint8_t delta_q_res; + uint8_t delta_lf_res; + uint8_t SkipModeFrame[STD_VIDEO_AV1_SKIP_MODE_FRAMES]; + uint8_t coded_denom; + uint8_t reserved2[3]; + uint8_t OrderHints[STD_VIDEO_AV1_NUM_REF_FRAMES]; + uint32_t expectedFrameId[STD_VIDEO_AV1_NUM_REF_FRAMES]; + const StdVideoAV1TileInfo* pTileInfo; + const StdVideoAV1Quantization* pQuantization; + const StdVideoAV1Segmentation* pSegmentation; + const StdVideoAV1LoopFilter* pLoopFilter; + const StdVideoAV1CDEF* pCDEF; + const StdVideoAV1LoopRestoration* pLoopRestoration; + const StdVideoAV1GlobalMotion* pGlobalMotion; + const StdVideoAV1FilmGrain* pFilmGrain; +} StdVideoDecodeAV1PictureInfo; + +typedef struct StdVideoDecodeAV1ReferenceInfoFlags { + uint32_t disable_frame_end_update_cdf : 1; + uint32_t segmentation_enabled : 1; + uint32_t reserved : 30; +} StdVideoDecodeAV1ReferenceInfoFlags; + +typedef struct StdVideoDecodeAV1ReferenceInfo { + 
StdVideoDecodeAV1ReferenceInfoFlags flags; + uint8_t frame_type; + uint8_t RefFrameSignBias; + uint8_t OrderHint; + uint8_t SavedOrderHints[STD_VIDEO_AV1_NUM_REF_FRAMES]; +} StdVideoDecodeAV1ReferenceInfo; + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_av1std_encode.h b/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_av1std_encode.h new file mode 100644 index 0000000000..3602fe1250 --- /dev/null +++ b/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_av1std_encode.h @@ -0,0 +1,143 @@ +#ifndef VULKAN_VIDEO_CODEC_AV1STD_ENCODE_H_ +#define VULKAN_VIDEO_CODEC_AV1STD_ENCODE_H_ 1 + +/* +** Copyright 2015-2026 The Khronos Group Inc. +** +** SPDX-License-Identifier: Apache-2.0 +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +// vulkan_video_codec_av1std_encode is a preprocessor guard. Do not pass it to API calls. +#define vulkan_video_codec_av1std_encode 1 +#include "vulkan_video_codec_av1std.h" + +#define VK_STD_VULKAN_VIDEO_CODEC_AV1_ENCODE_API_VERSION_1_0_0 VK_MAKE_VIDEO_STD_VERSION(1, 0, 0) + +#define VK_STD_VULKAN_VIDEO_CODEC_AV1_ENCODE_SPEC_VERSION VK_STD_VULKAN_VIDEO_CODEC_AV1_ENCODE_API_VERSION_1_0_0 +#define VK_STD_VULKAN_VIDEO_CODEC_AV1_ENCODE_EXTENSION_NAME "VK_STD_vulkan_video_codec_av1_encode" +typedef struct StdVideoEncodeAV1DecoderModelInfo { + uint8_t buffer_delay_length_minus_1; + uint8_t buffer_removal_time_length_minus_1; + uint8_t frame_presentation_time_length_minus_1; + uint8_t reserved1; + uint32_t num_units_in_decoding_tick; +} StdVideoEncodeAV1DecoderModelInfo; + +typedef struct StdVideoEncodeAV1ExtensionHeader { + uint8_t temporal_id; + uint8_t spatial_id; +} StdVideoEncodeAV1ExtensionHeader; + +typedef struct StdVideoEncodeAV1OperatingPointInfoFlags { + uint32_t decoder_model_present_for_this_op : 1; + uint32_t low_delay_mode_flag : 1; + uint32_t initial_display_delay_present_for_this_op : 1; + uint32_t reserved : 29; +} StdVideoEncodeAV1OperatingPointInfoFlags; + +typedef struct StdVideoEncodeAV1OperatingPointInfo { + StdVideoEncodeAV1OperatingPointInfoFlags flags; + uint16_t operating_point_idc; + uint8_t seq_level_idx; + uint8_t seq_tier; + uint32_t decoder_buffer_delay; + uint32_t encoder_buffer_delay; + uint8_t initial_display_delay_minus_1; +} StdVideoEncodeAV1OperatingPointInfo; + +typedef struct StdVideoEncodeAV1PictureInfoFlags { + uint32_t error_resilient_mode : 1; + uint32_t disable_cdf_update : 1; + uint32_t use_superres : 1; + uint32_t render_and_frame_size_different : 1; + uint32_t allow_screen_content_tools : 1; + uint32_t is_filter_switchable : 1; + uint32_t force_integer_mv : 1; + uint32_t frame_size_override_flag : 1; + uint32_t buffer_removal_time_present_flag : 1; + uint32_t allow_intrabc : 1; + uint32_t frame_refs_short_signaling : 1; + uint32_t allow_high_precision_mv : 1; + uint32_t is_motion_mode_switchable : 1; + uint32_t use_ref_frame_mvs : 1; + uint32_t disable_frame_end_update_cdf : 1; + uint32_t allow_warped_motion : 1; + uint32_t reduced_tx_set : 1; + uint32_t skip_mode_present : 1; + uint32_t delta_q_present : 1; + uint32_t delta_lf_present : 1; + uint32_t delta_lf_multi : 1; + uint32_t segmentation_enabled : 1; + uint32_t segmentation_update_map : 1; + uint32_t segmentation_temporal_update : 1; + uint32_t segmentation_update_data : 1; + uint32_t UsesLr : 1; + uint32_t usesChromaLr : 1; + uint32_t show_frame : 1; + uint32_t showable_frame : 1; + uint32_t reserved : 3; 
+} StdVideoEncodeAV1PictureInfoFlags; + +typedef struct StdVideoEncodeAV1PictureInfo { + StdVideoEncodeAV1PictureInfoFlags flags; + StdVideoAV1FrameType frame_type; + uint32_t frame_presentation_time; + uint32_t current_frame_id; + uint8_t order_hint; + uint8_t primary_ref_frame; + uint8_t refresh_frame_flags; + uint8_t coded_denom; + uint16_t render_width_minus_1; + uint16_t render_height_minus_1; + StdVideoAV1InterpolationFilter interpolation_filter; + StdVideoAV1TxMode TxMode; + uint8_t delta_q_res; + uint8_t delta_lf_res; + uint8_t ref_order_hint[STD_VIDEO_AV1_NUM_REF_FRAMES]; + int8_t ref_frame_idx[STD_VIDEO_AV1_REFS_PER_FRAME]; + uint8_t reserved1[3]; + uint32_t delta_frame_id_minus_1[STD_VIDEO_AV1_REFS_PER_FRAME]; + const StdVideoAV1TileInfo* pTileInfo; + const StdVideoAV1Quantization* pQuantization; + const StdVideoAV1Segmentation* pSegmentation; + const StdVideoAV1LoopFilter* pLoopFilter; + const StdVideoAV1CDEF* pCDEF; + const StdVideoAV1LoopRestoration* pLoopRestoration; + const StdVideoAV1GlobalMotion* pGlobalMotion; + const StdVideoEncodeAV1ExtensionHeader* pExtensionHeader; + const uint32_t* pBufferRemovalTimes; +} StdVideoEncodeAV1PictureInfo; + +typedef struct StdVideoEncodeAV1ReferenceInfoFlags { + uint32_t disable_frame_end_update_cdf : 1; + uint32_t segmentation_enabled : 1; + uint32_t reserved : 30; +} StdVideoEncodeAV1ReferenceInfoFlags; + +typedef struct StdVideoEncodeAV1ReferenceInfo { + StdVideoEncodeAV1ReferenceInfoFlags flags; + uint32_t RefFrameId; + StdVideoAV1FrameType frame_type; + uint8_t OrderHint; + uint8_t reserved1[3]; + const StdVideoEncodeAV1ExtensionHeader* pExtensionHeader; +} StdVideoEncodeAV1ReferenceInfo; + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_h264std.h b/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_h264std.h new file mode 100644 index 0000000000..48621b6954 --- /dev/null +++ b/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_h264std.h @@ -0,0 +1,312 @@ +#ifndef VULKAN_VIDEO_CODEC_H264STD_H_ +#define VULKAN_VIDEO_CODEC_H264STD_H_ 1 + +/* +** Copyright 2015-2026 The Khronos Group Inc. +** +** SPDX-License-Identifier: Apache-2.0 +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +// vulkan_video_codec_h264std is a preprocessor guard. Do not pass it to API calls. 
+#define vulkan_video_codec_h264std 1 +#include "vulkan_video_codecs_common.h" +#define STD_VIDEO_H264_CPB_CNT_LIST_SIZE 32U +#define STD_VIDEO_H264_SCALING_LIST_4X4_NUM_LISTS 6U +#define STD_VIDEO_H264_SCALING_LIST_4X4_NUM_ELEMENTS 16U +#define STD_VIDEO_H264_SCALING_LIST_8X8_NUM_LISTS 6U +#define STD_VIDEO_H264_SCALING_LIST_8X8_NUM_ELEMENTS 64U +#define STD_VIDEO_H264_MAX_NUM_LIST_REF 32U +#define STD_VIDEO_H264_MAX_CHROMA_PLANES 2U +#define STD_VIDEO_H264_NO_REFERENCE_PICTURE 0xFFU + +typedef enum StdVideoH264ChromaFormatIdc { + STD_VIDEO_H264_CHROMA_FORMAT_IDC_MONOCHROME = 0, + STD_VIDEO_H264_CHROMA_FORMAT_IDC_420 = 1, + STD_VIDEO_H264_CHROMA_FORMAT_IDC_422 = 2, + STD_VIDEO_H264_CHROMA_FORMAT_IDC_444 = 3, + STD_VIDEO_H264_CHROMA_FORMAT_IDC_INVALID = 0x7FFFFFFF, + STD_VIDEO_H264_CHROMA_FORMAT_IDC_MAX_ENUM = 0x7FFFFFFF +} StdVideoH264ChromaFormatIdc; + +typedef enum StdVideoH264ProfileIdc { + STD_VIDEO_H264_PROFILE_IDC_BASELINE = 66, + STD_VIDEO_H264_PROFILE_IDC_MAIN = 77, + STD_VIDEO_H264_PROFILE_IDC_HIGH = 100, + STD_VIDEO_H264_PROFILE_IDC_HIGH_444_PREDICTIVE = 244, + STD_VIDEO_H264_PROFILE_IDC_INVALID = 0x7FFFFFFF, + STD_VIDEO_H264_PROFILE_IDC_MAX_ENUM = 0x7FFFFFFF +} StdVideoH264ProfileIdc; + +typedef enum StdVideoH264LevelIdc { + STD_VIDEO_H264_LEVEL_IDC_1_0 = 0, + STD_VIDEO_H264_LEVEL_IDC_1_1 = 1, + STD_VIDEO_H264_LEVEL_IDC_1_2 = 2, + STD_VIDEO_H264_LEVEL_IDC_1_3 = 3, + STD_VIDEO_H264_LEVEL_IDC_2_0 = 4, + STD_VIDEO_H264_LEVEL_IDC_2_1 = 5, + STD_VIDEO_H264_LEVEL_IDC_2_2 = 6, + STD_VIDEO_H264_LEVEL_IDC_3_0 = 7, + STD_VIDEO_H264_LEVEL_IDC_3_1 = 8, + STD_VIDEO_H264_LEVEL_IDC_3_2 = 9, + STD_VIDEO_H264_LEVEL_IDC_4_0 = 10, + STD_VIDEO_H264_LEVEL_IDC_4_1 = 11, + STD_VIDEO_H264_LEVEL_IDC_4_2 = 12, + STD_VIDEO_H264_LEVEL_IDC_5_0 = 13, + STD_VIDEO_H264_LEVEL_IDC_5_1 = 14, + STD_VIDEO_H264_LEVEL_IDC_5_2 = 15, + STD_VIDEO_H264_LEVEL_IDC_6_0 = 16, + STD_VIDEO_H264_LEVEL_IDC_6_1 = 17, + STD_VIDEO_H264_LEVEL_IDC_6_2 = 18, + STD_VIDEO_H264_LEVEL_IDC_INVALID = 0x7FFFFFFF, + STD_VIDEO_H264_LEVEL_IDC_MAX_ENUM = 0x7FFFFFFF +} StdVideoH264LevelIdc; + +typedef enum StdVideoH264PocType { + STD_VIDEO_H264_POC_TYPE_0 = 0, + STD_VIDEO_H264_POC_TYPE_1 = 1, + STD_VIDEO_H264_POC_TYPE_2 = 2, + STD_VIDEO_H264_POC_TYPE_INVALID = 0x7FFFFFFF, + STD_VIDEO_H264_POC_TYPE_MAX_ENUM = 0x7FFFFFFF +} StdVideoH264PocType; + +typedef enum StdVideoH264AspectRatioIdc { + STD_VIDEO_H264_ASPECT_RATIO_IDC_UNSPECIFIED = 0, + STD_VIDEO_H264_ASPECT_RATIO_IDC_SQUARE = 1, + STD_VIDEO_H264_ASPECT_RATIO_IDC_12_11 = 2, + STD_VIDEO_H264_ASPECT_RATIO_IDC_10_11 = 3, + STD_VIDEO_H264_ASPECT_RATIO_IDC_16_11 = 4, + STD_VIDEO_H264_ASPECT_RATIO_IDC_40_33 = 5, + STD_VIDEO_H264_ASPECT_RATIO_IDC_24_11 = 6, + STD_VIDEO_H264_ASPECT_RATIO_IDC_20_11 = 7, + STD_VIDEO_H264_ASPECT_RATIO_IDC_32_11 = 8, + STD_VIDEO_H264_ASPECT_RATIO_IDC_80_33 = 9, + STD_VIDEO_H264_ASPECT_RATIO_IDC_18_11 = 10, + STD_VIDEO_H264_ASPECT_RATIO_IDC_15_11 = 11, + STD_VIDEO_H264_ASPECT_RATIO_IDC_64_33 = 12, + STD_VIDEO_H264_ASPECT_RATIO_IDC_160_99 = 13, + STD_VIDEO_H264_ASPECT_RATIO_IDC_4_3 = 14, + STD_VIDEO_H264_ASPECT_RATIO_IDC_3_2 = 15, + STD_VIDEO_H264_ASPECT_RATIO_IDC_2_1 = 16, + STD_VIDEO_H264_ASPECT_RATIO_IDC_EXTENDED_SAR = 255, + STD_VIDEO_H264_ASPECT_RATIO_IDC_INVALID = 0x7FFFFFFF, + STD_VIDEO_H264_ASPECT_RATIO_IDC_MAX_ENUM = 0x7FFFFFFF +} StdVideoH264AspectRatioIdc; + +typedef enum StdVideoH264WeightedBipredIdc { + STD_VIDEO_H264_WEIGHTED_BIPRED_IDC_DEFAULT = 0, + STD_VIDEO_H264_WEIGHTED_BIPRED_IDC_EXPLICIT = 1, + STD_VIDEO_H264_WEIGHTED_BIPRED_IDC_IMPLICIT = 2, + 
STD_VIDEO_H264_WEIGHTED_BIPRED_IDC_INVALID = 0x7FFFFFFF, + STD_VIDEO_H264_WEIGHTED_BIPRED_IDC_MAX_ENUM = 0x7FFFFFFF +} StdVideoH264WeightedBipredIdc; + +typedef enum StdVideoH264ModificationOfPicNumsIdc { + STD_VIDEO_H264_MODIFICATION_OF_PIC_NUMS_IDC_SHORT_TERM_SUBTRACT = 0, + STD_VIDEO_H264_MODIFICATION_OF_PIC_NUMS_IDC_SHORT_TERM_ADD = 1, + STD_VIDEO_H264_MODIFICATION_OF_PIC_NUMS_IDC_LONG_TERM = 2, + STD_VIDEO_H264_MODIFICATION_OF_PIC_NUMS_IDC_END = 3, + STD_VIDEO_H264_MODIFICATION_OF_PIC_NUMS_IDC_INVALID = 0x7FFFFFFF, + STD_VIDEO_H264_MODIFICATION_OF_PIC_NUMS_IDC_MAX_ENUM = 0x7FFFFFFF +} StdVideoH264ModificationOfPicNumsIdc; + +typedef enum StdVideoH264MemMgmtControlOp { + STD_VIDEO_H264_MEM_MGMT_CONTROL_OP_END = 0, + STD_VIDEO_H264_MEM_MGMT_CONTROL_OP_UNMARK_SHORT_TERM = 1, + STD_VIDEO_H264_MEM_MGMT_CONTROL_OP_UNMARK_LONG_TERM = 2, + STD_VIDEO_H264_MEM_MGMT_CONTROL_OP_MARK_LONG_TERM = 3, + STD_VIDEO_H264_MEM_MGMT_CONTROL_OP_SET_MAX_LONG_TERM_INDEX = 4, + STD_VIDEO_H264_MEM_MGMT_CONTROL_OP_UNMARK_ALL = 5, + STD_VIDEO_H264_MEM_MGMT_CONTROL_OP_MARK_CURRENT_AS_LONG_TERM = 6, + STD_VIDEO_H264_MEM_MGMT_CONTROL_OP_INVALID = 0x7FFFFFFF, + STD_VIDEO_H264_MEM_MGMT_CONTROL_OP_MAX_ENUM = 0x7FFFFFFF +} StdVideoH264MemMgmtControlOp; + +typedef enum StdVideoH264CabacInitIdc { + STD_VIDEO_H264_CABAC_INIT_IDC_0 = 0, + STD_VIDEO_H264_CABAC_INIT_IDC_1 = 1, + STD_VIDEO_H264_CABAC_INIT_IDC_2 = 2, + STD_VIDEO_H264_CABAC_INIT_IDC_INVALID = 0x7FFFFFFF, + STD_VIDEO_H264_CABAC_INIT_IDC_MAX_ENUM = 0x7FFFFFFF +} StdVideoH264CabacInitIdc; + +typedef enum StdVideoH264DisableDeblockingFilterIdc { + STD_VIDEO_H264_DISABLE_DEBLOCKING_FILTER_IDC_DISABLED = 0, + STD_VIDEO_H264_DISABLE_DEBLOCKING_FILTER_IDC_ENABLED = 1, + STD_VIDEO_H264_DISABLE_DEBLOCKING_FILTER_IDC_PARTIAL = 2, + STD_VIDEO_H264_DISABLE_DEBLOCKING_FILTER_IDC_INVALID = 0x7FFFFFFF, + STD_VIDEO_H264_DISABLE_DEBLOCKING_FILTER_IDC_MAX_ENUM = 0x7FFFFFFF +} StdVideoH264DisableDeblockingFilterIdc; + +typedef enum StdVideoH264SliceType { + STD_VIDEO_H264_SLICE_TYPE_P = 0, + STD_VIDEO_H264_SLICE_TYPE_B = 1, + STD_VIDEO_H264_SLICE_TYPE_I = 2, + STD_VIDEO_H264_SLICE_TYPE_INVALID = 0x7FFFFFFF, + STD_VIDEO_H264_SLICE_TYPE_MAX_ENUM = 0x7FFFFFFF +} StdVideoH264SliceType; + +typedef enum StdVideoH264PictureType { + STD_VIDEO_H264_PICTURE_TYPE_P = 0, + STD_VIDEO_H264_PICTURE_TYPE_B = 1, + STD_VIDEO_H264_PICTURE_TYPE_I = 2, + STD_VIDEO_H264_PICTURE_TYPE_IDR = 5, + STD_VIDEO_H264_PICTURE_TYPE_INVALID = 0x7FFFFFFF, + STD_VIDEO_H264_PICTURE_TYPE_MAX_ENUM = 0x7FFFFFFF +} StdVideoH264PictureType; + +typedef enum StdVideoH264NonVclNaluType { + STD_VIDEO_H264_NON_VCL_NALU_TYPE_SPS = 0, + STD_VIDEO_H264_NON_VCL_NALU_TYPE_PPS = 1, + STD_VIDEO_H264_NON_VCL_NALU_TYPE_AUD = 2, + STD_VIDEO_H264_NON_VCL_NALU_TYPE_PREFIX = 3, + STD_VIDEO_H264_NON_VCL_NALU_TYPE_END_OF_SEQUENCE = 4, + STD_VIDEO_H264_NON_VCL_NALU_TYPE_END_OF_STREAM = 5, + STD_VIDEO_H264_NON_VCL_NALU_TYPE_PRECODED = 6, + STD_VIDEO_H264_NON_VCL_NALU_TYPE_INVALID = 0x7FFFFFFF, + STD_VIDEO_H264_NON_VCL_NALU_TYPE_MAX_ENUM = 0x7FFFFFFF +} StdVideoH264NonVclNaluType; +typedef struct StdVideoH264SpsVuiFlags { + uint32_t aspect_ratio_info_present_flag : 1; + uint32_t overscan_info_present_flag : 1; + uint32_t overscan_appropriate_flag : 1; + uint32_t video_signal_type_present_flag : 1; + uint32_t video_full_range_flag : 1; + uint32_t color_description_present_flag : 1; + uint32_t chroma_loc_info_present_flag : 1; + uint32_t timing_info_present_flag : 1; + uint32_t fixed_frame_rate_flag : 1; + uint32_t bitstream_restriction_flag : 1; 
+ uint32_t nal_hrd_parameters_present_flag : 1; + uint32_t vcl_hrd_parameters_present_flag : 1; +} StdVideoH264SpsVuiFlags; + +typedef struct StdVideoH264HrdParameters { + uint8_t cpb_cnt_minus1; + uint8_t bit_rate_scale; + uint8_t cpb_size_scale; + uint8_t reserved1; + uint32_t bit_rate_value_minus1[STD_VIDEO_H264_CPB_CNT_LIST_SIZE]; + uint32_t cpb_size_value_minus1[STD_VIDEO_H264_CPB_CNT_LIST_SIZE]; + uint8_t cbr_flag[STD_VIDEO_H264_CPB_CNT_LIST_SIZE]; + uint32_t initial_cpb_removal_delay_length_minus1; + uint32_t cpb_removal_delay_length_minus1; + uint32_t dpb_output_delay_length_minus1; + uint32_t time_offset_length; +} StdVideoH264HrdParameters; + +typedef struct StdVideoH264SequenceParameterSetVui { + StdVideoH264SpsVuiFlags flags; + StdVideoH264AspectRatioIdc aspect_ratio_idc; + uint16_t sar_width; + uint16_t sar_height; + uint8_t video_format; + uint8_t colour_primaries; + uint8_t transfer_characteristics; + uint8_t matrix_coefficients; + uint32_t num_units_in_tick; + uint32_t time_scale; + uint8_t max_num_reorder_frames; + uint8_t max_dec_frame_buffering; + uint8_t chroma_sample_loc_type_top_field; + uint8_t chroma_sample_loc_type_bottom_field; + uint32_t reserved1; + const StdVideoH264HrdParameters* pHrdParameters; +} StdVideoH264SequenceParameterSetVui; + +typedef struct StdVideoH264SpsFlags { + uint32_t constraint_set0_flag : 1; + uint32_t constraint_set1_flag : 1; + uint32_t constraint_set2_flag : 1; + uint32_t constraint_set3_flag : 1; + uint32_t constraint_set4_flag : 1; + uint32_t constraint_set5_flag : 1; + uint32_t direct_8x8_inference_flag : 1; + uint32_t mb_adaptive_frame_field_flag : 1; + uint32_t frame_mbs_only_flag : 1; + uint32_t delta_pic_order_always_zero_flag : 1; + uint32_t separate_colour_plane_flag : 1; + uint32_t gaps_in_frame_num_value_allowed_flag : 1; + uint32_t qpprime_y_zero_transform_bypass_flag : 1; + uint32_t frame_cropping_flag : 1; + uint32_t seq_scaling_matrix_present_flag : 1; + uint32_t vui_parameters_present_flag : 1; +} StdVideoH264SpsFlags; + +typedef struct StdVideoH264ScalingLists { + uint16_t scaling_list_present_mask; + uint16_t use_default_scaling_matrix_mask; + uint8_t ScalingList4x4[STD_VIDEO_H264_SCALING_LIST_4X4_NUM_LISTS][STD_VIDEO_H264_SCALING_LIST_4X4_NUM_ELEMENTS]; + uint8_t ScalingList8x8[STD_VIDEO_H264_SCALING_LIST_8X8_NUM_LISTS][STD_VIDEO_H264_SCALING_LIST_8X8_NUM_ELEMENTS]; +} StdVideoH264ScalingLists; + +typedef struct StdVideoH264SequenceParameterSet { + StdVideoH264SpsFlags flags; + StdVideoH264ProfileIdc profile_idc; + StdVideoH264LevelIdc level_idc; + StdVideoH264ChromaFormatIdc chroma_format_idc; + uint8_t seq_parameter_set_id; + uint8_t bit_depth_luma_minus8; + uint8_t bit_depth_chroma_minus8; + uint8_t log2_max_frame_num_minus4; + StdVideoH264PocType pic_order_cnt_type; + int32_t offset_for_non_ref_pic; + int32_t offset_for_top_to_bottom_field; + uint8_t log2_max_pic_order_cnt_lsb_minus4; + uint8_t num_ref_frames_in_pic_order_cnt_cycle; + uint8_t max_num_ref_frames; + uint8_t reserved1; + uint32_t pic_width_in_mbs_minus1; + uint32_t pic_height_in_map_units_minus1; + uint32_t frame_crop_left_offset; + uint32_t frame_crop_right_offset; + uint32_t frame_crop_top_offset; + uint32_t frame_crop_bottom_offset; + uint32_t reserved2; + const int32_t* pOffsetForRefFrame; + const StdVideoH264ScalingLists* pScalingLists; + const StdVideoH264SequenceParameterSetVui* pSequenceParameterSetVui; +} StdVideoH264SequenceParameterSet; + +typedef struct StdVideoH264PpsFlags { + uint32_t transform_8x8_mode_flag : 1; + uint32_t 
redundant_pic_cnt_present_flag : 1; + uint32_t constrained_intra_pred_flag : 1; + uint32_t deblocking_filter_control_present_flag : 1; + uint32_t weighted_pred_flag : 1; + uint32_t bottom_field_pic_order_in_frame_present_flag : 1; + uint32_t entropy_coding_mode_flag : 1; + uint32_t pic_scaling_matrix_present_flag : 1; +} StdVideoH264PpsFlags; + +typedef struct StdVideoH264PictureParameterSet { + StdVideoH264PpsFlags flags; + uint8_t seq_parameter_set_id; + uint8_t pic_parameter_set_id; + uint8_t num_ref_idx_l0_default_active_minus1; + uint8_t num_ref_idx_l1_default_active_minus1; + StdVideoH264WeightedBipredIdc weighted_bipred_idc; + int8_t pic_init_qp_minus26; + int8_t pic_init_qs_minus26; + int8_t chroma_qp_index_offset; + int8_t second_chroma_qp_index_offset; + const StdVideoH264ScalingLists* pScalingLists; +} StdVideoH264PictureParameterSet; + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_h264std_decode.h b/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_h264std_decode.h new file mode 100644 index 0000000000..a6bfe9d822 --- /dev/null +++ b/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_h264std_decode.h @@ -0,0 +1,77 @@ +#ifndef VULKAN_VIDEO_CODEC_H264STD_DECODE_H_ +#define VULKAN_VIDEO_CODEC_H264STD_DECODE_H_ 1 + +/* +** Copyright 2015-2026 The Khronos Group Inc. +** +** SPDX-License-Identifier: Apache-2.0 +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +// vulkan_video_codec_h264std_decode is a preprocessor guard. Do not pass it to API calls. +#define vulkan_video_codec_h264std_decode 1 +#include "vulkan_video_codec_h264std.h" + +#define VK_STD_VULKAN_VIDEO_CODEC_H264_DECODE_API_VERSION_1_0_0 VK_MAKE_VIDEO_STD_VERSION(1, 0, 0) + +#define VK_STD_VULKAN_VIDEO_CODEC_H264_DECODE_SPEC_VERSION VK_STD_VULKAN_VIDEO_CODEC_H264_DECODE_API_VERSION_1_0_0 +#define VK_STD_VULKAN_VIDEO_CODEC_H264_DECODE_EXTENSION_NAME "VK_STD_vulkan_video_codec_h264_decode" +#define STD_VIDEO_DECODE_H264_FIELD_ORDER_COUNT_LIST_SIZE 2U + +typedef enum StdVideoDecodeH264FieldOrderCount { + STD_VIDEO_DECODE_H264_FIELD_ORDER_COUNT_TOP = 0, + STD_VIDEO_DECODE_H264_FIELD_ORDER_COUNT_BOTTOM = 1, + STD_VIDEO_DECODE_H264_FIELD_ORDER_COUNT_INVALID = 0x7FFFFFFF, + STD_VIDEO_DECODE_H264_FIELD_ORDER_COUNT_MAX_ENUM = 0x7FFFFFFF +} StdVideoDecodeH264FieldOrderCount; +typedef struct StdVideoDecodeH264PictureInfoFlags { + uint32_t field_pic_flag : 1; + uint32_t is_intra : 1; + uint32_t IdrPicFlag : 1; + uint32_t bottom_field_flag : 1; + uint32_t is_reference : 1; + uint32_t complementary_field_pair : 1; +} StdVideoDecodeH264PictureInfoFlags; + +typedef struct StdVideoDecodeH264PictureInfo { + StdVideoDecodeH264PictureInfoFlags flags; + uint8_t seq_parameter_set_id; + uint8_t pic_parameter_set_id; + uint8_t reserved1; + uint8_t reserved2; + uint16_t frame_num; + uint16_t idr_pic_id; + int32_t PicOrderCnt[STD_VIDEO_DECODE_H264_FIELD_ORDER_COUNT_LIST_SIZE]; +} StdVideoDecodeH264PictureInfo; + +typedef struct StdVideoDecodeH264ReferenceInfoFlags { + uint32_t top_field_flag : 1; + uint32_t bottom_field_flag : 1; + uint32_t used_for_long_term_reference : 1; + uint32_t is_non_existing : 1; +} StdVideoDecodeH264ReferenceInfoFlags; + +typedef struct StdVideoDecodeH264ReferenceInfo { + StdVideoDecodeH264ReferenceInfoFlags flags; + uint16_t FrameNum; + uint16_t reserved; + int32_t PicOrderCnt[STD_VIDEO_DECODE_H264_FIELD_ORDER_COUNT_LIST_SIZE]; +} 
StdVideoDecodeH264ReferenceInfo; + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_h264std_encode.h b/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_h264std_encode.h new file mode 100644 index 0000000000..2d42ac321b --- /dev/null +++ b/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_h264std_encode.h @@ -0,0 +1,147 @@ +#ifndef VULKAN_VIDEO_CODEC_H264STD_ENCODE_H_ +#define VULKAN_VIDEO_CODEC_H264STD_ENCODE_H_ 1 + +/* +** Copyright 2015-2026 The Khronos Group Inc. +** +** SPDX-License-Identifier: Apache-2.0 +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +// vulkan_video_codec_h264std_encode is a preprocessor guard. Do not pass it to API calls. +#define vulkan_video_codec_h264std_encode 1 +#include "vulkan_video_codec_h264std.h" + +#define VK_STD_VULKAN_VIDEO_CODEC_H264_ENCODE_API_VERSION_1_0_0 VK_MAKE_VIDEO_STD_VERSION(1, 0, 0) + +#define VK_STD_VULKAN_VIDEO_CODEC_H264_ENCODE_SPEC_VERSION VK_STD_VULKAN_VIDEO_CODEC_H264_ENCODE_API_VERSION_1_0_0 +#define VK_STD_VULKAN_VIDEO_CODEC_H264_ENCODE_EXTENSION_NAME "VK_STD_vulkan_video_codec_h264_encode" +typedef struct StdVideoEncodeH264WeightTableFlags { + uint32_t luma_weight_l0_flag; + uint32_t chroma_weight_l0_flag; + uint32_t luma_weight_l1_flag; + uint32_t chroma_weight_l1_flag; +} StdVideoEncodeH264WeightTableFlags; + +typedef struct StdVideoEncodeH264WeightTable { + StdVideoEncodeH264WeightTableFlags flags; + uint8_t luma_log2_weight_denom; + uint8_t chroma_log2_weight_denom; + int8_t luma_weight_l0[STD_VIDEO_H264_MAX_NUM_LIST_REF]; + int8_t luma_offset_l0[STD_VIDEO_H264_MAX_NUM_LIST_REF]; + int8_t chroma_weight_l0[STD_VIDEO_H264_MAX_NUM_LIST_REF][STD_VIDEO_H264_MAX_CHROMA_PLANES]; + int8_t chroma_offset_l0[STD_VIDEO_H264_MAX_NUM_LIST_REF][STD_VIDEO_H264_MAX_CHROMA_PLANES]; + int8_t luma_weight_l1[STD_VIDEO_H264_MAX_NUM_LIST_REF]; + int8_t luma_offset_l1[STD_VIDEO_H264_MAX_NUM_LIST_REF]; + int8_t chroma_weight_l1[STD_VIDEO_H264_MAX_NUM_LIST_REF][STD_VIDEO_H264_MAX_CHROMA_PLANES]; + int8_t chroma_offset_l1[STD_VIDEO_H264_MAX_NUM_LIST_REF][STD_VIDEO_H264_MAX_CHROMA_PLANES]; +} StdVideoEncodeH264WeightTable; + +typedef struct StdVideoEncodeH264SliceHeaderFlags { + uint32_t direct_spatial_mv_pred_flag : 1; + uint32_t num_ref_idx_active_override_flag : 1; + uint32_t reserved : 30; +} StdVideoEncodeH264SliceHeaderFlags; + +typedef struct StdVideoEncodeH264PictureInfoFlags { + uint32_t IdrPicFlag : 1; + uint32_t is_reference : 1; + uint32_t no_output_of_prior_pics_flag : 1; + uint32_t long_term_reference_flag : 1; + uint32_t adaptive_ref_pic_marking_mode_flag : 1; + uint32_t reserved : 27; +} StdVideoEncodeH264PictureInfoFlags; + +typedef struct StdVideoEncodeH264ReferenceInfoFlags { + uint32_t used_for_long_term_reference : 1; + uint32_t reserved : 31; +} StdVideoEncodeH264ReferenceInfoFlags; + +typedef struct StdVideoEncodeH264ReferenceListsInfoFlags { + uint32_t ref_pic_list_modification_flag_l0 : 1; + uint32_t ref_pic_list_modification_flag_l1 : 1; + uint32_t reserved : 30; +} StdVideoEncodeH264ReferenceListsInfoFlags; + +typedef struct StdVideoEncodeH264RefListModEntry { + StdVideoH264ModificationOfPicNumsIdc modification_of_pic_nums_idc; + uint16_t abs_diff_pic_num_minus1; + uint16_t long_term_pic_num; +} StdVideoEncodeH264RefListModEntry; + +typedef struct StdVideoEncodeH264RefPicMarkingEntry { + StdVideoH264MemMgmtControlOp memory_management_control_operation; + uint16_t 
difference_of_pic_nums_minus1; + uint16_t long_term_pic_num; + uint16_t long_term_frame_idx; + uint16_t max_long_term_frame_idx_plus1; +} StdVideoEncodeH264RefPicMarkingEntry; + +typedef struct StdVideoEncodeH264ReferenceListsInfo { + StdVideoEncodeH264ReferenceListsInfoFlags flags; + uint8_t num_ref_idx_l0_active_minus1; + uint8_t num_ref_idx_l1_active_minus1; + uint8_t RefPicList0[STD_VIDEO_H264_MAX_NUM_LIST_REF]; + uint8_t RefPicList1[STD_VIDEO_H264_MAX_NUM_LIST_REF]; + uint8_t refList0ModOpCount; + uint8_t refList1ModOpCount; + uint8_t refPicMarkingOpCount; + uint8_t reserved1[7]; + const StdVideoEncodeH264RefListModEntry* pRefList0ModOperations; + const StdVideoEncodeH264RefListModEntry* pRefList1ModOperations; + const StdVideoEncodeH264RefPicMarkingEntry* pRefPicMarkingOperations; +} StdVideoEncodeH264ReferenceListsInfo; + +typedef struct StdVideoEncodeH264PictureInfo { + StdVideoEncodeH264PictureInfoFlags flags; + uint8_t seq_parameter_set_id; + uint8_t pic_parameter_set_id; + uint16_t idr_pic_id; + StdVideoH264PictureType primary_pic_type; + uint32_t frame_num; + int32_t PicOrderCnt; + uint8_t temporal_id; + uint8_t reserved1[3]; + const StdVideoEncodeH264ReferenceListsInfo* pRefLists; +} StdVideoEncodeH264PictureInfo; + +typedef struct StdVideoEncodeH264ReferenceInfo { + StdVideoEncodeH264ReferenceInfoFlags flags; + StdVideoH264PictureType primary_pic_type; + uint32_t FrameNum; + int32_t PicOrderCnt; + uint16_t long_term_pic_num; + uint16_t long_term_frame_idx; + uint8_t temporal_id; +} StdVideoEncodeH264ReferenceInfo; + +typedef struct StdVideoEncodeH264SliceHeader { + StdVideoEncodeH264SliceHeaderFlags flags; + uint32_t first_mb_in_slice; + StdVideoH264SliceType slice_type; + int8_t slice_alpha_c0_offset_div2; + int8_t slice_beta_offset_div2; + int8_t slice_qp_delta; + uint8_t reserved1; + StdVideoH264CabacInitIdc cabac_init_idc; + StdVideoH264DisableDeblockingFilterIdc disable_deblocking_filter_idc; + const StdVideoEncodeH264WeightTable* pWeightTable; +} StdVideoEncodeH264SliceHeader; + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_h265std.h b/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_h265std.h new file mode 100644 index 0000000000..23617b8f96 --- /dev/null +++ b/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_h265std.h @@ -0,0 +1,446 @@ +#ifndef VULKAN_VIDEO_CODEC_H265STD_H_ +#define VULKAN_VIDEO_CODEC_H265STD_H_ 1 + +/* +** Copyright 2015-2026 The Khronos Group Inc. +** +** SPDX-License-Identifier: Apache-2.0 +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +// vulkan_video_codec_h265std is a preprocessor guard. Do not pass it to API calls. 
+#define vulkan_video_codec_h265std 1 +#include "vulkan_video_codecs_common.h" +#define STD_VIDEO_H265_CPB_CNT_LIST_SIZE 32U +#define STD_VIDEO_H265_SUBLAYERS_LIST_SIZE 7U +#define STD_VIDEO_H265_SCALING_LIST_4X4_NUM_LISTS 6U +#define STD_VIDEO_H265_SCALING_LIST_4X4_NUM_ELEMENTS 16U +#define STD_VIDEO_H265_SCALING_LIST_8X8_NUM_LISTS 6U +#define STD_VIDEO_H265_SCALING_LIST_8X8_NUM_ELEMENTS 64U +#define STD_VIDEO_H265_SCALING_LIST_16X16_NUM_LISTS 6U +#define STD_VIDEO_H265_SCALING_LIST_16X16_NUM_ELEMENTS 64U +#define STD_VIDEO_H265_SCALING_LIST_32X32_NUM_LISTS 2U +#define STD_VIDEO_H265_SCALING_LIST_32X32_NUM_ELEMENTS 64U +#define STD_VIDEO_H265_CHROMA_QP_OFFSET_LIST_SIZE 6U +#define STD_VIDEO_H265_CHROMA_QP_OFFSET_TILE_COLS_LIST_SIZE 19U +#define STD_VIDEO_H265_CHROMA_QP_OFFSET_TILE_ROWS_LIST_SIZE 21U +#define STD_VIDEO_H265_PREDICTOR_PALETTE_COMPONENTS_LIST_SIZE 3U +#define STD_VIDEO_H265_PREDICTOR_PALETTE_COMP_ENTRIES_LIST_SIZE 128U +#define STD_VIDEO_H265_MAX_NUM_LIST_REF 15U +#define STD_VIDEO_H265_MAX_CHROMA_PLANES 2U +#define STD_VIDEO_H265_MAX_SHORT_TERM_REF_PIC_SETS 64U +#define STD_VIDEO_H265_MAX_DPB_SIZE 16U +#define STD_VIDEO_H265_MAX_LONG_TERM_REF_PICS_SPS 32U +#define STD_VIDEO_H265_MAX_LONG_TERM_PICS 16U +#define STD_VIDEO_H265_MAX_DELTA_POC 48U +#define STD_VIDEO_H265_NO_REFERENCE_PICTURE 0xFFU + +typedef enum StdVideoH265ChromaFormatIdc { + STD_VIDEO_H265_CHROMA_FORMAT_IDC_MONOCHROME = 0, + STD_VIDEO_H265_CHROMA_FORMAT_IDC_420 = 1, + STD_VIDEO_H265_CHROMA_FORMAT_IDC_422 = 2, + STD_VIDEO_H265_CHROMA_FORMAT_IDC_444 = 3, + STD_VIDEO_H265_CHROMA_FORMAT_IDC_INVALID = 0x7FFFFFFF, + STD_VIDEO_H265_CHROMA_FORMAT_IDC_MAX_ENUM = 0x7FFFFFFF +} StdVideoH265ChromaFormatIdc; + +typedef enum StdVideoH265ProfileIdc { + STD_VIDEO_H265_PROFILE_IDC_MAIN = 1, + STD_VIDEO_H265_PROFILE_IDC_MAIN_10 = 2, + STD_VIDEO_H265_PROFILE_IDC_MAIN_STILL_PICTURE = 3, + STD_VIDEO_H265_PROFILE_IDC_FORMAT_RANGE_EXTENSIONS = 4, + STD_VIDEO_H265_PROFILE_IDC_SCC_EXTENSIONS = 9, + STD_VIDEO_H265_PROFILE_IDC_INVALID = 0x7FFFFFFF, + STD_VIDEO_H265_PROFILE_IDC_MAX_ENUM = 0x7FFFFFFF +} StdVideoH265ProfileIdc; + +typedef enum StdVideoH265LevelIdc { + STD_VIDEO_H265_LEVEL_IDC_1_0 = 0, + STD_VIDEO_H265_LEVEL_IDC_2_0 = 1, + STD_VIDEO_H265_LEVEL_IDC_2_1 = 2, + STD_VIDEO_H265_LEVEL_IDC_3_0 = 3, + STD_VIDEO_H265_LEVEL_IDC_3_1 = 4, + STD_VIDEO_H265_LEVEL_IDC_4_0 = 5, + STD_VIDEO_H265_LEVEL_IDC_4_1 = 6, + STD_VIDEO_H265_LEVEL_IDC_5_0 = 7, + STD_VIDEO_H265_LEVEL_IDC_5_1 = 8, + STD_VIDEO_H265_LEVEL_IDC_5_2 = 9, + STD_VIDEO_H265_LEVEL_IDC_6_0 = 10, + STD_VIDEO_H265_LEVEL_IDC_6_1 = 11, + STD_VIDEO_H265_LEVEL_IDC_6_2 = 12, + STD_VIDEO_H265_LEVEL_IDC_INVALID = 0x7FFFFFFF, + STD_VIDEO_H265_LEVEL_IDC_MAX_ENUM = 0x7FFFFFFF +} StdVideoH265LevelIdc; + +typedef enum StdVideoH265SliceType { + STD_VIDEO_H265_SLICE_TYPE_B = 0, + STD_VIDEO_H265_SLICE_TYPE_P = 1, + STD_VIDEO_H265_SLICE_TYPE_I = 2, + STD_VIDEO_H265_SLICE_TYPE_INVALID = 0x7FFFFFFF, + STD_VIDEO_H265_SLICE_TYPE_MAX_ENUM = 0x7FFFFFFF +} StdVideoH265SliceType; + +typedef enum StdVideoH265PictureType { + STD_VIDEO_H265_PICTURE_TYPE_P = 0, + STD_VIDEO_H265_PICTURE_TYPE_B = 1, + STD_VIDEO_H265_PICTURE_TYPE_I = 2, + STD_VIDEO_H265_PICTURE_TYPE_IDR = 3, + STD_VIDEO_H265_PICTURE_TYPE_INVALID = 0x7FFFFFFF, + STD_VIDEO_H265_PICTURE_TYPE_MAX_ENUM = 0x7FFFFFFF +} StdVideoH265PictureType; + +typedef enum StdVideoH265AspectRatioIdc { + STD_VIDEO_H265_ASPECT_RATIO_IDC_UNSPECIFIED = 0, + STD_VIDEO_H265_ASPECT_RATIO_IDC_SQUARE = 1, + STD_VIDEO_H265_ASPECT_RATIO_IDC_12_11 = 2, + 
STD_VIDEO_H265_ASPECT_RATIO_IDC_10_11 = 3, + STD_VIDEO_H265_ASPECT_RATIO_IDC_16_11 = 4, + STD_VIDEO_H265_ASPECT_RATIO_IDC_40_33 = 5, + STD_VIDEO_H265_ASPECT_RATIO_IDC_24_11 = 6, + STD_VIDEO_H265_ASPECT_RATIO_IDC_20_11 = 7, + STD_VIDEO_H265_ASPECT_RATIO_IDC_32_11 = 8, + STD_VIDEO_H265_ASPECT_RATIO_IDC_80_33 = 9, + STD_VIDEO_H265_ASPECT_RATIO_IDC_18_11 = 10, + STD_VIDEO_H265_ASPECT_RATIO_IDC_15_11 = 11, + STD_VIDEO_H265_ASPECT_RATIO_IDC_64_33 = 12, + STD_VIDEO_H265_ASPECT_RATIO_IDC_160_99 = 13, + STD_VIDEO_H265_ASPECT_RATIO_IDC_4_3 = 14, + STD_VIDEO_H265_ASPECT_RATIO_IDC_3_2 = 15, + STD_VIDEO_H265_ASPECT_RATIO_IDC_2_1 = 16, + STD_VIDEO_H265_ASPECT_RATIO_IDC_EXTENDED_SAR = 255, + STD_VIDEO_H265_ASPECT_RATIO_IDC_INVALID = 0x7FFFFFFF, + STD_VIDEO_H265_ASPECT_RATIO_IDC_MAX_ENUM = 0x7FFFFFFF +} StdVideoH265AspectRatioIdc; +typedef struct StdVideoH265DecPicBufMgr { + uint32_t max_latency_increase_plus1[STD_VIDEO_H265_SUBLAYERS_LIST_SIZE]; + uint8_t max_dec_pic_buffering_minus1[STD_VIDEO_H265_SUBLAYERS_LIST_SIZE]; + uint8_t max_num_reorder_pics[STD_VIDEO_H265_SUBLAYERS_LIST_SIZE]; +} StdVideoH265DecPicBufMgr; + +typedef struct StdVideoH265SubLayerHrdParameters { + uint32_t bit_rate_value_minus1[STD_VIDEO_H265_CPB_CNT_LIST_SIZE]; + uint32_t cpb_size_value_minus1[STD_VIDEO_H265_CPB_CNT_LIST_SIZE]; + uint32_t cpb_size_du_value_minus1[STD_VIDEO_H265_CPB_CNT_LIST_SIZE]; + uint32_t bit_rate_du_value_minus1[STD_VIDEO_H265_CPB_CNT_LIST_SIZE]; + uint32_t cbr_flag; +} StdVideoH265SubLayerHrdParameters; + +typedef struct StdVideoH265HrdFlags { + uint32_t nal_hrd_parameters_present_flag : 1; + uint32_t vcl_hrd_parameters_present_flag : 1; + uint32_t sub_pic_hrd_params_present_flag : 1; + uint32_t sub_pic_cpb_params_in_pic_timing_sei_flag : 1; + uint32_t fixed_pic_rate_general_flag : 8; + uint32_t fixed_pic_rate_within_cvs_flag : 8; + uint32_t low_delay_hrd_flag : 8; +} StdVideoH265HrdFlags; + +typedef struct StdVideoH265HrdParameters { + StdVideoH265HrdFlags flags; + uint8_t tick_divisor_minus2; + uint8_t du_cpb_removal_delay_increment_length_minus1; + uint8_t dpb_output_delay_du_length_minus1; + uint8_t bit_rate_scale; + uint8_t cpb_size_scale; + uint8_t cpb_size_du_scale; + uint8_t initial_cpb_removal_delay_length_minus1; + uint8_t au_cpb_removal_delay_length_minus1; + uint8_t dpb_output_delay_length_minus1; + uint8_t cpb_cnt_minus1[STD_VIDEO_H265_SUBLAYERS_LIST_SIZE]; + uint16_t elemental_duration_in_tc_minus1[STD_VIDEO_H265_SUBLAYERS_LIST_SIZE]; + uint16_t reserved[3]; + const StdVideoH265SubLayerHrdParameters* pSubLayerHrdParametersNal; + const StdVideoH265SubLayerHrdParameters* pSubLayerHrdParametersVcl; +} StdVideoH265HrdParameters; + +typedef struct StdVideoH265VpsFlags { + uint32_t vps_temporal_id_nesting_flag : 1; + uint32_t vps_sub_layer_ordering_info_present_flag : 1; + uint32_t vps_timing_info_present_flag : 1; + uint32_t vps_poc_proportional_to_timing_flag : 1; +} StdVideoH265VpsFlags; + +typedef struct StdVideoH265ProfileTierLevelFlags { + uint32_t general_tier_flag : 1; + uint32_t general_progressive_source_flag : 1; + uint32_t general_interlaced_source_flag : 1; + uint32_t general_non_packed_constraint_flag : 1; + uint32_t general_frame_only_constraint_flag : 1; +} StdVideoH265ProfileTierLevelFlags; + +typedef struct StdVideoH265ProfileTierLevel { + StdVideoH265ProfileTierLevelFlags flags; + StdVideoH265ProfileIdc general_profile_idc; + StdVideoH265LevelIdc general_level_idc; +} StdVideoH265ProfileTierLevel; + +typedef struct StdVideoH265VideoParameterSet { + StdVideoH265VpsFlags flags; + 
uint8_t vps_video_parameter_set_id; + uint8_t vps_max_sub_layers_minus1; + uint8_t reserved1; + uint8_t reserved2; + uint32_t vps_num_units_in_tick; + uint32_t vps_time_scale; + uint32_t vps_num_ticks_poc_diff_one_minus1; + uint32_t reserved3; + const StdVideoH265DecPicBufMgr* pDecPicBufMgr; + const StdVideoH265HrdParameters* pHrdParameters; + const StdVideoH265ProfileTierLevel* pProfileTierLevel; +} StdVideoH265VideoParameterSet; + +typedef struct StdVideoH265ScalingLists { + uint8_t ScalingList4x4[STD_VIDEO_H265_SCALING_LIST_4X4_NUM_LISTS][STD_VIDEO_H265_SCALING_LIST_4X4_NUM_ELEMENTS]; + uint8_t ScalingList8x8[STD_VIDEO_H265_SCALING_LIST_8X8_NUM_LISTS][STD_VIDEO_H265_SCALING_LIST_8X8_NUM_ELEMENTS]; + uint8_t ScalingList16x16[STD_VIDEO_H265_SCALING_LIST_16X16_NUM_LISTS][STD_VIDEO_H265_SCALING_LIST_16X16_NUM_ELEMENTS]; + uint8_t ScalingList32x32[STD_VIDEO_H265_SCALING_LIST_32X32_NUM_LISTS][STD_VIDEO_H265_SCALING_LIST_32X32_NUM_ELEMENTS]; + uint8_t ScalingListDCCoef16x16[STD_VIDEO_H265_SCALING_LIST_16X16_NUM_LISTS]; + uint8_t ScalingListDCCoef32x32[STD_VIDEO_H265_SCALING_LIST_32X32_NUM_LISTS]; +} StdVideoH265ScalingLists; + +typedef struct StdVideoH265SpsVuiFlags { + uint32_t aspect_ratio_info_present_flag : 1; + uint32_t overscan_info_present_flag : 1; + uint32_t overscan_appropriate_flag : 1; + uint32_t video_signal_type_present_flag : 1; + uint32_t video_full_range_flag : 1; + uint32_t colour_description_present_flag : 1; + uint32_t chroma_loc_info_present_flag : 1; + uint32_t neutral_chroma_indication_flag : 1; + uint32_t field_seq_flag : 1; + uint32_t frame_field_info_present_flag : 1; + uint32_t default_display_window_flag : 1; + uint32_t vui_timing_info_present_flag : 1; + uint32_t vui_poc_proportional_to_timing_flag : 1; + uint32_t vui_hrd_parameters_present_flag : 1; + uint32_t bitstream_restriction_flag : 1; + uint32_t tiles_fixed_structure_flag : 1; + uint32_t motion_vectors_over_pic_boundaries_flag : 1; + uint32_t restricted_ref_pic_lists_flag : 1; +} StdVideoH265SpsVuiFlags; + +typedef struct StdVideoH265SequenceParameterSetVui { + StdVideoH265SpsVuiFlags flags; + StdVideoH265AspectRatioIdc aspect_ratio_idc; + uint16_t sar_width; + uint16_t sar_height; + uint8_t video_format; + uint8_t colour_primaries; + uint8_t transfer_characteristics; + uint8_t matrix_coeffs; + uint8_t chroma_sample_loc_type_top_field; + uint8_t chroma_sample_loc_type_bottom_field; + uint8_t reserved1; + uint8_t reserved2; + uint16_t def_disp_win_left_offset; + uint16_t def_disp_win_right_offset; + uint16_t def_disp_win_top_offset; + uint16_t def_disp_win_bottom_offset; + uint32_t vui_num_units_in_tick; + uint32_t vui_time_scale; + uint32_t vui_num_ticks_poc_diff_one_minus1; + uint16_t min_spatial_segmentation_idc; + uint16_t reserved3; + uint8_t max_bytes_per_pic_denom; + uint8_t max_bits_per_min_cu_denom; + uint8_t log2_max_mv_length_horizontal; + uint8_t log2_max_mv_length_vertical; + const StdVideoH265HrdParameters* pHrdParameters; +} StdVideoH265SequenceParameterSetVui; + +typedef struct StdVideoH265PredictorPaletteEntries { + uint16_t PredictorPaletteEntries[STD_VIDEO_H265_PREDICTOR_PALETTE_COMPONENTS_LIST_SIZE][STD_VIDEO_H265_PREDICTOR_PALETTE_COMP_ENTRIES_LIST_SIZE]; +} StdVideoH265PredictorPaletteEntries; + +typedef struct StdVideoH265SpsFlags { + uint32_t sps_temporal_id_nesting_flag : 1; + uint32_t separate_colour_plane_flag : 1; + uint32_t conformance_window_flag : 1; + uint32_t sps_sub_layer_ordering_info_present_flag : 1; + uint32_t scaling_list_enabled_flag : 1; + uint32_t 
sps_scaling_list_data_present_flag : 1; + uint32_t amp_enabled_flag : 1; + uint32_t sample_adaptive_offset_enabled_flag : 1; + uint32_t pcm_enabled_flag : 1; + uint32_t pcm_loop_filter_disabled_flag : 1; + uint32_t long_term_ref_pics_present_flag : 1; + uint32_t sps_temporal_mvp_enabled_flag : 1; + uint32_t strong_intra_smoothing_enabled_flag : 1; + uint32_t vui_parameters_present_flag : 1; + uint32_t sps_extension_present_flag : 1; + uint32_t sps_range_extension_flag : 1; + uint32_t transform_skip_rotation_enabled_flag : 1; + uint32_t transform_skip_context_enabled_flag : 1; + uint32_t implicit_rdpcm_enabled_flag : 1; + uint32_t explicit_rdpcm_enabled_flag : 1; + uint32_t extended_precision_processing_flag : 1; + uint32_t intra_smoothing_disabled_flag : 1; + uint32_t high_precision_offsets_enabled_flag : 1; + uint32_t persistent_rice_adaptation_enabled_flag : 1; + uint32_t cabac_bypass_alignment_enabled_flag : 1; + uint32_t sps_scc_extension_flag : 1; + uint32_t sps_curr_pic_ref_enabled_flag : 1; + uint32_t palette_mode_enabled_flag : 1; + uint32_t sps_palette_predictor_initializers_present_flag : 1; + uint32_t intra_boundary_filtering_disabled_flag : 1; +} StdVideoH265SpsFlags; + +typedef struct StdVideoH265ShortTermRefPicSetFlags { + uint32_t inter_ref_pic_set_prediction_flag : 1; + uint32_t delta_rps_sign : 1; +} StdVideoH265ShortTermRefPicSetFlags; + +typedef struct StdVideoH265ShortTermRefPicSet { + StdVideoH265ShortTermRefPicSetFlags flags; + uint32_t delta_idx_minus1; + uint16_t use_delta_flag; + uint16_t abs_delta_rps_minus1; + uint16_t used_by_curr_pic_flag; + uint16_t used_by_curr_pic_s0_flag; + uint16_t used_by_curr_pic_s1_flag; + uint16_t reserved1; + uint8_t reserved2; + uint8_t reserved3; + uint8_t num_negative_pics; + uint8_t num_positive_pics; + uint16_t delta_poc_s0_minus1[STD_VIDEO_H265_MAX_DPB_SIZE]; + uint16_t delta_poc_s1_minus1[STD_VIDEO_H265_MAX_DPB_SIZE]; +} StdVideoH265ShortTermRefPicSet; + +typedef struct StdVideoH265LongTermRefPicsSps { + uint32_t used_by_curr_pic_lt_sps_flag; + uint32_t lt_ref_pic_poc_lsb_sps[STD_VIDEO_H265_MAX_LONG_TERM_REF_PICS_SPS]; +} StdVideoH265LongTermRefPicsSps; + +typedef struct StdVideoH265SequenceParameterSet { + StdVideoH265SpsFlags flags; + StdVideoH265ChromaFormatIdc chroma_format_idc; + uint32_t pic_width_in_luma_samples; + uint32_t pic_height_in_luma_samples; + uint8_t sps_video_parameter_set_id; + uint8_t sps_max_sub_layers_minus1; + uint8_t sps_seq_parameter_set_id; + uint8_t bit_depth_luma_minus8; + uint8_t bit_depth_chroma_minus8; + uint8_t log2_max_pic_order_cnt_lsb_minus4; + uint8_t log2_min_luma_coding_block_size_minus3; + uint8_t log2_diff_max_min_luma_coding_block_size; + uint8_t log2_min_luma_transform_block_size_minus2; + uint8_t log2_diff_max_min_luma_transform_block_size; + uint8_t max_transform_hierarchy_depth_inter; + uint8_t max_transform_hierarchy_depth_intra; + uint8_t num_short_term_ref_pic_sets; + uint8_t num_long_term_ref_pics_sps; + uint8_t pcm_sample_bit_depth_luma_minus1; + uint8_t pcm_sample_bit_depth_chroma_minus1; + uint8_t log2_min_pcm_luma_coding_block_size_minus3; + uint8_t log2_diff_max_min_pcm_luma_coding_block_size; + uint8_t reserved1; + uint8_t reserved2; + uint8_t palette_max_size; + uint8_t delta_palette_max_predictor_size; + uint8_t motion_vector_resolution_control_idc; + uint8_t sps_num_palette_predictor_initializers_minus1; + uint32_t conf_win_left_offset; + uint32_t conf_win_right_offset; + uint32_t conf_win_top_offset; + uint32_t conf_win_bottom_offset; + const 
StdVideoH265ProfileTierLevel* pProfileTierLevel; + const StdVideoH265DecPicBufMgr* pDecPicBufMgr; + const StdVideoH265ScalingLists* pScalingLists; + const StdVideoH265ShortTermRefPicSet* pShortTermRefPicSet; + const StdVideoH265LongTermRefPicsSps* pLongTermRefPicsSps; + const StdVideoH265SequenceParameterSetVui* pSequenceParameterSetVui; + const StdVideoH265PredictorPaletteEntries* pPredictorPaletteEntries; +} StdVideoH265SequenceParameterSet; + +typedef struct StdVideoH265PpsFlags { + uint32_t dependent_slice_segments_enabled_flag : 1; + uint32_t output_flag_present_flag : 1; + uint32_t sign_data_hiding_enabled_flag : 1; + uint32_t cabac_init_present_flag : 1; + uint32_t constrained_intra_pred_flag : 1; + uint32_t transform_skip_enabled_flag : 1; + uint32_t cu_qp_delta_enabled_flag : 1; + uint32_t pps_slice_chroma_qp_offsets_present_flag : 1; + uint32_t weighted_pred_flag : 1; + uint32_t weighted_bipred_flag : 1; + uint32_t transquant_bypass_enabled_flag : 1; + uint32_t tiles_enabled_flag : 1; + uint32_t entropy_coding_sync_enabled_flag : 1; + uint32_t uniform_spacing_flag : 1; + uint32_t loop_filter_across_tiles_enabled_flag : 1; + uint32_t pps_loop_filter_across_slices_enabled_flag : 1; + uint32_t deblocking_filter_control_present_flag : 1; + uint32_t deblocking_filter_override_enabled_flag : 1; + uint32_t pps_deblocking_filter_disabled_flag : 1; + uint32_t pps_scaling_list_data_present_flag : 1; + uint32_t lists_modification_present_flag : 1; + uint32_t slice_segment_header_extension_present_flag : 1; + uint32_t pps_extension_present_flag : 1; + uint32_t cross_component_prediction_enabled_flag : 1; + uint32_t chroma_qp_offset_list_enabled_flag : 1; + uint32_t pps_curr_pic_ref_enabled_flag : 1; + uint32_t residual_adaptive_colour_transform_enabled_flag : 1; + uint32_t pps_slice_act_qp_offsets_present_flag : 1; + uint32_t pps_palette_predictor_initializers_present_flag : 1; + uint32_t monochrome_palette_flag : 1; + uint32_t pps_range_extension_flag : 1; +} StdVideoH265PpsFlags; + +typedef struct StdVideoH265PictureParameterSet { + StdVideoH265PpsFlags flags; + uint8_t pps_pic_parameter_set_id; + uint8_t pps_seq_parameter_set_id; + uint8_t sps_video_parameter_set_id; + uint8_t num_extra_slice_header_bits; + uint8_t num_ref_idx_l0_default_active_minus1; + uint8_t num_ref_idx_l1_default_active_minus1; + int8_t init_qp_minus26; + uint8_t diff_cu_qp_delta_depth; + int8_t pps_cb_qp_offset; + int8_t pps_cr_qp_offset; + int8_t pps_beta_offset_div2; + int8_t pps_tc_offset_div2; + uint8_t log2_parallel_merge_level_minus2; + uint8_t log2_max_transform_skip_block_size_minus2; + uint8_t diff_cu_chroma_qp_offset_depth; + uint8_t chroma_qp_offset_list_len_minus1; + int8_t cb_qp_offset_list[STD_VIDEO_H265_CHROMA_QP_OFFSET_LIST_SIZE]; + int8_t cr_qp_offset_list[STD_VIDEO_H265_CHROMA_QP_OFFSET_LIST_SIZE]; + uint8_t log2_sao_offset_scale_luma; + uint8_t log2_sao_offset_scale_chroma; + int8_t pps_act_y_qp_offset_plus5; + int8_t pps_act_cb_qp_offset_plus5; + int8_t pps_act_cr_qp_offset_plus3; + uint8_t pps_num_palette_predictor_initializers; + uint8_t luma_bit_depth_entry_minus8; + uint8_t chroma_bit_depth_entry_minus8; + uint8_t num_tile_columns_minus1; + uint8_t num_tile_rows_minus1; + uint8_t reserved1; + uint8_t reserved2; + uint16_t column_width_minus1[STD_VIDEO_H265_CHROMA_QP_OFFSET_TILE_COLS_LIST_SIZE]; + uint16_t row_height_minus1[STD_VIDEO_H265_CHROMA_QP_OFFSET_TILE_ROWS_LIST_SIZE]; + uint32_t reserved3; + const StdVideoH265ScalingLists* pScalingLists; + const StdVideoH265PredictorPaletteEntries* 
pPredictorPaletteEntries; +} StdVideoH265PictureParameterSet; + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_h265std_decode.h b/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_h265std_decode.h new file mode 100644 index 0000000000..1758d4a88d --- /dev/null +++ b/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_h265std_decode.h @@ -0,0 +1,67 @@ +#ifndef VULKAN_VIDEO_CODEC_H265STD_DECODE_H_ +#define VULKAN_VIDEO_CODEC_H265STD_DECODE_H_ 1 + +/* +** Copyright 2015-2026 The Khronos Group Inc. +** +** SPDX-License-Identifier: Apache-2.0 +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +// vulkan_video_codec_h265std_decode is a preprocessor guard. Do not pass it to API calls. +#define vulkan_video_codec_h265std_decode 1 +#include "vulkan_video_codec_h265std.h" + +#define VK_STD_VULKAN_VIDEO_CODEC_H265_DECODE_API_VERSION_1_0_0 VK_MAKE_VIDEO_STD_VERSION(1, 0, 0) + +#define VK_STD_VULKAN_VIDEO_CODEC_H265_DECODE_SPEC_VERSION VK_STD_VULKAN_VIDEO_CODEC_H265_DECODE_API_VERSION_1_0_0 +#define VK_STD_VULKAN_VIDEO_CODEC_H265_DECODE_EXTENSION_NAME "VK_STD_vulkan_video_codec_h265_decode" +#define STD_VIDEO_DECODE_H265_REF_PIC_SET_LIST_SIZE 8U +typedef struct StdVideoDecodeH265PictureInfoFlags { + uint32_t IrapPicFlag : 1; + uint32_t IdrPicFlag : 1; + uint32_t IsReference : 1; + uint32_t short_term_ref_pic_set_sps_flag : 1; +} StdVideoDecodeH265PictureInfoFlags; + +typedef struct StdVideoDecodeH265PictureInfo { + StdVideoDecodeH265PictureInfoFlags flags; + uint8_t sps_video_parameter_set_id; + uint8_t pps_seq_parameter_set_id; + uint8_t pps_pic_parameter_set_id; + uint8_t NumDeltaPocsOfRefRpsIdx; + int32_t PicOrderCntVal; + uint16_t NumBitsForSTRefPicSetInSlice; + uint16_t reserved; + uint8_t RefPicSetStCurrBefore[STD_VIDEO_DECODE_H265_REF_PIC_SET_LIST_SIZE]; + uint8_t RefPicSetStCurrAfter[STD_VIDEO_DECODE_H265_REF_PIC_SET_LIST_SIZE]; + uint8_t RefPicSetLtCurr[STD_VIDEO_DECODE_H265_REF_PIC_SET_LIST_SIZE]; +} StdVideoDecodeH265PictureInfo; + +typedef struct StdVideoDecodeH265ReferenceInfoFlags { + uint32_t used_for_long_term_reference : 1; + uint32_t unused_for_reference : 1; +} StdVideoDecodeH265ReferenceInfoFlags; + +typedef struct StdVideoDecodeH265ReferenceInfo { + StdVideoDecodeH265ReferenceInfoFlags flags; + int32_t PicOrderCntVal; +} StdVideoDecodeH265ReferenceInfo; + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_h265std_encode.h b/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_h265std_encode.h new file mode 100644 index 0000000000..e584a7262b --- /dev/null +++ b/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_h265std_encode.h @@ -0,0 +1,157 @@ +#ifndef VULKAN_VIDEO_CODEC_H265STD_ENCODE_H_ +#define VULKAN_VIDEO_CODEC_H265STD_ENCODE_H_ 1 + +/* +** Copyright 2015-2026 The Khronos Group Inc. +** +** SPDX-License-Identifier: Apache-2.0 +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +// vulkan_video_codec_h265std_encode is a preprocessor guard. Do not pass it to API calls. 
+#define vulkan_video_codec_h265std_encode 1 +#include "vulkan_video_codec_h265std.h" + +#define VK_STD_VULKAN_VIDEO_CODEC_H265_ENCODE_API_VERSION_1_0_0 VK_MAKE_VIDEO_STD_VERSION(1, 0, 0) + +#define VK_STD_VULKAN_VIDEO_CODEC_H265_ENCODE_SPEC_VERSION VK_STD_VULKAN_VIDEO_CODEC_H265_ENCODE_API_VERSION_1_0_0 +#define VK_STD_VULKAN_VIDEO_CODEC_H265_ENCODE_EXTENSION_NAME "VK_STD_vulkan_video_codec_h265_encode" +typedef struct StdVideoEncodeH265WeightTableFlags { + uint16_t luma_weight_l0_flag; + uint16_t chroma_weight_l0_flag; + uint16_t luma_weight_l1_flag; + uint16_t chroma_weight_l1_flag; +} StdVideoEncodeH265WeightTableFlags; + +typedef struct StdVideoEncodeH265WeightTable { + StdVideoEncodeH265WeightTableFlags flags; + uint8_t luma_log2_weight_denom; + int8_t delta_chroma_log2_weight_denom; + int8_t delta_luma_weight_l0[STD_VIDEO_H265_MAX_NUM_LIST_REF]; + int8_t luma_offset_l0[STD_VIDEO_H265_MAX_NUM_LIST_REF]; + int8_t delta_chroma_weight_l0[STD_VIDEO_H265_MAX_NUM_LIST_REF][STD_VIDEO_H265_MAX_CHROMA_PLANES]; + int8_t delta_chroma_offset_l0[STD_VIDEO_H265_MAX_NUM_LIST_REF][STD_VIDEO_H265_MAX_CHROMA_PLANES]; + int8_t delta_luma_weight_l1[STD_VIDEO_H265_MAX_NUM_LIST_REF]; + int8_t luma_offset_l1[STD_VIDEO_H265_MAX_NUM_LIST_REF]; + int8_t delta_chroma_weight_l1[STD_VIDEO_H265_MAX_NUM_LIST_REF][STD_VIDEO_H265_MAX_CHROMA_PLANES]; + int8_t delta_chroma_offset_l1[STD_VIDEO_H265_MAX_NUM_LIST_REF][STD_VIDEO_H265_MAX_CHROMA_PLANES]; +} StdVideoEncodeH265WeightTable; + +typedef struct StdVideoEncodeH265SliceSegmentHeaderFlags { + uint32_t first_slice_segment_in_pic_flag : 1; + uint32_t dependent_slice_segment_flag : 1; + uint32_t slice_sao_luma_flag : 1; + uint32_t slice_sao_chroma_flag : 1; + uint32_t num_ref_idx_active_override_flag : 1; + uint32_t mvd_l1_zero_flag : 1; + uint32_t cabac_init_flag : 1; + uint32_t cu_chroma_qp_offset_enabled_flag : 1; + uint32_t deblocking_filter_override_flag : 1; + uint32_t slice_deblocking_filter_disabled_flag : 1; + uint32_t collocated_from_l0_flag : 1; + uint32_t slice_loop_filter_across_slices_enabled_flag : 1; + uint32_t reserved : 20; +} StdVideoEncodeH265SliceSegmentHeaderFlags; + +typedef struct StdVideoEncodeH265SliceSegmentHeader { + StdVideoEncodeH265SliceSegmentHeaderFlags flags; + StdVideoH265SliceType slice_type; + uint32_t slice_segment_address; + uint8_t collocated_ref_idx; + uint8_t MaxNumMergeCand; + int8_t slice_cb_qp_offset; + int8_t slice_cr_qp_offset; + int8_t slice_beta_offset_div2; + int8_t slice_tc_offset_div2; + int8_t slice_act_y_qp_offset; + int8_t slice_act_cb_qp_offset; + int8_t slice_act_cr_qp_offset; + int8_t slice_qp_delta; + uint16_t reserved1; + const StdVideoEncodeH265WeightTable* pWeightTable; +} StdVideoEncodeH265SliceSegmentHeader; + +typedef struct StdVideoEncodeH265ReferenceListsInfoFlags { + uint32_t ref_pic_list_modification_flag_l0 : 1; + uint32_t ref_pic_list_modification_flag_l1 : 1; + uint32_t reserved : 30; +} StdVideoEncodeH265ReferenceListsInfoFlags; + +typedef struct StdVideoEncodeH265ReferenceListsInfo { + StdVideoEncodeH265ReferenceListsInfoFlags flags; + uint8_t num_ref_idx_l0_active_minus1; + uint8_t num_ref_idx_l1_active_minus1; + uint8_t RefPicList0[STD_VIDEO_H265_MAX_NUM_LIST_REF]; + uint8_t RefPicList1[STD_VIDEO_H265_MAX_NUM_LIST_REF]; + uint8_t list_entry_l0[STD_VIDEO_H265_MAX_NUM_LIST_REF]; + uint8_t list_entry_l1[STD_VIDEO_H265_MAX_NUM_LIST_REF]; +} StdVideoEncodeH265ReferenceListsInfo; + +typedef struct StdVideoEncodeH265PictureInfoFlags { + uint32_t is_reference : 1; + uint32_t IrapPicFlag : 1; + 
uint32_t used_for_long_term_reference : 1; + uint32_t discardable_flag : 1; + uint32_t cross_layer_bla_flag : 1; + uint32_t pic_output_flag : 1; + uint32_t no_output_of_prior_pics_flag : 1; + uint32_t short_term_ref_pic_set_sps_flag : 1; + uint32_t slice_temporal_mvp_enabled_flag : 1; + uint32_t reserved : 23; +} StdVideoEncodeH265PictureInfoFlags; + +typedef struct StdVideoEncodeH265LongTermRefPics { + uint8_t num_long_term_sps; + uint8_t num_long_term_pics; + uint8_t lt_idx_sps[STD_VIDEO_H265_MAX_LONG_TERM_REF_PICS_SPS]; + uint8_t poc_lsb_lt[STD_VIDEO_H265_MAX_LONG_TERM_PICS]; + uint16_t used_by_curr_pic_lt_flag; + uint8_t delta_poc_msb_present_flag[STD_VIDEO_H265_MAX_DELTA_POC]; + uint8_t delta_poc_msb_cycle_lt[STD_VIDEO_H265_MAX_DELTA_POC]; +} StdVideoEncodeH265LongTermRefPics; + +typedef struct StdVideoEncodeH265PictureInfo { + StdVideoEncodeH265PictureInfoFlags flags; + StdVideoH265PictureType pic_type; + uint8_t sps_video_parameter_set_id; + uint8_t pps_seq_parameter_set_id; + uint8_t pps_pic_parameter_set_id; + uint8_t short_term_ref_pic_set_idx; + int32_t PicOrderCntVal; + uint8_t TemporalId; + uint8_t reserved1[7]; + const StdVideoEncodeH265ReferenceListsInfo* pRefLists; + const StdVideoH265ShortTermRefPicSet* pShortTermRefPicSet; + const StdVideoEncodeH265LongTermRefPics* pLongTermRefPics; +} StdVideoEncodeH265PictureInfo; + +typedef struct StdVideoEncodeH265ReferenceInfoFlags { + uint32_t used_for_long_term_reference : 1; + uint32_t unused_for_reference : 1; + uint32_t reserved : 30; +} StdVideoEncodeH265ReferenceInfoFlags; + +typedef struct StdVideoEncodeH265ReferenceInfo { + StdVideoEncodeH265ReferenceInfoFlags flags; + StdVideoH265PictureType pic_type; + int32_t PicOrderCntVal; + uint8_t TemporalId; +} StdVideoEncodeH265ReferenceInfo; + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_vp9std.h b/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_vp9std.h new file mode 100644 index 0000000000..3c62f287e7 --- /dev/null +++ b/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_vp9std.h @@ -0,0 +1,151 @@ +#ifndef VULKAN_VIDEO_CODEC_VP9STD_H_ +#define VULKAN_VIDEO_CODEC_VP9STD_H_ 1 + +/* +** Copyright 2015-2026 The Khronos Group Inc. +** +** SPDX-License-Identifier: Apache-2.0 +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +// vulkan_video_codec_vp9std is a preprocessor guard. Do not pass it to API calls. 
+#define vulkan_video_codec_vp9std 1 +#include "vulkan_video_codecs_common.h" +#define STD_VIDEO_VP9_NUM_REF_FRAMES 8U +#define STD_VIDEO_VP9_REFS_PER_FRAME 3U +#define STD_VIDEO_VP9_MAX_REF_FRAMES 4U +#define STD_VIDEO_VP9_LOOP_FILTER_ADJUSTMENTS 2U +#define STD_VIDEO_VP9_MAX_SEGMENTS 8U +#define STD_VIDEO_VP9_SEG_LVL_MAX 4U +#define STD_VIDEO_VP9_MAX_SEGMENTATION_TREE_PROBS 7U +#define STD_VIDEO_VP9_MAX_SEGMENTATION_PRED_PROB 3U + +typedef enum StdVideoVP9Profile { + STD_VIDEO_VP9_PROFILE_0 = 0, + STD_VIDEO_VP9_PROFILE_1 = 1, + STD_VIDEO_VP9_PROFILE_2 = 2, + STD_VIDEO_VP9_PROFILE_3 = 3, + STD_VIDEO_VP9_PROFILE_INVALID = 0x7FFFFFFF, + STD_VIDEO_VP9_PROFILE_MAX_ENUM = 0x7FFFFFFF +} StdVideoVP9Profile; + +typedef enum StdVideoVP9Level { + STD_VIDEO_VP9_LEVEL_1_0 = 0, + STD_VIDEO_VP9_LEVEL_1_1 = 1, + STD_VIDEO_VP9_LEVEL_2_0 = 2, + STD_VIDEO_VP9_LEVEL_2_1 = 3, + STD_VIDEO_VP9_LEVEL_3_0 = 4, + STD_VIDEO_VP9_LEVEL_3_1 = 5, + STD_VIDEO_VP9_LEVEL_4_0 = 6, + STD_VIDEO_VP9_LEVEL_4_1 = 7, + STD_VIDEO_VP9_LEVEL_5_0 = 8, + STD_VIDEO_VP9_LEVEL_5_1 = 9, + STD_VIDEO_VP9_LEVEL_5_2 = 10, + STD_VIDEO_VP9_LEVEL_6_0 = 11, + STD_VIDEO_VP9_LEVEL_6_1 = 12, + STD_VIDEO_VP9_LEVEL_6_2 = 13, + STD_VIDEO_VP9_LEVEL_INVALID = 0x7FFFFFFF, + STD_VIDEO_VP9_LEVEL_MAX_ENUM = 0x7FFFFFFF +} StdVideoVP9Level; + +typedef enum StdVideoVP9FrameType { + STD_VIDEO_VP9_FRAME_TYPE_KEY = 0, + STD_VIDEO_VP9_FRAME_TYPE_NON_KEY = 1, + STD_VIDEO_VP9_FRAME_TYPE_INVALID = 0x7FFFFFFF, + STD_VIDEO_VP9_FRAME_TYPE_MAX_ENUM = 0x7FFFFFFF +} StdVideoVP9FrameType; + +typedef enum StdVideoVP9ReferenceName { + STD_VIDEO_VP9_REFERENCE_NAME_INTRA_FRAME = 0, + STD_VIDEO_VP9_REFERENCE_NAME_LAST_FRAME = 1, + STD_VIDEO_VP9_REFERENCE_NAME_GOLDEN_FRAME = 2, + STD_VIDEO_VP9_REFERENCE_NAME_ALTREF_FRAME = 3, + STD_VIDEO_VP9_REFERENCE_NAME_INVALID = 0x7FFFFFFF, + STD_VIDEO_VP9_REFERENCE_NAME_MAX_ENUM = 0x7FFFFFFF +} StdVideoVP9ReferenceName; + +typedef enum StdVideoVP9InterpolationFilter { + STD_VIDEO_VP9_INTERPOLATION_FILTER_EIGHTTAP = 0, + STD_VIDEO_VP9_INTERPOLATION_FILTER_EIGHTTAP_SMOOTH = 1, + STD_VIDEO_VP9_INTERPOLATION_FILTER_EIGHTTAP_SHARP = 2, + STD_VIDEO_VP9_INTERPOLATION_FILTER_BILINEAR = 3, + STD_VIDEO_VP9_INTERPOLATION_FILTER_SWITCHABLE = 4, + STD_VIDEO_VP9_INTERPOLATION_FILTER_INVALID = 0x7FFFFFFF, + STD_VIDEO_VP9_INTERPOLATION_FILTER_MAX_ENUM = 0x7FFFFFFF +} StdVideoVP9InterpolationFilter; + +typedef enum StdVideoVP9ColorSpace { + STD_VIDEO_VP9_COLOR_SPACE_UNKNOWN = 0, + STD_VIDEO_VP9_COLOR_SPACE_BT_601 = 1, + STD_VIDEO_VP9_COLOR_SPACE_BT_709 = 2, + STD_VIDEO_VP9_COLOR_SPACE_SMPTE_170 = 3, + STD_VIDEO_VP9_COLOR_SPACE_SMPTE_240 = 4, + STD_VIDEO_VP9_COLOR_SPACE_BT_2020 = 5, + STD_VIDEO_VP9_COLOR_SPACE_RESERVED = 6, + STD_VIDEO_VP9_COLOR_SPACE_RGB = 7, + STD_VIDEO_VP9_COLOR_SPACE_INVALID = 0x7FFFFFFF, + STD_VIDEO_VP9_COLOR_SPACE_MAX_ENUM = 0x7FFFFFFF +} StdVideoVP9ColorSpace; +typedef struct StdVideoVP9ColorConfigFlags { + uint32_t color_range : 1; + uint32_t reserved : 31; +} StdVideoVP9ColorConfigFlags; + +typedef struct StdVideoVP9ColorConfig { + StdVideoVP9ColorConfigFlags flags; + uint8_t BitDepth; + uint8_t subsampling_x; + uint8_t subsampling_y; + uint8_t reserved1; + StdVideoVP9ColorSpace color_space; +} StdVideoVP9ColorConfig; + +typedef struct StdVideoVP9LoopFilterFlags { + uint32_t loop_filter_delta_enabled : 1; + uint32_t loop_filter_delta_update : 1; + uint32_t reserved : 30; +} StdVideoVP9LoopFilterFlags; + +typedef struct StdVideoVP9LoopFilter { + StdVideoVP9LoopFilterFlags flags; + uint8_t loop_filter_level; + uint8_t 
loop_filter_sharpness; + uint8_t update_ref_delta; + int8_t loop_filter_ref_deltas[STD_VIDEO_VP9_MAX_REF_FRAMES]; + uint8_t update_mode_delta; + int8_t loop_filter_mode_deltas[STD_VIDEO_VP9_LOOP_FILTER_ADJUSTMENTS]; +} StdVideoVP9LoopFilter; + +typedef struct StdVideoVP9SegmentationFlags { + uint32_t segmentation_update_map : 1; + uint32_t segmentation_temporal_update : 1; + uint32_t segmentation_update_data : 1; + uint32_t segmentation_abs_or_delta_update : 1; + uint32_t reserved : 28; +} StdVideoVP9SegmentationFlags; + +typedef struct StdVideoVP9Segmentation { + StdVideoVP9SegmentationFlags flags; + uint8_t segmentation_tree_probs[STD_VIDEO_VP9_MAX_SEGMENTATION_TREE_PROBS]; + uint8_t segmentation_pred_prob[STD_VIDEO_VP9_MAX_SEGMENTATION_PRED_PROB]; + uint8_t FeatureEnabled[STD_VIDEO_VP9_MAX_SEGMENTS]; + int16_t FeatureData[STD_VIDEO_VP9_MAX_SEGMENTS][STD_VIDEO_VP9_SEG_LVL_MAX]; +} StdVideoVP9Segmentation; + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_vp9std_decode.h b/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_vp9std_decode.h new file mode 100644 index 0000000000..ac7fa7be8c --- /dev/null +++ b/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codec_vp9std_decode.h @@ -0,0 +1,68 @@ +#ifndef VULKAN_VIDEO_CODEC_VP9STD_DECODE_H_ +#define VULKAN_VIDEO_CODEC_VP9STD_DECODE_H_ 1 + +/* +** Copyright 2015-2026 The Khronos Group Inc. +** +** SPDX-License-Identifier: Apache-2.0 +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +// vulkan_video_codec_vp9std_decode is a preprocessor guard. Do not pass it to API calls. +#define vulkan_video_codec_vp9std_decode 1 +#include "vulkan_video_codec_vp9std.h" + +#define VK_STD_VULKAN_VIDEO_CODEC_VP9_DECODE_API_VERSION_1_0_0 VK_MAKE_VIDEO_STD_VERSION(1, 0, 0) + +#define VK_STD_VULKAN_VIDEO_CODEC_VP9_DECODE_SPEC_VERSION VK_STD_VULKAN_VIDEO_CODEC_VP9_DECODE_API_VERSION_1_0_0 +#define VK_STD_VULKAN_VIDEO_CODEC_VP9_DECODE_EXTENSION_NAME "VK_STD_vulkan_video_codec_vp9_decode" +typedef struct StdVideoDecodeVP9PictureInfoFlags { + uint32_t error_resilient_mode : 1; + uint32_t intra_only : 1; + uint32_t allow_high_precision_mv : 1; + uint32_t refresh_frame_context : 1; + uint32_t frame_parallel_decoding_mode : 1; + uint32_t segmentation_enabled : 1; + uint32_t show_frame : 1; + uint32_t UsePrevFrameMvs : 1; + uint32_t reserved : 24; +} StdVideoDecodeVP9PictureInfoFlags; + +typedef struct StdVideoDecodeVP9PictureInfo { + StdVideoDecodeVP9PictureInfoFlags flags; + StdVideoVP9Profile profile; + StdVideoVP9FrameType frame_type; + uint8_t frame_context_idx; + uint8_t reset_frame_context; + uint8_t refresh_frame_flags; + uint8_t ref_frame_sign_bias_mask; + StdVideoVP9InterpolationFilter interpolation_filter; + uint8_t base_q_idx; + int8_t delta_q_y_dc; + int8_t delta_q_uv_dc; + int8_t delta_q_uv_ac; + uint8_t tile_cols_log2; + uint8_t tile_rows_log2; + uint16_t reserved1[3]; + const StdVideoVP9ColorConfig* pColorConfig; + const StdVideoVP9LoopFilter* pLoopFilter; + const StdVideoVP9Segmentation* pSegmentation; +} StdVideoDecodeVP9PictureInfo; + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codecs_common.h b/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codecs_common.h new file mode 100644 index 0000000000..3c4d0553d0 --- /dev/null +++ b/code/renderer_vulkan/vulkan/vk_video/vulkan_video_codecs_common.h @@ -0,0 +1,36 @@ +#ifndef 
VULKAN_VIDEO_CODECS_COMMON_H_
+#define VULKAN_VIDEO_CODECS_COMMON_H_ 1
+
+/*
+** Copyright 2015-2026 The Khronos Group Inc.
+**
+** SPDX-License-Identifier: Apache-2.0
+*/
+
+/*
+** This header is generated from the Khronos Vulkan XML API Registry.
+**
+*/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+
+// vulkan_video_codecs_common is a preprocessor guard. Do not pass it to API calls.
+#define vulkan_video_codecs_common 1
+#if !defined(VK_NO_STDINT_H)
+    #include <stdint.h>
+#endif
+
+#define VK_MAKE_VIDEO_STD_VERSION(major, minor, patch) \
+    ((((uint32_t)(major)) << 22) | (((uint32_t)(minor)) << 12) | ((uint32_t)(patch)))
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/code/renderer_vulkan/vulkan/vulkan.h b/code/renderer_vulkan/vulkan/vulkan.h
new file mode 100644
index 0000000000..2ed8d9c7ab
--- /dev/null
+++ b/code/renderer_vulkan/vulkan/vulkan.h
@@ -0,0 +1,103 @@
+#ifndef VULKAN_H_
+#define VULKAN_H_ 1
+
+/*
+** Copyright 2015-2026 The Khronos Group Inc.
+**
+** SPDX-License-Identifier: Apache-2.0
+*/
+
+#include "vk_platform.h"
+#include "vulkan_core.h"
+
+#ifdef VK_USE_PLATFORM_ANDROID_KHR
+#include "vulkan_android.h"
+#endif
+
+#ifdef VK_USE_PLATFORM_FUCHSIA
+#include <zircon/types.h>
+#include "vulkan_fuchsia.h"
+#endif
+
+#ifdef VK_USE_PLATFORM_IOS_MVK
+#include "vulkan_ios.h"
+#endif
+
+
+#ifdef VK_USE_PLATFORM_MACOS_MVK
+#include "vulkan_macos.h"
+#endif
+
+#ifdef VK_USE_PLATFORM_METAL_EXT
+#include "vulkan_metal.h"
+#endif
+
+#ifdef VK_USE_PLATFORM_VI_NN
+#include "vulkan_vi.h"
+#endif
+
+
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+#include "vulkan_wayland.h"
+#endif
+
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+#include <windows.h>
+#include "vulkan_win32.h"
+#endif
+
+
+#ifdef VK_USE_PLATFORM_XCB_KHR
+#include <xcb/xcb.h>
+#include "vulkan_xcb.h"
+#endif
+
+
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+#include <X11/Xlib.h>
+#include "vulkan_xlib.h"
+#endif
+
+
+#ifdef VK_USE_PLATFORM_DIRECTFB_EXT
+#include <directfb.h>
+#include "vulkan_directfb.h"
+#endif
+
+
+#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
+#include <X11/Xlib.h>
+#include <X11/extensions/Xrandr.h>
+#include "vulkan_xlib_xrandr.h"
+#endif
+
+
+#ifdef VK_USE_PLATFORM_GGP
+#include <ggp_c/vulkan_types.h>
+#include "vulkan_ggp.h"
+#endif
+
+
+#ifdef VK_USE_PLATFORM_SCREEN_QNX
+#include <screen/screen.h>
+#include "vulkan_screen.h"
+#endif
+
+
+#ifdef VK_USE_PLATFORM_SCI
+#include <nvscisync.h>
+#include <nvscibuf.h>
+#include "vulkan_sci.h"
+#endif
+
+
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+#include "vulkan_beta.h"
+#endif
+
+#ifdef VK_USE_PLATFORM_OHOS
+#include "vulkan_ohos.h"
+#endif
+
+#endif // VULKAN_H_
diff --git a/code/renderer_vulkan/vulkan/vulkan_core.h b/code/renderer_vulkan/vulkan/vulkan_core.h
new file mode 100644
index 0000000000..32507d29eb
--- /dev/null
+++ b/code/renderer_vulkan/vulkan/vulkan_core.h
@@ -0,0 +1,25278 @@
+#ifndef VULKAN_CORE_H_
+#define VULKAN_CORE_H_ 1
+
+/*
+** Copyright 2015-2026 The Khronos Group Inc.
+**
+** SPDX-License-Identifier: Apache-2.0
+*/
+
+/*
+** This header is generated from the Khronos Vulkan XML API Registry.
+**
+*/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+
+// VK_VERSION_1_0 is a preprocessor guard. Do not pass it to API calls.
+#define VK_VERSION_1_0 1 +#include "vk_platform.h" + +#define VK_DEFINE_HANDLE(object) typedef struct object##_T* object; + + +#ifndef VK_USE_64_BIT_PTR_DEFINES + #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__) || (defined(__riscv) && __riscv_xlen == 64) + #define VK_USE_64_BIT_PTR_DEFINES 1 + #else + #define VK_USE_64_BIT_PTR_DEFINES 0 + #endif +#endif + + +#ifndef VK_DEFINE_NON_DISPATCHABLE_HANDLE + #if (VK_USE_64_BIT_PTR_DEFINES==1) + #if (defined(__cplusplus) && (__cplusplus >= 201103L)) || (defined(_MSVC_LANG) && (_MSVC_LANG >= 201103L)) + #define VK_NULL_HANDLE nullptr + #else + #define VK_NULL_HANDLE ((void*)0) + #endif + #else + #define VK_NULL_HANDLE 0ULL + #endif +#endif +#ifndef VK_NULL_HANDLE + #define VK_NULL_HANDLE 0 +#endif + + +#ifndef VK_DEFINE_NON_DISPATCHABLE_HANDLE + #if (VK_USE_64_BIT_PTR_DEFINES==1) + #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef struct object##_T *object; + #else + #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef uint64_t object; + #endif +#endif + +#define VK_MAKE_API_VERSION(variant, major, minor, patch) \ + ((((uint32_t)(variant)) << 29U) | (((uint32_t)(major)) << 22U) | (((uint32_t)(minor)) << 12U) | ((uint32_t)(patch))) + + +//#define VK_API_VERSION VK_MAKE_API_VERSION(0, 1, 0, 0) // Patch version should always be set to 0 + +// Version of this file +#define VK_HEADER_VERSION 341 + +// Complete version of this file +#define VK_HEADER_VERSION_COMPLETE VK_MAKE_API_VERSION(0, 1, 4, VK_HEADER_VERSION) + + +#define VK_MAKE_VERSION(major, minor, patch) \ + ((((uint32_t)(major)) << 22U) | (((uint32_t)(minor)) << 12U) | ((uint32_t)(patch))) + + +#define VK_VERSION_MAJOR(version) ((uint32_t)(version) >> 22U) + + +#define VK_VERSION_MINOR(version) (((uint32_t)(version) >> 12U) & 0x3FFU) + + +#define VK_VERSION_PATCH(version) ((uint32_t)(version) & 0xFFFU) + +#define VK_API_VERSION_VARIANT(version) ((uint32_t)(version) >> 29U) +#define VK_API_VERSION_MAJOR(version) (((uint32_t)(version) >> 22U) & 0x7FU) +#define VK_API_VERSION_MINOR(version) (((uint32_t)(version) >> 12U) & 0x3FFU) +#define VK_API_VERSION_PATCH(version) ((uint32_t)(version) & 0xFFFU) +// Vulkan 1.0 version number +#define VK_API_VERSION_1_0 VK_MAKE_API_VERSION(0, 1, 0, 0)// Patch version should always be set to 0 + +typedef uint32_t VkBool32; +typedef uint64_t VkDeviceAddress; +typedef uint64_t VkDeviceSize; +typedef uint32_t VkFlags; +typedef uint32_t VkSampleMask; +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBuffer) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImage) +VK_DEFINE_HANDLE(VkInstance) +VK_DEFINE_HANDLE(VkPhysicalDevice) +VK_DEFINE_HANDLE(VkDevice) +VK_DEFINE_HANDLE(VkQueue) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSemaphore) +VK_DEFINE_HANDLE(VkCommandBuffer) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFence) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDeviceMemory) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkQueryPool) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImageView) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkCommandPool) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkRenderPass) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFramebuffer) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkEvent) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBufferView) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkShaderModule) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineCache) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineLayout) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipeline) 
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSetLayout) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSampler) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSet) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorPool) +#define VK_FALSE 0U +#define VK_LOD_CLAMP_NONE 1000.0F +#define VK_QUEUE_FAMILY_IGNORED (~0U) +#define VK_REMAINING_ARRAY_LAYERS (~0U) +#define VK_REMAINING_MIP_LEVELS (~0U) +#define VK_TRUE 1U +#define VK_WHOLE_SIZE (~0ULL) +#define VK_MAX_MEMORY_TYPES 32U +#define VK_MAX_PHYSICAL_DEVICE_NAME_SIZE 256U +#define VK_UUID_SIZE 16U +#define VK_MAX_EXTENSION_NAME_SIZE 256U +#define VK_MAX_DESCRIPTION_SIZE 256U +#define VK_MAX_MEMORY_HEAPS 16U +#define VK_ATTACHMENT_UNUSED (~0U) +#define VK_SUBPASS_EXTERNAL (~0U) + +typedef enum VkResult { + VK_SUCCESS = 0, + VK_NOT_READY = 1, + VK_TIMEOUT = 2, + VK_EVENT_SET = 3, + VK_EVENT_RESET = 4, + VK_INCOMPLETE = 5, + VK_ERROR_OUT_OF_HOST_MEMORY = -1, + VK_ERROR_OUT_OF_DEVICE_MEMORY = -2, + VK_ERROR_INITIALIZATION_FAILED = -3, + VK_ERROR_DEVICE_LOST = -4, + VK_ERROR_MEMORY_MAP_FAILED = -5, + VK_ERROR_LAYER_NOT_PRESENT = -6, + VK_ERROR_EXTENSION_NOT_PRESENT = -7, + VK_ERROR_FEATURE_NOT_PRESENT = -8, + VK_ERROR_INCOMPATIBLE_DRIVER = -9, + VK_ERROR_TOO_MANY_OBJECTS = -10, + VK_ERROR_FORMAT_NOT_SUPPORTED = -11, + VK_ERROR_FRAGMENTED_POOL = -12, + VK_ERROR_UNKNOWN = -13, + VK_ERROR_VALIDATION_FAILED = -1000011001, + VK_ERROR_OUT_OF_POOL_MEMORY = -1000069000, + VK_ERROR_INVALID_EXTERNAL_HANDLE = -1000072003, + VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS = -1000257000, + VK_ERROR_FRAGMENTATION = -1000161000, + VK_PIPELINE_COMPILE_REQUIRED = 1000297000, + VK_ERROR_NOT_PERMITTED = -1000174001, + VK_ERROR_SURFACE_LOST_KHR = -1000000000, + VK_ERROR_NATIVE_WINDOW_IN_USE_KHR = -1000000001, + VK_SUBOPTIMAL_KHR = 1000001003, + VK_ERROR_OUT_OF_DATE_KHR = -1000001004, + VK_ERROR_INCOMPATIBLE_DISPLAY_KHR = -1000003001, + VK_ERROR_INVALID_SHADER_NV = -1000012000, + VK_ERROR_IMAGE_USAGE_NOT_SUPPORTED_KHR = -1000023000, + VK_ERROR_VIDEO_PICTURE_LAYOUT_NOT_SUPPORTED_KHR = -1000023001, + VK_ERROR_VIDEO_PROFILE_OPERATION_NOT_SUPPORTED_KHR = -1000023002, + VK_ERROR_VIDEO_PROFILE_FORMAT_NOT_SUPPORTED_KHR = -1000023003, + VK_ERROR_VIDEO_PROFILE_CODEC_NOT_SUPPORTED_KHR = -1000023004, + VK_ERROR_VIDEO_STD_VERSION_NOT_SUPPORTED_KHR = -1000023005, + VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT = -1000158000, + VK_ERROR_PRESENT_TIMING_QUEUE_FULL_EXT = -1000208000, + VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT = -1000255000, + VK_THREAD_IDLE_KHR = 1000268000, + VK_THREAD_DONE_KHR = 1000268001, + VK_OPERATION_DEFERRED_KHR = 1000268002, + VK_OPERATION_NOT_DEFERRED_KHR = 1000268003, + VK_ERROR_INVALID_VIDEO_STD_PARAMETERS_KHR = -1000299000, + VK_ERROR_COMPRESSION_EXHAUSTED_EXT = -1000338000, + VK_INCOMPATIBLE_SHADER_BINARY_EXT = 1000482000, + VK_PIPELINE_BINARY_MISSING_KHR = 1000483000, + VK_ERROR_NOT_ENOUGH_SPACE_KHR = -1000483000, + VK_ERROR_VALIDATION_FAILED_EXT = VK_ERROR_VALIDATION_FAILED, + VK_ERROR_OUT_OF_POOL_MEMORY_KHR = VK_ERROR_OUT_OF_POOL_MEMORY, + VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR = VK_ERROR_INVALID_EXTERNAL_HANDLE, + VK_ERROR_FRAGMENTATION_EXT = VK_ERROR_FRAGMENTATION, + VK_ERROR_NOT_PERMITTED_EXT = VK_ERROR_NOT_PERMITTED, + VK_ERROR_NOT_PERMITTED_KHR = VK_ERROR_NOT_PERMITTED, + VK_ERROR_INVALID_DEVICE_ADDRESS_EXT = VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS, + VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS_KHR = VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS, + VK_PIPELINE_COMPILE_REQUIRED_EXT = VK_PIPELINE_COMPILE_REQUIRED, + VK_ERROR_PIPELINE_COMPILE_REQUIRED_EXT = 
VK_PIPELINE_COMPILE_REQUIRED, + // VK_ERROR_INCOMPATIBLE_SHADER_BINARY_EXT is a legacy alias + VK_ERROR_INCOMPATIBLE_SHADER_BINARY_EXT = VK_INCOMPATIBLE_SHADER_BINARY_EXT, + VK_RESULT_MAX_ENUM = 0x7FFFFFFF +} VkResult; + +typedef enum VkStructureType { + VK_STRUCTURE_TYPE_APPLICATION_INFO = 0, + VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO = 1, + VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO = 2, + VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO = 3, + VK_STRUCTURE_TYPE_SUBMIT_INFO = 4, + VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO = 5, + VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE = 6, + VK_STRUCTURE_TYPE_BIND_SPARSE_INFO = 7, + VK_STRUCTURE_TYPE_FENCE_CREATE_INFO = 8, + VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO = 9, + VK_STRUCTURE_TYPE_EVENT_CREATE_INFO = 10, + VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO = 11, + VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO = 12, + VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO = 13, + VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO = 14, + VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO = 15, + VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO = 16, + VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO = 17, + VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO = 18, + VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO = 19, + VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO = 20, + VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO = 21, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO = 22, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO = 23, + VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO = 24, + VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO = 25, + VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO = 26, + VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO = 27, + VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO = 28, + VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO = 29, + VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO = 30, + VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO = 31, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO = 32, + VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO = 33, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO = 34, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET = 35, + VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET = 36, + VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO = 37, + VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO = 38, + VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO = 39, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO = 40, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO = 41, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO = 42, + VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO = 43, + VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER = 44, + VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER = 45, + VK_STRUCTURE_TYPE_MEMORY_BARRIER = 46, + VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO = 47, + VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO = 48, + VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO = 1000157000, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO = 1000157001, + VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS = 1000127000, + VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO = 1000127001, + VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO = 1000060000, + VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO = 1000060004, + VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO = 1000060005, + VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO = 1000060006, + VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO = 1000060013, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO = 1000060014, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES = 1000070000, + 
VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO = 1000070001, + VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2 = 1000146000, + VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2 = 1000146001, + VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2 = 1000146002, + VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2 = 1000146003, + VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2 = 1000146004, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2 = 1000059000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2 = 1000059001, + VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2 = 1000059002, + VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2 = 1000059003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2 = 1000059004, + VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2 = 1000059005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2 = 1000059006, + VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2 = 1000059007, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2 = 1000059008, + VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO = 1000117002, + VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO = 1000145000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES = 1000145001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES = 1000145002, + VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2 = 1000145003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO = 1000071000, + VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES = 1000071001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO = 1000071002, + VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES = 1000071003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES = 1000071004, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO = 1000072000, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO = 1000072001, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO = 1000072002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO = 1000112000, + VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES = 1000112001, + VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO = 1000113000, + VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO = 1000077000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO = 1000076000, + VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES = 1000076001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES = 1000094000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES = 1000083000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES = 1000120000, + VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO = 1000085000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES = 1000168000, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT = 1000168001, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO = 1000156000, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO = 1000156001, + VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO = 1000156002, + VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO = 1000156003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES = 1000156004, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES = 1000156005, + VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO = 1000060003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES = 1000117000, + VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO = 1000117001, + VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO = 1000117003, + VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO = 1000053000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES = 
1000053001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES = 1000053002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES = 1000063000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES = 49, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES = 50, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES = 51, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES = 52, + VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO = 1000147000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES = 1000196000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES = 1000211000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES = 1000261000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES = 1000207000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES = 1000207001, + VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO = 1000207002, + VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO = 1000207003, + VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO = 1000207004, + VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO = 1000207005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES = 1000257000, + VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO = 1000244001, + VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO = 1000257002, + VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO = 1000257003, + VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO = 1000257004, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES = 1000177000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES = 1000180000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES = 1000082000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES = 1000197000, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO = 1000161000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES = 1000161001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES = 1000161002, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO = 1000161003, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT = 1000161004, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES = 1000221000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES = 1000130000, + VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO = 1000130001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES = 1000253000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES = 1000175000, + VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2 = 1000109000, + VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2 = 1000109001, + VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2 = 1000109002, + VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2 = 1000109003, + VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2 = 1000109004, + VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO = 1000109005, + VK_STRUCTURE_TYPE_SUBPASS_END_INFO = 1000109006, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES = 1000199000, + VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE = 1000199001, + VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO = 1000246000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES = 1000108000, + VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO = 1000108001, + VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO = 1000108002, + VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO = 1000108003, + 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES = 1000241000, + VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT = 1000241001, + VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT = 1000241002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES = 53, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_PROPERTIES = 54, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES = 1000245000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES = 1000295000, + VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO = 1000295001, + VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO = 1000295002, + VK_STRUCTURE_TYPE_MEMORY_BARRIER_2 = 1000314000, + VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2 = 1000314001, + VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2 = 1000314002, + VK_STRUCTURE_TYPE_DEPENDENCY_INFO = 1000314003, + VK_STRUCTURE_TYPE_SUBMIT_INFO_2 = 1000314004, + VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO = 1000314005, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO = 1000314006, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES = 1000314007, + VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2 = 1000337000, + VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2 = 1000337001, + VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2 = 1000337002, + VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2 = 1000337003, + VK_STRUCTURE_TYPE_BUFFER_COPY_2 = 1000337006, + VK_STRUCTURE_TYPE_IMAGE_COPY_2 = 1000337007, + VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2 = 1000337009, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES = 1000066000, + VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_3 = 1000360000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES = 1000413000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES = 1000413001, + VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS = 1000413002, + VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS = 1000413003, + VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO = 1000192000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES = 1000215000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES = 1000276000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES = 1000297000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES = 1000325000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES = 1000335000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES = 1000225000, + VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO = 1000225001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES = 1000225002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES = 1000138000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES = 1000138001, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK = 1000138002, + VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO = 1000138003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES = 1000280000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_PROPERTIES = 1000280001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES = 1000281001, + VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2 = 1000337004, + VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2 = 1000337005, + VK_STRUCTURE_TYPE_IMAGE_BLIT_2 = 1000337008, + VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2 = 1000337010, + VK_STRUCTURE_TYPE_RENDERING_INFO = 1000044000, + VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO = 1000044001, + 
VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO = 1000044002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES = 1000044003, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDERING_INFO = 1000044004, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_4_FEATURES = 55, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_4_PROPERTIES = 56, + VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO = 1000174000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES = 1000388000, + VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES = 1000388001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES = 1000265000, + VK_STRUCTURE_TYPE_MEMORY_MAP_INFO = 1000271000, + VK_STRUCTURE_TYPE_MEMORY_UNMAP_INFO = 1000271001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_FEATURES = 1000470000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_PROPERTIES = 1000470001, + VK_STRUCTURE_TYPE_DEVICE_IMAGE_SUBRESOURCE_INFO = 1000470004, + VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2 = 1000338002, + VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2 = 1000338003, + VK_STRUCTURE_TYPE_BUFFER_USAGE_FLAGS_2_CREATE_INFO = 1000470006, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_FEATURES = 1000545000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_PROPERTIES = 1000545001, + VK_STRUCTURE_TYPE_BIND_MEMORY_STATUS = 1000545002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_FEATURES = 1000270000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_PROPERTIES = 1000270001, + VK_STRUCTURE_TYPE_MEMORY_TO_IMAGE_COPY = 1000270002, + VK_STRUCTURE_TYPE_IMAGE_TO_MEMORY_COPY = 1000270003, + VK_STRUCTURE_TYPE_COPY_IMAGE_TO_MEMORY_INFO = 1000270004, + VK_STRUCTURE_TYPE_COPY_MEMORY_TO_IMAGE_INFO = 1000270005, + VK_STRUCTURE_TYPE_HOST_IMAGE_LAYOUT_TRANSITION_INFO = 1000270006, + VK_STRUCTURE_TYPE_COPY_IMAGE_TO_IMAGE_INFO = 1000270007, + VK_STRUCTURE_TYPE_SUBRESOURCE_HOST_MEMCPY_SIZE = 1000270008, + VK_STRUCTURE_TYPE_HOST_IMAGE_COPY_DEVICE_PERFORMANCE_QUERY = 1000270009, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_ROTATE_FEATURES = 1000416000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT_CONTROLS_2_FEATURES = 1000528000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_EXPECT_ASSUME_FEATURES = 1000544000, + VK_STRUCTURE_TYPE_PIPELINE_CREATE_FLAGS_2_CREATE_INFO = 1000470005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES = 1000080000, + VK_STRUCTURE_TYPE_BIND_DESCRIPTOR_SETS_INFO = 1000545003, + VK_STRUCTURE_TYPE_PUSH_CONSTANTS_INFO = 1000545004, + VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_INFO = 1000545005, + VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_WITH_TEMPLATE_INFO = 1000545006, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_PROTECTED_ACCESS_FEATURES = 1000466000, + VK_STRUCTURE_TYPE_PIPELINE_ROBUSTNESS_CREATE_INFO = 1000068000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_FEATURES = 1000068001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_PROPERTIES = 1000068002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES = 1000259000, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO = 1000259001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES = 1000259002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES = 1000525000, + VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO = 1000190001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES = 1000190002, + VK_STRUCTURE_TYPE_RENDERING_AREA_INFO = 1000470003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_LOCAL_READ_FEATURES = 
1000232000, + VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_LOCATION_INFO = 1000232001, + VK_STRUCTURE_TYPE_RENDERING_INPUT_ATTACHMENT_INDEX_INFO = 1000232002, + VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR = 1000001000, + VK_STRUCTURE_TYPE_PRESENT_INFO_KHR = 1000001001, + VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_CAPABILITIES_KHR = 1000060007, + VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR = 1000060008, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR = 1000060009, + VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR = 1000060010, + VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_INFO_KHR = 1000060011, + VK_STRUCTURE_TYPE_DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR = 1000060012, + VK_STRUCTURE_TYPE_DISPLAY_MODE_CREATE_INFO_KHR = 1000002000, + VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR = 1000002001, + VK_STRUCTURE_TYPE_DISPLAY_PRESENT_INFO_KHR = 1000003000, + VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR = 1000004000, + VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR = 1000005000, + VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR = 1000006000, + VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR = 1000008000, + VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR = 1000009000, + VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT = 1000011000, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD = 1000018000, + VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT = 1000022000, + VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_TAG_INFO_EXT = 1000022001, + VK_STRUCTURE_TYPE_DEBUG_MARKER_MARKER_INFO_EXT = 1000022002, + VK_STRUCTURE_TYPE_VIDEO_PROFILE_INFO_KHR = 1000023000, + VK_STRUCTURE_TYPE_VIDEO_CAPABILITIES_KHR = 1000023001, + VK_STRUCTURE_TYPE_VIDEO_PICTURE_RESOURCE_INFO_KHR = 1000023002, + VK_STRUCTURE_TYPE_VIDEO_SESSION_MEMORY_REQUIREMENTS_KHR = 1000023003, + VK_STRUCTURE_TYPE_BIND_VIDEO_SESSION_MEMORY_INFO_KHR = 1000023004, + VK_STRUCTURE_TYPE_VIDEO_SESSION_CREATE_INFO_KHR = 1000023005, + VK_STRUCTURE_TYPE_VIDEO_SESSION_PARAMETERS_CREATE_INFO_KHR = 1000023006, + VK_STRUCTURE_TYPE_VIDEO_SESSION_PARAMETERS_UPDATE_INFO_KHR = 1000023007, + VK_STRUCTURE_TYPE_VIDEO_BEGIN_CODING_INFO_KHR = 1000023008, + VK_STRUCTURE_TYPE_VIDEO_END_CODING_INFO_KHR = 1000023009, + VK_STRUCTURE_TYPE_VIDEO_CODING_CONTROL_INFO_KHR = 1000023010, + VK_STRUCTURE_TYPE_VIDEO_REFERENCE_SLOT_INFO_KHR = 1000023011, + VK_STRUCTURE_TYPE_QUEUE_FAMILY_VIDEO_PROPERTIES_KHR = 1000023012, + VK_STRUCTURE_TYPE_VIDEO_PROFILE_LIST_INFO_KHR = 1000023013, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_FORMAT_INFO_KHR = 1000023014, + VK_STRUCTURE_TYPE_VIDEO_FORMAT_PROPERTIES_KHR = 1000023015, + VK_STRUCTURE_TYPE_QUEUE_FAMILY_QUERY_RESULT_STATUS_PROPERTIES_KHR = 1000023016, + VK_STRUCTURE_TYPE_VIDEO_DECODE_INFO_KHR = 1000024000, + VK_STRUCTURE_TYPE_VIDEO_DECODE_CAPABILITIES_KHR = 1000024001, + VK_STRUCTURE_TYPE_VIDEO_DECODE_USAGE_INFO_KHR = 1000024002, + VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV = 1000026000, + VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV = 1000026001, + VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV = 1000026002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT = 1000028000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT = 1000028001, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT = 1000028002, + VK_STRUCTURE_TYPE_CU_MODULE_CREATE_INFO_NVX = 1000029000, + VK_STRUCTURE_TYPE_CU_FUNCTION_CREATE_INFO_NVX = 1000029001, + VK_STRUCTURE_TYPE_CU_LAUNCH_INFO_NVX = 1000029002, + 
VK_STRUCTURE_TYPE_CU_MODULE_TEXTURING_MODE_CREATE_INFO_NVX = 1000029004, + VK_STRUCTURE_TYPE_IMAGE_VIEW_HANDLE_INFO_NVX = 1000030000, + VK_STRUCTURE_TYPE_IMAGE_VIEW_ADDRESS_PROPERTIES_NVX = 1000030001, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_CAPABILITIES_KHR = 1000038000, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_PARAMETERS_CREATE_INFO_KHR = 1000038001, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_PARAMETERS_ADD_INFO_KHR = 1000038002, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_PICTURE_INFO_KHR = 1000038003, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_DPB_SLOT_INFO_KHR = 1000038004, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_NALU_SLICE_INFO_KHR = 1000038005, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_GOP_REMAINING_FRAME_INFO_KHR = 1000038006, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_PROFILE_INFO_KHR = 1000038007, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_RATE_CONTROL_INFO_KHR = 1000038008, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_RATE_CONTROL_LAYER_INFO_KHR = 1000038009, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_CREATE_INFO_KHR = 1000038010, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_QUALITY_LEVEL_PROPERTIES_KHR = 1000038011, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_PARAMETERS_GET_INFO_KHR = 1000038012, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_SESSION_PARAMETERS_FEEDBACK_INFO_KHR = 1000038013, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_CAPABILITIES_KHR = 1000039000, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_SESSION_PARAMETERS_CREATE_INFO_KHR = 1000039001, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_SESSION_PARAMETERS_ADD_INFO_KHR = 1000039002, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_PICTURE_INFO_KHR = 1000039003, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_DPB_SLOT_INFO_KHR = 1000039004, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_NALU_SLICE_SEGMENT_INFO_KHR = 1000039005, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_GOP_REMAINING_FRAME_INFO_KHR = 1000039006, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_PROFILE_INFO_KHR = 1000039007, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_RATE_CONTROL_INFO_KHR = 1000039009, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_RATE_CONTROL_LAYER_INFO_KHR = 1000039010, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_SESSION_CREATE_INFO_KHR = 1000039011, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_QUALITY_LEVEL_PROPERTIES_KHR = 1000039012, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_SESSION_PARAMETERS_GET_INFO_KHR = 1000039013, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_SESSION_PARAMETERS_FEEDBACK_INFO_KHR = 1000039014, + VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_CAPABILITIES_KHR = 1000040000, + VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_PICTURE_INFO_KHR = 1000040001, + VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_PROFILE_INFO_KHR = 1000040003, + VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_SESSION_PARAMETERS_CREATE_INFO_KHR = 1000040004, + VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_SESSION_PARAMETERS_ADD_INFO_KHR = 1000040005, + VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_DPB_SLOT_INFO_KHR = 1000040006, + VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD = 1000041000, + VK_STRUCTURE_TYPE_STREAM_DESCRIPTOR_SURFACE_CREATE_INFO_GGP = 1000049000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV = 1000050000, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV = 1000056000, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_NV = 1000056001, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_NV = 1000057000, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_NV = 1000057001, + VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV = 1000058000, + VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT = 1000061000, + VK_STRUCTURE_TYPE_VI_SURFACE_CREATE_INFO_NN = 1000062000, + 
VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT = 1000067000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT = 1000067001, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073000, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073001, + VK_STRUCTURE_TYPE_MEMORY_WIN32_HANDLE_PROPERTIES_KHR = 1000073002, + VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR = 1000073003, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR = 1000074000, + VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR = 1000074001, + VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR = 1000074002, + VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR = 1000075000, + VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR = 1000078000, + VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR = 1000078001, + VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHR = 1000078002, + VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR = 1000078003, + VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR = 1000079000, + VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR = 1000079001, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT = 1000081000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT = 1000081001, + VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT = 1000081002, + VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR = 1000084000, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV = 1000087000, + VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT = 1000090000, + VK_STRUCTURE_TYPE_DISPLAY_POWER_INFO_EXT = 1000091000, + VK_STRUCTURE_TYPE_DEVICE_EVENT_INFO_EXT = 1000091001, + VK_STRUCTURE_TYPE_DISPLAY_EVENT_INFO_EXT = 1000091002, + VK_STRUCTURE_TYPE_SWAPCHAIN_COUNTER_CREATE_INFO_EXT = 1000091003, + VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE = 1000092000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX = 1000097000, + VK_STRUCTURE_TYPE_MULTIVIEW_PER_VIEW_ATTRIBUTES_INFO_NVX = 1000044009, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV = 1000098000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT = 1000099000, + VK_STRUCTURE_TYPE_PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT = 1000099001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT = 1000101000, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT = 1000101001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT = 1000102000, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT = 1000102001, + VK_STRUCTURE_TYPE_HDR_METADATA_EXT = 1000105000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RELAXED_LINE_RASTERIZATION_FEATURES_IMG = 1000110000, + VK_STRUCTURE_TYPE_SHARED_PRESENT_SURFACE_CAPABILITIES_KHR = 1000111000, + VK_STRUCTURE_TYPE_IMPORT_FENCE_WIN32_HANDLE_INFO_KHR = 1000114000, + VK_STRUCTURE_TYPE_EXPORT_FENCE_WIN32_HANDLE_INFO_KHR = 1000114001, + VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR = 1000114002, + VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR = 1000115000, + VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR = 1000115001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR = 1000116000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR = 1000116001, + VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR = 1000116002, + VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR = 1000116003, + VK_STRUCTURE_TYPE_ACQUIRE_PROFILING_LOCK_INFO_KHR = 1000116004, + VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_KHR = 1000116005, + 
VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_DESCRIPTION_KHR = 1000116006, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR = 1000119000, + VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR = 1000119001, + VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR = 1000119002, + VK_STRUCTURE_TYPE_DISPLAY_PROPERTIES_2_KHR = 1000121000, + VK_STRUCTURE_TYPE_DISPLAY_PLANE_PROPERTIES_2_KHR = 1000121001, + VK_STRUCTURE_TYPE_DISPLAY_MODE_PROPERTIES_2_KHR = 1000121002, + VK_STRUCTURE_TYPE_DISPLAY_PLANE_INFO_2_KHR = 1000121003, + VK_STRUCTURE_TYPE_DISPLAY_PLANE_CAPABILITIES_2_KHR = 1000121004, + VK_STRUCTURE_TYPE_IOS_SURFACE_CREATE_INFO_MVK = 1000122000, + VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK = 1000123000, + VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT = 1000128000, + VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_TAG_INFO_EXT = 1000128001, + VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT = 1000128002, + VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT = 1000128003, + VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT = 1000128004, + VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID = 1000129000, + VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID = 1000129001, + VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID = 1000129002, + VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID = 1000129003, + VK_STRUCTURE_TYPE_MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID = 1000129004, + VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID = 1000129005, + VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_2_ANDROID = 1000129006, +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ENQUEUE_FEATURES_AMDX = 1000134000, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ENQUEUE_PROPERTIES_AMDX = 1000134001, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_EXECUTION_GRAPH_PIPELINE_SCRATCH_SIZE_AMDX = 1000134002, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_EXECUTION_GRAPH_PIPELINE_CREATE_INFO_AMDX = 1000134003, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_NODE_CREATE_INFO_AMDX = 1000134004, +#endif + VK_STRUCTURE_TYPE_TEXEL_BUFFER_DESCRIPTOR_INFO_EXT = 1000135000, + VK_STRUCTURE_TYPE_IMAGE_DESCRIPTOR_INFO_EXT = 1000135001, + VK_STRUCTURE_TYPE_RESOURCE_DESCRIPTOR_INFO_EXT = 1000135002, + VK_STRUCTURE_TYPE_BIND_HEAP_INFO_EXT = 1000135003, + VK_STRUCTURE_TYPE_PUSH_DATA_INFO_EXT = 1000135004, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_AND_BINDING_MAPPING_EXT = 1000135005, + VK_STRUCTURE_TYPE_SHADER_DESCRIPTOR_SET_AND_BINDING_MAPPING_INFO_EXT = 1000135006, + VK_STRUCTURE_TYPE_OPAQUE_CAPTURE_DATA_CREATE_INFO_EXT = 1000135007, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_HEAP_PROPERTIES_EXT = 1000135008, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_HEAP_FEATURES_EXT = 1000135009, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_DESCRIPTOR_HEAP_INFO_EXT = 1000135010, + VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_INDEX_CREATE_INFO_EXT = 1000135011, + VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_PUSH_DATA_TOKEN_NV = 1000135012, + VK_STRUCTURE_TYPE_SUBSAMPLED_IMAGE_FORMAT_PROPERTIES_EXT = 1000135013, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_HEAP_TENSOR_PROPERTIES_ARM = 1000135014, + VK_STRUCTURE_TYPE_ATTACHMENT_SAMPLE_COUNT_INFO_AMD = 1000044008, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_BFLOAT16_FEATURES_KHR = 1000141000, + VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT = 1000143000, + VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT = 1000143001, + 
VK_STRUCTURE_TYPE_PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT = 1000143002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT = 1000143003, + VK_STRUCTURE_TYPE_MULTISAMPLE_PROPERTIES_EXT = 1000143004, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT = 1000148000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT = 1000148001, + VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT = 1000148002, + VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV = 1000149000, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR = 1000150007, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR = 1000150000, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_DEVICE_ADDRESS_INFO_KHR = 1000150002, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_AABBS_DATA_KHR = 1000150003, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR = 1000150004, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR = 1000150005, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_KHR = 1000150006, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_VERSION_INFO_KHR = 1000150009, + VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_INFO_KHR = 1000150010, + VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_TO_MEMORY_INFO_KHR = 1000150011, + VK_STRUCTURE_TYPE_COPY_MEMORY_TO_ACCELERATION_STRUCTURE_INFO_KHR = 1000150012, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR = 1000150013, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR = 1000150014, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_KHR = 1000150017, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_SIZES_INFO_KHR = 1000150020, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR = 1000347000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR = 1000347001, + VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_KHR = 1000150015, + VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_KHR = 1000150016, + VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_INTERFACE_CREATE_INFO_KHR = 1000150018, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR = 1000348013, + VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV = 1000152000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV = 1000154000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV = 1000154001, + VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT = 1000158000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT = 1000158002, + VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT = 1000158003, + VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT = 1000158004, + VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT = 1000158005, + VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_2_EXT = 1000158006, + VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT = 1000160000, + VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT = 1000160001, +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_FEATURES_KHR = 1000163000, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_PROPERTIES_KHR = 1000163001, +#endif + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV = 1000164000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV = 1000164001, + 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV = 1000164002, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV = 1000164005, + VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV = 1000165000, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV = 1000165001, + VK_STRUCTURE_TYPE_GEOMETRY_NV = 1000165003, + VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV = 1000165004, + VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV = 1000165005, + VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV = 1000165006, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV = 1000165007, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV = 1000165008, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV = 1000165009, + VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV = 1000165011, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV = 1000165012, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV = 1000166000, + VK_STRUCTURE_TYPE_PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV = 1000166001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT = 1000170000, + VK_STRUCTURE_TYPE_FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT = 1000170001, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT = 1000178000, + VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT = 1000178001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT = 1000178002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR = 1000181000, + VK_STRUCTURE_TYPE_PIPELINE_COMPILER_CONTROL_CREATE_INFO_AMD = 1000183000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD = 1000185000, + VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_CAPABILITIES_KHR = 1000187000, + VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_SESSION_PARAMETERS_CREATE_INFO_KHR = 1000187001, + VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_SESSION_PARAMETERS_ADD_INFO_KHR = 1000187002, + VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_PROFILE_INFO_KHR = 1000187003, + VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_PICTURE_INFO_KHR = 1000187004, + VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_DPB_SLOT_INFO_KHR = 1000187005, + VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD = 1000189000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT = 1000190000, + VK_STRUCTURE_TYPE_PRESENT_FRAME_TOKEN_GGP = 1000191000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV = 1000202000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV = 1000202001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV = 1000204000, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV = 1000205000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV = 1000205002, + VK_STRUCTURE_TYPE_CHECKPOINT_DATA_NV = 1000206000, + VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV = 1000206001, + VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_2_NV = 1000314008, + VK_STRUCTURE_TYPE_CHECKPOINT_DATA_2_NV = 1000314009, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_TIMING_FEATURES_EXT = 1000208000, + VK_STRUCTURE_TYPE_SWAPCHAIN_TIMING_PROPERTIES_EXT = 1000208001, + VK_STRUCTURE_TYPE_SWAPCHAIN_TIME_DOMAIN_PROPERTIES_EXT = 1000208002, + VK_STRUCTURE_TYPE_PRESENT_TIMINGS_INFO_EXT = 1000208003, + VK_STRUCTURE_TYPE_PRESENT_TIMING_INFO_EXT = 1000208004, + VK_STRUCTURE_TYPE_PAST_PRESENTATION_TIMING_INFO_EXT = 1000208005, + VK_STRUCTURE_TYPE_PAST_PRESENTATION_TIMING_PROPERTIES_EXT = 1000208006, + 
VK_STRUCTURE_TYPE_PAST_PRESENTATION_TIMING_EXT = 1000208007, + VK_STRUCTURE_TYPE_PRESENT_TIMING_SURFACE_CAPABILITIES_EXT = 1000208008, + VK_STRUCTURE_TYPE_SWAPCHAIN_CALIBRATED_TIMESTAMP_INFO_EXT = 1000208009, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL = 1000209000, + VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL = 1000210000, + VK_STRUCTURE_TYPE_INITIALIZE_PERFORMANCE_API_INFO_INTEL = 1000210001, + VK_STRUCTURE_TYPE_PERFORMANCE_MARKER_INFO_INTEL = 1000210002, + VK_STRUCTURE_TYPE_PERFORMANCE_STREAM_MARKER_INFO_INTEL = 1000210003, + VK_STRUCTURE_TYPE_PERFORMANCE_OVERRIDE_INFO_INTEL = 1000210004, + VK_STRUCTURE_TYPE_PERFORMANCE_CONFIGURATION_ACQUIRE_INFO_INTEL = 1000210005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT = 1000212000, + VK_STRUCTURE_TYPE_DISPLAY_NATIVE_HDR_SURFACE_CAPABILITIES_AMD = 1000213000, + VK_STRUCTURE_TYPE_SWAPCHAIN_DISPLAY_NATIVE_HDR_CREATE_INFO_AMD = 1000213001, + VK_STRUCTURE_TYPE_IMAGEPIPE_SURFACE_CREATE_INFO_FUCHSIA = 1000214000, + VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT = 1000217000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT = 1000218000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT = 1000218001, + VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT = 1000218002, + VK_STRUCTURE_TYPE_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_INFO_EXT = 1000044007, + VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR = 1000226000, + VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR = 1000226001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR = 1000226002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR = 1000226003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_KHR = 1000226004, + VK_STRUCTURE_TYPE_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR = 1000044006, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD = 1000227000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD = 1000229000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT = 1000234000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_QUAD_CONTROL_FEATURES_KHR = 1000235000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT = 1000237000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT = 1000238000, + VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT = 1000238001, + VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR = 1000239000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEDICATED_ALLOCATION_IMAGE_ALIASING_FEATURES_NV = 1000240000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT = 1000244000, + VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT = 1000244002, + VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT = 1000247000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_WAIT_FEATURES_KHR = 1000248000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV = 1000249000, + VK_STRUCTURE_TYPE_COOPERATIVE_MATRIX_PROPERTIES_NV = 1000249001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_NV = 1000249002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV = 1000250000, + VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_REDUCTION_STATE_CREATE_INFO_NV = 1000250001, + VK_STRUCTURE_TYPE_FRAMEBUFFER_MIXED_SAMPLES_COMBINATION_NV = 1000250002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT = 1000251000, + 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT = 1000252000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_FEATURES_EXT = 1000254000, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_PROVOKING_VERTEX_STATE_CREATE_INFO_EXT = 1000254001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROVOKING_VERTEX_PROPERTIES_EXT = 1000254002, + VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT = 1000255000, + VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_FULL_SCREEN_EXCLUSIVE_EXT = 1000255002, + VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT = 1000255001, + VK_STRUCTURE_TYPE_HEADLESS_SURFACE_CREATE_INFO_EXT = 1000256000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT = 1000260000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT = 1000267000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR = 1000269000, + VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR = 1000269001, + VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_PROPERTIES_KHR = 1000269002, + VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INFO_KHR = 1000269003, + VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_STATISTIC_KHR = 1000269004, + VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INTERNAL_REPRESENTATION_KHR = 1000269005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAP_MEMORY_PLACED_FEATURES_EXT = 1000272000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAP_MEMORY_PLACED_PROPERTIES_EXT = 1000272001, + VK_STRUCTURE_TYPE_MEMORY_MAP_PLACED_INFO_EXT = 1000272002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_2_FEATURES_EXT = 1000273000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV = 1000277000, + VK_STRUCTURE_TYPE_GRAPHICS_SHADER_GROUP_CREATE_INFO_NV = 1000277001, + VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_SHADER_GROUPS_CREATE_INFO_NV = 1000277002, + VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_TOKEN_NV = 1000277003, + VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NV = 1000277004, + VK_STRUCTURE_TYPE_GENERATED_COMMANDS_INFO_NV = 1000277005, + VK_STRUCTURE_TYPE_GENERATED_COMMANDS_MEMORY_REQUIREMENTS_INFO_NV = 1000277006, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV = 1000277007, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INHERITED_VIEWPORT_SCISSOR_FEATURES_NV = 1000278000, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_VIEWPORT_SCISSOR_INFO_NV = 1000278001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT = 1000281000, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDER_PASS_TRANSFORM_INFO_QCOM = 1000282000, + VK_STRUCTURE_TYPE_RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM = 1000282001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_BIAS_CONTROL_FEATURES_EXT = 1000283000, + VK_STRUCTURE_TYPE_DEPTH_BIAS_INFO_EXT = 1000283001, + VK_STRUCTURE_TYPE_DEPTH_BIAS_REPRESENTATION_INFO_EXT = 1000283002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT = 1000284000, + VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT = 1000284001, + VK_STRUCTURE_TYPE_DEVICE_MEMORY_REPORT_CALLBACK_DATA_EXT = 1000284002, + VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT = 1000287000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT = 1000287001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT = 1000287002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_3D_FEATURES_EXT = 1000288000, + VK_STRUCTURE_TYPE_PIPELINE_LIBRARY_CREATE_INFO_KHR = 1000290000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_BARRIER_FEATURES_NV = 1000292000, + 
VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_PRESENT_BARRIER_NV = 1000292001, + VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_BARRIER_CREATE_INFO_NV = 1000292002, + VK_STRUCTURE_TYPE_PRESENT_ID_KHR = 1000294000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_ID_FEATURES_KHR = 1000294001, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_INFO_KHR = 1000299000, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_RATE_CONTROL_INFO_KHR = 1000299001, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_RATE_CONTROL_LAYER_INFO_KHR = 1000299002, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_CAPABILITIES_KHR = 1000299003, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_USAGE_INFO_KHR = 1000299004, + VK_STRUCTURE_TYPE_QUERY_POOL_VIDEO_ENCODE_FEEDBACK_CREATE_INFO_KHR = 1000299005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_ENCODE_QUALITY_LEVEL_INFO_KHR = 1000299006, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_QUALITY_LEVEL_PROPERTIES_KHR = 1000299007, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_QUALITY_LEVEL_INFO_KHR = 1000299008, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_SESSION_PARAMETERS_GET_INFO_KHR = 1000299009, + VK_STRUCTURE_TYPE_VIDEO_ENCODE_SESSION_PARAMETERS_FEEDBACK_INFO_KHR = 1000299010, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV = 1000300000, + VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV = 1000300001, +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_CUDA_MODULE_CREATE_INFO_NV = 1000307000, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_CUDA_FUNCTION_CREATE_INFO_NV = 1000307001, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_CUDA_LAUNCH_INFO_NV = 1000307002, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUDA_KERNEL_LAUNCH_FEATURES_NV = 1000307003, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUDA_KERNEL_LAUNCH_PROPERTIES_NV = 1000307004, +#endif + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TILE_SHADING_FEATURES_QCOM = 1000309000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TILE_SHADING_PROPERTIES_QCOM = 1000309001, + VK_STRUCTURE_TYPE_RENDER_PASS_TILE_SHADING_CREATE_INFO_QCOM = 1000309002, + VK_STRUCTURE_TYPE_PER_TILE_BEGIN_INFO_QCOM = 1000309003, + VK_STRUCTURE_TYPE_PER_TILE_END_INFO_QCOM = 1000309004, + VK_STRUCTURE_TYPE_DISPATCH_TILE_INFO_QCOM = 1000309005, + VK_STRUCTURE_TYPE_QUERY_LOW_LATENCY_SUPPORT_NV = 1000310000, + VK_STRUCTURE_TYPE_EXPORT_METAL_OBJECT_CREATE_INFO_EXT = 1000311000, + VK_STRUCTURE_TYPE_EXPORT_METAL_OBJECTS_INFO_EXT = 1000311001, + VK_STRUCTURE_TYPE_EXPORT_METAL_DEVICE_INFO_EXT = 1000311002, + VK_STRUCTURE_TYPE_EXPORT_METAL_COMMAND_QUEUE_INFO_EXT = 1000311003, + VK_STRUCTURE_TYPE_EXPORT_METAL_BUFFER_INFO_EXT = 1000311004, + VK_STRUCTURE_TYPE_IMPORT_METAL_BUFFER_INFO_EXT = 1000311005, + VK_STRUCTURE_TYPE_EXPORT_METAL_TEXTURE_INFO_EXT = 1000311006, + VK_STRUCTURE_TYPE_IMPORT_METAL_TEXTURE_INFO_EXT = 1000311007, + VK_STRUCTURE_TYPE_EXPORT_METAL_IO_SURFACE_INFO_EXT = 1000311008, + VK_STRUCTURE_TYPE_IMPORT_METAL_IO_SURFACE_INFO_EXT = 1000311009, + VK_STRUCTURE_TYPE_EXPORT_METAL_SHARED_EVENT_INFO_EXT = 1000311010, + VK_STRUCTURE_TYPE_IMPORT_METAL_SHARED_EVENT_INFO_EXT = 1000311011, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_BUFFER_PROPERTIES_EXT = 1000316000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_BUFFER_DENSITY_MAP_PROPERTIES_EXT = 1000316001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_BUFFER_FEATURES_EXT = 1000316002, + VK_STRUCTURE_TYPE_DESCRIPTOR_ADDRESS_INFO_EXT = 1000316003, + VK_STRUCTURE_TYPE_DESCRIPTOR_GET_INFO_EXT = 1000316004, + VK_STRUCTURE_TYPE_BUFFER_CAPTURE_DESCRIPTOR_DATA_INFO_EXT = 1000316005, + 
+    VK_STRUCTURE_TYPE_IMAGE_CAPTURE_DESCRIPTOR_DATA_INFO_EXT = 1000316006,
+    VK_STRUCTURE_TYPE_IMAGE_VIEW_CAPTURE_DESCRIPTOR_DATA_INFO_EXT = 1000316007,
+    VK_STRUCTURE_TYPE_SAMPLER_CAPTURE_DESCRIPTOR_DATA_INFO_EXT = 1000316008,
+    VK_STRUCTURE_TYPE_OPAQUE_CAPTURE_DESCRIPTOR_DATA_CREATE_INFO_EXT = 1000316010,
+    VK_STRUCTURE_TYPE_DESCRIPTOR_BUFFER_BINDING_INFO_EXT = 1000316011,
+    VK_STRUCTURE_TYPE_DESCRIPTOR_BUFFER_BINDING_PUSH_DESCRIPTOR_BUFFER_HANDLE_EXT = 1000316012,
+    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CAPTURE_DESCRIPTOR_DATA_INFO_EXT = 1000316009,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GRAPHICS_PIPELINE_LIBRARY_FEATURES_EXT = 1000320000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GRAPHICS_PIPELINE_LIBRARY_PROPERTIES_EXT = 1000320001,
+    VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_LIBRARY_CREATE_INFO_EXT = 1000320002,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_EARLY_AND_LATE_FRAGMENT_TESTS_FEATURES_AMD = 1000321000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_KHR = 1000203000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_PROPERTIES_KHR = 1000322000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_UNIFORM_CONTROL_FLOW_FEATURES_KHR = 1000323000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV = 1000326000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV = 1000326001,
+    VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV = 1000326002,
+    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_MOTION_TRIANGLES_DATA_NV = 1000327000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_MOTION_BLUR_FEATURES_NV = 1000327001,
+    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MOTION_INFO_NV = 1000327002,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_EXT = 1000328000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_EXT = 1000328001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_2_PLANE_444_FORMATS_FEATURES_EXT = 1000330000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_FEATURES_EXT = 1000332000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_PROPERTIES_EXT = 1000332001,
+    VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM = 1000333000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_FEATURES_KHR = 1000336000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_COMPRESSION_CONTROL_FEATURES_EXT = 1000338000,
+    VK_STRUCTURE_TYPE_IMAGE_COMPRESSION_CONTROL_EXT = 1000338001,
+    VK_STRUCTURE_TYPE_IMAGE_COMPRESSION_PROPERTIES_EXT = 1000338004,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ATTACHMENT_FEEDBACK_LOOP_LAYOUT_FEATURES_EXT = 1000339000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT = 1000340000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FAULT_FEATURES_EXT = 1000341000,
+    VK_STRUCTURE_TYPE_DEVICE_FAULT_COUNTS_EXT = 1000341001,
+    VK_STRUCTURE_TYPE_DEVICE_FAULT_INFO_EXT = 1000341002,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RGBA10X6_FORMATS_FEATURES_EXT = 1000344000,
+    VK_STRUCTURE_TYPE_DIRECTFB_SURFACE_CREATE_INFO_EXT = 1000346000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_INPUT_DYNAMIC_STATE_FEATURES_EXT = 1000352000,
+    VK_STRUCTURE_TYPE_VERTEX_INPUT_BINDING_DESCRIPTION_2_EXT = 1000352001,
+    VK_STRUCTURE_TYPE_VERTEX_INPUT_ATTRIBUTE_DESCRIPTION_2_EXT = 1000352002,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRM_PROPERTIES_EXT = 1000353000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ADDRESS_BINDING_REPORT_FEATURES_EXT = 1000354000,
+    VK_STRUCTURE_TYPE_DEVICE_ADDRESS_BINDING_CALLBACK_DATA_EXT = 1000354001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_CONTROL_FEATURES_EXT = 1000355000,
+    VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_DEPTH_CLIP_CONTROL_CREATE_INFO_EXT = 1000355001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVE_TOPOLOGY_LIST_RESTART_FEATURES_EXT = 1000356000,
+    VK_STRUCTURE_TYPE_IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA = 1000364000,
+    VK_STRUCTURE_TYPE_MEMORY_ZIRCON_HANDLE_PROPERTIES_FUCHSIA = 1000364001,
+    VK_STRUCTURE_TYPE_MEMORY_GET_ZIRCON_HANDLE_INFO_FUCHSIA = 1000364002,
+    VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_ZIRCON_HANDLE_INFO_FUCHSIA = 1000365000,
+    VK_STRUCTURE_TYPE_SEMAPHORE_GET_ZIRCON_HANDLE_INFO_FUCHSIA = 1000365001,
+    VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CREATE_INFO_FUCHSIA = 1000366000,
+    VK_STRUCTURE_TYPE_IMPORT_MEMORY_BUFFER_COLLECTION_FUCHSIA = 1000366001,
+    VK_STRUCTURE_TYPE_BUFFER_COLLECTION_IMAGE_CREATE_INFO_FUCHSIA = 1000366002,
+    VK_STRUCTURE_TYPE_BUFFER_COLLECTION_PROPERTIES_FUCHSIA = 1000366003,
+    VK_STRUCTURE_TYPE_BUFFER_CONSTRAINTS_INFO_FUCHSIA = 1000366004,
+    VK_STRUCTURE_TYPE_BUFFER_COLLECTION_BUFFER_CREATE_INFO_FUCHSIA = 1000366005,
+    VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA = 1000366006,
+    VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA = 1000366007,
+    VK_STRUCTURE_TYPE_SYSMEM_COLOR_SPACE_FUCHSIA = 1000366008,
+    VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CONSTRAINTS_INFO_FUCHSIA = 1000366009,
+    VK_STRUCTURE_TYPE_SUBPASS_SHADING_PIPELINE_CREATE_INFO_HUAWEI = 1000369000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBPASS_SHADING_FEATURES_HUAWEI = 1000369001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBPASS_SHADING_PROPERTIES_HUAWEI = 1000369002,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INVOCATION_MASK_FEATURES_HUAWEI = 1000370000,
+    VK_STRUCTURE_TYPE_MEMORY_GET_REMOTE_ADDRESS_INFO_NV = 1000371000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_RDMA_FEATURES_NV = 1000371001,
+    VK_STRUCTURE_TYPE_PIPELINE_PROPERTIES_IDENTIFIER_EXT = 1000372000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_PROPERTIES_FEATURES_EXT = 1000372001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAME_BOUNDARY_FEATURES_EXT = 1000375000,
+    VK_STRUCTURE_TYPE_FRAME_BOUNDARY_EXT = 1000375001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_FEATURES_EXT = 1000376000,
+    VK_STRUCTURE_TYPE_SUBPASS_RESOLVE_PERFORMANCE_QUERY_EXT = 1000376001,
+    VK_STRUCTURE_TYPE_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_INFO_EXT = 1000376002,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_2_FEATURES_EXT = 1000377000,
+    VK_STRUCTURE_TYPE_SCREEN_SURFACE_CREATE_INFO_QNX = 1000378000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COLOR_WRITE_ENABLE_FEATURES_EXT = 1000381000,
+    VK_STRUCTURE_TYPE_PIPELINE_COLOR_WRITE_CREATE_INFO_EXT = 1000381001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIMITIVES_GENERATED_QUERY_FEATURES_EXT = 1000382000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_MAINTENANCE_1_FEATURES_KHR = 1000386000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_UNTYPED_POINTERS_FEATURES_KHR = 1000387000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_ENCODE_RGB_CONVERSION_FEATURES_VALVE = 1000390000,
+    VK_STRUCTURE_TYPE_VIDEO_ENCODE_RGB_CONVERSION_CAPABILITIES_VALVE = 1000390001,
+    VK_STRUCTURE_TYPE_VIDEO_ENCODE_PROFILE_RGB_CONVERSION_INFO_VALVE = 1000390002,
+    VK_STRUCTURE_TYPE_VIDEO_ENCODE_SESSION_RGB_CONVERSION_CREATE_INFO_VALVE = 1000390003,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_MIN_LOD_FEATURES_EXT = 1000391000,
+    VK_STRUCTURE_TYPE_IMAGE_VIEW_MIN_LOD_CREATE_INFO_EXT = 1000391001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_FEATURES_EXT = 1000392000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTI_DRAW_PROPERTIES_EXT = 1000392001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_2D_VIEW_OF_3D_FEATURES_EXT = 1000393000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TILE_IMAGE_FEATURES_EXT = 1000395000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TILE_IMAGE_PROPERTIES_EXT = 1000395001,
+    VK_STRUCTURE_TYPE_MICROMAP_BUILD_INFO_EXT = 1000396000,
+    VK_STRUCTURE_TYPE_MICROMAP_VERSION_INFO_EXT = 1000396001,
+    VK_STRUCTURE_TYPE_COPY_MICROMAP_INFO_EXT = 1000396002,
+    VK_STRUCTURE_TYPE_COPY_MICROMAP_TO_MEMORY_INFO_EXT = 1000396003,
+    VK_STRUCTURE_TYPE_COPY_MEMORY_TO_MICROMAP_INFO_EXT = 1000396004,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_OPACITY_MICROMAP_FEATURES_EXT = 1000396005,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_OPACITY_MICROMAP_PROPERTIES_EXT = 1000396006,
+    VK_STRUCTURE_TYPE_MICROMAP_CREATE_INFO_EXT = 1000396007,
+    VK_STRUCTURE_TYPE_MICROMAP_BUILD_SIZES_INFO_EXT = 1000396008,
+    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_TRIANGLES_OPACITY_MICROMAP_EXT = 1000396009,
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISPLACEMENT_MICROMAP_FEATURES_NV = 1000397000,
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISPLACEMENT_MICROMAP_PROPERTIES_NV = 1000397001,
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_TRIANGLES_DISPLACEMENT_MICROMAP_NV = 1000397002,
+#endif
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CLUSTER_CULLING_SHADER_FEATURES_HUAWEI = 1000404000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CLUSTER_CULLING_SHADER_PROPERTIES_HUAWEI = 1000404001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CLUSTER_CULLING_SHADER_VRS_FEATURES_HUAWEI = 1000404002,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BORDER_COLOR_SWIZZLE_FEATURES_EXT = 1000411000,
+    VK_STRUCTURE_TYPE_SAMPLER_BORDER_COLOR_COMPONENT_MAPPING_CREATE_INFO_EXT = 1000411001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PAGEABLE_DEVICE_LOCAL_MEMORY_FEATURES_EXT = 1000412000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_ARM = 1000415000,
+    VK_STRUCTURE_TYPE_DEVICE_QUEUE_SHADER_CORE_CONTROL_CREATE_INFO_ARM = 1000417000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCHEDULING_CONTROLS_FEATURES_ARM = 1000417001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCHEDULING_CONTROLS_PROPERTIES_ARM = 1000417002,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_SLICED_VIEW_OF_3D_FEATURES_EXT = 1000418000,
+    VK_STRUCTURE_TYPE_IMAGE_VIEW_SLICED_CREATE_INFO_EXT = 1000418001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_SET_HOST_MAPPING_FEATURES_VALVE = 1000420000,
+    VK_STRUCTURE_TYPE_DESCRIPTOR_SET_BINDING_REFERENCE_VALVE = 1000420001,
+    VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_HOST_MAPPING_INFO_VALVE = 1000420002,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_NON_SEAMLESS_CUBE_MAP_FEATURES_EXT = 1000422000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RENDER_PASS_STRIPED_FEATURES_ARM = 1000424000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RENDER_PASS_STRIPED_PROPERTIES_ARM = 1000424001,
+    VK_STRUCTURE_TYPE_RENDER_PASS_STRIPE_BEGIN_INFO_ARM = 1000424002,
+    VK_STRUCTURE_TYPE_RENDER_PASS_STRIPE_INFO_ARM = 1000424003,
+    VK_STRUCTURE_TYPE_RENDER_PASS_STRIPE_SUBMIT_INFO_ARM = 1000424004,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COPY_MEMORY_INDIRECT_FEATURES_NV = 1000426000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_COMPUTE_FEATURES_NV = 1000428000,
+    VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_INDIRECT_BUFFER_INFO_NV = 1000428001,
+    VK_STRUCTURE_TYPE_PIPELINE_INDIRECT_DEVICE_ADDRESS_INFO_NV = 1000428002,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_LINEAR_SWEPT_SPHERES_FEATURES_NV = 1000429008,
+    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_LINEAR_SWEPT_SPHERES_DATA_NV = 1000429009,
+    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_SPHERES_DATA_NV = 1000429010,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINEAR_COLOR_ATTACHMENT_FEATURES_NV = 1000430000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_MAXIMAL_RECONVERGENCE_FEATURES_KHR = 1000434000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_COMPRESSION_CONTROL_SWAPCHAIN_FEATURES_EXT = 1000437000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_PROCESSING_FEATURES_QCOM = 1000440000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_PROCESSING_PROPERTIES_QCOM = 1000440001,
+    VK_STRUCTURE_TYPE_IMAGE_VIEW_SAMPLE_WEIGHT_CREATE_INFO_QCOM = 1000440002,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_NESTED_COMMAND_BUFFER_FEATURES_EXT = 1000451000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_NESTED_COMMAND_BUFFER_PROPERTIES_EXT = 1000451001,
+    VK_STRUCTURE_TYPE_NATIVE_BUFFER_USAGE_OHOS = 1000452000,
+    VK_STRUCTURE_TYPE_NATIVE_BUFFER_PROPERTIES_OHOS = 1000452001,
+    VK_STRUCTURE_TYPE_NATIVE_BUFFER_FORMAT_PROPERTIES_OHOS = 1000452002,
+    VK_STRUCTURE_TYPE_IMPORT_NATIVE_BUFFER_INFO_OHOS = 1000452003,
+    VK_STRUCTURE_TYPE_MEMORY_GET_NATIVE_BUFFER_INFO_OHOS = 1000452004,
+    VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_OHOS = 1000452005,
+    VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_ACQUIRE_UNMODIFIED_EXT = 1000453000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_3_FEATURES_EXT = 1000455000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_3_PROPERTIES_EXT = 1000455001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBPASS_MERGE_FEEDBACK_FEATURES_EXT = 1000458000,
+    VK_STRUCTURE_TYPE_RENDER_PASS_CREATION_CONTROL_EXT = 1000458001,
+    VK_STRUCTURE_TYPE_RENDER_PASS_CREATION_FEEDBACK_CREATE_INFO_EXT = 1000458002,
+    VK_STRUCTURE_TYPE_RENDER_PASS_SUBPASS_FEEDBACK_CREATE_INFO_EXT = 1000458003,
+    VK_STRUCTURE_TYPE_DIRECT_DRIVER_LOADING_INFO_LUNARG = 1000459000,
+    VK_STRUCTURE_TYPE_DIRECT_DRIVER_LOADING_LIST_LUNARG = 1000459001,
+    VK_STRUCTURE_TYPE_TENSOR_CREATE_INFO_ARM = 1000460000,
+    VK_STRUCTURE_TYPE_TENSOR_VIEW_CREATE_INFO_ARM = 1000460001,
+    VK_STRUCTURE_TYPE_BIND_TENSOR_MEMORY_INFO_ARM = 1000460002,
+    VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_TENSOR_ARM = 1000460003,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TENSOR_PROPERTIES_ARM = 1000460004,
+    VK_STRUCTURE_TYPE_TENSOR_FORMAT_PROPERTIES_ARM = 1000460005,
+    VK_STRUCTURE_TYPE_TENSOR_DESCRIPTION_ARM = 1000460006,
+    VK_STRUCTURE_TYPE_TENSOR_MEMORY_REQUIREMENTS_INFO_ARM = 1000460007,
+    VK_STRUCTURE_TYPE_TENSOR_MEMORY_BARRIER_ARM = 1000460008,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TENSOR_FEATURES_ARM = 1000460009,
+    VK_STRUCTURE_TYPE_DEVICE_TENSOR_MEMORY_REQUIREMENTS_ARM = 1000460010,
+    VK_STRUCTURE_TYPE_COPY_TENSOR_INFO_ARM = 1000460011,
+    VK_STRUCTURE_TYPE_TENSOR_COPY_ARM = 1000460012,
+    VK_STRUCTURE_TYPE_TENSOR_DEPENDENCY_INFO_ARM = 1000460013,
+    VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_TENSOR_ARM = 1000460014,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_TENSOR_INFO_ARM = 1000460015,
+    VK_STRUCTURE_TYPE_EXTERNAL_TENSOR_PROPERTIES_ARM = 1000460016,
+    VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_TENSOR_CREATE_INFO_ARM = 1000460017,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_BUFFER_TENSOR_FEATURES_ARM = 1000460018,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_BUFFER_TENSOR_PROPERTIES_ARM = 1000460019,
+    VK_STRUCTURE_TYPE_DESCRIPTOR_GET_TENSOR_INFO_ARM = 1000460020,
+    VK_STRUCTURE_TYPE_TENSOR_CAPTURE_DESCRIPTOR_DATA_INFO_ARM = 1000460021,
+    VK_STRUCTURE_TYPE_TENSOR_VIEW_CAPTURE_DESCRIPTOR_DATA_INFO_ARM = 1000460022,
+    VK_STRUCTURE_TYPE_FRAME_BOUNDARY_TENSORS_ARM = 1000460023,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_MODULE_IDENTIFIER_FEATURES_EXT = 1000462000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_MODULE_IDENTIFIER_PROPERTIES_EXT = 1000462001,
+    VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_MODULE_IDENTIFIER_CREATE_INFO_EXT = 1000462002,
+    VK_STRUCTURE_TYPE_SHADER_MODULE_IDENTIFIER_EXT = 1000462003,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_FEATURES_EXT = 1000342000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_OPTICAL_FLOW_FEATURES_NV = 1000464000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_OPTICAL_FLOW_PROPERTIES_NV = 1000464001,
+    VK_STRUCTURE_TYPE_OPTICAL_FLOW_IMAGE_FORMAT_INFO_NV = 1000464002,
+    VK_STRUCTURE_TYPE_OPTICAL_FLOW_IMAGE_FORMAT_PROPERTIES_NV = 1000464003,
+    VK_STRUCTURE_TYPE_OPTICAL_FLOW_SESSION_CREATE_INFO_NV = 1000464004,
+    VK_STRUCTURE_TYPE_OPTICAL_FLOW_EXECUTE_INFO_NV = 1000464005,
+    VK_STRUCTURE_TYPE_OPTICAL_FLOW_SESSION_CREATE_PRIVATE_DATA_INFO_NV = 1000464010,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LEGACY_DITHERING_FEATURES_EXT = 1000465000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FORMAT_RESOLVE_FEATURES_ANDROID = 1000468000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FORMAT_RESOLVE_PROPERTIES_ANDROID = 1000468001,
+    VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_RESOLVE_PROPERTIES_ANDROID = 1000468002,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ANTI_LAG_FEATURES_AMD = 1000476000,
+    VK_STRUCTURE_TYPE_ANTI_LAG_DATA_AMD = 1000476001,
+    VK_STRUCTURE_TYPE_ANTI_LAG_PRESENTATION_INFO_AMD = 1000476002,
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DENSE_GEOMETRY_FORMAT_FEATURES_AMDX = 1000478000,
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_DENSE_GEOMETRY_FORMAT_TRIANGLES_DATA_AMDX = 1000478001,
+#endif
+    VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_PRESENT_ID_2_KHR = 1000479000,
+    VK_STRUCTURE_TYPE_PRESENT_ID_2_KHR = 1000479001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_ID_2_FEATURES_KHR = 1000479002,
+    VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_PRESENT_WAIT_2_KHR = 1000480000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_WAIT_2_FEATURES_KHR = 1000480001,
+    VK_STRUCTURE_TYPE_PRESENT_WAIT_2_INFO_KHR = 1000480002,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_POSITION_FETCH_FEATURES_KHR = 1000481000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_OBJECT_FEATURES_EXT = 1000482000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_OBJECT_PROPERTIES_EXT = 1000482001,
+    VK_STRUCTURE_TYPE_SHADER_CREATE_INFO_EXT = 1000482002,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_BINARY_FEATURES_KHR = 1000483000,
+    VK_STRUCTURE_TYPE_PIPELINE_BINARY_CREATE_INFO_KHR = 1000483001,
+    VK_STRUCTURE_TYPE_PIPELINE_BINARY_INFO_KHR = 1000483002,
+    VK_STRUCTURE_TYPE_PIPELINE_BINARY_KEY_KHR = 1000483003,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_BINARY_PROPERTIES_KHR = 1000483004,
+    VK_STRUCTURE_TYPE_RELEASE_CAPTURED_PIPELINE_DATA_INFO_KHR = 1000483005,
+    VK_STRUCTURE_TYPE_PIPELINE_BINARY_DATA_INFO_KHR = 1000483006,
+    VK_STRUCTURE_TYPE_PIPELINE_CREATE_INFO_KHR = 1000483007,
+    VK_STRUCTURE_TYPE_DEVICE_PIPELINE_BINARY_INTERNAL_CACHE_CONTROL_KHR = 1000483008,
+    VK_STRUCTURE_TYPE_PIPELINE_BINARY_HANDLES_INFO_KHR = 1000483009,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TILE_PROPERTIES_FEATURES_QCOM = 1000484000,
+    VK_STRUCTURE_TYPE_TILE_PROPERTIES_QCOM = 1000484001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_AMIGO_PROFILING_FEATURES_SEC = 1000485000,
+    VK_STRUCTURE_TYPE_AMIGO_PROFILING_SUBMIT_INFO_SEC = 1000485001,
+    VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_KHR = 1000274000,
+    VK_STRUCTURE_TYPE_SURFACE_PRESENT_SCALING_CAPABILITIES_KHR = 1000274001,
+    VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_COMPATIBILITY_KHR = 1000274002,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SWAPCHAIN_MAINTENANCE_1_FEATURES_KHR = 1000275000,
+    VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_FENCE_INFO_KHR = 1000275001,
+    VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_MODES_CREATE_INFO_KHR = 1000275002,
+    VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_MODE_INFO_KHR = 1000275003,
+    VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_SCALING_CREATE_INFO_KHR = 1000275004,
+    VK_STRUCTURE_TYPE_RELEASE_SWAPCHAIN_IMAGES_INFO_KHR = 1000275005,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_VIEWPORTS_FEATURES_QCOM = 1000488000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_INVOCATION_REORDER_FEATURES_NV = 1000490000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_INVOCATION_REORDER_PROPERTIES_NV = 1000490001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_VECTOR_FEATURES_NV = 1000491000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_VECTOR_PROPERTIES_NV = 1000491001,
+    VK_STRUCTURE_TYPE_COOPERATIVE_VECTOR_PROPERTIES_NV = 1000491002,
+    VK_STRUCTURE_TYPE_CONVERT_COOPERATIVE_VECTOR_MATRIX_INFO_NV = 1000491004,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_SPARSE_ADDRESS_SPACE_FEATURES_NV = 1000492000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_SPARSE_ADDRESS_SPACE_PROPERTIES_NV = 1000492001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_EXT = 1000351000,
+    VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT = 1000351002,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LEGACY_VERTEX_ATTRIBUTES_FEATURES_EXT = 1000495000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LEGACY_VERTEX_ATTRIBUTES_PROPERTIES_EXT = 1000495001,
+    VK_STRUCTURE_TYPE_LAYER_SETTINGS_CREATE_INFO_EXT = 1000496000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_BUILTINS_FEATURES_ARM = 1000497000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_BUILTINS_PROPERTIES_ARM = 1000497001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_LIBRARY_GROUP_HANDLES_FEATURES_EXT = 1000498000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_UNUSED_ATTACHMENTS_FEATURES_EXT = 1000499000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INTERNALLY_SYNCHRONIZED_QUEUES_FEATURES_KHR = 1000504000,
+    VK_STRUCTURE_TYPE_LATENCY_SLEEP_MODE_INFO_NV = 1000505000,
+    VK_STRUCTURE_TYPE_LATENCY_SLEEP_INFO_NV = 1000505001,
+    VK_STRUCTURE_TYPE_SET_LATENCY_MARKER_INFO_NV = 1000505002,
+    VK_STRUCTURE_TYPE_GET_LATENCY_MARKER_INFO_NV = 1000505003,
+    VK_STRUCTURE_TYPE_LATENCY_TIMINGS_FRAME_REPORT_NV = 1000505004,
+    VK_STRUCTURE_TYPE_LATENCY_SUBMISSION_PRESENT_ID_NV = 1000505005,
+    VK_STRUCTURE_TYPE_OUT_OF_BAND_QUEUE_TYPE_INFO_NV = 1000505006,
+    VK_STRUCTURE_TYPE_SWAPCHAIN_LATENCY_CREATE_INFO_NV = 1000505007,
+    VK_STRUCTURE_TYPE_LATENCY_SURFACE_CAPABILITIES_NV = 1000505008,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_KHR = 1000506000,
+    VK_STRUCTURE_TYPE_COOPERATIVE_MATRIX_PROPERTIES_KHR = 1000506001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_KHR = 1000506002,
+    VK_STRUCTURE_TYPE_DATA_GRAPH_PIPELINE_CREATE_INFO_ARM = 1000507000,
+    VK_STRUCTURE_TYPE_DATA_GRAPH_PIPELINE_SESSION_CREATE_INFO_ARM = 1000507001,
+    VK_STRUCTURE_TYPE_DATA_GRAPH_PIPELINE_RESOURCE_INFO_ARM = 1000507002,
+    VK_STRUCTURE_TYPE_DATA_GRAPH_PIPELINE_CONSTANT_ARM = 1000507003,
+    VK_STRUCTURE_TYPE_DATA_GRAPH_PIPELINE_SESSION_MEMORY_REQUIREMENTS_INFO_ARM = 1000507004,
+    VK_STRUCTURE_TYPE_BIND_DATA_GRAPH_PIPELINE_SESSION_MEMORY_INFO_ARM = 1000507005,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DATA_GRAPH_FEATURES_ARM = 1000507006,
+    VK_STRUCTURE_TYPE_DATA_GRAPH_PIPELINE_SHADER_MODULE_CREATE_INFO_ARM = 1000507007,
+    VK_STRUCTURE_TYPE_DATA_GRAPH_PIPELINE_PROPERTY_QUERY_RESULT_ARM = 1000507008,
+    VK_STRUCTURE_TYPE_DATA_GRAPH_PIPELINE_INFO_ARM = 1000507009,
+    VK_STRUCTURE_TYPE_DATA_GRAPH_PIPELINE_COMPILER_CONTROL_CREATE_INFO_ARM = 1000507010,
+    VK_STRUCTURE_TYPE_DATA_GRAPH_PIPELINE_SESSION_BIND_POINT_REQUIREMENTS_INFO_ARM = 1000507011,
+    VK_STRUCTURE_TYPE_DATA_GRAPH_PIPELINE_SESSION_BIND_POINT_REQUIREMENT_ARM = 1000507012,
+    VK_STRUCTURE_TYPE_DATA_GRAPH_PIPELINE_IDENTIFIER_CREATE_INFO_ARM = 1000507013,
+    VK_STRUCTURE_TYPE_DATA_GRAPH_PIPELINE_DISPATCH_INFO_ARM = 1000507014,
+    VK_STRUCTURE_TYPE_DATA_GRAPH_PROCESSING_ENGINE_CREATE_INFO_ARM = 1000507016,
+    VK_STRUCTURE_TYPE_QUEUE_FAMILY_DATA_GRAPH_PROCESSING_ENGINE_PROPERTIES_ARM = 1000507017,
+    VK_STRUCTURE_TYPE_QUEUE_FAMILY_DATA_GRAPH_PROPERTIES_ARM = 1000507018,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_QUEUE_FAMILY_DATA_GRAPH_PROCESSING_ENGINE_INFO_ARM = 1000507019,
+    VK_STRUCTURE_TYPE_DATA_GRAPH_PIPELINE_CONSTANT_TENSOR_SEMI_STRUCTURED_SPARSITY_INFO_ARM = 1000507015,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_RENDER_AREAS_FEATURES_QCOM = 1000510000,
+    VK_STRUCTURE_TYPE_MULTIVIEW_PER_VIEW_RENDER_AREAS_RENDER_PASS_BEGIN_INFO_QCOM = 1000510001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_KHR = 1000201000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_PROPERTIES_KHR = 1000511000,
+    VK_STRUCTURE_TYPE_VIDEO_DECODE_AV1_CAPABILITIES_KHR = 1000512000,
+    VK_STRUCTURE_TYPE_VIDEO_DECODE_AV1_PICTURE_INFO_KHR = 1000512001,
+    VK_STRUCTURE_TYPE_VIDEO_DECODE_AV1_PROFILE_INFO_KHR = 1000512003,
+    VK_STRUCTURE_TYPE_VIDEO_DECODE_AV1_SESSION_PARAMETERS_CREATE_INFO_KHR = 1000512004,
+    VK_STRUCTURE_TYPE_VIDEO_DECODE_AV1_DPB_SLOT_INFO_KHR = 1000512005,
+    VK_STRUCTURE_TYPE_VIDEO_ENCODE_AV1_CAPABILITIES_KHR = 1000513000,
+    VK_STRUCTURE_TYPE_VIDEO_ENCODE_AV1_SESSION_PARAMETERS_CREATE_INFO_KHR = 1000513001,
+    VK_STRUCTURE_TYPE_VIDEO_ENCODE_AV1_PICTURE_INFO_KHR = 1000513002,
+    VK_STRUCTURE_TYPE_VIDEO_ENCODE_AV1_DPB_SLOT_INFO_KHR = 1000513003,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_ENCODE_AV1_FEATURES_KHR = 1000513004,
+    VK_STRUCTURE_TYPE_VIDEO_ENCODE_AV1_PROFILE_INFO_KHR = 1000513005,
+    VK_STRUCTURE_TYPE_VIDEO_ENCODE_AV1_RATE_CONTROL_INFO_KHR = 1000513006,
+    VK_STRUCTURE_TYPE_VIDEO_ENCODE_AV1_RATE_CONTROL_LAYER_INFO_KHR = 1000513007,
+    VK_STRUCTURE_TYPE_VIDEO_ENCODE_AV1_QUALITY_LEVEL_PROPERTIES_KHR = 1000513008,
+    VK_STRUCTURE_TYPE_VIDEO_ENCODE_AV1_SESSION_CREATE_INFO_KHR = 1000513009,
+    VK_STRUCTURE_TYPE_VIDEO_ENCODE_AV1_GOP_REMAINING_FRAME_INFO_KHR = 1000513010,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_DECODE_VP9_FEATURES_KHR = 1000514000,
+    VK_STRUCTURE_TYPE_VIDEO_DECODE_VP9_CAPABILITIES_KHR = 1000514001,
+    VK_STRUCTURE_TYPE_VIDEO_DECODE_VP9_PICTURE_INFO_KHR = 1000514002,
+    VK_STRUCTURE_TYPE_VIDEO_DECODE_VP9_PROFILE_INFO_KHR = 1000514003,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_MAINTENANCE_1_FEATURES_KHR = 1000515000,
+    VK_STRUCTURE_TYPE_VIDEO_INLINE_QUERY_INFO_KHR = 1000515001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PER_STAGE_DESCRIPTOR_SET_FEATURES_NV = 1000516000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_PROCESSING_2_FEATURES_QCOM = 1000518000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_PROCESSING_2_PROPERTIES_QCOM = 1000518001,
+    VK_STRUCTURE_TYPE_SAMPLER_BLOCK_MATCH_WINDOW_CREATE_INFO_QCOM = 1000518002,
+    VK_STRUCTURE_TYPE_SAMPLER_CUBIC_WEIGHTS_CREATE_INFO_QCOM = 1000519000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUBIC_WEIGHTS_FEATURES_QCOM = 1000519001,
+    VK_STRUCTURE_TYPE_BLIT_IMAGE_CUBIC_WEIGHTS_INFO_QCOM = 1000519002,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_DEGAMMA_FEATURES_QCOM = 1000520000,
+    VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_YCBCR_DEGAMMA_CREATE_INFO_QCOM = 1000520001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUBIC_CLAMP_FEATURES_QCOM = 1000521000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ATTACHMENT_FEEDBACK_LOOP_DYNAMIC_STATE_FEATURES_EXT = 1000524000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFIED_IMAGE_LAYOUTS_FEATURES_KHR = 1000527000,
+    VK_STRUCTURE_TYPE_ATTACHMENT_FEEDBACK_LOOP_INFO_EXT = 1000527001,
+    VK_STRUCTURE_TYPE_SCREEN_BUFFER_PROPERTIES_QNX = 1000529000,
+    VK_STRUCTURE_TYPE_SCREEN_BUFFER_FORMAT_PROPERTIES_QNX = 1000529001,
+    VK_STRUCTURE_TYPE_IMPORT_SCREEN_BUFFER_INFO_QNX = 1000529002,
+    VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_QNX = 1000529003,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_SCREEN_BUFFER_FEATURES_QNX = 1000529004,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LAYERED_DRIVER_PROPERTIES_MSFT = 1000530000,
+    VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_KHR = 1000184000,
+    VK_STRUCTURE_TYPE_SET_DESCRIPTOR_BUFFER_OFFSETS_INFO_EXT = 1000545007,
+    VK_STRUCTURE_TYPE_BIND_DESCRIPTOR_BUFFER_EMBEDDED_SAMPLERS_INFO_EXT = 1000545008,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_POOL_OVERALLOCATION_FEATURES_NV = 1000546000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TILE_MEMORY_HEAP_FEATURES_QCOM = 1000547000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TILE_MEMORY_HEAP_PROPERTIES_QCOM = 1000547001,
+    VK_STRUCTURE_TYPE_TILE_MEMORY_REQUIREMENTS_QCOM = 1000547002,
+    VK_STRUCTURE_TYPE_TILE_MEMORY_BIND_INFO_QCOM = 1000547003,
+    VK_STRUCTURE_TYPE_TILE_MEMORY_SIZE_INFO_QCOM = 1000547004,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COPY_MEMORY_INDIRECT_FEATURES_KHR = 1000549000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COPY_MEMORY_INDIRECT_PROPERTIES_KHR = 1000426001,
+    VK_STRUCTURE_TYPE_COPY_MEMORY_INDIRECT_INFO_KHR = 1000549002,
+    VK_STRUCTURE_TYPE_COPY_MEMORY_TO_IMAGE_INDIRECT_INFO_KHR = 1000549003,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_DECOMPRESSION_FEATURES_EXT = 1000427000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_DECOMPRESSION_PROPERTIES_EXT = 1000427001,
+    VK_STRUCTURE_TYPE_DECOMPRESS_MEMORY_INFO_EXT = 1000550002,
+    VK_STRUCTURE_TYPE_DISPLAY_SURFACE_STEREO_CREATE_INFO_NV = 1000551000,
+    VK_STRUCTURE_TYPE_DISPLAY_MODE_STEREO_PROPERTIES_NV = 1000551001,
+    VK_STRUCTURE_TYPE_VIDEO_ENCODE_INTRA_REFRESH_CAPABILITIES_KHR = 1000552000,
+    VK_STRUCTURE_TYPE_VIDEO_ENCODE_SESSION_INTRA_REFRESH_CREATE_INFO_KHR = 1000552001,
+    VK_STRUCTURE_TYPE_VIDEO_ENCODE_INTRA_REFRESH_INFO_KHR = 1000552002,
+    VK_STRUCTURE_TYPE_VIDEO_REFERENCE_INTRA_REFRESH_INFO_KHR = 1000552003,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_ENCODE_INTRA_REFRESH_FEATURES_KHR = 1000552004,
+    VK_STRUCTURE_TYPE_VIDEO_ENCODE_QUANTIZATION_MAP_CAPABILITIES_KHR = 1000553000,
+    VK_STRUCTURE_TYPE_VIDEO_FORMAT_QUANTIZATION_MAP_PROPERTIES_KHR = 1000553001,
+    VK_STRUCTURE_TYPE_VIDEO_ENCODE_QUANTIZATION_MAP_INFO_KHR = 1000553002,
+    VK_STRUCTURE_TYPE_VIDEO_ENCODE_QUANTIZATION_MAP_SESSION_PARAMETERS_CREATE_INFO_KHR = 1000553005,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_ENCODE_QUANTIZATION_MAP_FEATURES_KHR = 1000553009,
+    VK_STRUCTURE_TYPE_VIDEO_ENCODE_H264_QUANTIZATION_MAP_CAPABILITIES_KHR = 1000553003,
+    VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_QUANTIZATION_MAP_CAPABILITIES_KHR = 1000553004,
+    VK_STRUCTURE_TYPE_VIDEO_FORMAT_H265_QUANTIZATION_MAP_PROPERTIES_KHR = 1000553006,
+    VK_STRUCTURE_TYPE_VIDEO_ENCODE_AV1_QUANTIZATION_MAP_CAPABILITIES_KHR = 1000553007,
+    VK_STRUCTURE_TYPE_VIDEO_FORMAT_AV1_QUANTIZATION_MAP_PROPERTIES_KHR = 1000553008,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAW_ACCESS_CHAINS_FEATURES_NV = 1000555000,
+    VK_STRUCTURE_TYPE_EXTERNAL_COMPUTE_QUEUE_DEVICE_CREATE_INFO_NV = 1000556000,
+    VK_STRUCTURE_TYPE_EXTERNAL_COMPUTE_QUEUE_CREATE_INFO_NV = 1000556001,
+    VK_STRUCTURE_TYPE_EXTERNAL_COMPUTE_QUEUE_DATA_PARAMS_NV = 1000556002,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_COMPUTE_QUEUE_PROPERTIES_NV = 1000556003,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_RELAXED_EXTENDED_INSTRUCTION_FEATURES_KHR = 1000558000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMMAND_BUFFER_INHERITANCE_FEATURES_NV = 1000559000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_7_FEATURES_KHR = 1000562000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_7_PROPERTIES_KHR = 1000562001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LAYERED_API_PROPERTIES_LIST_KHR = 1000562002,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LAYERED_API_PROPERTIES_KHR = 1000562003,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LAYERED_API_VULKAN_PROPERTIES_KHR = 1000562004,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT16_VECTOR_FEATURES_NV = 1000563000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_REPLICATED_COMPOSITES_FEATURES_EXT = 1000564000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT8_FEATURES_EXT = 1000567000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_VALIDATION_FEATURES_NV = 1000568000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CLUSTER_ACCELERATION_STRUCTURE_FEATURES_NV = 1000569000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CLUSTER_ACCELERATION_STRUCTURE_PROPERTIES_NV = 1000569001,
+    VK_STRUCTURE_TYPE_CLUSTER_ACCELERATION_STRUCTURE_CLUSTERS_BOTTOM_LEVEL_INPUT_NV = 1000569002,
+    VK_STRUCTURE_TYPE_CLUSTER_ACCELERATION_STRUCTURE_TRIANGLE_CLUSTER_INPUT_NV = 1000569003,
+    VK_STRUCTURE_TYPE_CLUSTER_ACCELERATION_STRUCTURE_MOVE_OBJECTS_INPUT_NV = 1000569004,
+    VK_STRUCTURE_TYPE_CLUSTER_ACCELERATION_STRUCTURE_INPUT_INFO_NV = 1000569005,
+    VK_STRUCTURE_TYPE_CLUSTER_ACCELERATION_STRUCTURE_COMMANDS_INFO_NV = 1000569006,
+    VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CLUSTER_ACCELERATION_STRUCTURE_CREATE_INFO_NV = 1000569007,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PARTITIONED_ACCELERATION_STRUCTURE_FEATURES_NV = 1000570000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PARTITIONED_ACCELERATION_STRUCTURE_PROPERTIES_NV = 1000570001,
+    VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_PARTITIONED_ACCELERATION_STRUCTURE_NV = 1000570002,
+    VK_STRUCTURE_TYPE_PARTITIONED_ACCELERATION_STRUCTURE_INSTANCES_INPUT_NV = 1000570003,
+    VK_STRUCTURE_TYPE_BUILD_PARTITIONED_ACCELERATION_STRUCTURE_INFO_NV = 1000570004,
+    VK_STRUCTURE_TYPE_PARTITIONED_ACCELERATION_STRUCTURE_FLAGS_NV = 1000570005,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_EXT = 1000572000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_EXT = 1000572001,
+    VK_STRUCTURE_TYPE_GENERATED_COMMANDS_MEMORY_REQUIREMENTS_INFO_EXT = 1000572002,
+    VK_STRUCTURE_TYPE_INDIRECT_EXECUTION_SET_CREATE_INFO_EXT = 1000572003,
+    VK_STRUCTURE_TYPE_GENERATED_COMMANDS_INFO_EXT = 1000572004,
+    VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_EXT = 1000572006,
+    VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_TOKEN_EXT = 1000572007,
+    VK_STRUCTURE_TYPE_WRITE_INDIRECT_EXECUTION_SET_PIPELINE_EXT = 1000572008,
+    VK_STRUCTURE_TYPE_WRITE_INDIRECT_EXECUTION_SET_SHADER_EXT = 1000572009,
+    VK_STRUCTURE_TYPE_INDIRECT_EXECUTION_SET_PIPELINE_INFO_EXT = 1000572010,
+    VK_STRUCTURE_TYPE_INDIRECT_EXECUTION_SET_SHADER_INFO_EXT = 1000572011,
+    VK_STRUCTURE_TYPE_INDIRECT_EXECUTION_SET_SHADER_LAYOUT_INFO_EXT = 1000572012,
+    VK_STRUCTURE_TYPE_GENERATED_COMMANDS_PIPELINE_INFO_EXT = 1000572013,
+    VK_STRUCTURE_TYPE_GENERATED_COMMANDS_SHADER_INFO_EXT = 1000572014,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_8_FEATURES_KHR = 1000574000,
+    VK_STRUCTURE_TYPE_MEMORY_BARRIER_ACCESS_FLAGS_3_KHR = 1000574002,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ALIGNMENT_CONTROL_FEATURES_MESA = 1000575000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ALIGNMENT_CONTROL_PROPERTIES_MESA = 1000575001,
+    VK_STRUCTURE_TYPE_IMAGE_ALIGNMENT_CONTROL_CREATE_INFO_MESA = 1000575002,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FMA_FEATURES_KHR = 1000579000,
+    VK_STRUCTURE_TYPE_PUSH_CONSTANT_BANK_INFO_NV = 1000580000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_CONSTANT_BANK_FEATURES_NV = 1000580001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_CONSTANT_BANK_PROPERTIES_NV = 1000580002,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_INVOCATION_REORDER_FEATURES_EXT = 1000581000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_INVOCATION_REORDER_PROPERTIES_EXT = 1000581001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLAMP_CONTROL_FEATURES_EXT = 1000582000,
+    VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_DEPTH_CLAMP_CONTROL_CREATE_INFO_EXT = 1000582001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_9_FEATURES_KHR = 1000584000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_9_PROPERTIES_KHR = 1000584001,
+    VK_STRUCTURE_TYPE_QUEUE_FAMILY_OWNERSHIP_TRANSFER_PROPERTIES_KHR = 1000584002,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VIDEO_MAINTENANCE_2_FEATURES_KHR = 1000586000,
+    VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_INLINE_SESSION_PARAMETERS_INFO_KHR = 1000586001,
+    VK_STRUCTURE_TYPE_VIDEO_DECODE_H265_INLINE_SESSION_PARAMETERS_INFO_KHR = 1000586002,
+    VK_STRUCTURE_TYPE_VIDEO_DECODE_AV1_INLINE_SESSION_PARAMETERS_INFO_KHR = 1000586003,
+    VK_STRUCTURE_TYPE_SURFACE_CREATE_INFO_OHOS = 1000685000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HDR_VIVID_FEATURES_HUAWEI = 1000590000,
+    VK_STRUCTURE_TYPE_HDR_VIVID_DYNAMIC_METADATA_HUAWEI = 1000590001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_2_FEATURES_NV = 1000593000,
+    VK_STRUCTURE_TYPE_COOPERATIVE_MATRIX_FLEXIBLE_DIMENSIONS_PROPERTIES_NV = 1000593001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_2_PROPERTIES_NV = 1000593002,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_OPACITY_MICROMAP_FEATURES_ARM = 1000596000,
+    VK_STRUCTURE_TYPE_IMPORT_MEMORY_METAL_HANDLE_INFO_EXT = 1000602000,
+    VK_STRUCTURE_TYPE_MEMORY_METAL_HANDLE_PROPERTIES_EXT = 1000602001,
+    VK_STRUCTURE_TYPE_MEMORY_GET_METAL_HANDLE_INFO_EXT = 1000602002,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLAMP_ZERO_ONE_FEATURES_KHR = 1000421000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_COUNTERS_BY_REGION_FEATURES_ARM = 1000605000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_COUNTERS_BY_REGION_PROPERTIES_ARM = 1000605001,
+    VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_ARM = 1000605002,
+    VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_DESCRIPTION_ARM = 1000605003,
+    VK_STRUCTURE_TYPE_RENDER_PASS_PERFORMANCE_COUNTERS_BY_REGION_BEGIN_INFO_ARM = 1000605004,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_ROBUSTNESS_FEATURES_EXT = 1000608000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FORMAT_PACK_FEATURES_ARM = 1000609000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_LAYERED_FEATURES_VALVE = 1000611000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_LAYERED_PROPERTIES_VALVE = 1000611001,
+    VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_DENSITY_MAP_LAYERED_CREATE_INFO_VALVE = 1000611002,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_KHR = 1000286000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_KHR = 1000286001,
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+    VK_STRUCTURE_TYPE_SET_PRESENT_CONFIG_NV = 1000613000,
+#endif
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_METERING_FEATURES_NV = 1000613001,
+#endif
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_OFFSET_FEATURES_EXT = 1000425000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_OFFSET_PROPERTIES_EXT = 1000425001,
+    VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_OFFSET_END_INFO_EXT = 1000425002,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_DEVICE_MEMORY_FEATURES_EXT = 1000620000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_MODE_FIFO_LATEST_READY_FEATURES_KHR = 1000361000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_64_BIT_INDEXING_FEATURES_EXT = 1000627000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_RESOLVE_FEATURES_EXT = 1000628000,
+    VK_STRUCTURE_TYPE_BEGIN_CUSTOM_RESOLVE_INFO_EXT = 1000628001,
+    VK_STRUCTURE_TYPE_CUSTOM_RESOLVE_CREATE_INFO_EXT = 1000628002,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DATA_GRAPH_MODEL_FEATURES_QCOM = 1000629000,
+    VK_STRUCTURE_TYPE_DATA_GRAPH_PIPELINE_BUILTIN_MODEL_CREATE_INFO_QCOM = 1000629001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_10_FEATURES_KHR = 1000630000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_10_PROPERTIES_KHR = 1000630001,
+    VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_FLAGS_INFO_KHR = 1000630002,
+    VK_STRUCTURE_TYPE_RENDERING_END_INFO_KHR = 1000619003,
+    VK_STRUCTURE_TYPE_RESOLVE_IMAGE_MODE_INFO_KHR = 1000630004,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_LONG_VECTOR_FEATURES_EXT = 1000635000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_LONG_VECTOR_PROPERTIES_EXT = 1000635001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CACHE_INCREMENTAL_MODE_FEATURES_SEC = 1000637000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_UNIFORM_BUFFER_UNSIZED_ARRAY_FEATURES_EXT = 1000642000,
+    VK_STRUCTURE_TYPE_COMPUTE_OCCUPANCY_PRIORITY_PARAMETERS_NV = 1000645000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_OCCUPANCY_PRIORITY_FEATURES_NV = 1000645001,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_PARTITIONED_FEATURES_EXT = 1000662000,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES,
+  // VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT is a deprecated alias
+    VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT,
+    VK_STRUCTURE_TYPE_RENDERING_INFO_KHR = VK_STRUCTURE_TYPE_RENDERING_INFO,
+    VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO_KHR = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
+    VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES,
+    VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDERING_INFO_KHR = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDERING_INFO,
+    VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
+    VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2,
+    VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2,
+    VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2,
+    VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2,
+    VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO,
+    VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO,
+    VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO,
+    VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO,
+    VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO,
+    VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO_KHR = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO,
+    VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES,
+    VK_STRUCTURE_TYPE_PIPELINE_ROBUSTNESS_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_PIPELINE_ROBUSTNESS_CREATE_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_PROPERTIES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES,
+    VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO,
+    VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO,
+    VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES,
+    VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO,
+    VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
+    VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO,
+    VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES,
+    VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES,
+    VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO,
+  // VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES2_EXT is a deprecated alias
+    VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES2_EXT = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES,
+    VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO,
+    VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO_KHR = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO,
+    VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO,
+    VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2,
+    VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2,
+    VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2_KHR = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2,
+    VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2_KHR = VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2,
+    VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2,
+    VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO,
+    VK_STRUCTURE_TYPE_SUBPASS_END_INFO_KHR = VK_STRUCTURE_TYPE_SUBPASS_END_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO,
+    VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES,
+    VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES,
+    VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO,
+    VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO,
+    VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES_KHR,
+    VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS,
+    VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES,
+    VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES,
+    VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK,
+    VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO,
+    VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2,
+    VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2,
+    VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2,
+    VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
+    VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2_KHR = VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2,
+    VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO,
+    VK_STRUCTURE_TYPE_ATTACHMENT_SAMPLE_COUNT_INFO_NV = VK_STRUCTURE_TYPE_ATTACHMENT_SAMPLE_COUNT_INFO_AMD,
+    VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO,
+    VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO,
+    VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO,
+    VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO_KHR = VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES,
+    VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES,
+    VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
+    VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
+    VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES,
+    VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO,
+    VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES,
+    VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT_KHR = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT,
+    VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES,
+    VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT = VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_KHR,
+    VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES,
+    VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_KHR = VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES,
+    VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES,
+    VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES,
+    VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE_KHR = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_KHR,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_KHR,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES,
+    VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO,
+    VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO,
+    VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,
+    VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO,
+  // VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO_INTEL is a deprecated alias
+    VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO_INTEL = VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES,
+    VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_LOCAL_READ_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_LOCAL_READ_FEATURES,
+    VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_LOCATION_INFO_KHR = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_LOCATION_INFO,
+    VK_STRUCTURE_TYPE_RENDERING_INPUT_ATTACHMENT_INDEX_INFO_KHR = VK_STRUCTURE_TYPE_RENDERING_INPUT_ATTACHMENT_INDEX_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES,
+    VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT,
+    VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_ADDRESS_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT,
+    VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO_EXT = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES,
+    VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES,
+    VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO_KHR = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO,
+    VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO,
+    VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO,
+    VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES,
+    VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_IMAGE_COPY_PROPERTIES,
+    VK_STRUCTURE_TYPE_MEMORY_TO_IMAGE_COPY_EXT = VK_STRUCTURE_TYPE_MEMORY_TO_IMAGE_COPY,
+    VK_STRUCTURE_TYPE_IMAGE_TO_MEMORY_COPY_EXT = VK_STRUCTURE_TYPE_IMAGE_TO_MEMORY_COPY,
+    VK_STRUCTURE_TYPE_COPY_IMAGE_TO_MEMORY_INFO_EXT = VK_STRUCTURE_TYPE_COPY_IMAGE_TO_MEMORY_INFO,
+    VK_STRUCTURE_TYPE_COPY_MEMORY_TO_IMAGE_INFO_EXT = VK_STRUCTURE_TYPE_COPY_MEMORY_TO_IMAGE_INFO,
+    VK_STRUCTURE_TYPE_HOST_IMAGE_LAYOUT_TRANSITION_INFO_EXT = VK_STRUCTURE_TYPE_HOST_IMAGE_LAYOUT_TRANSITION_INFO,
+    VK_STRUCTURE_TYPE_COPY_IMAGE_TO_IMAGE_INFO_EXT = VK_STRUCTURE_TYPE_COPY_IMAGE_TO_IMAGE_INFO,
+    VK_STRUCTURE_TYPE_SUBRESOURCE_HOST_MEMCPY_SIZE_EXT = VK_STRUCTURE_TYPE_SUBRESOURCE_HOST_MEMCPY_SIZE,
+    VK_STRUCTURE_TYPE_HOST_IMAGE_COPY_DEVICE_PERFORMANCE_QUERY_EXT = VK_STRUCTURE_TYPE_HOST_IMAGE_COPY_DEVICE_PERFORMANCE_QUERY,
+    VK_STRUCTURE_TYPE_MEMORY_MAP_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_MAP_INFO,
+    VK_STRUCTURE_TYPE_MEMORY_UNMAP_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_UNMAP_INFO,
+    VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_EXT = VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_KHR,
+    VK_STRUCTURE_TYPE_SURFACE_PRESENT_SCALING_CAPABILITIES_EXT = VK_STRUCTURE_TYPE_SURFACE_PRESENT_SCALING_CAPABILITIES_KHR,
+    VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_COMPATIBILITY_EXT = VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_COMPATIBILITY_KHR,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SWAPCHAIN_MAINTENANCE_1_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SWAPCHAIN_MAINTENANCE_1_FEATURES_KHR,
+    VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_FENCE_INFO_EXT = VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_FENCE_INFO_KHR,
+    VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_MODES_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_MODES_CREATE_INFO_KHR,
+    VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_MODE_INFO_EXT = VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_MODE_INFO_KHR,
+    VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_SCALING_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_SCALING_CREATE_INFO_KHR,
+    VK_STRUCTURE_TYPE_RELEASE_SWAPCHAIN_IMAGES_INFO_EXT = VK_STRUCTURE_TYPE_RELEASE_SWAPCHAIN_IMAGES_INFO_KHR,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_DOT_PRODUCT_PROPERTIES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_KHR,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_KHR,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES,
+    VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO,
+    VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES,
+    VK_STRUCTURE_TYPE_MEMORY_BARRIER_2_KHR = VK_STRUCTURE_TYPE_MEMORY_BARRIER_2,
+    VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2_KHR = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER_2,
+    VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2_KHR = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
+    VK_STRUCTURE_TYPE_DEPENDENCY_INFO_KHR = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
+    VK_STRUCTURE_TYPE_SUBMIT_INFO_2_KHR = VK_STRUCTURE_TYPE_SUBMIT_INFO_2,
+    VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_SUBMIT_INFO,
+    VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO_KHR = VK_STRUCTURE_TYPE_COMMAND_BUFFER_SUBMIT_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ZERO_INITIALIZE_WORKGROUP_MEMORY_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES,
+    VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2_KHR = VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2,
+    VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2_KHR = VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2,
+    VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2_KHR = VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2,
+    VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2_KHR = VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2,
+    VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2_KHR = VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2,
+    VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2_KHR = VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2,
+    VK_STRUCTURE_TYPE_BUFFER_COPY_2_KHR = VK_STRUCTURE_TYPE_BUFFER_COPY_2,
+    VK_STRUCTURE_TYPE_IMAGE_COPY_2_KHR = VK_STRUCTURE_TYPE_IMAGE_COPY_2,
+    VK_STRUCTURE_TYPE_IMAGE_BLIT_2_KHR = VK_STRUCTURE_TYPE_IMAGE_BLIT_2,
+    VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2_KHR = VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2,
+    VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2_KHR = VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2,
+    VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2_EXT = VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2,
+    VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2_EXT = VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_FEATURES_ARM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_FEATURES_EXT,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_VALVE = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_EXT,
+    VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_VALVE = VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT,
+    VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_3_KHR = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_3,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_MODE_FIFO_LATEST_READY_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRESENT_MODE_FIFO_LATEST_READY_FEATURES_KHR,
+    VK_STRUCTURE_TYPE_PIPELINE_INFO_EXT = VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GLOBAL_PRIORITY_QUERY_FEATURES,
+    VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES_EXT = VK_STRUCTURE_TYPE_QUEUE_FAMILY_GLOBAL_PRIORITY_PROPERTIES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_PROPERTIES,
+    VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS_KHR = VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS,
+    VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS_KHR = VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_ROTATE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_ROTATE_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLAMP_ZERO_ONE_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLAMP_ZERO_ONE_FEATURES_KHR,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_OFFSET_FEATURES_QCOM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_OFFSET_FEATURES_EXT,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_OFFSET_PROPERTIES_QCOM = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_OFFSET_PROPERTIES_EXT, + VK_STRUCTURE_TYPE_SUBPASS_FRAGMENT_DENSITY_MAP_OFFSET_END_INFO_QCOM = VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_OFFSET_END_INFO_EXT, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COPY_MEMORY_INDIRECT_PROPERTIES_NV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COPY_MEMORY_INDIRECT_PROPERTIES_KHR, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_DECOMPRESSION_FEATURES_NV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_DECOMPRESSION_FEATURES_EXT, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_DECOMPRESSION_PROPERTIES_NV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_DECOMPRESSION_PROPERTIES_EXT, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_PROTECTED_ACCESS_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_PROTECTED_ACCESS_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_5_PROPERTIES, + VK_STRUCTURE_TYPE_RENDERING_AREA_INFO_KHR = VK_STRUCTURE_TYPE_RENDERING_AREA_INFO, + VK_STRUCTURE_TYPE_DEVICE_IMAGE_SUBRESOURCE_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_IMAGE_SUBRESOURCE_INFO, + VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2_KHR = VK_STRUCTURE_TYPE_SUBRESOURCE_LAYOUT_2, + VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2_KHR = VK_STRUCTURE_TYPE_IMAGE_SUBRESOURCE_2, + VK_STRUCTURE_TYPE_PIPELINE_CREATE_FLAGS_2_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_PIPELINE_CREATE_FLAGS_2_CREATE_INFO, + VK_STRUCTURE_TYPE_BUFFER_USAGE_FLAGS_2_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_BUFFER_USAGE_FLAGS_2_CREATE_INFO, + VK_STRUCTURE_TYPE_SHADER_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES, + VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT_CONTROLS_2_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT_CONTROLS_2_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_EXPECT_ASSUME_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_EXPECT_ASSUME_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_6_PROPERTIES, + VK_STRUCTURE_TYPE_BIND_MEMORY_STATUS_KHR = VK_STRUCTURE_TYPE_BIND_MEMORY_STATUS, + VK_STRUCTURE_TYPE_BIND_DESCRIPTOR_SETS_INFO_KHR = VK_STRUCTURE_TYPE_BIND_DESCRIPTOR_SETS_INFO, + 
VK_STRUCTURE_TYPE_PUSH_CONSTANTS_INFO_KHR = VK_STRUCTURE_TYPE_PUSH_CONSTANTS_INFO, + VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_INFO_KHR = VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_INFO, + VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_WITH_TEMPLATE_INFO_KHR = VK_STRUCTURE_TYPE_PUSH_DESCRIPTOR_SET_WITH_TEMPLATE_INFO, + VK_STRUCTURE_TYPE_RENDERING_END_INFO_EXT = VK_STRUCTURE_TYPE_RENDERING_END_INFO_KHR, + VK_STRUCTURE_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkStructureType; + +typedef enum VkImageLayout { + VK_IMAGE_LAYOUT_UNDEFINED = 0, + VK_IMAGE_LAYOUT_GENERAL = 1, + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL = 2, + VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL = 3, + VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL = 4, + VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL = 5, + VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL = 6, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL = 7, + VK_IMAGE_LAYOUT_PREINITIALIZED = 8, + VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL = 1000117000, + VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL = 1000117001, + VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL = 1000241000, + VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL = 1000241001, + VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL = 1000241002, + VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL = 1000241003, + VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL = 1000314000, + VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL = 1000314001, + VK_IMAGE_LAYOUT_RENDERING_LOCAL_READ = 1000232000, + VK_IMAGE_LAYOUT_PRESENT_SRC_KHR = 1000001002, + VK_IMAGE_LAYOUT_VIDEO_DECODE_DST_KHR = 1000024000, + VK_IMAGE_LAYOUT_VIDEO_DECODE_SRC_KHR = 1000024001, + VK_IMAGE_LAYOUT_VIDEO_DECODE_DPB_KHR = 1000024002, + VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR = 1000111000, + VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT = 1000218000, + VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR = 1000164003, + VK_IMAGE_LAYOUT_VIDEO_ENCODE_DST_KHR = 1000299000, + VK_IMAGE_LAYOUT_VIDEO_ENCODE_SRC_KHR = 1000299001, + VK_IMAGE_LAYOUT_VIDEO_ENCODE_DPB_KHR = 1000299002, + VK_IMAGE_LAYOUT_ATTACHMENT_FEEDBACK_LOOP_OPTIMAL_EXT = 1000339000, + VK_IMAGE_LAYOUT_TENSOR_ALIASING_ARM = 1000460000, + VK_IMAGE_LAYOUT_VIDEO_ENCODE_QUANTIZATION_MAP_KHR = 1000553000, + VK_IMAGE_LAYOUT_ZERO_INITIALIZED_EXT = 1000620000, + VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, + VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, + VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV = VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR, + VK_IMAGE_LAYOUT_RENDERING_LOCAL_READ_KHR = VK_IMAGE_LAYOUT_RENDERING_LOCAL_READ, + VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, + VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, + VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, + VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL, + VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL, + VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL, + VK_IMAGE_LAYOUT_MAX_ENUM = 0x7FFFFFFF +} VkImageLayout; + +typedef enum VkObjectType { + VK_OBJECT_TYPE_UNKNOWN = 0, + VK_OBJECT_TYPE_INSTANCE = 1, + VK_OBJECT_TYPE_PHYSICAL_DEVICE = 2, + VK_OBJECT_TYPE_DEVICE = 3, + VK_OBJECT_TYPE_QUEUE = 4, + VK_OBJECT_TYPE_SEMAPHORE = 5, + VK_OBJECT_TYPE_COMMAND_BUFFER = 6, + VK_OBJECT_TYPE_FENCE = 7, + VK_OBJECT_TYPE_DEVICE_MEMORY = 
8, + VK_OBJECT_TYPE_BUFFER = 9, + VK_OBJECT_TYPE_IMAGE = 10, + VK_OBJECT_TYPE_EVENT = 11, + VK_OBJECT_TYPE_QUERY_POOL = 12, + VK_OBJECT_TYPE_BUFFER_VIEW = 13, + VK_OBJECT_TYPE_IMAGE_VIEW = 14, + VK_OBJECT_TYPE_SHADER_MODULE = 15, + VK_OBJECT_TYPE_PIPELINE_CACHE = 16, + VK_OBJECT_TYPE_PIPELINE_LAYOUT = 17, + VK_OBJECT_TYPE_RENDER_PASS = 18, + VK_OBJECT_TYPE_PIPELINE = 19, + VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT = 20, + VK_OBJECT_TYPE_SAMPLER = 21, + VK_OBJECT_TYPE_DESCRIPTOR_POOL = 22, + VK_OBJECT_TYPE_DESCRIPTOR_SET = 23, + VK_OBJECT_TYPE_FRAMEBUFFER = 24, + VK_OBJECT_TYPE_COMMAND_POOL = 25, + VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE = 1000085000, + VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION = 1000156000, + VK_OBJECT_TYPE_PRIVATE_DATA_SLOT = 1000295000, + VK_OBJECT_TYPE_SURFACE_KHR = 1000000000, + VK_OBJECT_TYPE_SWAPCHAIN_KHR = 1000001000, + VK_OBJECT_TYPE_DISPLAY_KHR = 1000002000, + VK_OBJECT_TYPE_DISPLAY_MODE_KHR = 1000002001, + VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT = 1000011000, + VK_OBJECT_TYPE_VIDEO_SESSION_KHR = 1000023000, + VK_OBJECT_TYPE_VIDEO_SESSION_PARAMETERS_KHR = 1000023001, + VK_OBJECT_TYPE_CU_MODULE_NVX = 1000029000, + VK_OBJECT_TYPE_CU_FUNCTION_NVX = 1000029001, + VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT = 1000128000, + VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR = 1000150000, + VK_OBJECT_TYPE_VALIDATION_CACHE_EXT = 1000160000, + VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV = 1000165000, + VK_OBJECT_TYPE_PERFORMANCE_CONFIGURATION_INTEL = 1000210000, + VK_OBJECT_TYPE_DEFERRED_OPERATION_KHR = 1000268000, + VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NV = 1000277000, +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_OBJECT_TYPE_CUDA_MODULE_NV = 1000307000, +#endif +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_OBJECT_TYPE_CUDA_FUNCTION_NV = 1000307001, +#endif + VK_OBJECT_TYPE_BUFFER_COLLECTION_FUCHSIA = 1000366000, + VK_OBJECT_TYPE_MICROMAP_EXT = 1000396000, + VK_OBJECT_TYPE_TENSOR_ARM = 1000460000, + VK_OBJECT_TYPE_TENSOR_VIEW_ARM = 1000460001, + VK_OBJECT_TYPE_OPTICAL_FLOW_SESSION_NV = 1000464000, + VK_OBJECT_TYPE_SHADER_EXT = 1000482000, + VK_OBJECT_TYPE_PIPELINE_BINARY_KHR = 1000483000, + VK_OBJECT_TYPE_DATA_GRAPH_PIPELINE_SESSION_ARM = 1000507000, + VK_OBJECT_TYPE_EXTERNAL_COMPUTE_QUEUE_NV = 1000556000, + VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_EXT = 1000572000, + VK_OBJECT_TYPE_INDIRECT_EXECUTION_SET_EXT = 1000572001, + VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR = VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE, + VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR = VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION, + VK_OBJECT_TYPE_PRIVATE_DATA_SLOT_EXT = VK_OBJECT_TYPE_PRIVATE_DATA_SLOT, + VK_OBJECT_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkObjectType; + +typedef enum VkVendorId { + VK_VENDOR_ID_KHRONOS = 0x10000, + VK_VENDOR_ID_VIV = 0x10001, + VK_VENDOR_ID_VSI = 0x10002, + VK_VENDOR_ID_KAZAN = 0x10003, + VK_VENDOR_ID_CODEPLAY = 0x10004, + VK_VENDOR_ID_MESA = 0x10005, + VK_VENDOR_ID_POCL = 0x10006, + VK_VENDOR_ID_MOBILEYE = 0x10007, + VK_VENDOR_ID_MAX_ENUM = 0x7FFFFFFF +} VkVendorId; + +typedef enum VkSystemAllocationScope { + VK_SYSTEM_ALLOCATION_SCOPE_COMMAND = 0, + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT = 1, + VK_SYSTEM_ALLOCATION_SCOPE_CACHE = 2, + VK_SYSTEM_ALLOCATION_SCOPE_DEVICE = 3, + VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE = 4, + VK_SYSTEM_ALLOCATION_SCOPE_MAX_ENUM = 0x7FFFFFFF +} VkSystemAllocationScope; + +typedef enum VkInternalAllocationType { + VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE = 0, + VK_INTERNAL_ALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkInternalAllocationType; + +typedef enum VkFormat { + VK_FORMAT_UNDEFINED = 
0, + VK_FORMAT_R4G4_UNORM_PACK8 = 1, + VK_FORMAT_R4G4B4A4_UNORM_PACK16 = 2, + VK_FORMAT_B4G4R4A4_UNORM_PACK16 = 3, + VK_FORMAT_R5G6B5_UNORM_PACK16 = 4, + VK_FORMAT_B5G6R5_UNORM_PACK16 = 5, + VK_FORMAT_R5G5B5A1_UNORM_PACK16 = 6, + VK_FORMAT_B5G5R5A1_UNORM_PACK16 = 7, + VK_FORMAT_A1R5G5B5_UNORM_PACK16 = 8, + VK_FORMAT_R8_UNORM = 9, + VK_FORMAT_R8_SNORM = 10, + VK_FORMAT_R8_USCALED = 11, + VK_FORMAT_R8_SSCALED = 12, + VK_FORMAT_R8_UINT = 13, + VK_FORMAT_R8_SINT = 14, + VK_FORMAT_R8_SRGB = 15, + VK_FORMAT_R8G8_UNORM = 16, + VK_FORMAT_R8G8_SNORM = 17, + VK_FORMAT_R8G8_USCALED = 18, + VK_FORMAT_R8G8_SSCALED = 19, + VK_FORMAT_R8G8_UINT = 20, + VK_FORMAT_R8G8_SINT = 21, + VK_FORMAT_R8G8_SRGB = 22, + VK_FORMAT_R8G8B8_UNORM = 23, + VK_FORMAT_R8G8B8_SNORM = 24, + VK_FORMAT_R8G8B8_USCALED = 25, + VK_FORMAT_R8G8B8_SSCALED = 26, + VK_FORMAT_R8G8B8_UINT = 27, + VK_FORMAT_R8G8B8_SINT = 28, + VK_FORMAT_R8G8B8_SRGB = 29, + VK_FORMAT_B8G8R8_UNORM = 30, + VK_FORMAT_B8G8R8_SNORM = 31, + VK_FORMAT_B8G8R8_USCALED = 32, + VK_FORMAT_B8G8R8_SSCALED = 33, + VK_FORMAT_B8G8R8_UINT = 34, + VK_FORMAT_B8G8R8_SINT = 35, + VK_FORMAT_B8G8R8_SRGB = 36, + VK_FORMAT_R8G8B8A8_UNORM = 37, + VK_FORMAT_R8G8B8A8_SNORM = 38, + VK_FORMAT_R8G8B8A8_USCALED = 39, + VK_FORMAT_R8G8B8A8_SSCALED = 40, + VK_FORMAT_R8G8B8A8_UINT = 41, + VK_FORMAT_R8G8B8A8_SINT = 42, + VK_FORMAT_R8G8B8A8_SRGB = 43, + VK_FORMAT_B8G8R8A8_UNORM = 44, + VK_FORMAT_B8G8R8A8_SNORM = 45, + VK_FORMAT_B8G8R8A8_USCALED = 46, + VK_FORMAT_B8G8R8A8_SSCALED = 47, + VK_FORMAT_B8G8R8A8_UINT = 48, + VK_FORMAT_B8G8R8A8_SINT = 49, + VK_FORMAT_B8G8R8A8_SRGB = 50, + VK_FORMAT_A8B8G8R8_UNORM_PACK32 = 51, + VK_FORMAT_A8B8G8R8_SNORM_PACK32 = 52, + VK_FORMAT_A8B8G8R8_USCALED_PACK32 = 53, + VK_FORMAT_A8B8G8R8_SSCALED_PACK32 = 54, + VK_FORMAT_A8B8G8R8_UINT_PACK32 = 55, + VK_FORMAT_A8B8G8R8_SINT_PACK32 = 56, + VK_FORMAT_A8B8G8R8_SRGB_PACK32 = 57, + VK_FORMAT_A2R10G10B10_UNORM_PACK32 = 58, + VK_FORMAT_A2R10G10B10_SNORM_PACK32 = 59, + VK_FORMAT_A2R10G10B10_USCALED_PACK32 = 60, + VK_FORMAT_A2R10G10B10_SSCALED_PACK32 = 61, + VK_FORMAT_A2R10G10B10_UINT_PACK32 = 62, + VK_FORMAT_A2R10G10B10_SINT_PACK32 = 63, + VK_FORMAT_A2B10G10R10_UNORM_PACK32 = 64, + VK_FORMAT_A2B10G10R10_SNORM_PACK32 = 65, + VK_FORMAT_A2B10G10R10_USCALED_PACK32 = 66, + VK_FORMAT_A2B10G10R10_SSCALED_PACK32 = 67, + VK_FORMAT_A2B10G10R10_UINT_PACK32 = 68, + VK_FORMAT_A2B10G10R10_SINT_PACK32 = 69, + VK_FORMAT_R16_UNORM = 70, + VK_FORMAT_R16_SNORM = 71, + VK_FORMAT_R16_USCALED = 72, + VK_FORMAT_R16_SSCALED = 73, + VK_FORMAT_R16_UINT = 74, + VK_FORMAT_R16_SINT = 75, + VK_FORMAT_R16_SFLOAT = 76, + VK_FORMAT_R16G16_UNORM = 77, + VK_FORMAT_R16G16_SNORM = 78, + VK_FORMAT_R16G16_USCALED = 79, + VK_FORMAT_R16G16_SSCALED = 80, + VK_FORMAT_R16G16_UINT = 81, + VK_FORMAT_R16G16_SINT = 82, + VK_FORMAT_R16G16_SFLOAT = 83, + VK_FORMAT_R16G16B16_UNORM = 84, + VK_FORMAT_R16G16B16_SNORM = 85, + VK_FORMAT_R16G16B16_USCALED = 86, + VK_FORMAT_R16G16B16_SSCALED = 87, + VK_FORMAT_R16G16B16_UINT = 88, + VK_FORMAT_R16G16B16_SINT = 89, + VK_FORMAT_R16G16B16_SFLOAT = 90, + VK_FORMAT_R16G16B16A16_UNORM = 91, + VK_FORMAT_R16G16B16A16_SNORM = 92, + VK_FORMAT_R16G16B16A16_USCALED = 93, + VK_FORMAT_R16G16B16A16_SSCALED = 94, + VK_FORMAT_R16G16B16A16_UINT = 95, + VK_FORMAT_R16G16B16A16_SINT = 96, + VK_FORMAT_R16G16B16A16_SFLOAT = 97, + VK_FORMAT_R32_UINT = 98, + VK_FORMAT_R32_SINT = 99, + VK_FORMAT_R32_SFLOAT = 100, + VK_FORMAT_R32G32_UINT = 101, + VK_FORMAT_R32G32_SINT = 102, + VK_FORMAT_R32G32_SFLOAT = 103, + VK_FORMAT_R32G32B32_UINT = 104, + 
VK_FORMAT_R32G32B32_SINT = 105, + VK_FORMAT_R32G32B32_SFLOAT = 106, + VK_FORMAT_R32G32B32A32_UINT = 107, + VK_FORMAT_R32G32B32A32_SINT = 108, + VK_FORMAT_R32G32B32A32_SFLOAT = 109, + VK_FORMAT_R64_UINT = 110, + VK_FORMAT_R64_SINT = 111, + VK_FORMAT_R64_SFLOAT = 112, + VK_FORMAT_R64G64_UINT = 113, + VK_FORMAT_R64G64_SINT = 114, + VK_FORMAT_R64G64_SFLOAT = 115, + VK_FORMAT_R64G64B64_UINT = 116, + VK_FORMAT_R64G64B64_SINT = 117, + VK_FORMAT_R64G64B64_SFLOAT = 118, + VK_FORMAT_R64G64B64A64_UINT = 119, + VK_FORMAT_R64G64B64A64_SINT = 120, + VK_FORMAT_R64G64B64A64_SFLOAT = 121, + VK_FORMAT_B10G11R11_UFLOAT_PACK32 = 122, + VK_FORMAT_E5B9G9R9_UFLOAT_PACK32 = 123, + VK_FORMAT_D16_UNORM = 124, + VK_FORMAT_X8_D24_UNORM_PACK32 = 125, + VK_FORMAT_D32_SFLOAT = 126, + VK_FORMAT_S8_UINT = 127, + VK_FORMAT_D16_UNORM_S8_UINT = 128, + VK_FORMAT_D24_UNORM_S8_UINT = 129, + VK_FORMAT_D32_SFLOAT_S8_UINT = 130, + VK_FORMAT_BC1_RGB_UNORM_BLOCK = 131, + VK_FORMAT_BC1_RGB_SRGB_BLOCK = 132, + VK_FORMAT_BC1_RGBA_UNORM_BLOCK = 133, + VK_FORMAT_BC1_RGBA_SRGB_BLOCK = 134, + VK_FORMAT_BC2_UNORM_BLOCK = 135, + VK_FORMAT_BC2_SRGB_BLOCK = 136, + VK_FORMAT_BC3_UNORM_BLOCK = 137, + VK_FORMAT_BC3_SRGB_BLOCK = 138, + VK_FORMAT_BC4_UNORM_BLOCK = 139, + VK_FORMAT_BC4_SNORM_BLOCK = 140, + VK_FORMAT_BC5_UNORM_BLOCK = 141, + VK_FORMAT_BC5_SNORM_BLOCK = 142, + VK_FORMAT_BC6H_UFLOAT_BLOCK = 143, + VK_FORMAT_BC6H_SFLOAT_BLOCK = 144, + VK_FORMAT_BC7_UNORM_BLOCK = 145, + VK_FORMAT_BC7_SRGB_BLOCK = 146, + VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK = 147, + VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK = 148, + VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK = 149, + VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK = 150, + VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK = 151, + VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK = 152, + VK_FORMAT_EAC_R11_UNORM_BLOCK = 153, + VK_FORMAT_EAC_R11_SNORM_BLOCK = 154, + VK_FORMAT_EAC_R11G11_UNORM_BLOCK = 155, + VK_FORMAT_EAC_R11G11_SNORM_BLOCK = 156, + VK_FORMAT_ASTC_4x4_UNORM_BLOCK = 157, + VK_FORMAT_ASTC_4x4_SRGB_BLOCK = 158, + VK_FORMAT_ASTC_5x4_UNORM_BLOCK = 159, + VK_FORMAT_ASTC_5x4_SRGB_BLOCK = 160, + VK_FORMAT_ASTC_5x5_UNORM_BLOCK = 161, + VK_FORMAT_ASTC_5x5_SRGB_BLOCK = 162, + VK_FORMAT_ASTC_6x5_UNORM_BLOCK = 163, + VK_FORMAT_ASTC_6x5_SRGB_BLOCK = 164, + VK_FORMAT_ASTC_6x6_UNORM_BLOCK = 165, + VK_FORMAT_ASTC_6x6_SRGB_BLOCK = 166, + VK_FORMAT_ASTC_8x5_UNORM_BLOCK = 167, + VK_FORMAT_ASTC_8x5_SRGB_BLOCK = 168, + VK_FORMAT_ASTC_8x6_UNORM_BLOCK = 169, + VK_FORMAT_ASTC_8x6_SRGB_BLOCK = 170, + VK_FORMAT_ASTC_8x8_UNORM_BLOCK = 171, + VK_FORMAT_ASTC_8x8_SRGB_BLOCK = 172, + VK_FORMAT_ASTC_10x5_UNORM_BLOCK = 173, + VK_FORMAT_ASTC_10x5_SRGB_BLOCK = 174, + VK_FORMAT_ASTC_10x6_UNORM_BLOCK = 175, + VK_FORMAT_ASTC_10x6_SRGB_BLOCK = 176, + VK_FORMAT_ASTC_10x8_UNORM_BLOCK = 177, + VK_FORMAT_ASTC_10x8_SRGB_BLOCK = 178, + VK_FORMAT_ASTC_10x10_UNORM_BLOCK = 179, + VK_FORMAT_ASTC_10x10_SRGB_BLOCK = 180, + VK_FORMAT_ASTC_12x10_UNORM_BLOCK = 181, + VK_FORMAT_ASTC_12x10_SRGB_BLOCK = 182, + VK_FORMAT_ASTC_12x12_UNORM_BLOCK = 183, + VK_FORMAT_ASTC_12x12_SRGB_BLOCK = 184, + VK_FORMAT_G8B8G8R8_422_UNORM = 1000156000, + VK_FORMAT_B8G8R8G8_422_UNORM = 1000156001, + VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM = 1000156002, + VK_FORMAT_G8_B8R8_2PLANE_420_UNORM = 1000156003, + VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM = 1000156004, + VK_FORMAT_G8_B8R8_2PLANE_422_UNORM = 1000156005, + VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM = 1000156006, + VK_FORMAT_R10X6_UNORM_PACK16 = 1000156007, + VK_FORMAT_R10X6G10X6_UNORM_2PACK16 = 1000156008, + VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16 = 1000156009, + 
VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16 = 1000156010, + VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16 = 1000156011, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16 = 1000156012, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16 = 1000156013, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16 = 1000156014, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16 = 1000156015, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16 = 1000156016, + VK_FORMAT_R12X4_UNORM_PACK16 = 1000156017, + VK_FORMAT_R12X4G12X4_UNORM_2PACK16 = 1000156018, + VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16 = 1000156019, + VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16 = 1000156020, + VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16 = 1000156021, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16 = 1000156022, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16 = 1000156023, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16 = 1000156024, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16 = 1000156025, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16 = 1000156026, + VK_FORMAT_G16B16G16R16_422_UNORM = 1000156027, + VK_FORMAT_B16G16R16G16_422_UNORM = 1000156028, + VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM = 1000156029, + VK_FORMAT_G16_B16R16_2PLANE_420_UNORM = 1000156030, + VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM = 1000156031, + VK_FORMAT_G16_B16R16_2PLANE_422_UNORM = 1000156032, + VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM = 1000156033, + VK_FORMAT_G8_B8R8_2PLANE_444_UNORM = 1000330000, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16 = 1000330001, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16 = 1000330002, + VK_FORMAT_G16_B16R16_2PLANE_444_UNORM = 1000330003, + VK_FORMAT_A4R4G4B4_UNORM_PACK16 = 1000340000, + VK_FORMAT_A4B4G4R4_UNORM_PACK16 = 1000340001, + VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK = 1000066000, + VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK = 1000066001, + VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK = 1000066002, + VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK = 1000066003, + VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK = 1000066004, + VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK = 1000066005, + VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK = 1000066006, + VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK = 1000066007, + VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK = 1000066008, + VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK = 1000066009, + VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK = 1000066010, + VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK = 1000066011, + VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK = 1000066012, + VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK = 1000066013, + VK_FORMAT_A1B5G5R5_UNORM_PACK16 = 1000470000, + VK_FORMAT_A8_UNORM = 1000470001, + VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG = 1000054000, + VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG = 1000054001, + VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG = 1000054002, + VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG = 1000054003, + VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG = 1000054004, + VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG = 1000054005, + VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG = 1000054006, + VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG = 1000054007, + VK_FORMAT_ASTC_3x3x3_UNORM_BLOCK_EXT = 1000288000, + VK_FORMAT_ASTC_3x3x3_SRGB_BLOCK_EXT = 1000288001, + VK_FORMAT_ASTC_3x3x3_SFLOAT_BLOCK_EXT = 1000288002, + VK_FORMAT_ASTC_4x3x3_UNORM_BLOCK_EXT = 1000288003, + VK_FORMAT_ASTC_4x3x3_SRGB_BLOCK_EXT = 1000288004, + VK_FORMAT_ASTC_4x3x3_SFLOAT_BLOCK_EXT = 1000288005, + VK_FORMAT_ASTC_4x4x3_UNORM_BLOCK_EXT = 1000288006, + VK_FORMAT_ASTC_4x4x3_SRGB_BLOCK_EXT = 1000288007, + VK_FORMAT_ASTC_4x4x3_SFLOAT_BLOCK_EXT = 1000288008, + VK_FORMAT_ASTC_4x4x4_UNORM_BLOCK_EXT = 1000288009, + 
VK_FORMAT_ASTC_4x4x4_SRGB_BLOCK_EXT = 1000288010, + VK_FORMAT_ASTC_4x4x4_SFLOAT_BLOCK_EXT = 1000288011, + VK_FORMAT_ASTC_5x4x4_UNORM_BLOCK_EXT = 1000288012, + VK_FORMAT_ASTC_5x4x4_SRGB_BLOCK_EXT = 1000288013, + VK_FORMAT_ASTC_5x4x4_SFLOAT_BLOCK_EXT = 1000288014, + VK_FORMAT_ASTC_5x5x4_UNORM_BLOCK_EXT = 1000288015, + VK_FORMAT_ASTC_5x5x4_SRGB_BLOCK_EXT = 1000288016, + VK_FORMAT_ASTC_5x5x4_SFLOAT_BLOCK_EXT = 1000288017, + VK_FORMAT_ASTC_5x5x5_UNORM_BLOCK_EXT = 1000288018, + VK_FORMAT_ASTC_5x5x5_SRGB_BLOCK_EXT = 1000288019, + VK_FORMAT_ASTC_5x5x5_SFLOAT_BLOCK_EXT = 1000288020, + VK_FORMAT_ASTC_6x5x5_UNORM_BLOCK_EXT = 1000288021, + VK_FORMAT_ASTC_6x5x5_SRGB_BLOCK_EXT = 1000288022, + VK_FORMAT_ASTC_6x5x5_SFLOAT_BLOCK_EXT = 1000288023, + VK_FORMAT_ASTC_6x6x5_UNORM_BLOCK_EXT = 1000288024, + VK_FORMAT_ASTC_6x6x5_SRGB_BLOCK_EXT = 1000288025, + VK_FORMAT_ASTC_6x6x5_SFLOAT_BLOCK_EXT = 1000288026, + VK_FORMAT_ASTC_6x6x6_UNORM_BLOCK_EXT = 1000288027, + VK_FORMAT_ASTC_6x6x6_SRGB_BLOCK_EXT = 1000288028, + VK_FORMAT_ASTC_6x6x6_SFLOAT_BLOCK_EXT = 1000288029, + VK_FORMAT_R8_BOOL_ARM = 1000460000, + VK_FORMAT_R16G16_SFIXED5_NV = 1000464000, + VK_FORMAT_R10X6_UINT_PACK16_ARM = 1000609000, + VK_FORMAT_R10X6G10X6_UINT_2PACK16_ARM = 1000609001, + VK_FORMAT_R10X6G10X6B10X6A10X6_UINT_4PACK16_ARM = 1000609002, + VK_FORMAT_R12X4_UINT_PACK16_ARM = 1000609003, + VK_FORMAT_R12X4G12X4_UINT_2PACK16_ARM = 1000609004, + VK_FORMAT_R12X4G12X4B12X4A12X4_UINT_4PACK16_ARM = 1000609005, + VK_FORMAT_R14X2_UINT_PACK16_ARM = 1000609006, + VK_FORMAT_R14X2G14X2_UINT_2PACK16_ARM = 1000609007, + VK_FORMAT_R14X2G14X2B14X2A14X2_UINT_4PACK16_ARM = 1000609008, + VK_FORMAT_R14X2_UNORM_PACK16_ARM = 1000609009, + VK_FORMAT_R14X2G14X2_UNORM_2PACK16_ARM = 1000609010, + VK_FORMAT_R14X2G14X2B14X2A14X2_UNORM_4PACK16_ARM = 1000609011, + VK_FORMAT_G14X2_B14X2R14X2_2PLANE_420_UNORM_3PACK16_ARM = 1000609012, + VK_FORMAT_G14X2_B14X2R14X2_2PLANE_422_UNORM_3PACK16_ARM = 1000609013, + VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK, + VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK, + VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK, + VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK, + VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK, + VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK, + VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK, + VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK, + VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK, + VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK, + VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK, + VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK, + VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK, + VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK_EXT = VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK, + VK_FORMAT_G8B8G8R8_422_UNORM_KHR = VK_FORMAT_G8B8G8R8_422_UNORM, + VK_FORMAT_B8G8R8G8_422_UNORM_KHR = VK_FORMAT_B8G8R8G8_422_UNORM, + VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM, + VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM, + VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM, + VK_FORMAT_G8_B8R8_2PLANE_422_UNORM_KHR = VK_FORMAT_G8_B8R8_2PLANE_422_UNORM, + VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM, + 
VK_FORMAT_R10X6_UNORM_PACK16_KHR = VK_FORMAT_R10X6_UNORM_PACK16, + VK_FORMAT_R10X6G10X6_UNORM_2PACK16_KHR = VK_FORMAT_R10X6G10X6_UNORM_2PACK16, + VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16_KHR = VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16, + VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16_KHR = VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16, + VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16_KHR = VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16, + VK_FORMAT_R12X4_UNORM_PACK16_KHR = VK_FORMAT_R12X4_UNORM_PACK16, + VK_FORMAT_R12X4G12X4_UNORM_2PACK16_KHR = VK_FORMAT_R12X4G12X4_UNORM_2PACK16, + VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16_KHR = VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16, + VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16_KHR = VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16, + VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16_KHR = VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16, + VK_FORMAT_G16B16G16R16_422_UNORM_KHR = VK_FORMAT_G16B16G16R16_422_UNORM, + VK_FORMAT_B16G16R16G16_422_UNORM_KHR = VK_FORMAT_B16G16R16G16_422_UNORM, + VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM, + VK_FORMAT_G16_B16R16_2PLANE_420_UNORM_KHR = VK_FORMAT_G16_B16R16_2PLANE_420_UNORM, + VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM, + VK_FORMAT_G16_B16R16_2PLANE_422_UNORM_KHR = VK_FORMAT_G16_B16R16_2PLANE_422_UNORM, + VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM, + VK_FORMAT_G8_B8R8_2PLANE_444_UNORM_EXT = VK_FORMAT_G8_B8R8_2PLANE_444_UNORM, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16_EXT = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16_EXT = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16, + VK_FORMAT_G16_B16R16_2PLANE_444_UNORM_EXT = VK_FORMAT_G16_B16R16_2PLANE_444_UNORM, + VK_FORMAT_A4R4G4B4_UNORM_PACK16_EXT = VK_FORMAT_A4R4G4B4_UNORM_PACK16, + VK_FORMAT_A4B4G4R4_UNORM_PACK16_EXT = VK_FORMAT_A4B4G4R4_UNORM_PACK16, + // VK_FORMAT_R16G16_S10_5_NV is a legacy alias + VK_FORMAT_R16G16_S10_5_NV = VK_FORMAT_R16G16_SFIXED5_NV, + VK_FORMAT_A1B5G5R5_UNORM_PACK16_KHR = VK_FORMAT_A1B5G5R5_UNORM_PACK16, + VK_FORMAT_A8_UNORM_KHR = VK_FORMAT_A8_UNORM, + VK_FORMAT_MAX_ENUM = 0x7FFFFFFF +} VkFormat; + +typedef enum VkImageTiling { + VK_IMAGE_TILING_OPTIMAL = 0, + VK_IMAGE_TILING_LINEAR = 1, + 
VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT = 1000158000, + VK_IMAGE_TILING_MAX_ENUM = 0x7FFFFFFF +} VkImageTiling; + +typedef enum VkImageType { + VK_IMAGE_TYPE_1D = 0, + VK_IMAGE_TYPE_2D = 1, + VK_IMAGE_TYPE_3D = 2, + VK_IMAGE_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkImageType; + +typedef enum VkPhysicalDeviceType { + VK_PHYSICAL_DEVICE_TYPE_OTHER = 0, + VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU = 1, + VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU = 2, + VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU = 3, + VK_PHYSICAL_DEVICE_TYPE_CPU = 4, + VK_PHYSICAL_DEVICE_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkPhysicalDeviceType; + +typedef enum VkQueryType { + VK_QUERY_TYPE_OCCLUSION = 0, + VK_QUERY_TYPE_PIPELINE_STATISTICS = 1, + VK_QUERY_TYPE_TIMESTAMP = 2, + VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR = 1000023000, + VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT = 1000028004, + VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR = 1000116000, + VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR = 1000150000, + VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR = 1000150001, + VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV = 1000165000, + VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL = 1000210000, + VK_QUERY_TYPE_VIDEO_ENCODE_FEEDBACK_KHR = 1000299000, + VK_QUERY_TYPE_MESH_PRIMITIVES_GENERATED_EXT = 1000328000, + VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT = 1000382000, + VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR = 1000386000, + VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR = 1000386001, + VK_QUERY_TYPE_MICROMAP_SERIALIZATION_SIZE_EXT = 1000396000, + VK_QUERY_TYPE_MICROMAP_COMPACTED_SIZE_EXT = 1000396001, + VK_QUERY_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkQueryType; + +typedef enum VkSharingMode { + VK_SHARING_MODE_EXCLUSIVE = 0, + VK_SHARING_MODE_CONCURRENT = 1, + VK_SHARING_MODE_MAX_ENUM = 0x7FFFFFFF +} VkSharingMode; + +typedef enum VkComponentSwizzle { + VK_COMPONENT_SWIZZLE_IDENTITY = 0, + VK_COMPONENT_SWIZZLE_ZERO = 1, + VK_COMPONENT_SWIZZLE_ONE = 2, + VK_COMPONENT_SWIZZLE_R = 3, + VK_COMPONENT_SWIZZLE_G = 4, + VK_COMPONENT_SWIZZLE_B = 5, + VK_COMPONENT_SWIZZLE_A = 6, + VK_COMPONENT_SWIZZLE_MAX_ENUM = 0x7FFFFFFF +} VkComponentSwizzle; + +typedef enum VkImageViewType { + VK_IMAGE_VIEW_TYPE_1D = 0, + VK_IMAGE_VIEW_TYPE_2D = 1, + VK_IMAGE_VIEW_TYPE_3D = 2, + VK_IMAGE_VIEW_TYPE_CUBE = 3, + VK_IMAGE_VIEW_TYPE_1D_ARRAY = 4, + VK_IMAGE_VIEW_TYPE_2D_ARRAY = 5, + VK_IMAGE_VIEW_TYPE_CUBE_ARRAY = 6, + VK_IMAGE_VIEW_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkImageViewType; + +typedef enum VkCommandBufferLevel { + VK_COMMAND_BUFFER_LEVEL_PRIMARY = 0, + VK_COMMAND_BUFFER_LEVEL_SECONDARY = 1, + VK_COMMAND_BUFFER_LEVEL_MAX_ENUM = 0x7FFFFFFF +} VkCommandBufferLevel; + +typedef enum VkIndexType { + VK_INDEX_TYPE_UINT16 = 0, + VK_INDEX_TYPE_UINT32 = 1, + VK_INDEX_TYPE_UINT8 = 1000265000, + VK_INDEX_TYPE_NONE_KHR = 1000165000, + VK_INDEX_TYPE_NONE_NV = VK_INDEX_TYPE_NONE_KHR, + VK_INDEX_TYPE_UINT8_EXT = VK_INDEX_TYPE_UINT8, + VK_INDEX_TYPE_UINT8_KHR = VK_INDEX_TYPE_UINT8, + VK_INDEX_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkIndexType; + +typedef enum VkPipelineCacheHeaderVersion { + VK_PIPELINE_CACHE_HEADER_VERSION_ONE = 1, + VK_PIPELINE_CACHE_HEADER_VERSION_DATA_GRAPH_QCOM = 1000629000, + VK_PIPELINE_CACHE_HEADER_VERSION_MAX_ENUM = 0x7FFFFFFF +} VkPipelineCacheHeaderVersion; + +typedef enum VkBorderColor { + VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK = 0, + VK_BORDER_COLOR_INT_TRANSPARENT_BLACK = 1, + VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK = 2, + VK_BORDER_COLOR_INT_OPAQUE_BLACK = 3, + VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE = 4, + VK_BORDER_COLOR_INT_OPAQUE_WHITE = 5, + 
VK_BORDER_COLOR_FLOAT_CUSTOM_EXT = 1000287003, + VK_BORDER_COLOR_INT_CUSTOM_EXT = 1000287004, + VK_BORDER_COLOR_MAX_ENUM = 0x7FFFFFFF +} VkBorderColor; + +typedef enum VkFilter { + VK_FILTER_NEAREST = 0, + VK_FILTER_LINEAR = 1, + VK_FILTER_CUBIC_EXT = 1000015000, + VK_FILTER_CUBIC_IMG = VK_FILTER_CUBIC_EXT, + VK_FILTER_MAX_ENUM = 0x7FFFFFFF +} VkFilter; + +typedef enum VkSamplerAddressMode { + VK_SAMPLER_ADDRESS_MODE_REPEAT = 0, + VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT = 1, + VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE = 2, + VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER = 3, + VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE = 4, + // VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE_KHR is a legacy alias + VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE_KHR = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE, + VK_SAMPLER_ADDRESS_MODE_MAX_ENUM = 0x7FFFFFFF +} VkSamplerAddressMode; + +typedef enum VkSamplerMipmapMode { + VK_SAMPLER_MIPMAP_MODE_NEAREST = 0, + VK_SAMPLER_MIPMAP_MODE_LINEAR = 1, + VK_SAMPLER_MIPMAP_MODE_MAX_ENUM = 0x7FFFFFFF +} VkSamplerMipmapMode; + +typedef enum VkCompareOp { + VK_COMPARE_OP_NEVER = 0, + VK_COMPARE_OP_LESS = 1, + VK_COMPARE_OP_EQUAL = 2, + VK_COMPARE_OP_LESS_OR_EQUAL = 3, + VK_COMPARE_OP_GREATER = 4, + VK_COMPARE_OP_NOT_EQUAL = 5, + VK_COMPARE_OP_GREATER_OR_EQUAL = 6, + VK_COMPARE_OP_ALWAYS = 7, + VK_COMPARE_OP_MAX_ENUM = 0x7FFFFFFF +} VkCompareOp; + +typedef enum VkDescriptorType { + VK_DESCRIPTOR_TYPE_SAMPLER = 0, + VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER = 1, + VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE = 2, + VK_DESCRIPTOR_TYPE_STORAGE_IMAGE = 3, + VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER = 4, + VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER = 5, + VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER = 6, + VK_DESCRIPTOR_TYPE_STORAGE_BUFFER = 7, + VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC = 8, + VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC = 9, + VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT = 10, + VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK = 1000138000, + VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR = 1000150000, + VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV = 1000165000, + VK_DESCRIPTOR_TYPE_SAMPLE_WEIGHT_IMAGE_QCOM = 1000440000, + VK_DESCRIPTOR_TYPE_BLOCK_MATCH_IMAGE_QCOM = 1000440001, + VK_DESCRIPTOR_TYPE_TENSOR_ARM = 1000460000, + VK_DESCRIPTOR_TYPE_MUTABLE_EXT = 1000351000, + VK_DESCRIPTOR_TYPE_PARTITIONED_ACCELERATION_STRUCTURE_NV = 1000570000, + VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT = VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK, + VK_DESCRIPTOR_TYPE_MUTABLE_VALVE = VK_DESCRIPTOR_TYPE_MUTABLE_EXT, + VK_DESCRIPTOR_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorType; + +typedef enum VkPipelineBindPoint { + VK_PIPELINE_BIND_POINT_GRAPHICS = 0, + VK_PIPELINE_BIND_POINT_COMPUTE = 1, +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_PIPELINE_BIND_POINT_EXECUTION_GRAPH_AMDX = 1000134000, +#endif + VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR = 1000165000, + VK_PIPELINE_BIND_POINT_SUBPASS_SHADING_HUAWEI = 1000369003, + VK_PIPELINE_BIND_POINT_DATA_GRAPH_ARM = 1000507000, + VK_PIPELINE_BIND_POINT_RAY_TRACING_NV = VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, + VK_PIPELINE_BIND_POINT_MAX_ENUM = 0x7FFFFFFF +} VkPipelineBindPoint; + +typedef enum VkBlendFactor { + VK_BLEND_FACTOR_ZERO = 0, + VK_BLEND_FACTOR_ONE = 1, + VK_BLEND_FACTOR_SRC_COLOR = 2, + VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR = 3, + VK_BLEND_FACTOR_DST_COLOR = 4, + VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR = 5, + VK_BLEND_FACTOR_SRC_ALPHA = 6, + VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA = 7, + VK_BLEND_FACTOR_DST_ALPHA = 8, + VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA = 9, + VK_BLEND_FACTOR_CONSTANT_COLOR = 10, + 
VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR = 11, + VK_BLEND_FACTOR_CONSTANT_ALPHA = 12, + VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA = 13, + VK_BLEND_FACTOR_SRC_ALPHA_SATURATE = 14, + VK_BLEND_FACTOR_SRC1_COLOR = 15, + VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR = 16, + VK_BLEND_FACTOR_SRC1_ALPHA = 17, + VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA = 18, + VK_BLEND_FACTOR_MAX_ENUM = 0x7FFFFFFF +} VkBlendFactor; + +typedef enum VkBlendOp { + VK_BLEND_OP_ADD = 0, + VK_BLEND_OP_SUBTRACT = 1, + VK_BLEND_OP_REVERSE_SUBTRACT = 2, + VK_BLEND_OP_MIN = 3, + VK_BLEND_OP_MAX = 4, + VK_BLEND_OP_ZERO_EXT = 1000148000, + VK_BLEND_OP_SRC_EXT = 1000148001, + VK_BLEND_OP_DST_EXT = 1000148002, + VK_BLEND_OP_SRC_OVER_EXT = 1000148003, + VK_BLEND_OP_DST_OVER_EXT = 1000148004, + VK_BLEND_OP_SRC_IN_EXT = 1000148005, + VK_BLEND_OP_DST_IN_EXT = 1000148006, + VK_BLEND_OP_SRC_OUT_EXT = 1000148007, + VK_BLEND_OP_DST_OUT_EXT = 1000148008, + VK_BLEND_OP_SRC_ATOP_EXT = 1000148009, + VK_BLEND_OP_DST_ATOP_EXT = 1000148010, + VK_BLEND_OP_XOR_EXT = 1000148011, + VK_BLEND_OP_MULTIPLY_EXT = 1000148012, + VK_BLEND_OP_SCREEN_EXT = 1000148013, + VK_BLEND_OP_OVERLAY_EXT = 1000148014, + VK_BLEND_OP_DARKEN_EXT = 1000148015, + VK_BLEND_OP_LIGHTEN_EXT = 1000148016, + VK_BLEND_OP_COLORDODGE_EXT = 1000148017, + VK_BLEND_OP_COLORBURN_EXT = 1000148018, + VK_BLEND_OP_HARDLIGHT_EXT = 1000148019, + VK_BLEND_OP_SOFTLIGHT_EXT = 1000148020, + VK_BLEND_OP_DIFFERENCE_EXT = 1000148021, + VK_BLEND_OP_EXCLUSION_EXT = 1000148022, + VK_BLEND_OP_INVERT_EXT = 1000148023, + VK_BLEND_OP_INVERT_RGB_EXT = 1000148024, + VK_BLEND_OP_LINEARDODGE_EXT = 1000148025, + VK_BLEND_OP_LINEARBURN_EXT = 1000148026, + VK_BLEND_OP_VIVIDLIGHT_EXT = 1000148027, + VK_BLEND_OP_LINEARLIGHT_EXT = 1000148028, + VK_BLEND_OP_PINLIGHT_EXT = 1000148029, + VK_BLEND_OP_HARDMIX_EXT = 1000148030, + VK_BLEND_OP_HSL_HUE_EXT = 1000148031, + VK_BLEND_OP_HSL_SATURATION_EXT = 1000148032, + VK_BLEND_OP_HSL_COLOR_EXT = 1000148033, + VK_BLEND_OP_HSL_LUMINOSITY_EXT = 1000148034, + VK_BLEND_OP_PLUS_EXT = 1000148035, + VK_BLEND_OP_PLUS_CLAMPED_EXT = 1000148036, + VK_BLEND_OP_PLUS_CLAMPED_ALPHA_EXT = 1000148037, + VK_BLEND_OP_PLUS_DARKER_EXT = 1000148038, + VK_BLEND_OP_MINUS_EXT = 1000148039, + VK_BLEND_OP_MINUS_CLAMPED_EXT = 1000148040, + VK_BLEND_OP_CONTRAST_EXT = 1000148041, + VK_BLEND_OP_INVERT_OVG_EXT = 1000148042, + VK_BLEND_OP_RED_EXT = 1000148043, + VK_BLEND_OP_GREEN_EXT = 1000148044, + VK_BLEND_OP_BLUE_EXT = 1000148045, + VK_BLEND_OP_MAX_ENUM = 0x7FFFFFFF +} VkBlendOp; + +typedef enum VkDynamicState { + VK_DYNAMIC_STATE_VIEWPORT = 0, + VK_DYNAMIC_STATE_SCISSOR = 1, + VK_DYNAMIC_STATE_LINE_WIDTH = 2, + VK_DYNAMIC_STATE_DEPTH_BIAS = 3, + VK_DYNAMIC_STATE_BLEND_CONSTANTS = 4, + VK_DYNAMIC_STATE_DEPTH_BOUNDS = 5, + VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK = 6, + VK_DYNAMIC_STATE_STENCIL_WRITE_MASK = 7, + VK_DYNAMIC_STATE_STENCIL_REFERENCE = 8, + VK_DYNAMIC_STATE_CULL_MODE = 1000267000, + VK_DYNAMIC_STATE_FRONT_FACE = 1000267001, + VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY = 1000267002, + VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT = 1000267003, + VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT = 1000267004, + VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE = 1000267005, + VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE = 1000267006, + VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE = 1000267007, + VK_DYNAMIC_STATE_DEPTH_COMPARE_OP = 1000267008, + VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE = 1000267009, + VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE = 1000267010, + VK_DYNAMIC_STATE_STENCIL_OP = 1000267011, + VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE = 1000377001, + 
VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE = 1000377002, + VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE = 1000377004, + VK_DYNAMIC_STATE_LINE_STIPPLE = 1000259000, + VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV = 1000087000, + VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT = 1000099000, + VK_DYNAMIC_STATE_DISCARD_RECTANGLE_ENABLE_EXT = 1000099001, + VK_DYNAMIC_STATE_DISCARD_RECTANGLE_MODE_EXT = 1000099002, + VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT = 1000143000, + VK_DYNAMIC_STATE_RAY_TRACING_PIPELINE_STACK_SIZE_KHR = 1000347000, + VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV = 1000164004, + VK_DYNAMIC_STATE_VIEWPORT_COARSE_SAMPLE_ORDER_NV = 1000164006, + VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_ENABLE_NV = 1000205000, + VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV = 1000205001, + VK_DYNAMIC_STATE_FRAGMENT_SHADING_RATE_KHR = 1000226000, + VK_DYNAMIC_STATE_VERTEX_INPUT_EXT = 1000352000, + VK_DYNAMIC_STATE_PATCH_CONTROL_POINTS_EXT = 1000377000, + VK_DYNAMIC_STATE_LOGIC_OP_EXT = 1000377003, + VK_DYNAMIC_STATE_COLOR_WRITE_ENABLE_EXT = 1000381000, + VK_DYNAMIC_STATE_DEPTH_CLAMP_ENABLE_EXT = 1000455003, + VK_DYNAMIC_STATE_POLYGON_MODE_EXT = 1000455004, + VK_DYNAMIC_STATE_RASTERIZATION_SAMPLES_EXT = 1000455005, + VK_DYNAMIC_STATE_SAMPLE_MASK_EXT = 1000455006, + VK_DYNAMIC_STATE_ALPHA_TO_COVERAGE_ENABLE_EXT = 1000455007, + VK_DYNAMIC_STATE_ALPHA_TO_ONE_ENABLE_EXT = 1000455008, + VK_DYNAMIC_STATE_LOGIC_OP_ENABLE_EXT = 1000455009, + VK_DYNAMIC_STATE_COLOR_BLEND_ENABLE_EXT = 1000455010, + VK_DYNAMIC_STATE_COLOR_BLEND_EQUATION_EXT = 1000455011, + VK_DYNAMIC_STATE_COLOR_WRITE_MASK_EXT = 1000455012, + VK_DYNAMIC_STATE_TESSELLATION_DOMAIN_ORIGIN_EXT = 1000455002, + VK_DYNAMIC_STATE_RASTERIZATION_STREAM_EXT = 1000455013, + VK_DYNAMIC_STATE_CONSERVATIVE_RASTERIZATION_MODE_EXT = 1000455014, + VK_DYNAMIC_STATE_EXTRA_PRIMITIVE_OVERESTIMATION_SIZE_EXT = 1000455015, + VK_DYNAMIC_STATE_DEPTH_CLIP_ENABLE_EXT = 1000455016, + VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_ENABLE_EXT = 1000455017, + VK_DYNAMIC_STATE_COLOR_BLEND_ADVANCED_EXT = 1000455018, + VK_DYNAMIC_STATE_PROVOKING_VERTEX_MODE_EXT = 1000455019, + VK_DYNAMIC_STATE_LINE_RASTERIZATION_MODE_EXT = 1000455020, + VK_DYNAMIC_STATE_LINE_STIPPLE_ENABLE_EXT = 1000455021, + VK_DYNAMIC_STATE_DEPTH_CLIP_NEGATIVE_ONE_TO_ONE_EXT = 1000455022, + VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_ENABLE_NV = 1000455023, + VK_DYNAMIC_STATE_VIEWPORT_SWIZZLE_NV = 1000455024, + VK_DYNAMIC_STATE_COVERAGE_TO_COLOR_ENABLE_NV = 1000455025, + VK_DYNAMIC_STATE_COVERAGE_TO_COLOR_LOCATION_NV = 1000455026, + VK_DYNAMIC_STATE_COVERAGE_MODULATION_MODE_NV = 1000455027, + VK_DYNAMIC_STATE_COVERAGE_MODULATION_TABLE_ENABLE_NV = 1000455028, + VK_DYNAMIC_STATE_COVERAGE_MODULATION_TABLE_NV = 1000455029, + VK_DYNAMIC_STATE_SHADING_RATE_IMAGE_ENABLE_NV = 1000455030, + VK_DYNAMIC_STATE_REPRESENTATIVE_FRAGMENT_TEST_ENABLE_NV = 1000455031, + VK_DYNAMIC_STATE_COVERAGE_REDUCTION_MODE_NV = 1000455032, + VK_DYNAMIC_STATE_ATTACHMENT_FEEDBACK_LOOP_ENABLE_EXT = 1000524000, + VK_DYNAMIC_STATE_DEPTH_CLAMP_RANGE_EXT = 1000582000, + VK_DYNAMIC_STATE_LINE_STIPPLE_EXT = VK_DYNAMIC_STATE_LINE_STIPPLE, + VK_DYNAMIC_STATE_CULL_MODE_EXT = VK_DYNAMIC_STATE_CULL_MODE, + VK_DYNAMIC_STATE_FRONT_FACE_EXT = VK_DYNAMIC_STATE_FRONT_FACE, + VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT = VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY, + VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT = VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT, + VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT = VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT, + VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT = VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE, + 
VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT = VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE, + VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT = VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE, + VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT = VK_DYNAMIC_STATE_DEPTH_COMPARE_OP, + VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT = VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE, + VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT = VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE, + VK_DYNAMIC_STATE_STENCIL_OP_EXT = VK_DYNAMIC_STATE_STENCIL_OP, + VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT = VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE, + VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE_EXT = VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE, + VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE_EXT = VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE, + VK_DYNAMIC_STATE_LINE_STIPPLE_KHR = VK_DYNAMIC_STATE_LINE_STIPPLE, + VK_DYNAMIC_STATE_MAX_ENUM = 0x7FFFFFFF +} VkDynamicState; + +typedef enum VkFrontFace { + VK_FRONT_FACE_COUNTER_CLOCKWISE = 0, + VK_FRONT_FACE_CLOCKWISE = 1, + VK_FRONT_FACE_MAX_ENUM = 0x7FFFFFFF +} VkFrontFace; + +typedef enum VkVertexInputRate { + VK_VERTEX_INPUT_RATE_VERTEX = 0, + VK_VERTEX_INPUT_RATE_INSTANCE = 1, + VK_VERTEX_INPUT_RATE_MAX_ENUM = 0x7FFFFFFF +} VkVertexInputRate; + +typedef enum VkPrimitiveTopology { + VK_PRIMITIVE_TOPOLOGY_POINT_LIST = 0, + VK_PRIMITIVE_TOPOLOGY_LINE_LIST = 1, + VK_PRIMITIVE_TOPOLOGY_LINE_STRIP = 2, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST = 3, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP = 4, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN = 5, + VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY = 6, + VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY = 7, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY = 8, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY = 9, + VK_PRIMITIVE_TOPOLOGY_PATCH_LIST = 10, + VK_PRIMITIVE_TOPOLOGY_MAX_ENUM = 0x7FFFFFFF +} VkPrimitiveTopology; + +typedef enum VkPolygonMode { + VK_POLYGON_MODE_FILL = 0, + VK_POLYGON_MODE_LINE = 1, + VK_POLYGON_MODE_POINT = 2, + VK_POLYGON_MODE_FILL_RECTANGLE_NV = 1000153000, + VK_POLYGON_MODE_MAX_ENUM = 0x7FFFFFFF +} VkPolygonMode; + +typedef enum VkStencilOp { + VK_STENCIL_OP_KEEP = 0, + VK_STENCIL_OP_ZERO = 1, + VK_STENCIL_OP_REPLACE = 2, + VK_STENCIL_OP_INCREMENT_AND_CLAMP = 3, + VK_STENCIL_OP_DECREMENT_AND_CLAMP = 4, + VK_STENCIL_OP_INVERT = 5, + VK_STENCIL_OP_INCREMENT_AND_WRAP = 6, + VK_STENCIL_OP_DECREMENT_AND_WRAP = 7, + VK_STENCIL_OP_MAX_ENUM = 0x7FFFFFFF +} VkStencilOp; + +typedef enum VkLogicOp { + VK_LOGIC_OP_CLEAR = 0, + VK_LOGIC_OP_AND = 1, + VK_LOGIC_OP_AND_REVERSE = 2, + VK_LOGIC_OP_COPY = 3, + VK_LOGIC_OP_AND_INVERTED = 4, + VK_LOGIC_OP_NO_OP = 5, + VK_LOGIC_OP_XOR = 6, + VK_LOGIC_OP_OR = 7, + VK_LOGIC_OP_NOR = 8, + VK_LOGIC_OP_EQUIVALENT = 9, + VK_LOGIC_OP_INVERT = 10, + VK_LOGIC_OP_OR_REVERSE = 11, + VK_LOGIC_OP_COPY_INVERTED = 12, + VK_LOGIC_OP_OR_INVERTED = 13, + VK_LOGIC_OP_NAND = 14, + VK_LOGIC_OP_SET = 15, + VK_LOGIC_OP_MAX_ENUM = 0x7FFFFFFF +} VkLogicOp; + +typedef enum VkAttachmentLoadOp { + VK_ATTACHMENT_LOAD_OP_LOAD = 0, + VK_ATTACHMENT_LOAD_OP_CLEAR = 1, + VK_ATTACHMENT_LOAD_OP_DONT_CARE = 2, + VK_ATTACHMENT_LOAD_OP_NONE = 1000400000, + VK_ATTACHMENT_LOAD_OP_NONE_EXT = VK_ATTACHMENT_LOAD_OP_NONE, + VK_ATTACHMENT_LOAD_OP_NONE_KHR = VK_ATTACHMENT_LOAD_OP_NONE, + VK_ATTACHMENT_LOAD_OP_MAX_ENUM = 0x7FFFFFFF +} VkAttachmentLoadOp; + +typedef enum VkAttachmentStoreOp { + VK_ATTACHMENT_STORE_OP_STORE = 0, + VK_ATTACHMENT_STORE_OP_DONT_CARE = 1, + VK_ATTACHMENT_STORE_OP_NONE = 1000301000, + VK_ATTACHMENT_STORE_OP_NONE_KHR = VK_ATTACHMENT_STORE_OP_NONE, + VK_ATTACHMENT_STORE_OP_NONE_QCOM 
= VK_ATTACHMENT_STORE_OP_NONE, + VK_ATTACHMENT_STORE_OP_NONE_EXT = VK_ATTACHMENT_STORE_OP_NONE, + VK_ATTACHMENT_STORE_OP_MAX_ENUM = 0x7FFFFFFF +} VkAttachmentStoreOp; + +typedef enum VkSubpassContents { + VK_SUBPASS_CONTENTS_INLINE = 0, + VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS = 1, + VK_SUBPASS_CONTENTS_INLINE_AND_SECONDARY_COMMAND_BUFFERS_KHR = 1000451000, + VK_SUBPASS_CONTENTS_INLINE_AND_SECONDARY_COMMAND_BUFFERS_EXT = VK_SUBPASS_CONTENTS_INLINE_AND_SECONDARY_COMMAND_BUFFERS_KHR, + VK_SUBPASS_CONTENTS_MAX_ENUM = 0x7FFFFFFF +} VkSubpassContents; + +typedef enum VkAccessFlagBits { + VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0x00000001, + VK_ACCESS_INDEX_READ_BIT = 0x00000002, + VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 0x00000004, + VK_ACCESS_UNIFORM_READ_BIT = 0x00000008, + VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 0x00000010, + VK_ACCESS_SHADER_READ_BIT = 0x00000020, + VK_ACCESS_SHADER_WRITE_BIT = 0x00000040, + VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 0x00000080, + VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 0x00000100, + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 0x00000200, + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 0x00000400, + VK_ACCESS_TRANSFER_READ_BIT = 0x00000800, + VK_ACCESS_TRANSFER_WRITE_BIT = 0x00001000, + VK_ACCESS_HOST_READ_BIT = 0x00002000, + VK_ACCESS_HOST_WRITE_BIT = 0x00004000, + VK_ACCESS_MEMORY_READ_BIT = 0x00008000, + VK_ACCESS_MEMORY_WRITE_BIT = 0x00010000, + VK_ACCESS_NONE = 0, + VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT = 0x02000000, + VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT = 0x04000000, + VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT = 0x08000000, + VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT = 0x00100000, + VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT = 0x00080000, + VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR = 0x00200000, + VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR = 0x00400000, + VK_ACCESS_FRAGMENT_DENSITY_MAP_READ_BIT_EXT = 0x01000000, + VK_ACCESS_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR = 0x00800000, + VK_ACCESS_COMMAND_PREPROCESS_READ_BIT_EXT = 0x00020000, + VK_ACCESS_COMMAND_PREPROCESS_WRITE_BIT_EXT = 0x00040000, + VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV = VK_ACCESS_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR, + VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV = VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR, + VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV = VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR, + VK_ACCESS_COMMAND_PREPROCESS_READ_BIT_NV = VK_ACCESS_COMMAND_PREPROCESS_READ_BIT_EXT, + VK_ACCESS_COMMAND_PREPROCESS_WRITE_BIT_NV = VK_ACCESS_COMMAND_PREPROCESS_WRITE_BIT_EXT, + VK_ACCESS_NONE_KHR = VK_ACCESS_NONE, + VK_ACCESS_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkAccessFlagBits; +typedef VkFlags VkAccessFlags; + +typedef enum VkImageAspectFlagBits { + VK_IMAGE_ASPECT_COLOR_BIT = 0x00000001, + VK_IMAGE_ASPECT_DEPTH_BIT = 0x00000002, + VK_IMAGE_ASPECT_STENCIL_BIT = 0x00000004, + VK_IMAGE_ASPECT_METADATA_BIT = 0x00000008, + VK_IMAGE_ASPECT_PLANE_0_BIT = 0x00000010, + VK_IMAGE_ASPECT_PLANE_1_BIT = 0x00000020, + VK_IMAGE_ASPECT_PLANE_2_BIT = 0x00000040, + VK_IMAGE_ASPECT_NONE = 0, + VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT = 0x00000080, + VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT = 0x00000100, + VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT = 0x00000200, + VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT = 0x00000400, + VK_IMAGE_ASPECT_PLANE_0_BIT_KHR = VK_IMAGE_ASPECT_PLANE_0_BIT, + VK_IMAGE_ASPECT_PLANE_1_BIT_KHR = VK_IMAGE_ASPECT_PLANE_1_BIT, + VK_IMAGE_ASPECT_PLANE_2_BIT_KHR = VK_IMAGE_ASPECT_PLANE_2_BIT, + VK_IMAGE_ASPECT_NONE_KHR = 
VK_IMAGE_ASPECT_NONE, + VK_IMAGE_ASPECT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkImageAspectFlagBits; +typedef VkFlags VkImageAspectFlags; + +typedef enum VkFormatFeatureFlagBits { + VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT = 0x00000001, + VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT = 0x00000002, + VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT = 0x00000004, + VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000008, + VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT = 0x00000010, + VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT = 0x00000020, + VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT = 0x00000040, + VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT = 0x00000080, + VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT = 0x00000100, + VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000200, + VK_FORMAT_FEATURE_BLIT_SRC_BIT = 0x00000400, + VK_FORMAT_FEATURE_BLIT_DST_BIT = 0x00000800, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT = 0x00001000, + VK_FORMAT_FEATURE_TRANSFER_SRC_BIT = 0x00004000, + VK_FORMAT_FEATURE_TRANSFER_DST_BIT = 0x00008000, + VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT = 0x00020000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT = 0x00040000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT = 0x00080000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT = 0x00100000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT = 0x00200000, + VK_FORMAT_FEATURE_DISJOINT_BIT = 0x00400000, + VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT = 0x00800000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT = 0x00010000, + VK_FORMAT_FEATURE_VIDEO_DECODE_OUTPUT_BIT_KHR = 0x02000000, + VK_FORMAT_FEATURE_VIDEO_DECODE_DPB_BIT_KHR = 0x04000000, + VK_FORMAT_FEATURE_ACCELERATION_STRUCTURE_VERTEX_BUFFER_BIT_KHR = 0x20000000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT = 0x00002000, + VK_FORMAT_FEATURE_FRAGMENT_DENSITY_MAP_BIT_EXT = 0x01000000, + VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x40000000, + VK_FORMAT_FEATURE_VIDEO_ENCODE_INPUT_BIT_KHR = 0x08000000, + VK_FORMAT_FEATURE_VIDEO_ENCODE_DPB_BIT_KHR = 0x10000000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT, + VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT, + VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR = VK_FORMAT_FEATURE_TRANSFER_DST_BIT, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT_EXT = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT, + VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT_KHR = VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT, + VK_FORMAT_FEATURE_DISJOINT_BIT_KHR = VK_FORMAT_FEATURE_DISJOINT_BIT, + VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT_KHR = VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT, + VK_FORMAT_FEATURE_FLAG_BITS_MAX_ENUM 
= 0x7FFFFFFF +} VkFormatFeatureFlagBits; +typedef VkFlags VkFormatFeatureFlags; + +typedef enum VkImageCreateFlagBits { + VK_IMAGE_CREATE_SPARSE_BINDING_BIT = 0x00000001, + VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002, + VK_IMAGE_CREATE_SPARSE_ALIASED_BIT = 0x00000004, + VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT = 0x00000008, + VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT = 0x00000010, + VK_IMAGE_CREATE_ALIAS_BIT = 0x00000400, + VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT = 0x00000040, + VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT = 0x00000020, + VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT = 0x00000080, + VK_IMAGE_CREATE_EXTENDED_USAGE_BIT = 0x00000100, + VK_IMAGE_CREATE_PROTECTED_BIT = 0x00000800, + VK_IMAGE_CREATE_DISJOINT_BIT = 0x00000200, + VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV = 0x00002000, + VK_IMAGE_CREATE_DESCRIPTOR_HEAP_CAPTURE_REPLAY_BIT_EXT = 0x00010000, + VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT = 0x00001000, + VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT = 0x00004000, + VK_IMAGE_CREATE_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_BIT_EXT = 0x00040000, + VK_IMAGE_CREATE_2D_VIEW_COMPATIBLE_BIT_EXT = 0x00020000, + VK_IMAGE_CREATE_VIDEO_PROFILE_INDEPENDENT_BIT_KHR = 0x00100000, + VK_IMAGE_CREATE_FRAGMENT_DENSITY_MAP_OFFSET_BIT_EXT = 0x00008000, + VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR = VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT, + VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR = VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT, + VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR = VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT, + VK_IMAGE_CREATE_EXTENDED_USAGE_BIT_KHR = VK_IMAGE_CREATE_EXTENDED_USAGE_BIT, + VK_IMAGE_CREATE_DISJOINT_BIT_KHR = VK_IMAGE_CREATE_DISJOINT_BIT, + VK_IMAGE_CREATE_ALIAS_BIT_KHR = VK_IMAGE_CREATE_ALIAS_BIT, + VK_IMAGE_CREATE_DESCRIPTOR_BUFFER_CAPTURE_REPLAY_BIT_EXT = VK_IMAGE_CREATE_DESCRIPTOR_HEAP_CAPTURE_REPLAY_BIT_EXT, + VK_IMAGE_CREATE_FRAGMENT_DENSITY_MAP_OFFSET_BIT_QCOM = VK_IMAGE_CREATE_FRAGMENT_DENSITY_MAP_OFFSET_BIT_EXT, + VK_IMAGE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkImageCreateFlagBits; +typedef VkFlags VkImageCreateFlags; + +typedef enum VkSampleCountFlagBits { + VK_SAMPLE_COUNT_1_BIT = 0x00000001, + VK_SAMPLE_COUNT_2_BIT = 0x00000002, + VK_SAMPLE_COUNT_4_BIT = 0x00000004, + VK_SAMPLE_COUNT_8_BIT = 0x00000008, + VK_SAMPLE_COUNT_16_BIT = 0x00000010, + VK_SAMPLE_COUNT_32_BIT = 0x00000020, + VK_SAMPLE_COUNT_64_BIT = 0x00000040, + VK_SAMPLE_COUNT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSampleCountFlagBits; +typedef VkFlags VkSampleCountFlags; + +typedef enum VkImageUsageFlagBits { + VK_IMAGE_USAGE_TRANSFER_SRC_BIT = 0x00000001, + VK_IMAGE_USAGE_TRANSFER_DST_BIT = 0x00000002, + VK_IMAGE_USAGE_SAMPLED_BIT = 0x00000004, + VK_IMAGE_USAGE_STORAGE_BIT = 0x00000008, + VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT = 0x00000010, + VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000020, + VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT = 0x00000040, + VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT = 0x00000080, + VK_IMAGE_USAGE_HOST_TRANSFER_BIT = 0x00400000, + VK_IMAGE_USAGE_VIDEO_DECODE_DST_BIT_KHR = 0x00000400, + VK_IMAGE_USAGE_VIDEO_DECODE_SRC_BIT_KHR = 0x00000800, + VK_IMAGE_USAGE_VIDEO_DECODE_DPB_BIT_KHR = 0x00001000, + VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT = 0x00000200, + VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x00000100, + VK_IMAGE_USAGE_VIDEO_ENCODE_DST_BIT_KHR = 0x00002000, + VK_IMAGE_USAGE_VIDEO_ENCODE_SRC_BIT_KHR = 0x00004000, + VK_IMAGE_USAGE_VIDEO_ENCODE_DPB_BIT_KHR = 0x00008000, + VK_IMAGE_USAGE_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT = 
0x00080000, + VK_IMAGE_USAGE_INVOCATION_MASK_BIT_HUAWEI = 0x00040000, + VK_IMAGE_USAGE_SAMPLE_WEIGHT_BIT_QCOM = 0x00100000, + VK_IMAGE_USAGE_SAMPLE_BLOCK_MATCH_BIT_QCOM = 0x00200000, + VK_IMAGE_USAGE_TENSOR_ALIASING_BIT_ARM = 0x00800000, + VK_IMAGE_USAGE_TILE_MEMORY_BIT_QCOM = 0x08000000, + VK_IMAGE_USAGE_VIDEO_ENCODE_QUANTIZATION_DELTA_MAP_BIT_KHR = 0x02000000, + VK_IMAGE_USAGE_VIDEO_ENCODE_EMPHASIS_MAP_BIT_KHR = 0x04000000, + VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV = VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR, + VK_IMAGE_USAGE_HOST_TRANSFER_BIT_EXT = VK_IMAGE_USAGE_HOST_TRANSFER_BIT, + VK_IMAGE_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkImageUsageFlagBits; +typedef VkFlags VkImageUsageFlags; + +typedef enum VkInstanceCreateFlagBits { + VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR = 0x00000001, + VK_INSTANCE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkInstanceCreateFlagBits; +typedef VkFlags VkInstanceCreateFlags; + +typedef enum VkMemoryHeapFlagBits { + VK_MEMORY_HEAP_DEVICE_LOCAL_BIT = 0x00000001, + VK_MEMORY_HEAP_MULTI_INSTANCE_BIT = 0x00000002, + VK_MEMORY_HEAP_TILE_MEMORY_BIT_QCOM = 0x00000008, + VK_MEMORY_HEAP_MULTI_INSTANCE_BIT_KHR = VK_MEMORY_HEAP_MULTI_INSTANCE_BIT, + VK_MEMORY_HEAP_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkMemoryHeapFlagBits; +typedef VkFlags VkMemoryHeapFlags; + +typedef enum VkMemoryPropertyFlagBits { + VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT = 0x00000001, + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT = 0x00000002, + VK_MEMORY_PROPERTY_HOST_COHERENT_BIT = 0x00000004, + VK_MEMORY_PROPERTY_HOST_CACHED_BIT = 0x00000008, + VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT = 0x00000010, + VK_MEMORY_PROPERTY_PROTECTED_BIT = 0x00000020, + VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD = 0x00000040, + VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD = 0x00000080, + VK_MEMORY_PROPERTY_RDMA_CAPABLE_BIT_NV = 0x00000100, + VK_MEMORY_PROPERTY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkMemoryPropertyFlagBits; +typedef VkFlags VkMemoryPropertyFlags; + +typedef enum VkQueueFlagBits { + VK_QUEUE_GRAPHICS_BIT = 0x00000001, + VK_QUEUE_COMPUTE_BIT = 0x00000002, + VK_QUEUE_TRANSFER_BIT = 0x00000004, + VK_QUEUE_SPARSE_BINDING_BIT = 0x00000008, + VK_QUEUE_PROTECTED_BIT = 0x00000010, + VK_QUEUE_VIDEO_DECODE_BIT_KHR = 0x00000020, + VK_QUEUE_VIDEO_ENCODE_BIT_KHR = 0x00000040, + VK_QUEUE_OPTICAL_FLOW_BIT_NV = 0x00000100, + VK_QUEUE_DATA_GRAPH_BIT_ARM = 0x00000400, + VK_QUEUE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueueFlagBits; +typedef VkFlags VkQueueFlags; +typedef VkFlags VkDeviceCreateFlags; + +typedef enum VkDeviceQueueCreateFlagBits { + VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT = 0x00000001, + VK_DEVICE_QUEUE_CREATE_INTERNALLY_SYNCHRONIZED_BIT_KHR = 0x00000004, + VK_DEVICE_QUEUE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDeviceQueueCreateFlagBits; +typedef VkFlags VkDeviceQueueCreateFlags; + +typedef enum VkPipelineStageFlagBits { + VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT = 0x00000001, + VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT = 0x00000002, + VK_PIPELINE_STAGE_VERTEX_INPUT_BIT = 0x00000004, + VK_PIPELINE_STAGE_VERTEX_SHADER_BIT = 0x00000008, + VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT = 0x00000010, + VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT = 0x00000020, + VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT = 0x00000040, + VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT = 0x00000080, + VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT = 0x00000100, + VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT = 0x00000200, + VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT = 0x00000400, + VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT = 0x00000800, + 
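+/*
+ * Note on the FlagBits/Flags pattern (illustrative comment, not part of the
+ * upstream Khronos header): each Vk*FlagBits enum names individual bits, and
+ * the matching Vk*Flags typedef is a plain VkFlags (uint32_t) carrying an
+ * OR-ed combination of them. A minimal sketch using the image-usage bits
+ * declared above:
+ *
+ *     VkImageUsageFlags usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT |
+ *                               VK_IMAGE_USAGE_SAMPLED_BIT;
+ *     if (usage & VK_IMAGE_USAGE_SAMPLED_BIT) {
+ *         // image can be sampled in shaders
+ *     }
+ *
+ * The *_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF members only force each enum to be
+ * 32 bits wide; they are not valid flag values.
+ */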
VK_PIPELINE_STAGE_TRANSFER_BIT = 0x00001000, + VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT = 0x00002000, + VK_PIPELINE_STAGE_HOST_BIT = 0x00004000, + VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT = 0x00008000, + VK_PIPELINE_STAGE_ALL_COMMANDS_BIT = 0x00010000, + VK_PIPELINE_STAGE_NONE = 0, + VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT = 0x01000000, + VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT = 0x00040000, + VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR = 0x02000000, + VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR = 0x00200000, + VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT = 0x00800000, + VK_PIPELINE_STAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x00400000, + VK_PIPELINE_STAGE_TASK_SHADER_BIT_EXT = 0x00080000, + VK_PIPELINE_STAGE_MESH_SHADER_BIT_EXT = 0x00100000, + VK_PIPELINE_STAGE_COMMAND_PREPROCESS_BIT_EXT = 0x00020000, + VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV = VK_PIPELINE_STAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR, + VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV = VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR, + VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV = VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR, + VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV = VK_PIPELINE_STAGE_TASK_SHADER_BIT_EXT, + VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV = VK_PIPELINE_STAGE_MESH_SHADER_BIT_EXT, + VK_PIPELINE_STAGE_COMMAND_PREPROCESS_BIT_NV = VK_PIPELINE_STAGE_COMMAND_PREPROCESS_BIT_EXT, + VK_PIPELINE_STAGE_NONE_KHR = VK_PIPELINE_STAGE_NONE, + VK_PIPELINE_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPipelineStageFlagBits; +typedef VkFlags VkPipelineStageFlags; + +typedef enum VkMemoryMapFlagBits { + VK_MEMORY_MAP_PLACED_BIT_EXT = 0x00000001, + VK_MEMORY_MAP_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkMemoryMapFlagBits; +typedef VkFlags VkMemoryMapFlags; + +typedef enum VkSparseMemoryBindFlagBits { + VK_SPARSE_MEMORY_BIND_METADATA_BIT = 0x00000001, + VK_SPARSE_MEMORY_BIND_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSparseMemoryBindFlagBits; +typedef VkFlags VkSparseMemoryBindFlags; + +typedef enum VkSparseImageFormatFlagBits { + VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT = 0x00000001, + VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT = 0x00000002, + VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT = 0x00000004, + VK_SPARSE_IMAGE_FORMAT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSparseImageFormatFlagBits; +typedef VkFlags VkSparseImageFormatFlags; + +typedef enum VkFenceCreateFlagBits { + VK_FENCE_CREATE_SIGNALED_BIT = 0x00000001, + VK_FENCE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkFenceCreateFlagBits; +typedef VkFlags VkFenceCreateFlags; +typedef VkFlags VkSemaphoreCreateFlags; + +typedef enum VkQueryPoolCreateFlagBits { + VK_QUERY_POOL_CREATE_RESET_BIT_KHR = 0x00000001, + VK_QUERY_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueryPoolCreateFlagBits; +typedef VkFlags VkQueryPoolCreateFlags; + +typedef enum VkQueryPipelineStatisticFlagBits { + VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT = 0x00000001, + VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT = 0x00000002, + VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT = 0x00000004, + VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT = 0x00000008, + VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT = 0x00000010, + VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT = 0x00000020, + VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT = 0x00000040, + VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT = 0x00000080, + VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT = 0x00000100, + 
VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT = 0x00000200, + VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT = 0x00000400, + VK_QUERY_PIPELINE_STATISTIC_TASK_SHADER_INVOCATIONS_BIT_EXT = 0x00000800, + VK_QUERY_PIPELINE_STATISTIC_MESH_SHADER_INVOCATIONS_BIT_EXT = 0x00001000, + VK_QUERY_PIPELINE_STATISTIC_CLUSTER_CULLING_SHADER_INVOCATIONS_BIT_HUAWEI = 0x00002000, + VK_QUERY_PIPELINE_STATISTIC_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueryPipelineStatisticFlagBits; +typedef VkFlags VkQueryPipelineStatisticFlags; + +typedef enum VkQueryResultFlagBits { + VK_QUERY_RESULT_64_BIT = 0x00000001, + VK_QUERY_RESULT_WAIT_BIT = 0x00000002, + VK_QUERY_RESULT_WITH_AVAILABILITY_BIT = 0x00000004, + VK_QUERY_RESULT_PARTIAL_BIT = 0x00000008, + VK_QUERY_RESULT_WITH_STATUS_BIT_KHR = 0x00000010, + VK_QUERY_RESULT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueryResultFlagBits; +typedef VkFlags VkQueryResultFlags; + +typedef enum VkBufferCreateFlagBits { + VK_BUFFER_CREATE_SPARSE_BINDING_BIT = 0x00000001, + VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002, + VK_BUFFER_CREATE_SPARSE_ALIASED_BIT = 0x00000004, + VK_BUFFER_CREATE_PROTECTED_BIT = 0x00000008, + VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT = 0x00000010, + VK_BUFFER_CREATE_DESCRIPTOR_BUFFER_CAPTURE_REPLAY_BIT_EXT = 0x00000020, + VK_BUFFER_CREATE_VIDEO_PROFILE_INDEPENDENT_BIT_KHR = 0x00000040, + VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT = VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT, + VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR = VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT, + VK_BUFFER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkBufferCreateFlagBits; +typedef VkFlags VkBufferCreateFlags; + +typedef enum VkBufferUsageFlagBits { + VK_BUFFER_USAGE_TRANSFER_SRC_BIT = 0x00000001, + VK_BUFFER_USAGE_TRANSFER_DST_BIT = 0x00000002, + VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000004, + VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT = 0x00000008, + VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT = 0x00000010, + VK_BUFFER_USAGE_STORAGE_BUFFER_BIT = 0x00000020, + VK_BUFFER_USAGE_INDEX_BUFFER_BIT = 0x00000040, + VK_BUFFER_USAGE_VERTEX_BUFFER_BIT = 0x00000080, + VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT = 0x00000100, + VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT = 0x00020000, + VK_BUFFER_USAGE_VIDEO_DECODE_SRC_BIT_KHR = 0x00002000, + VK_BUFFER_USAGE_VIDEO_DECODE_DST_BIT_KHR = 0x00004000, + VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT = 0x00000800, + VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT = 0x00001000, + VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT = 0x00000200, +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_BUFFER_USAGE_EXECUTION_GRAPH_SCRATCH_BIT_AMDX = 0x02000000, +#endif + VK_BUFFER_USAGE_DESCRIPTOR_HEAP_BIT_EXT = 0x10000000, + VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR = 0x00080000, + VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR = 0x00100000, + VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR = 0x00000400, + VK_BUFFER_USAGE_VIDEO_ENCODE_DST_BIT_KHR = 0x00008000, + VK_BUFFER_USAGE_VIDEO_ENCODE_SRC_BIT_KHR = 0x00010000, + VK_BUFFER_USAGE_SAMPLER_DESCRIPTOR_BUFFER_BIT_EXT = 0x00200000, + VK_BUFFER_USAGE_RESOURCE_DESCRIPTOR_BUFFER_BIT_EXT = 0x00400000, + VK_BUFFER_USAGE_PUSH_DESCRIPTORS_DESCRIPTOR_BUFFER_BIT_EXT = 0x04000000, + VK_BUFFER_USAGE_MICROMAP_BUILD_INPUT_READ_ONLY_BIT_EXT = 0x00800000, + VK_BUFFER_USAGE_MICROMAP_STORAGE_BIT_EXT = 0x01000000, + VK_BUFFER_USAGE_TILE_MEMORY_BIT_QCOM = 0x08000000, + VK_BUFFER_USAGE_RAY_TRACING_BIT_NV = 
VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR, + VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT, + VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_KHR = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT, + VK_BUFFER_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkBufferUsageFlagBits; +typedef VkFlags VkBufferUsageFlags; + +typedef enum VkImageViewCreateFlagBits { + VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DYNAMIC_BIT_EXT = 0x00000001, + VK_IMAGE_VIEW_CREATE_DESCRIPTOR_BUFFER_CAPTURE_REPLAY_BIT_EXT = 0x00000004, + VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DEFERRED_BIT_EXT = 0x00000002, + VK_IMAGE_VIEW_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkImageViewCreateFlagBits; +typedef VkFlags VkImageViewCreateFlags; + +typedef enum VkDependencyFlagBits { + VK_DEPENDENCY_BY_REGION_BIT = 0x00000001, + VK_DEPENDENCY_DEVICE_GROUP_BIT = 0x00000004, + VK_DEPENDENCY_VIEW_LOCAL_BIT = 0x00000002, + VK_DEPENDENCY_FEEDBACK_LOOP_BIT_EXT = 0x00000008, + VK_DEPENDENCY_QUEUE_FAMILY_OWNERSHIP_TRANSFER_USE_ALL_STAGES_BIT_KHR = 0x00000020, + VK_DEPENDENCY_ASYMMETRIC_EVENT_BIT_KHR = 0x00000040, + VK_DEPENDENCY_VIEW_LOCAL_BIT_KHR = VK_DEPENDENCY_VIEW_LOCAL_BIT, + VK_DEPENDENCY_DEVICE_GROUP_BIT_KHR = VK_DEPENDENCY_DEVICE_GROUP_BIT, + VK_DEPENDENCY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDependencyFlagBits; +typedef VkFlags VkDependencyFlags; + +typedef enum VkCommandPoolCreateFlagBits { + VK_COMMAND_POOL_CREATE_TRANSIENT_BIT = 0x00000001, + VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT = 0x00000002, + VK_COMMAND_POOL_CREATE_PROTECTED_BIT = 0x00000004, + VK_COMMAND_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandPoolCreateFlagBits; +typedef VkFlags VkCommandPoolCreateFlags; + +typedef enum VkCommandPoolResetFlagBits { + VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT = 0x00000001, + VK_COMMAND_POOL_RESET_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandPoolResetFlagBits; +typedef VkFlags VkCommandPoolResetFlags; + +typedef enum VkCommandBufferUsageFlagBits { + VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT = 0x00000001, + VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT = 0x00000002, + VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT = 0x00000004, + VK_COMMAND_BUFFER_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandBufferUsageFlagBits; +typedef VkFlags VkCommandBufferUsageFlags; + +typedef enum VkQueryControlFlagBits { + VK_QUERY_CONTROL_PRECISE_BIT = 0x00000001, + VK_QUERY_CONTROL_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueryControlFlagBits; +typedef VkFlags VkQueryControlFlags; + +typedef enum VkCommandBufferResetFlagBits { + VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT = 0x00000001, + VK_COMMAND_BUFFER_RESET_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandBufferResetFlagBits; +typedef VkFlags VkCommandBufferResetFlags; + +typedef enum VkEventCreateFlagBits { + VK_EVENT_CREATE_DEVICE_ONLY_BIT = 0x00000001, + VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR = VK_EVENT_CREATE_DEVICE_ONLY_BIT, + VK_EVENT_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkEventCreateFlagBits; +typedef VkFlags VkEventCreateFlags; +typedef VkFlags VkBufferViewCreateFlags; +typedef VkFlags VkShaderModuleCreateFlags; + +typedef enum VkPipelineCacheCreateFlagBits { + VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001, + VK_PIPELINE_CACHE_CREATE_INTERNALLY_SYNCHRONIZED_MERGE_BIT_KHR = 0x00000008, + VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT = VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT, + VK_PIPELINE_CACHE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPipelineCacheCreateFlagBits; +typedef VkFlags 
VkPipelineCacheCreateFlags; + +typedef enum VkPipelineCreateFlagBits { + VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT = 0x00000001, + VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT = 0x00000002, + VK_PIPELINE_CREATE_DERIVATIVE_BIT = 0x00000004, + VK_PIPELINE_CREATE_DISPATCH_BASE_BIT = 0x00000010, + VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT = 0x00000008, + VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT = 0x00000100, + VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT = 0x00000200, + VK_PIPELINE_CREATE_NO_PROTECTED_ACCESS_BIT = 0x08000000, + VK_PIPELINE_CREATE_PROTECTED_ACCESS_ONLY_BIT = 0x40000000, + VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR = 0x00004000, + VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR = 0x00008000, + VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR = 0x00010000, + VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR = 0x00020000, + VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR = 0x00001000, + VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR = 0x00002000, + VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR = 0x00080000, + VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NV = 0x00000020, + VK_PIPELINE_CREATE_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT = 0x00400000, + VK_PIPELINE_CREATE_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x00200000, + VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR = 0x00000040, + VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR = 0x00000080, + VK_PIPELINE_CREATE_INDIRECT_BINDABLE_BIT_NV = 0x00040000, + VK_PIPELINE_CREATE_LIBRARY_BIT_KHR = 0x00000800, + VK_PIPELINE_CREATE_DESCRIPTOR_BUFFER_BIT_EXT = 0x20000000, + VK_PIPELINE_CREATE_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT = 0x00800000, + VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT = 0x00000400, + VK_PIPELINE_CREATE_RAY_TRACING_ALLOW_MOTION_BIT_NV = 0x00100000, + VK_PIPELINE_CREATE_COLOR_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT = 0x02000000, + VK_PIPELINE_CREATE_DEPTH_STENCIL_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT = 0x04000000, + VK_PIPELINE_CREATE_RAY_TRACING_OPACITY_MICROMAP_BIT_EXT = 0x01000000, +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_PIPELINE_CREATE_RAY_TRACING_DISPLACEMENT_MICROMAP_BIT_NV = 0x10000000, +#endif + // VK_PIPELINE_CREATE_DISPATCH_BASE is a legacy alias + VK_PIPELINE_CREATE_DISPATCH_BASE = VK_PIPELINE_CREATE_DISPATCH_BASE_BIT, + VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHR = VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT, + VK_PIPELINE_CREATE_DISPATCH_BASE_BIT_KHR = VK_PIPELINE_CREATE_DISPATCH_BASE_BIT, + // VK_PIPELINE_CREATE_DISPATCH_BASE_KHR is a legacy alias + VK_PIPELINE_CREATE_DISPATCH_BASE_KHR = VK_PIPELINE_CREATE_DISPATCH_BASE_BIT, + // VK_PIPELINE_RASTERIZATION_STATE_CREATE_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT is a legacy alias + VK_PIPELINE_RASTERIZATION_STATE_CREATE_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT = VK_PIPELINE_CREATE_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT, + // VK_PIPELINE_RASTERIZATION_STATE_CREATE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR is a legacy alias + VK_PIPELINE_RASTERIZATION_STATE_CREATE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = VK_PIPELINE_CREATE_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR, + VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT = VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT, + VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT = VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT, + VK_PIPELINE_CREATE_NO_PROTECTED_ACCESS_BIT_EXT = 
VK_PIPELINE_CREATE_NO_PROTECTED_ACCESS_BIT, + VK_PIPELINE_CREATE_PROTECTED_ACCESS_ONLY_BIT_EXT = VK_PIPELINE_CREATE_PROTECTED_ACCESS_ONLY_BIT, + VK_PIPELINE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPipelineCreateFlagBits; +typedef VkFlags VkPipelineCreateFlags; + +typedef enum VkPipelineShaderStageCreateFlagBits { + VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT = 0x00000001, + VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT = 0x00000002, + VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT = VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT, + VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT = VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT, + VK_PIPELINE_SHADER_STAGE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPipelineShaderStageCreateFlagBits; +typedef VkFlags VkPipelineShaderStageCreateFlags; + +typedef enum VkShaderStageFlagBits { + VK_SHADER_STAGE_VERTEX_BIT = 0x00000001, + VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT = 0x00000002, + VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT = 0x00000004, + VK_SHADER_STAGE_GEOMETRY_BIT = 0x00000008, + VK_SHADER_STAGE_FRAGMENT_BIT = 0x00000010, + VK_SHADER_STAGE_COMPUTE_BIT = 0x00000020, + VK_SHADER_STAGE_ALL_GRAPHICS = 0x0000001F, + VK_SHADER_STAGE_ALL = 0x7FFFFFFF, + VK_SHADER_STAGE_RAYGEN_BIT_KHR = 0x00000100, + VK_SHADER_STAGE_ANY_HIT_BIT_KHR = 0x00000200, + VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR = 0x00000400, + VK_SHADER_STAGE_MISS_BIT_KHR = 0x00000800, + VK_SHADER_STAGE_INTERSECTION_BIT_KHR = 0x00001000, + VK_SHADER_STAGE_CALLABLE_BIT_KHR = 0x00002000, + VK_SHADER_STAGE_TASK_BIT_EXT = 0x00000040, + VK_SHADER_STAGE_MESH_BIT_EXT = 0x00000080, + VK_SHADER_STAGE_SUBPASS_SHADING_BIT_HUAWEI = 0x00004000, + VK_SHADER_STAGE_CLUSTER_CULLING_BIT_HUAWEI = 0x00080000, + VK_SHADER_STAGE_RAYGEN_BIT_NV = VK_SHADER_STAGE_RAYGEN_BIT_KHR, + VK_SHADER_STAGE_ANY_HIT_BIT_NV = VK_SHADER_STAGE_ANY_HIT_BIT_KHR, + VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV = VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR, + VK_SHADER_STAGE_MISS_BIT_NV = VK_SHADER_STAGE_MISS_BIT_KHR, + VK_SHADER_STAGE_INTERSECTION_BIT_NV = VK_SHADER_STAGE_INTERSECTION_BIT_KHR, + VK_SHADER_STAGE_CALLABLE_BIT_NV = VK_SHADER_STAGE_CALLABLE_BIT_KHR, + VK_SHADER_STAGE_TASK_BIT_NV = VK_SHADER_STAGE_TASK_BIT_EXT, + VK_SHADER_STAGE_MESH_BIT_NV = VK_SHADER_STAGE_MESH_BIT_EXT, + VK_SHADER_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkShaderStageFlagBits; + +typedef enum VkPipelineLayoutCreateFlagBits { + VK_PIPELINE_LAYOUT_CREATE_INDEPENDENT_SETS_BIT_EXT = 0x00000002, + VK_PIPELINE_LAYOUT_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPipelineLayoutCreateFlagBits; +typedef VkFlags VkPipelineLayoutCreateFlags; +typedef VkFlags VkShaderStageFlags; + +typedef enum VkSamplerCreateFlagBits { + VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT = 0x00000001, + VK_SAMPLER_CREATE_SUBSAMPLED_COARSE_RECONSTRUCTION_BIT_EXT = 0x00000002, + VK_SAMPLER_CREATE_DESCRIPTOR_BUFFER_CAPTURE_REPLAY_BIT_EXT = 0x00000008, + VK_SAMPLER_CREATE_NON_SEAMLESS_CUBE_MAP_BIT_EXT = 0x00000004, + VK_SAMPLER_CREATE_IMAGE_PROCESSING_BIT_QCOM = 0x00000010, + VK_SAMPLER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSamplerCreateFlagBits; +typedef VkFlags VkSamplerCreateFlags; + +typedef enum VkDescriptorPoolCreateFlagBits { + VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT = 0x00000001, + VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT = 0x00000002, + VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_EXT = 0x00000004, + VK_DESCRIPTOR_POOL_CREATE_ALLOW_OVERALLOCATION_SETS_BIT_NV = 0x00000008, + 
VK_DESCRIPTOR_POOL_CREATE_ALLOW_OVERALLOCATION_POOLS_BIT_NV = 0x00000010, + VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT = VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT, + VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_VALVE = VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_EXT, + VK_DESCRIPTOR_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorPoolCreateFlagBits; +typedef VkFlags VkDescriptorPoolCreateFlags; +typedef VkFlags VkDescriptorPoolResetFlags; + +typedef enum VkDescriptorSetLayoutCreateFlagBits { + VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT = 0x00000002, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT = 0x00000001, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_DESCRIPTOR_BUFFER_BIT_EXT = 0x00000010, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_EMBEDDED_IMMUTABLE_SAMPLERS_BIT_EXT = 0x00000020, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_INDIRECT_BINDABLE_BIT_NV = 0x00000080, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_HOST_ONLY_POOL_BIT_EXT = 0x00000004, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_PER_STAGE_BIT_NV = 0x00000040, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_HOST_ONLY_POOL_BIT_VALVE = VK_DESCRIPTOR_SET_LAYOUT_CREATE_HOST_ONLY_POOL_BIT_EXT, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorSetLayoutCreateFlagBits; +typedef VkFlags VkDescriptorSetLayoutCreateFlags; + +typedef enum VkColorComponentFlagBits { + VK_COLOR_COMPONENT_R_BIT = 0x00000001, + VK_COLOR_COMPONENT_G_BIT = 0x00000002, + VK_COLOR_COMPONENT_B_BIT = 0x00000004, + VK_COLOR_COMPONENT_A_BIT = 0x00000008, + VK_COLOR_COMPONENT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkColorComponentFlagBits; +typedef VkFlags VkColorComponentFlags; + +typedef enum VkCullModeFlagBits { + VK_CULL_MODE_NONE = 0, + VK_CULL_MODE_FRONT_BIT = 0x00000001, + VK_CULL_MODE_BACK_BIT = 0x00000002, + VK_CULL_MODE_FRONT_AND_BACK = 0x00000003, + VK_CULL_MODE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCullModeFlagBits; +typedef VkFlags VkCullModeFlags; +typedef VkFlags VkPipelineVertexInputStateCreateFlags; +typedef VkFlags VkPipelineInputAssemblyStateCreateFlags; +typedef VkFlags VkPipelineTessellationStateCreateFlags; +typedef VkFlags VkPipelineViewportStateCreateFlags; +typedef VkFlags VkPipelineRasterizationStateCreateFlags; +typedef VkFlags VkPipelineMultisampleStateCreateFlags; + +typedef enum VkPipelineDepthStencilStateCreateFlagBits { + VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_EXT = 0x00000001, + VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_STENCIL_ACCESS_BIT_EXT = 0x00000002, + VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_ARM = VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_EXT, + VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_STENCIL_ACCESS_BIT_ARM = VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_STENCIL_ACCESS_BIT_EXT, + VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPipelineDepthStencilStateCreateFlagBits; +typedef VkFlags VkPipelineDepthStencilStateCreateFlags; + +typedef enum VkPipelineColorBlendStateCreateFlagBits { + VK_PIPELINE_COLOR_BLEND_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_BIT_EXT = 0x00000001, + 
VK_PIPELINE_COLOR_BLEND_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_BIT_ARM = VK_PIPELINE_COLOR_BLEND_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_BIT_EXT, + VK_PIPELINE_COLOR_BLEND_STATE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPipelineColorBlendStateCreateFlagBits; +typedef VkFlags VkPipelineColorBlendStateCreateFlags; +typedef VkFlags VkPipelineDynamicStateCreateFlags; + +typedef enum VkAttachmentDescriptionFlagBits { + VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT = 0x00000001, + VK_ATTACHMENT_DESCRIPTION_RESOLVE_SKIP_TRANSFER_FUNCTION_BIT_KHR = 0x00000002, + VK_ATTACHMENT_DESCRIPTION_RESOLVE_ENABLE_TRANSFER_FUNCTION_BIT_KHR = 0x00000004, + VK_ATTACHMENT_DESCRIPTION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkAttachmentDescriptionFlagBits; +typedef VkFlags VkAttachmentDescriptionFlags; + +typedef enum VkFramebufferCreateFlagBits { + VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT = 0x00000001, + VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR = VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT, + VK_FRAMEBUFFER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkFramebufferCreateFlagBits; +typedef VkFlags VkFramebufferCreateFlags; + +typedef enum VkRenderPassCreateFlagBits { + VK_RENDER_PASS_CREATE_TRANSFORM_BIT_QCOM = 0x00000002, + VK_RENDER_PASS_CREATE_PER_LAYER_FRAGMENT_DENSITY_BIT_VALVE = 0x00000004, + VK_RENDER_PASS_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkRenderPassCreateFlagBits; +typedef VkFlags VkRenderPassCreateFlags; + +typedef enum VkSubpassDescriptionFlagBits { + VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX = 0x00000001, + VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX = 0x00000002, + VK_SUBPASS_DESCRIPTION_TILE_SHADING_APRON_BIT_QCOM = 0x00000100, + VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_COLOR_ACCESS_BIT_EXT = 0x00000010, + VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_EXT = 0x00000020, + VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_STENCIL_ACCESS_BIT_EXT = 0x00000040, + VK_SUBPASS_DESCRIPTION_ENABLE_LEGACY_DITHERING_BIT_EXT = 0x00000080, + VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_EXT = 0x00000004, + VK_SUBPASS_DESCRIPTION_CUSTOM_RESOLVE_BIT_EXT = 0x00000008, + VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM = VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_EXT, + VK_SUBPASS_DESCRIPTION_SHADER_RESOLVE_BIT_QCOM = VK_SUBPASS_DESCRIPTION_CUSTOM_RESOLVE_BIT_EXT, + VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_COLOR_ACCESS_BIT_ARM = VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_COLOR_ACCESS_BIT_EXT, + VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_ARM = VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_EXT, + VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_STENCIL_ACCESS_BIT_ARM = VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_STENCIL_ACCESS_BIT_EXT, + VK_SUBPASS_DESCRIPTION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSubpassDescriptionFlagBits; +typedef VkFlags VkSubpassDescriptionFlags; + +typedef enum VkStencilFaceFlagBits { + VK_STENCIL_FACE_FRONT_BIT = 0x00000001, + VK_STENCIL_FACE_BACK_BIT = 0x00000002, + VK_STENCIL_FACE_FRONT_AND_BACK = 0x00000003, + // VK_STENCIL_FRONT_AND_BACK is a legacy alias + VK_STENCIL_FRONT_AND_BACK = VK_STENCIL_FACE_FRONT_AND_BACK, + VK_STENCIL_FACE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkStencilFaceFlagBits; +typedef VkFlags VkStencilFaceFlags; +typedef struct VkExtent2D { + uint32_t width; + uint32_t height; +} VkExtent2D; + +typedef struct VkExtent3D { + uint32_t width; + uint32_t height; + uint32_t depth; +} VkExtent3D; + +typedef struct VkOffset2D { + 
int32_t x; + int32_t y; +} VkOffset2D; + +typedef struct VkOffset3D { + int32_t x; + int32_t y; + int32_t z; +} VkOffset3D; + +typedef struct VkRect2D { + VkOffset2D offset; + VkExtent2D extent; +} VkRect2D; + +typedef struct VkBaseInStructure { + VkStructureType sType; + const struct VkBaseInStructure* pNext; +} VkBaseInStructure; + +typedef struct VkBaseOutStructure { + VkStructureType sType; + struct VkBaseOutStructure* pNext; +} VkBaseOutStructure; + +typedef struct VkBufferMemoryBarrier { + VkStructureType sType; + const void* pNext; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; + uint32_t srcQueueFamilyIndex; + uint32_t dstQueueFamilyIndex; + VkBuffer buffer; + VkDeviceSize offset; + VkDeviceSize size; +} VkBufferMemoryBarrier; + +typedef struct VkImageSubresourceRange { + VkImageAspectFlags aspectMask; + uint32_t baseMipLevel; + uint32_t levelCount; + uint32_t baseArrayLayer; + uint32_t layerCount; +} VkImageSubresourceRange; + +typedef struct VkImageMemoryBarrier { + VkStructureType sType; + const void* pNext; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; + VkImageLayout oldLayout; + VkImageLayout newLayout; + uint32_t srcQueueFamilyIndex; + uint32_t dstQueueFamilyIndex; + VkImage image; + VkImageSubresourceRange subresourceRange; +} VkImageMemoryBarrier; + +typedef struct VkMemoryBarrier { + VkStructureType sType; + const void* pNext; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; +} VkMemoryBarrier; + +typedef void* (VKAPI_PTR *PFN_vkAllocationFunction)( + void* pUserData, + size_t size, + size_t alignment, + VkSystemAllocationScope allocationScope); + +typedef void (VKAPI_PTR *PFN_vkFreeFunction)( + void* pUserData, + void* pMemory); + +typedef void (VKAPI_PTR *PFN_vkInternalAllocationNotification)( + void* pUserData, + size_t size, + VkInternalAllocationType allocationType, + VkSystemAllocationScope allocationScope); + +typedef void (VKAPI_PTR *PFN_vkInternalFreeNotification)( + void* pUserData, + size_t size, + VkInternalAllocationType allocationType, + VkSystemAllocationScope allocationScope); + +typedef void* (VKAPI_PTR *PFN_vkReallocationFunction)( + void* pUserData, + void* pOriginal, + size_t size, + size_t alignment, + VkSystemAllocationScope allocationScope); + +typedef void (VKAPI_PTR *PFN_vkVoidFunction)(void); +typedef struct VkAllocationCallbacks { + void* pUserData; + PFN_vkAllocationFunction pfnAllocation; + PFN_vkReallocationFunction pfnReallocation; + PFN_vkFreeFunction pfnFree; + PFN_vkInternalAllocationNotification pfnInternalAllocation; + PFN_vkInternalFreeNotification pfnInternalFree; +} VkAllocationCallbacks; + +typedef struct VkApplicationInfo { + VkStructureType sType; + const void* pNext; + const char* pApplicationName; + uint32_t applicationVersion; + const char* pEngineName; + uint32_t engineVersion; + uint32_t apiVersion; +} VkApplicationInfo; + +typedef struct VkFormatProperties { + VkFormatFeatureFlags linearTilingFeatures; + VkFormatFeatureFlags optimalTilingFeatures; + VkFormatFeatureFlags bufferFeatures; +} VkFormatProperties; + +typedef struct VkImageFormatProperties { + VkExtent3D maxExtent; + uint32_t maxMipLevels; + uint32_t maxArrayLayers; + VkSampleCountFlags sampleCounts; + VkDeviceSize maxResourceSize; +} VkImageFormatProperties; + +typedef struct VkInstanceCreateInfo { + VkStructureType sType; + const void* pNext; + VkInstanceCreateFlags flags; + const VkApplicationInfo* pApplicationInfo; + uint32_t enabledLayerCount; + const char* const* ppEnabledLayerNames; + uint32_t 
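+/*
+ * Usage sketch for VkImageMemoryBarrier (editorial comment, not part of the
+ * upstream header): a typical layout transition before a transfer write.
+ * "cmd" and "image" are assumed handles, and vkCmdPipelineBarrier is only
+ * declared further down in this header as a PFN_ typedef:
+ *
+ *     VkImageMemoryBarrier barrier = { 0 };
+ *     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
+ *     barrier.srcAccessMask = 0;
+ *     barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ *     barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ *     barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
+ *     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ *     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
+ *     barrier.image = image;
+ *     barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ *     barrier.subresourceRange.levelCount = 1;
+ *     barrier.subresourceRange.layerCount = 1;
+ *     vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
+ *                          VK_PIPELINE_STAGE_TRANSFER_BIT, 0,
+ *                          0, NULL, 0, NULL, 1, &barrier);
+ */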
enabledExtensionCount; + const char* const* ppEnabledExtensionNames; +} VkInstanceCreateInfo; + +typedef struct VkMemoryHeap { + VkDeviceSize size; + VkMemoryHeapFlags flags; +} VkMemoryHeap; + +typedef struct VkMemoryType { + VkMemoryPropertyFlags propertyFlags; + uint32_t heapIndex; +} VkMemoryType; + +typedef struct VkPhysicalDeviceFeatures { + VkBool32 robustBufferAccess; + VkBool32 fullDrawIndexUint32; + VkBool32 imageCubeArray; + VkBool32 independentBlend; + VkBool32 geometryShader; + VkBool32 tessellationShader; + VkBool32 sampleRateShading; + VkBool32 dualSrcBlend; + VkBool32 logicOp; + VkBool32 multiDrawIndirect; + VkBool32 drawIndirectFirstInstance; + VkBool32 depthClamp; + VkBool32 depthBiasClamp; + VkBool32 fillModeNonSolid; + VkBool32 depthBounds; + VkBool32 wideLines; + VkBool32 largePoints; + VkBool32 alphaToOne; + VkBool32 multiViewport; + VkBool32 samplerAnisotropy; + VkBool32 textureCompressionETC2; + VkBool32 textureCompressionASTC_LDR; + VkBool32 textureCompressionBC; + VkBool32 occlusionQueryPrecise; + VkBool32 pipelineStatisticsQuery; + VkBool32 vertexPipelineStoresAndAtomics; + VkBool32 fragmentStoresAndAtomics; + VkBool32 shaderTessellationAndGeometryPointSize; + VkBool32 shaderImageGatherExtended; + VkBool32 shaderStorageImageExtendedFormats; + VkBool32 shaderStorageImageMultisample; + VkBool32 shaderStorageImageReadWithoutFormat; + VkBool32 shaderStorageImageWriteWithoutFormat; + VkBool32 shaderUniformBufferArrayDynamicIndexing; + VkBool32 shaderSampledImageArrayDynamicIndexing; + VkBool32 shaderStorageBufferArrayDynamicIndexing; + VkBool32 shaderStorageImageArrayDynamicIndexing; + VkBool32 shaderClipDistance; + VkBool32 shaderCullDistance; + VkBool32 shaderFloat64; + VkBool32 shaderInt64; + VkBool32 shaderInt16; + VkBool32 shaderResourceResidency; + VkBool32 shaderResourceMinLod; + VkBool32 sparseBinding; + VkBool32 sparseResidencyBuffer; + VkBool32 sparseResidencyImage2D; + VkBool32 sparseResidencyImage3D; + VkBool32 sparseResidency2Samples; + VkBool32 sparseResidency4Samples; + VkBool32 sparseResidency8Samples; + VkBool32 sparseResidency16Samples; + VkBool32 sparseResidencyAliased; + VkBool32 variableMultisampleRate; + VkBool32 inheritedQueries; +} VkPhysicalDeviceFeatures; + +typedef struct VkPhysicalDeviceLimits { + uint32_t maxImageDimension1D; + uint32_t maxImageDimension2D; + uint32_t maxImageDimension3D; + uint32_t maxImageDimensionCube; + uint32_t maxImageArrayLayers; + uint32_t maxTexelBufferElements; + uint32_t maxUniformBufferRange; + uint32_t maxStorageBufferRange; + uint32_t maxPushConstantsSize; + uint32_t maxMemoryAllocationCount; + uint32_t maxSamplerAllocationCount; + VkDeviceSize bufferImageGranularity; + VkDeviceSize sparseAddressSpaceSize; + uint32_t maxBoundDescriptorSets; + uint32_t maxPerStageDescriptorSamplers; + uint32_t maxPerStageDescriptorUniformBuffers; + uint32_t maxPerStageDescriptorStorageBuffers; + uint32_t maxPerStageDescriptorSampledImages; + uint32_t maxPerStageDescriptorStorageImages; + uint32_t maxPerStageDescriptorInputAttachments; + uint32_t maxPerStageResources; + uint32_t maxDescriptorSetSamplers; + uint32_t maxDescriptorSetUniformBuffers; + uint32_t maxDescriptorSetUniformBuffersDynamic; + uint32_t maxDescriptorSetStorageBuffers; + uint32_t maxDescriptorSetStorageBuffersDynamic; + uint32_t maxDescriptorSetSampledImages; + uint32_t maxDescriptorSetStorageImages; + uint32_t maxDescriptorSetInputAttachments; + uint32_t maxVertexInputAttributes; + uint32_t maxVertexInputBindings; + uint32_t maxVertexInputAttributeOffset; + 
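+/*
+ * Usage sketch for VkInstanceCreateInfo (editorial comment, not part of the
+ * upstream header). "instanceExtensions" and "instanceExtensionCount" are
+ * assumed to come from the window system (e.g. VK_KHR_surface plus a
+ * platform surface extension):
+ *
+ *     VkApplicationInfo app = { 0 };
+ *     app.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
+ *     app.pApplicationName = "ioquake3";
+ *     app.apiVersion = VK_API_VERSION_1_0;
+ *
+ *     VkInstanceCreateInfo ici = { 0 };
+ *     ici.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
+ *     ici.pApplicationInfo = &app;
+ *     ici.enabledExtensionCount = instanceExtensionCount;
+ *     ici.ppEnabledExtensionNames = instanceExtensions;
+ *
+ *     VkInstance instance;
+ *     VkResult res = vkCreateInstance(&ici, NULL, &instance);
+ */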
uint32_t maxVertexInputBindingStride; + uint32_t maxVertexOutputComponents; + uint32_t maxTessellationGenerationLevel; + uint32_t maxTessellationPatchSize; + uint32_t maxTessellationControlPerVertexInputComponents; + uint32_t maxTessellationControlPerVertexOutputComponents; + uint32_t maxTessellationControlPerPatchOutputComponents; + uint32_t maxTessellationControlTotalOutputComponents; + uint32_t maxTessellationEvaluationInputComponents; + uint32_t maxTessellationEvaluationOutputComponents; + uint32_t maxGeometryShaderInvocations; + uint32_t maxGeometryInputComponents; + uint32_t maxGeometryOutputComponents; + uint32_t maxGeometryOutputVertices; + uint32_t maxGeometryTotalOutputComponents; + uint32_t maxFragmentInputComponents; + uint32_t maxFragmentOutputAttachments; + uint32_t maxFragmentDualSrcAttachments; + uint32_t maxFragmentCombinedOutputResources; + uint32_t maxComputeSharedMemorySize; + uint32_t maxComputeWorkGroupCount[3]; + uint32_t maxComputeWorkGroupInvocations; + uint32_t maxComputeWorkGroupSize[3]; + uint32_t subPixelPrecisionBits; + uint32_t subTexelPrecisionBits; + uint32_t mipmapPrecisionBits; + uint32_t maxDrawIndexedIndexValue; + uint32_t maxDrawIndirectCount; + float maxSamplerLodBias; + float maxSamplerAnisotropy; + uint32_t maxViewports; + uint32_t maxViewportDimensions[2]; + float viewportBoundsRange[2]; + uint32_t viewportSubPixelBits; + size_t minMemoryMapAlignment; + VkDeviceSize minTexelBufferOffsetAlignment; + VkDeviceSize minUniformBufferOffsetAlignment; + VkDeviceSize minStorageBufferOffsetAlignment; + int32_t minTexelOffset; + uint32_t maxTexelOffset; + int32_t minTexelGatherOffset; + uint32_t maxTexelGatherOffset; + float minInterpolationOffset; + float maxInterpolationOffset; + uint32_t subPixelInterpolationOffsetBits; + uint32_t maxFramebufferWidth; + uint32_t maxFramebufferHeight; + uint32_t maxFramebufferLayers; + VkSampleCountFlags framebufferColorSampleCounts; + VkSampleCountFlags framebufferDepthSampleCounts; + VkSampleCountFlags framebufferStencilSampleCounts; + VkSampleCountFlags framebufferNoAttachmentsSampleCounts; + uint32_t maxColorAttachments; + VkSampleCountFlags sampledImageColorSampleCounts; + VkSampleCountFlags sampledImageIntegerSampleCounts; + VkSampleCountFlags sampledImageDepthSampleCounts; + VkSampleCountFlags sampledImageStencilSampleCounts; + VkSampleCountFlags storageImageSampleCounts; + uint32_t maxSampleMaskWords; + VkBool32 timestampComputeAndGraphics; + float timestampPeriod; + uint32_t maxClipDistances; + uint32_t maxCullDistances; + uint32_t maxCombinedClipAndCullDistances; + uint32_t discreteQueuePriorities; + float pointSizeRange[2]; + float lineWidthRange[2]; + float pointSizeGranularity; + float lineWidthGranularity; + VkBool32 strictLines; + VkBool32 standardSampleLocations; + VkDeviceSize optimalBufferCopyOffsetAlignment; + VkDeviceSize optimalBufferCopyRowPitchAlignment; + VkDeviceSize nonCoherentAtomSize; +} VkPhysicalDeviceLimits; + +typedef struct VkPhysicalDeviceMemoryProperties { + uint32_t memoryTypeCount; + VkMemoryType memoryTypes[VK_MAX_MEMORY_TYPES]; + uint32_t memoryHeapCount; + VkMemoryHeap memoryHeaps[VK_MAX_MEMORY_HEAPS]; +} VkPhysicalDeviceMemoryProperties; + +typedef struct VkPhysicalDeviceSparseProperties { + VkBool32 residencyStandard2DBlockShape; + VkBool32 residencyStandard2DMultisampleBlockShape; + VkBool32 residencyStandard3DBlockShape; + VkBool32 residencyAlignedMipSize; + VkBool32 residencyNonResidentStrict; +} VkPhysicalDeviceSparseProperties; + +typedef struct VkPhysicalDeviceProperties { + 
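+/*
+ * Usage sketch for VkPhysicalDeviceMemoryProperties (editorial comment, not
+ * part of the upstream header): allocating memory means picking an index
+ * whose bit is set in VkMemoryRequirements::memoryTypeBits and whose
+ * propertyFlags contain everything the caller needs. A hypothetical helper:
+ *
+ *     static uint32_t FindMemoryType(
+ *             const VkPhysicalDeviceMemoryProperties *props,
+ *             uint32_t memoryTypeBits, VkMemoryPropertyFlags wanted)
+ *     {
+ *         uint32_t i;
+ *         for (i = 0; i < props->memoryTypeCount; i++) {
+ *             if ((memoryTypeBits & (1u << i)) &&
+ *                 (props->memoryTypes[i].propertyFlags & wanted) == wanted)
+ *                 return i;
+ *         }
+ *         return UINT32_MAX;  // no suitable memory type
+ *     }
+ *
+ * The returned index goes into VkMemoryAllocateInfo::memoryTypeIndex
+ * (declared further down).
+ */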
uint32_t apiVersion; + uint32_t driverVersion; + uint32_t vendorID; + uint32_t deviceID; + VkPhysicalDeviceType deviceType; + char deviceName[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE]; + uint8_t pipelineCacheUUID[VK_UUID_SIZE]; + VkPhysicalDeviceLimits limits; + VkPhysicalDeviceSparseProperties sparseProperties; +} VkPhysicalDeviceProperties; + +typedef struct VkQueueFamilyProperties { + VkQueueFlags queueFlags; + uint32_t queueCount; + uint32_t timestampValidBits; + VkExtent3D minImageTransferGranularity; +} VkQueueFamilyProperties; + +typedef struct VkDeviceQueueCreateInfo { + VkStructureType sType; + const void* pNext; + VkDeviceQueueCreateFlags flags; + uint32_t queueFamilyIndex; + uint32_t queueCount; + const float* pQueuePriorities; +} VkDeviceQueueCreateInfo; + +typedef struct VkDeviceCreateInfo { + VkStructureType sType; + const void* pNext; + VkDeviceCreateFlags flags; + uint32_t queueCreateInfoCount; + const VkDeviceQueueCreateInfo* pQueueCreateInfos; + // enabledLayerCount is legacy and should not be used + uint32_t enabledLayerCount; + // ppEnabledLayerNames is legacy and should not be used + const char* const* ppEnabledLayerNames; + uint32_t enabledExtensionCount; + const char* const* ppEnabledExtensionNames; + const VkPhysicalDeviceFeatures* pEnabledFeatures; +} VkDeviceCreateInfo; + +typedef struct VkExtensionProperties { + char extensionName[VK_MAX_EXTENSION_NAME_SIZE]; + uint32_t specVersion; +} VkExtensionProperties; + +typedef struct VkLayerProperties { + char layerName[VK_MAX_EXTENSION_NAME_SIZE]; + uint32_t specVersion; + uint32_t implementationVersion; + char description[VK_MAX_DESCRIPTION_SIZE]; +} VkLayerProperties; + +typedef struct VkSubmitInfo { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreCount; + const VkSemaphore* pWaitSemaphores; + const VkPipelineStageFlags* pWaitDstStageMask; + uint32_t commandBufferCount; + const VkCommandBuffer* pCommandBuffers; + uint32_t signalSemaphoreCount; + const VkSemaphore* pSignalSemaphores; +} VkSubmitInfo; + +typedef struct VkMappedMemoryRange { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; + VkDeviceSize offset; + VkDeviceSize size; +} VkMappedMemoryRange; + +typedef struct VkMemoryAllocateInfo { + VkStructureType sType; + const void* pNext; + VkDeviceSize allocationSize; + uint32_t memoryTypeIndex; +} VkMemoryAllocateInfo; + +typedef struct VkMemoryRequirements { + VkDeviceSize size; + VkDeviceSize alignment; + uint32_t memoryTypeBits; +} VkMemoryRequirements; + +typedef struct VkSparseMemoryBind { + VkDeviceSize resourceOffset; + VkDeviceSize size; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; + VkSparseMemoryBindFlags flags; +} VkSparseMemoryBind; + +typedef struct VkSparseBufferMemoryBindInfo { + VkBuffer buffer; + uint32_t bindCount; + const VkSparseMemoryBind* pBinds; +} VkSparseBufferMemoryBindInfo; + +typedef struct VkSparseImageOpaqueMemoryBindInfo { + VkImage image; + uint32_t bindCount; + const VkSparseMemoryBind* pBinds; +} VkSparseImageOpaqueMemoryBindInfo; + +typedef struct VkImageSubresource { + VkImageAspectFlags aspectMask; + uint32_t mipLevel; + uint32_t arrayLayer; +} VkImageSubresource; + +typedef struct VkSparseImageMemoryBind { + VkImageSubresource subresource; + VkOffset3D offset; + VkExtent3D extent; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; + VkSparseMemoryBindFlags flags; +} VkSparseImageMemoryBind; + +typedef struct VkSparseImageMemoryBindInfo { + VkImage image; + uint32_t bindCount; + const VkSparseImageMemoryBind* pBinds; +} 
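+/*
+ * Usage sketch for VkDeviceCreateInfo (editorial comment, not part of the
+ * upstream header). "physicalDevice" and "gfxFamily" (a queue family whose
+ * queueFlags include VK_QUEUE_GRAPHICS_BIT) are assumed to have been chosen
+ * already:
+ *
+ *     float prio = 1.0f;
+ *     VkDeviceQueueCreateInfo qci = { 0 };
+ *     qci.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+ *     qci.queueFamilyIndex = gfxFamily;
+ *     qci.queueCount = 1;
+ *     qci.pQueuePriorities = &prio;
+ *
+ *     const char *deviceExtensions[] = { "VK_KHR_swapchain" };
+ *     VkDeviceCreateInfo dci = { 0 };
+ *     dci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
+ *     dci.queueCreateInfoCount = 1;
+ *     dci.pQueueCreateInfos = &qci;
+ *     dci.enabledExtensionCount = 1;
+ *     dci.ppEnabledExtensionNames = deviceExtensions;
+ *
+ *     VkDevice device;
+ *     VkQueue queue;
+ *     vkCreateDevice(physicalDevice, &dci, NULL, &device);
+ *     vkGetDeviceQueue(device, gfxFamily, 0, &queue);
+ */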
VkSparseImageMemoryBindInfo; + +typedef struct VkBindSparseInfo { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreCount; + const VkSemaphore* pWaitSemaphores; + uint32_t bufferBindCount; + const VkSparseBufferMemoryBindInfo* pBufferBinds; + uint32_t imageOpaqueBindCount; + const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds; + uint32_t imageBindCount; + const VkSparseImageMemoryBindInfo* pImageBinds; + uint32_t signalSemaphoreCount; + const VkSemaphore* pSignalSemaphores; +} VkBindSparseInfo; + +typedef struct VkSparseImageFormatProperties { + VkImageAspectFlags aspectMask; + VkExtent3D imageGranularity; + VkSparseImageFormatFlags flags; +} VkSparseImageFormatProperties; + +typedef struct VkSparseImageMemoryRequirements { + VkSparseImageFormatProperties formatProperties; + uint32_t imageMipTailFirstLod; + VkDeviceSize imageMipTailSize; + VkDeviceSize imageMipTailOffset; + VkDeviceSize imageMipTailStride; +} VkSparseImageMemoryRequirements; + +typedef struct VkFenceCreateInfo { + VkStructureType sType; + const void* pNext; + VkFenceCreateFlags flags; +} VkFenceCreateInfo; + +typedef struct VkSemaphoreCreateInfo { + VkStructureType sType; + const void* pNext; + VkSemaphoreCreateFlags flags; +} VkSemaphoreCreateInfo; + +typedef struct VkQueryPoolCreateInfo { + VkStructureType sType; + const void* pNext; + VkQueryPoolCreateFlags flags; + VkQueryType queryType; + uint32_t queryCount; + VkQueryPipelineStatisticFlags pipelineStatistics; +} VkQueryPoolCreateInfo; + +typedef struct VkBufferCreateInfo { + VkStructureType sType; + const void* pNext; + VkBufferCreateFlags flags; + VkDeviceSize size; + VkBufferUsageFlags usage; + VkSharingMode sharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; +} VkBufferCreateInfo; + +typedef struct VkImageCreateInfo { + VkStructureType sType; + const void* pNext; + VkImageCreateFlags flags; + VkImageType imageType; + VkFormat format; + VkExtent3D extent; + uint32_t mipLevels; + uint32_t arrayLayers; + VkSampleCountFlagBits samples; + VkImageTiling tiling; + VkImageUsageFlags usage; + VkSharingMode sharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; + VkImageLayout initialLayout; +} VkImageCreateInfo; + +typedef struct VkSubresourceLayout { + VkDeviceSize offset; + VkDeviceSize size; + VkDeviceSize rowPitch; + VkDeviceSize arrayPitch; + VkDeviceSize depthPitch; +} VkSubresourceLayout; + +typedef struct VkComponentMapping { + VkComponentSwizzle r; + VkComponentSwizzle g; + VkComponentSwizzle b; + VkComponentSwizzle a; +} VkComponentMapping; + +typedef struct VkImageViewCreateInfo { + VkStructureType sType; + const void* pNext; + VkImageViewCreateFlags flags; + VkImage image; + VkImageViewType viewType; + VkFormat format; + VkComponentMapping components; + VkImageSubresourceRange subresourceRange; +} VkImageViewCreateInfo; + +typedef struct VkCommandPoolCreateInfo { + VkStructureType sType; + const void* pNext; + VkCommandPoolCreateFlags flags; + uint32_t queueFamilyIndex; +} VkCommandPoolCreateInfo; + +typedef struct VkCommandBufferAllocateInfo { + VkStructureType sType; + const void* pNext; + VkCommandPool commandPool; + VkCommandBufferLevel level; + uint32_t commandBufferCount; +} VkCommandBufferAllocateInfo; + +typedef struct VkCommandBufferInheritanceInfo { + VkStructureType sType; + const void* pNext; + VkRenderPass renderPass; + uint32_t subpass; + VkFramebuffer framebuffer; + VkBool32 occlusionQueryEnable; + VkQueryControlFlags queryFlags; + 
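+/*
+ * Usage sketch for VkCommandPoolCreateInfo / VkCommandBufferAllocateInfo
+ * (editorial comment, not part of the upstream header). "device" and
+ * "gfxFamily" are assumed handles; the create/allocate entry points appear
+ * later in this header as PFN_ typedefs:
+ *
+ *     VkCommandPool pool;
+ *     VkCommandPoolCreateInfo pci = { 0 };
+ *     pci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
+ *     pci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
+ *     pci.queueFamilyIndex = gfxFamily;
+ *     vkCreateCommandPool(device, &pci, NULL, &pool);
+ *
+ *     VkCommandBuffer cmd;
+ *     VkCommandBufferAllocateInfo cai = { 0 };
+ *     cai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
+ *     cai.commandPool = pool;
+ *     cai.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
+ *     cai.commandBufferCount = 1;
+ *     vkAllocateCommandBuffers(device, &cai, &cmd);
+ */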
VkQueryPipelineStatisticFlags pipelineStatistics; +} VkCommandBufferInheritanceInfo; + +typedef struct VkCommandBufferBeginInfo { + VkStructureType sType; + const void* pNext; + VkCommandBufferUsageFlags flags; + const VkCommandBufferInheritanceInfo* pInheritanceInfo; +} VkCommandBufferBeginInfo; + +typedef struct VkBufferCopy { + VkDeviceSize srcOffset; + VkDeviceSize dstOffset; + VkDeviceSize size; +} VkBufferCopy; + +typedef struct VkImageSubresourceLayers { + VkImageAspectFlags aspectMask; + uint32_t mipLevel; + uint32_t baseArrayLayer; + uint32_t layerCount; +} VkImageSubresourceLayers; + +typedef struct VkBufferImageCopy { + VkDeviceSize bufferOffset; + uint32_t bufferRowLength; + uint32_t bufferImageHeight; + VkImageSubresourceLayers imageSubresource; + VkOffset3D imageOffset; + VkExtent3D imageExtent; +} VkBufferImageCopy; + +typedef struct VkImageCopy { + VkImageSubresourceLayers srcSubresource; + VkOffset3D srcOffset; + VkImageSubresourceLayers dstSubresource; + VkOffset3D dstOffset; + VkExtent3D extent; +} VkImageCopy; + +typedef struct VkDispatchIndirectCommand { + uint32_t x; + uint32_t y; + uint32_t z; +} VkDispatchIndirectCommand; + +typedef struct VkPipelineCacheHeaderVersionOne { + uint32_t headerSize; + VkPipelineCacheHeaderVersion headerVersion; + uint32_t vendorID; + uint32_t deviceID; + uint8_t pipelineCacheUUID[VK_UUID_SIZE]; +} VkPipelineCacheHeaderVersionOne; + +typedef struct VkEventCreateInfo { + VkStructureType sType; + const void* pNext; + VkEventCreateFlags flags; +} VkEventCreateInfo; + +typedef struct VkBufferViewCreateInfo { + VkStructureType sType; + const void* pNext; + VkBufferViewCreateFlags flags; + VkBuffer buffer; + VkFormat format; + VkDeviceSize offset; + VkDeviceSize range; +} VkBufferViewCreateInfo; + +typedef struct VkShaderModuleCreateInfo { + VkStructureType sType; + const void* pNext; + VkShaderModuleCreateFlags flags; + size_t codeSize; + const uint32_t* pCode; +} VkShaderModuleCreateInfo; + +typedef struct VkPipelineCacheCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineCacheCreateFlags flags; + size_t initialDataSize; + const void* pInitialData; +} VkPipelineCacheCreateInfo; + +typedef struct VkSpecializationMapEntry { + uint32_t constantID; + uint32_t offset; + size_t size; +} VkSpecializationMapEntry; + +typedef struct VkSpecializationInfo { + uint32_t mapEntryCount; + const VkSpecializationMapEntry* pMapEntries; + size_t dataSize; + const void* pData; +} VkSpecializationInfo; + +typedef struct VkPipelineShaderStageCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineShaderStageCreateFlags flags; + VkShaderStageFlagBits stage; + VkShaderModule module; + const char* pName; + const VkSpecializationInfo* pSpecializationInfo; +} VkPipelineShaderStageCreateInfo; + +typedef struct VkComputePipelineCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineCreateFlags flags; + VkPipelineShaderStageCreateInfo stage; + VkPipelineLayout layout; + VkPipeline basePipelineHandle; + int32_t basePipelineIndex; +} VkComputePipelineCreateInfo; + +typedef struct VkPushConstantRange { + VkShaderStageFlags stageFlags; + uint32_t offset; + uint32_t size; +} VkPushConstantRange; + +typedef struct VkPipelineLayoutCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineLayoutCreateFlags flags; + uint32_t setLayoutCount; + const VkDescriptorSetLayout* pSetLayouts; + uint32_t pushConstantRangeCount; + const VkPushConstantRange* pPushConstantRanges; +} VkPipelineLayoutCreateInfo; + +typedef struct 
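+/*
+ * Usage sketch for VkShaderModuleCreateInfo (editorial comment, not part of
+ * the upstream header). Note that codeSize is in bytes even though pCode
+ * points at 32-bit SPIR-V words; "spirvBytes" and "spirvWords" are assumed
+ * to hold a loaded SPIR-V binary:
+ *
+ *     VkShaderModuleCreateInfo smci = { 0 };
+ *     smci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
+ *     smci.codeSize = spirvBytes;               // multiple of 4
+ *     smci.pCode = (const uint32_t *)spirvWords;
+ *
+ *     VkShaderModule module;
+ *     vkCreateShaderModule(device, &smci, NULL, &module);
+ *
+ * The module is then referenced by VkPipelineShaderStageCreateInfo::module
+ * (declared above), with pName naming the entry point, typically "main".
+ */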
VkSamplerCreateInfo { + VkStructureType sType; + const void* pNext; + VkSamplerCreateFlags flags; + VkFilter magFilter; + VkFilter minFilter; + VkSamplerMipmapMode mipmapMode; + VkSamplerAddressMode addressModeU; + VkSamplerAddressMode addressModeV; + VkSamplerAddressMode addressModeW; + float mipLodBias; + VkBool32 anisotropyEnable; + float maxAnisotropy; + VkBool32 compareEnable; + VkCompareOp compareOp; + float minLod; + float maxLod; + VkBorderColor borderColor; + VkBool32 unnormalizedCoordinates; +} VkSamplerCreateInfo; + +typedef struct VkCopyDescriptorSet { + VkStructureType sType; + const void* pNext; + VkDescriptorSet srcSet; + uint32_t srcBinding; + uint32_t srcArrayElement; + VkDescriptorSet dstSet; + uint32_t dstBinding; + uint32_t dstArrayElement; + uint32_t descriptorCount; +} VkCopyDescriptorSet; + +typedef struct VkDescriptorBufferInfo { + VkBuffer buffer; + VkDeviceSize offset; + VkDeviceSize range; +} VkDescriptorBufferInfo; + +typedef struct VkDescriptorImageInfo { + VkSampler sampler; + VkImageView imageView; + VkImageLayout imageLayout; +} VkDescriptorImageInfo; + +typedef struct VkDescriptorPoolSize { + VkDescriptorType type; + uint32_t descriptorCount; +} VkDescriptorPoolSize; + +typedef struct VkDescriptorPoolCreateInfo { + VkStructureType sType; + const void* pNext; + VkDescriptorPoolCreateFlags flags; + uint32_t maxSets; + uint32_t poolSizeCount; + const VkDescriptorPoolSize* pPoolSizes; +} VkDescriptorPoolCreateInfo; + +typedef struct VkDescriptorSetAllocateInfo { + VkStructureType sType; + const void* pNext; + VkDescriptorPool descriptorPool; + uint32_t descriptorSetCount; + const VkDescriptorSetLayout* pSetLayouts; +} VkDescriptorSetAllocateInfo; + +typedef struct VkDescriptorSetLayoutBinding { + uint32_t binding; + VkDescriptorType descriptorType; + uint32_t descriptorCount; + VkShaderStageFlags stageFlags; + const VkSampler* pImmutableSamplers; +} VkDescriptorSetLayoutBinding; + +typedef struct VkDescriptorSetLayoutCreateInfo { + VkStructureType sType; + const void* pNext; + VkDescriptorSetLayoutCreateFlags flags; + uint32_t bindingCount; + const VkDescriptorSetLayoutBinding* pBindings; +} VkDescriptorSetLayoutCreateInfo; + +typedef struct VkWriteDescriptorSet { + VkStructureType sType; + const void* pNext; + VkDescriptorSet dstSet; + uint32_t dstBinding; + uint32_t dstArrayElement; + uint32_t descriptorCount; + VkDescriptorType descriptorType; + const VkDescriptorImageInfo* pImageInfo; + const VkDescriptorBufferInfo* pBufferInfo; + const VkBufferView* pTexelBufferView; +} VkWriteDescriptorSet; + +typedef union VkClearColorValue { + float float32[4]; + int32_t int32[4]; + uint32_t uint32[4]; +} VkClearColorValue; + +typedef struct VkDrawIndexedIndirectCommand { + uint32_t indexCount; + uint32_t instanceCount; + uint32_t firstIndex; + int32_t vertexOffset; + uint32_t firstInstance; +} VkDrawIndexedIndirectCommand; + +typedef struct VkDrawIndirectCommand { + uint32_t vertexCount; + uint32_t instanceCount; + uint32_t firstVertex; + uint32_t firstInstance; +} VkDrawIndirectCommand; + +typedef struct VkVertexInputBindingDescription { + uint32_t binding; + uint32_t stride; + VkVertexInputRate inputRate; +} VkVertexInputBindingDescription; + +typedef struct VkVertexInputAttributeDescription { + uint32_t location; + uint32_t binding; + VkFormat format; + uint32_t offset; +} VkVertexInputAttributeDescription; + +typedef struct VkPipelineVertexInputStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineVertexInputStateCreateFlags flags; + uint32_t 
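+/*
+ * Usage sketch for VkWriteDescriptorSet (editorial comment, not part of the
+ * upstream header): binding a combined image sampler to binding 0 of an
+ * already-allocated set. "sampler", "view" and "set" are assumed handles:
+ *
+ *     VkDescriptorImageInfo img = { 0 };
+ *     img.sampler = sampler;
+ *     img.imageView = view;
+ *     img.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+ *
+ *     VkWriteDescriptorSet w = { 0 };
+ *     w.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ *     w.dstSet = set;
+ *     w.dstBinding = 0;
+ *     w.descriptorCount = 1;
+ *     w.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ *     w.pImageInfo = &img;
+ *     vkUpdateDescriptorSets(device, 1, &w, 0, NULL);
+ *
+ * Only the pointer matching descriptorType (pImageInfo here) is read; the
+ * other two arrays are ignored.
+ */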
vertexBindingDescriptionCount; + const VkVertexInputBindingDescription* pVertexBindingDescriptions; + uint32_t vertexAttributeDescriptionCount; + const VkVertexInputAttributeDescription* pVertexAttributeDescriptions; +} VkPipelineVertexInputStateCreateInfo; + +typedef struct VkPipelineInputAssemblyStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineInputAssemblyStateCreateFlags flags; + VkPrimitiveTopology topology; + VkBool32 primitiveRestartEnable; +} VkPipelineInputAssemblyStateCreateInfo; + +typedef struct VkPipelineTessellationStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineTessellationStateCreateFlags flags; + uint32_t patchControlPoints; +} VkPipelineTessellationStateCreateInfo; + +typedef struct VkViewport { + float x; + float y; + float width; + float height; + float minDepth; + float maxDepth; +} VkViewport; + +typedef struct VkPipelineViewportStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineViewportStateCreateFlags flags; + uint32_t viewportCount; + const VkViewport* pViewports; + uint32_t scissorCount; + const VkRect2D* pScissors; +} VkPipelineViewportStateCreateInfo; + +typedef struct VkPipelineRasterizationStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineRasterizationStateCreateFlags flags; + VkBool32 depthClampEnable; + VkBool32 rasterizerDiscardEnable; + VkPolygonMode polygonMode; + VkCullModeFlags cullMode; + VkFrontFace frontFace; + VkBool32 depthBiasEnable; + float depthBiasConstantFactor; + float depthBiasClamp; + float depthBiasSlopeFactor; + float lineWidth; +} VkPipelineRasterizationStateCreateInfo; + +typedef struct VkPipelineMultisampleStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineMultisampleStateCreateFlags flags; + VkSampleCountFlagBits rasterizationSamples; + VkBool32 sampleShadingEnable; + float minSampleShading; + const VkSampleMask* pSampleMask; + VkBool32 alphaToCoverageEnable; + VkBool32 alphaToOneEnable; +} VkPipelineMultisampleStateCreateInfo; + +typedef struct VkStencilOpState { + VkStencilOp failOp; + VkStencilOp passOp; + VkStencilOp depthFailOp; + VkCompareOp compareOp; + uint32_t compareMask; + uint32_t writeMask; + uint32_t reference; +} VkStencilOpState; + +typedef struct VkPipelineDepthStencilStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineDepthStencilStateCreateFlags flags; + VkBool32 depthTestEnable; + VkBool32 depthWriteEnable; + VkCompareOp depthCompareOp; + VkBool32 depthBoundsTestEnable; + VkBool32 stencilTestEnable; + VkStencilOpState front; + VkStencilOpState back; + float minDepthBounds; + float maxDepthBounds; +} VkPipelineDepthStencilStateCreateInfo; + +typedef struct VkPipelineColorBlendAttachmentState { + VkBool32 blendEnable; + VkBlendFactor srcColorBlendFactor; + VkBlendFactor dstColorBlendFactor; + VkBlendOp colorBlendOp; + VkBlendFactor srcAlphaBlendFactor; + VkBlendFactor dstAlphaBlendFactor; + VkBlendOp alphaBlendOp; + VkColorComponentFlags colorWriteMask; +} VkPipelineColorBlendAttachmentState; + +typedef struct VkPipelineColorBlendStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineColorBlendStateCreateFlags flags; + VkBool32 logicOpEnable; + VkLogicOp logicOp; + uint32_t attachmentCount; + const VkPipelineColorBlendAttachmentState* pAttachments; + float blendConstants[4]; +} VkPipelineColorBlendStateCreateInfo; + +typedef struct VkPipelineDynamicStateCreateInfo { + VkStructureType sType; + const void* pNext; + 
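+/*
+ * Usage sketch for VkPipelineColorBlendAttachmentState (editorial comment,
+ * not part of the upstream header): conventional alpha blending for one
+ * color attachment:
+ *
+ *     VkPipelineColorBlendAttachmentState blend = { 0 };
+ *     blend.blendEnable = VK_TRUE;
+ *     blend.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA;
+ *     blend.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
+ *     blend.colorBlendOp = VK_BLEND_OP_ADD;
+ *     blend.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
+ *     blend.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
+ *     blend.alphaBlendOp = VK_BLEND_OP_ADD;
+ *     blend.colorWriteMask = VK_COLOR_COMPONENT_R_BIT |
+ *                            VK_COLOR_COMPONENT_G_BIT |
+ *                            VK_COLOR_COMPONENT_B_BIT |
+ *                            VK_COLOR_COMPONENT_A_BIT;
+ *
+ * One such state goes into VkPipelineColorBlendStateCreateInfo::pAttachments
+ * per color attachment of the subpass.
+ */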
VkPipelineDynamicStateCreateFlags flags; + uint32_t dynamicStateCount; + const VkDynamicState* pDynamicStates; +} VkPipelineDynamicStateCreateInfo; + +typedef struct VkGraphicsPipelineCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineCreateFlags flags; + uint32_t stageCount; + const VkPipelineShaderStageCreateInfo* pStages; + const VkPipelineVertexInputStateCreateInfo* pVertexInputState; + const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState; + const VkPipelineTessellationStateCreateInfo* pTessellationState; + const VkPipelineViewportStateCreateInfo* pViewportState; + const VkPipelineRasterizationStateCreateInfo* pRasterizationState; + const VkPipelineMultisampleStateCreateInfo* pMultisampleState; + const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState; + const VkPipelineColorBlendStateCreateInfo* pColorBlendState; + const VkPipelineDynamicStateCreateInfo* pDynamicState; + VkPipelineLayout layout; + VkRenderPass renderPass; + uint32_t subpass; + VkPipeline basePipelineHandle; + int32_t basePipelineIndex; +} VkGraphicsPipelineCreateInfo; + +typedef struct VkAttachmentDescription { + VkAttachmentDescriptionFlags flags; + VkFormat format; + VkSampleCountFlagBits samples; + VkAttachmentLoadOp loadOp; + VkAttachmentStoreOp storeOp; + VkAttachmentLoadOp stencilLoadOp; + VkAttachmentStoreOp stencilStoreOp; + VkImageLayout initialLayout; + VkImageLayout finalLayout; +} VkAttachmentDescription; + +typedef struct VkAttachmentReference { + uint32_t attachment; + VkImageLayout layout; +} VkAttachmentReference; + +typedef struct VkFramebufferCreateInfo { + VkStructureType sType; + const void* pNext; + VkFramebufferCreateFlags flags; + VkRenderPass renderPass; + uint32_t attachmentCount; + const VkImageView* pAttachments; + uint32_t width; + uint32_t height; + uint32_t layers; +} VkFramebufferCreateInfo; + +typedef struct VkSubpassDescription { + VkSubpassDescriptionFlags flags; + VkPipelineBindPoint pipelineBindPoint; + uint32_t inputAttachmentCount; + const VkAttachmentReference* pInputAttachments; + uint32_t colorAttachmentCount; + const VkAttachmentReference* pColorAttachments; + const VkAttachmentReference* pResolveAttachments; + const VkAttachmentReference* pDepthStencilAttachment; + uint32_t preserveAttachmentCount; + const uint32_t* pPreserveAttachments; +} VkSubpassDescription; + +typedef struct VkSubpassDependency { + uint32_t srcSubpass; + uint32_t dstSubpass; + VkPipelineStageFlags srcStageMask; + VkPipelineStageFlags dstStageMask; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; + VkDependencyFlags dependencyFlags; +} VkSubpassDependency; + +typedef struct VkRenderPassCreateInfo { + VkStructureType sType; + const void* pNext; + VkRenderPassCreateFlags flags; + uint32_t attachmentCount; + const VkAttachmentDescription* pAttachments; + uint32_t subpassCount; + const VkSubpassDescription* pSubpasses; + uint32_t dependencyCount; + const VkSubpassDependency* pDependencies; +} VkRenderPassCreateInfo; + +typedef struct VkClearDepthStencilValue { + float depth; + uint32_t stencil; +} VkClearDepthStencilValue; + +typedef union VkClearValue { + VkClearColorValue color; + VkClearDepthStencilValue depthStencil; +} VkClearValue; + +typedef struct VkClearAttachment { + VkImageAspectFlags aspectMask; + uint32_t colorAttachment; + VkClearValue clearValue; +} VkClearAttachment; + +typedef struct VkClearRect { + VkRect2D rect; + uint32_t baseArrayLayer; + uint32_t layerCount; +} VkClearRect; + +typedef struct VkImageBlit { + VkImageSubresourceLayers 
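+/*
+ * Usage sketch for VkRenderPassCreateInfo (editorial comment, not part of
+ * the upstream header): a single color attachment that is cleared, drawn
+ * into, and handed to the presentation engine. "swapchainFormat" is an
+ * assumed VkFormat:
+ *
+ *     VkAttachmentDescription color = { 0 };
+ *     color.format = swapchainFormat;
+ *     color.samples = VK_SAMPLE_COUNT_1_BIT;
+ *     color.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+ *     color.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ *     color.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ *     color.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
+ *
+ *     VkAttachmentReference ref = { 0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL };
+ *     VkSubpassDescription sub = { 0 };
+ *     sub.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+ *     sub.colorAttachmentCount = 1;
+ *     sub.pColorAttachments = &ref;
+ *
+ *     VkRenderPassCreateInfo rpci = { 0 };
+ *     rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
+ *     rpci.attachmentCount = 1;
+ *     rpci.pAttachments = &color;
+ *     rpci.subpassCount = 1;
+ *     rpci.pSubpasses = &sub;
+ *     vkCreateRenderPass(device, &rpci, NULL, &renderPass);
+ */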
+typedef struct VkImageBlit {
+    VkImageSubresourceLayers srcSubresource;
+    VkOffset3D srcOffsets[2];
+    VkImageSubresourceLayers dstSubresource;
+    VkOffset3D dstOffsets[2];
+} VkImageBlit;
+
+typedef struct VkImageResolve {
+    VkImageSubresourceLayers srcSubresource;
+    VkOffset3D srcOffset;
+    VkImageSubresourceLayers dstSubresource;
+    VkOffset3D dstOffset;
+    VkExtent3D extent;
+} VkImageResolve;
+
+typedef struct VkRenderPassBeginInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkRenderPass renderPass;
+    VkFramebuffer framebuffer;
+    VkRect2D renderArea;
+    uint32_t clearValueCount;
+    const VkClearValue* pClearValues;
+} VkRenderPassBeginInfo;
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateInstance)(const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance);
+typedef void (VKAPI_PTR *PFN_vkDestroyInstance)(VkInstance instance, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDevices)(VkInstance instance, uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures* pFeatures);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties* pFormatProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkImageFormatProperties* pImageFormatProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties* pProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties* pQueueFamilyProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* pMemoryProperties);
+typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetInstanceProcAddr)(VkInstance instance, const char* pName);
+typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetDeviceProcAddr)(VkDevice device, const char* pName);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDevice)(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice);
+typedef void (VKAPI_PTR *PFN_vkDestroyDevice)(VkDevice device, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceExtensionProperties)(const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumerateDeviceExtensionProperties)(VkPhysicalDevice physicalDevice, const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceLayerProperties)(uint32_t* pPropertyCount, VkLayerProperties* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumerateDeviceLayerProperties)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkLayerProperties* pProperties);
+typedef void (VKAPI_PTR *PFN_vkGetDeviceQueue)(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue* pQueue);
+typedef VkResult (VKAPI_PTR *PFN_vkQueueSubmit)(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence);
+typedef VkResult (VKAPI_PTR *PFN_vkQueueWaitIdle)(VkQueue queue);
+typedef VkResult
(VKAPI_PTR *PFN_vkDeviceWaitIdle)(VkDevice device); +typedef VkResult (VKAPI_PTR *PFN_vkAllocateMemory)(VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo, const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory); +typedef void (VKAPI_PTR *PFN_vkFreeMemory)(VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkMapMemory)(VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void** ppData); +typedef void (VKAPI_PTR *PFN_vkUnmapMemory)(VkDevice device, VkDeviceMemory memory); +typedef VkResult (VKAPI_PTR *PFN_vkFlushMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges); +typedef VkResult (VKAPI_PTR *PFN_vkInvalidateMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges); +typedef void (VKAPI_PTR *PFN_vkGetDeviceMemoryCommitment)(VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes); +typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory)(VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset); +typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory)(VkDevice device, VkImage image, VkDeviceMemory memory, VkDeviceSize memoryOffset); +typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements)(VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements)(VkDevice device, VkImage image, VkMemoryRequirements* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements)(VkDevice device, VkImage image, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements* pSparseMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkSampleCountFlagBits samples, VkImageUsageFlags usage, VkImageTiling tiling, uint32_t* pPropertyCount, VkSparseImageFormatProperties* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkQueueBindSparse)(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence); +typedef VkResult (VKAPI_PTR *PFN_vkCreateFence)(VkDevice device, const VkFenceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence); +typedef void (VKAPI_PTR *PFN_vkDestroyFence)(VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkResetFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences); +typedef VkResult (VKAPI_PTR *PFN_vkGetFenceStatus)(VkDevice device, VkFence fence); +typedef VkResult (VKAPI_PTR *PFN_vkWaitForFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout); +typedef VkResult (VKAPI_PTR *PFN_vkCreateSemaphore)(VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSemaphore* pSemaphore); +typedef void (VKAPI_PTR *PFN_vkDestroySemaphore)(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateQueryPool)(VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool); +typedef void (VKAPI_PTR *PFN_vkDestroyQueryPool)(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR 
*PFN_vkGetQueryPoolResults)(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags); +typedef VkResult (VKAPI_PTR *PFN_vkCreateBuffer)(VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer); +typedef void (VKAPI_PTR *PFN_vkDestroyBuffer)(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateImage)(VkDevice device, const VkImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImage* pImage); +typedef void (VKAPI_PTR *PFN_vkDestroyImage)(VkDevice device, VkImage image, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkGetImageSubresourceLayout)(VkDevice device, VkImage image, const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout); +typedef VkResult (VKAPI_PTR *PFN_vkCreateImageView)(VkDevice device, const VkImageViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImageView* pView); +typedef void (VKAPI_PTR *PFN_vkDestroyImageView)(VkDevice device, VkImageView imageView, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateCommandPool)(VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool); +typedef void (VKAPI_PTR *PFN_vkDestroyCommandPool)(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkResetCommandPool)(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags); +typedef VkResult (VKAPI_PTR *PFN_vkAllocateCommandBuffers)(VkDevice device, const VkCommandBufferAllocateInfo* pAllocateInfo, VkCommandBuffer* pCommandBuffers); +typedef void (VKAPI_PTR *PFN_vkFreeCommandBuffers)(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers); +typedef VkResult (VKAPI_PTR *PFN_vkBeginCommandBuffer)(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo); +typedef VkResult (VKAPI_PTR *PFN_vkEndCommandBuffer)(VkCommandBuffer commandBuffer); +typedef VkResult (VKAPI_PTR *PFN_vkResetCommandBuffer)(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags); +typedef void (VKAPI_PTR *PFN_vkCmdCopyBuffer)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdCopyImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdCopyBufferToImage)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdCopyImageToBuffer)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdUpdateBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void* pData); +typedef void (VKAPI_PTR *PFN_vkCmdFillBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data); +typedef void (VKAPI_PTR 
*PFN_vkCmdPipelineBarrier)(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers); +typedef void (VKAPI_PTR *PFN_vkCmdBeginQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags); +typedef void (VKAPI_PTR *PFN_vkCmdEndQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query); +typedef void (VKAPI_PTR *PFN_vkCmdResetQueryPool)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount); +typedef void (VKAPI_PTR *PFN_vkCmdWriteTimestamp)(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t query); +typedef void (VKAPI_PTR *PFN_vkCmdCopyQueryPoolResults)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags); +typedef void (VKAPI_PTR *PFN_vkCmdExecuteCommands)(VkCommandBuffer commandBuffer, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers); +typedef VkResult (VKAPI_PTR *PFN_vkCreateEvent)(VkDevice device, const VkEventCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkEvent* pEvent); +typedef void (VKAPI_PTR *PFN_vkDestroyEvent)(VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetEventStatus)(VkDevice device, VkEvent event); +typedef VkResult (VKAPI_PTR *PFN_vkSetEvent)(VkDevice device, VkEvent event); +typedef VkResult (VKAPI_PTR *PFN_vkResetEvent)(VkDevice device, VkEvent event); +typedef VkResult (VKAPI_PTR *PFN_vkCreateBufferView)(VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferView* pView); +typedef void (VKAPI_PTR *PFN_vkDestroyBufferView)(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateShaderModule)(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule); +typedef void (VKAPI_PTR *PFN_vkDestroyShaderModule)(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineCache)(VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineCache* pPipelineCache); +typedef void (VKAPI_PTR *PFN_vkDestroyPipelineCache)(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineCacheData)(VkDevice device, VkPipelineCache pipelineCache, size_t* pDataSize, void* pData); +typedef VkResult (VKAPI_PTR *PFN_vkMergePipelineCaches)(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches); +typedef VkResult (VKAPI_PTR *PFN_vkCreateComputePipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); +typedef void (VKAPI_PTR *PFN_vkDestroyPipeline)(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* 
pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineLayout)(VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout); +typedef void (VKAPI_PTR *PFN_vkDestroyPipelineLayout)(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateSampler)(VkDevice device, const VkSamplerCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSampler* pSampler); +typedef void (VKAPI_PTR *PFN_vkDestroySampler)(VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorSetLayout)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout); +typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorSetLayout)(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorPool)(VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool); +typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkResetDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags); +typedef VkResult (VKAPI_PTR *PFN_vkAllocateDescriptorSets)(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets); +typedef VkResult (VKAPI_PTR *PFN_vkFreeDescriptorSets)(VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets); +typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSets)(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies); +typedef void (VKAPI_PTR *PFN_vkCmdBindPipeline)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline); +typedef void (VKAPI_PTR *PFN_vkCmdBindDescriptorSets)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets); +typedef void (VKAPI_PTR *PFN_vkCmdClearColorImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue* pColor, uint32_t rangeCount, const VkImageSubresourceRange* pRanges); +typedef void (VKAPI_PTR *PFN_vkCmdDispatch)(VkCommandBuffer commandBuffer, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ); +typedef void (VKAPI_PTR *PFN_vkCmdDispatchIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset); +typedef void (VKAPI_PTR *PFN_vkCmdSetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask); +typedef void (VKAPI_PTR *PFN_vkCmdResetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask); +typedef void (VKAPI_PTR *PFN_vkCmdWaitEvents)(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, 
const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers); +typedef void (VKAPI_PTR *PFN_vkCmdPushConstants)(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues); +typedef VkResult (VKAPI_PTR *PFN_vkCreateGraphicsPipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); +typedef VkResult (VKAPI_PTR *PFN_vkCreateFramebuffer)(VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer); +typedef void (VKAPI_PTR *PFN_vkDestroyFramebuffer)(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass)(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass); +typedef void (VKAPI_PTR *PFN_vkDestroyRenderPass)(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkGetRenderAreaGranularity)(VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity); +typedef void (VKAPI_PTR *PFN_vkCmdSetViewport)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport* pViewports); +typedef void (VKAPI_PTR *PFN_vkCmdSetScissor)(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D* pScissors); +typedef void (VKAPI_PTR *PFN_vkCmdSetLineWidth)(VkCommandBuffer commandBuffer, float lineWidth); +typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBias)(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor); +typedef void (VKAPI_PTR *PFN_vkCmdSetBlendConstants)(VkCommandBuffer commandBuffer, const float blendConstants[4]); +typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBounds)(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds); +typedef void (VKAPI_PTR *PFN_vkCmdSetStencilCompareMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask); +typedef void (VKAPI_PTR *PFN_vkCmdSetStencilWriteMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask); +typedef void (VKAPI_PTR *PFN_vkCmdSetStencilReference)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference); +typedef void (VKAPI_PTR *PFN_vkCmdBindIndexBuffer)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType); +typedef void (VKAPI_PTR *PFN_vkCmdBindVertexBuffers)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets); +typedef void (VKAPI_PTR *PFN_vkCmdDraw)(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexed)(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, 
uint32_t drawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdBlitImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit* pRegions, VkFilter filter); +typedef void (VKAPI_PTR *PFN_vkCmdClearDepthStencilImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const VkImageSubresourceRange* pRanges); +typedef void (VKAPI_PTR *PFN_vkCmdClearAttachments)(VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkClearAttachment* pAttachments, uint32_t rectCount, const VkClearRect* pRects); +typedef void (VKAPI_PTR *PFN_vkCmdResolveImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, VkSubpassContents contents); +typedef void (VKAPI_PTR *PFN_vkCmdNextSubpass)(VkCommandBuffer commandBuffer, VkSubpassContents contents); +typedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass)(VkCommandBuffer commandBuffer); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance( + const VkInstanceCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkInstance* pInstance); + +VKAPI_ATTR void VKAPI_CALL vkDestroyInstance( + VkInstance instance, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDevices( + VkInstance instance, + uint32_t* pPhysicalDeviceCount, + VkPhysicalDevice* pPhysicalDevices); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceFeatures* pFeatures); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkFormatProperties* pFormatProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkImageType type, + VkImageTiling tiling, + VkImageUsageFlags usage, + VkImageCreateFlags flags, + VkImageFormatProperties* pImageFormatProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceProperties* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties( + VkPhysicalDevice physicalDevice, + uint32_t* pQueueFamilyPropertyCount, + VkQueueFamilyProperties* pQueueFamilyProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceMemoryProperties* pMemoryProperties); + +VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr( + VkInstance instance, + const char* pName); + +VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr( + VkDevice device, + const char* pName); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice( + VkPhysicalDevice physicalDevice, + const VkDeviceCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDevice* pDevice); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDevice( + VkDevice device, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties( + const char* pLayerName, + uint32_t* pPropertyCount, + VkExtensionProperties* pProperties); + +VKAPI_ATTR VkResult 
VKAPI_CALL vkEnumerateDeviceExtensionProperties( + VkPhysicalDevice physicalDevice, + const char* pLayerName, + uint32_t* pPropertyCount, + VkExtensionProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties( + uint32_t* pPropertyCount, + VkLayerProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkLayerProperties* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue( + VkDevice device, + uint32_t queueFamilyIndex, + uint32_t queueIndex, + VkQueue* pQueue); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit( + VkQueue queue, + uint32_t submitCount, + const VkSubmitInfo* pSubmits, + VkFence fence); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle( + VkQueue queue); + +VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle( + VkDevice device); + +VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory( + VkDevice device, + const VkMemoryAllocateInfo* pAllocateInfo, + const VkAllocationCallbacks* pAllocator, + VkDeviceMemory* pMemory); + +VKAPI_ATTR void VKAPI_CALL vkFreeMemory( + VkDevice device, + VkDeviceMemory memory, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkMapMemory( + VkDevice device, + VkDeviceMemory memory, + VkDeviceSize offset, + VkDeviceSize size, + VkMemoryMapFlags flags, + void** ppData); + +VKAPI_ATTR void VKAPI_CALL vkUnmapMemory( + VkDevice device, + VkDeviceMemory memory); + +VKAPI_ATTR VkResult VKAPI_CALL vkFlushMappedMemoryRanges( + VkDevice device, + uint32_t memoryRangeCount, + const VkMappedMemoryRange* pMemoryRanges); + +VKAPI_ATTR VkResult VKAPI_CALL vkInvalidateMappedMemoryRanges( + VkDevice device, + uint32_t memoryRangeCount, + const VkMappedMemoryRange* pMemoryRanges); + +VKAPI_ATTR void VKAPI_CALL vkGetDeviceMemoryCommitment( + VkDevice device, + VkDeviceMemory memory, + VkDeviceSize* pCommittedMemoryInBytes); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory( + VkDevice device, + VkBuffer buffer, + VkDeviceMemory memory, + VkDeviceSize memoryOffset); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory( + VkDevice device, + VkImage image, + VkDeviceMemory memory, + VkDeviceSize memoryOffset); + +VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements( + VkDevice device, + VkBuffer buffer, + VkMemoryRequirements* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements( + VkDevice device, + VkImage image, + VkMemoryRequirements* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements( + VkDevice device, + VkImage image, + uint32_t* pSparseMemoryRequirementCount, + VkSparseImageMemoryRequirements* pSparseMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkImageType type, + VkSampleCountFlagBits samples, + VkImageUsageFlags usage, + VkImageTiling tiling, + uint32_t* pPropertyCount, + VkSparseImageFormatProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueBindSparse( + VkQueue queue, + uint32_t bindInfoCount, + const VkBindSparseInfo* pBindInfo, + VkFence fence); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateFence( + VkDevice device, + const VkFenceCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkFence* pFence); + +VKAPI_ATTR void VKAPI_CALL vkDestroyFence( + VkDevice device, + VkFence fence, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetFences( + VkDevice device, 
+ uint32_t fenceCount, + const VkFence* pFences); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus( + VkDevice device, + VkFence fence); + +VKAPI_ATTR VkResult VKAPI_CALL vkWaitForFences( + VkDevice device, + uint32_t fenceCount, + const VkFence* pFences, + VkBool32 waitAll, + uint64_t timeout); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore( + VkDevice device, + const VkSemaphoreCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSemaphore* pSemaphore); + +VKAPI_ATTR void VKAPI_CALL vkDestroySemaphore( + VkDevice device, + VkSemaphore semaphore, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool( + VkDevice device, + const VkQueryPoolCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkQueryPool* pQueryPool); + +VKAPI_ATTR void VKAPI_CALL vkDestroyQueryPool( + VkDevice device, + VkQueryPool queryPool, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults( + VkDevice device, + VkQueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount, + size_t dataSize, + void* pData, + VkDeviceSize stride, + VkQueryResultFlags flags); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer( + VkDevice device, + const VkBufferCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkBuffer* pBuffer); + +VKAPI_ATTR void VKAPI_CALL vkDestroyBuffer( + VkDevice device, + VkBuffer buffer, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage( + VkDevice device, + const VkImageCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkImage* pImage); + +VKAPI_ATTR void VKAPI_CALL vkDestroyImage( + VkDevice device, + VkImage image, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout( + VkDevice device, + VkImage image, + const VkImageSubresource* pSubresource, + VkSubresourceLayout* pLayout); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView( + VkDevice device, + const VkImageViewCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkImageView* pView); + +VKAPI_ATTR void VKAPI_CALL vkDestroyImageView( + VkDevice device, + VkImageView imageView, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool( + VkDevice device, + const VkCommandPoolCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkCommandPool* pCommandPool); + +VKAPI_ATTR void VKAPI_CALL vkDestroyCommandPool( + VkDevice device, + VkCommandPool commandPool, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandPool( + VkDevice device, + VkCommandPool commandPool, + VkCommandPoolResetFlags flags); + +VKAPI_ATTR VkResult VKAPI_CALL vkAllocateCommandBuffers( + VkDevice device, + const VkCommandBufferAllocateInfo* pAllocateInfo, + VkCommandBuffer* pCommandBuffers); + +VKAPI_ATTR void VKAPI_CALL vkFreeCommandBuffers( + VkDevice device, + VkCommandPool commandPool, + uint32_t commandBufferCount, + const VkCommandBuffer* pCommandBuffers); + +VKAPI_ATTR VkResult VKAPI_CALL vkBeginCommandBuffer( + VkCommandBuffer commandBuffer, + const VkCommandBufferBeginInfo* pBeginInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer( + VkCommandBuffer commandBuffer); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandBuffer( + VkCommandBuffer commandBuffer, + VkCommandBufferResetFlags flags); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer( + VkCommandBuffer commandBuffer, + VkBuffer srcBuffer, + VkBuffer 
dstBuffer, + uint32_t regionCount, + const VkBufferCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const VkImageCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage( + VkCommandBuffer commandBuffer, + VkBuffer srcBuffer, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const VkBufferImageCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkBuffer dstBuffer, + uint32_t regionCount, + const VkBufferImageCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer( + VkCommandBuffer commandBuffer, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + VkDeviceSize dataSize, + const void* pData); + +VKAPI_ATTR void VKAPI_CALL vkCmdFillBuffer( + VkCommandBuffer commandBuffer, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + VkDeviceSize size, + uint32_t data); + +VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier( + VkCommandBuffer commandBuffer, + VkPipelineStageFlags srcStageMask, + VkPipelineStageFlags dstStageMask, + VkDependencyFlags dependencyFlags, + uint32_t memoryBarrierCount, + const VkMemoryBarrier* pMemoryBarriers, + uint32_t bufferMemoryBarrierCount, + const VkBufferMemoryBarrier* pBufferMemoryBarriers, + uint32_t imageMemoryBarrierCount, + const VkImageMemoryBarrier* pImageMemoryBarriers); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginQuery( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t query, + VkQueryControlFlags flags); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t query); + +VKAPI_ATTR void VKAPI_CALL vkCmdResetQueryPool( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount); + +VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp( + VkCommandBuffer commandBuffer, + VkPipelineStageFlagBits pipelineStage, + VkQueryPool queryPool, + uint32_t query); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyQueryPoolResults( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + VkDeviceSize stride, + VkQueryResultFlags flags); + +VKAPI_ATTR void VKAPI_CALL vkCmdExecuteCommands( + VkCommandBuffer commandBuffer, + uint32_t commandBufferCount, + const VkCommandBuffer* pCommandBuffers); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateEvent( + VkDevice device, + const VkEventCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkEvent* pEvent); + +VKAPI_ATTR void VKAPI_CALL vkDestroyEvent( + VkDevice device, + VkEvent event, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetEventStatus( + VkDevice device, + VkEvent event); + +VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent( + VkDevice device, + VkEvent event); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetEvent( + VkDevice device, + VkEvent event); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView( + VkDevice device, + const VkBufferViewCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkBufferView* pView); + +VKAPI_ATTR void VKAPI_CALL vkDestroyBufferView( + VkDevice device, + VkBufferView bufferView, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule( + VkDevice device, + const VkShaderModuleCreateInfo* 
pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkShaderModule* pShaderModule); + +VKAPI_ATTR void VKAPI_CALL vkDestroyShaderModule( + VkDevice device, + VkShaderModule shaderModule, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache( + VkDevice device, + const VkPipelineCacheCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkPipelineCache* pPipelineCache); + +VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineCache( + VkDevice device, + VkPipelineCache pipelineCache, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineCacheData( + VkDevice device, + VkPipelineCache pipelineCache, + size_t* pDataSize, + void* pData); + +VKAPI_ATTR VkResult VKAPI_CALL vkMergePipelineCaches( + VkDevice device, + VkPipelineCache dstCache, + uint32_t srcCacheCount, + const VkPipelineCache* pSrcCaches); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateComputePipelines( + VkDevice device, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkComputePipelineCreateInfo* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines); + +VKAPI_ATTR void VKAPI_CALL vkDestroyPipeline( + VkDevice device, + VkPipeline pipeline, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout( + VkDevice device, + const VkPipelineLayoutCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkPipelineLayout* pPipelineLayout); + +VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineLayout( + VkDevice device, + VkPipelineLayout pipelineLayout, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler( + VkDevice device, + const VkSamplerCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSampler* pSampler); + +VKAPI_ATTR void VKAPI_CALL vkDestroySampler( + VkDevice device, + VkSampler sampler, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorSetLayout( + VkDevice device, + const VkDescriptorSetLayoutCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorSetLayout* pSetLayout); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorSetLayout( + VkDevice device, + VkDescriptorSetLayout descriptorSetLayout, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorPool( + VkDevice device, + const VkDescriptorPoolCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorPool* pDescriptorPool); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorPool( + VkDevice device, + VkDescriptorPool descriptorPool, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetDescriptorPool( + VkDevice device, + VkDescriptorPool descriptorPool, + VkDescriptorPoolResetFlags flags); + +VKAPI_ATTR VkResult VKAPI_CALL vkAllocateDescriptorSets( + VkDevice device, + const VkDescriptorSetAllocateInfo* pAllocateInfo, + VkDescriptorSet* pDescriptorSets); + +VKAPI_ATTR VkResult VKAPI_CALL vkFreeDescriptorSets( + VkDevice device, + VkDescriptorPool descriptorPool, + uint32_t descriptorSetCount, + const VkDescriptorSet* pDescriptorSets); + +VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSets( + VkDevice device, + uint32_t descriptorWriteCount, + const VkWriteDescriptorSet* pDescriptorWrites, + uint32_t descriptorCopyCount, + const VkCopyDescriptorSet* pDescriptorCopies); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindPipeline( + VkCommandBuffer commandBuffer, + 
VkPipelineBindPoint pipelineBindPoint, + VkPipeline pipeline); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipelineLayout layout, + uint32_t firstSet, + uint32_t descriptorSetCount, + const VkDescriptorSet* pDescriptorSets, + uint32_t dynamicOffsetCount, + const uint32_t* pDynamicOffsets); + +VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage( + VkCommandBuffer commandBuffer, + VkImage image, + VkImageLayout imageLayout, + const VkClearColorValue* pColor, + uint32_t rangeCount, + const VkImageSubresourceRange* pRanges); + +VKAPI_ATTR void VKAPI_CALL vkCmdDispatch( + VkCommandBuffer commandBuffer, + uint32_t groupCountX, + uint32_t groupCountY, + uint32_t groupCountZ); + +VKAPI_ATTR void VKAPI_CALL vkCmdDispatchIndirect( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent( + VkCommandBuffer commandBuffer, + VkEvent event, + VkPipelineStageFlags stageMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent( + VkCommandBuffer commandBuffer, + VkEvent event, + VkPipelineStageFlags stageMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents( + VkCommandBuffer commandBuffer, + uint32_t eventCount, + const VkEvent* pEvents, + VkPipelineStageFlags srcStageMask, + VkPipelineStageFlags dstStageMask, + uint32_t memoryBarrierCount, + const VkMemoryBarrier* pMemoryBarriers, + uint32_t bufferMemoryBarrierCount, + const VkBufferMemoryBarrier* pBufferMemoryBarriers, + uint32_t imageMemoryBarrierCount, + const VkImageMemoryBarrier* pImageMemoryBarriers); + +VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants( + VkCommandBuffer commandBuffer, + VkPipelineLayout layout, + VkShaderStageFlags stageFlags, + uint32_t offset, + uint32_t size, + const void* pValues); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateGraphicsPipelines( + VkDevice device, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkGraphicsPipelineCreateInfo* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer( + VkDevice device, + const VkFramebufferCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkFramebuffer* pFramebuffer); + +VKAPI_ATTR void VKAPI_CALL vkDestroyFramebuffer( + VkDevice device, + VkFramebuffer framebuffer, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass( + VkDevice device, + const VkRenderPassCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkRenderPass* pRenderPass); + +VKAPI_ATTR void VKAPI_CALL vkDestroyRenderPass( + VkDevice device, + VkRenderPass renderPass, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkGetRenderAreaGranularity( + VkDevice device, + VkRenderPass renderPass, + VkExtent2D* pGranularity); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetViewport( + VkCommandBuffer commandBuffer, + uint32_t firstViewport, + uint32_t viewportCount, + const VkViewport* pViewports); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetScissor( + VkCommandBuffer commandBuffer, + uint32_t firstScissor, + uint32_t scissorCount, + const VkRect2D* pScissors); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth( + VkCommandBuffer commandBuffer, + float lineWidth); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBias( + VkCommandBuffer commandBuffer, + float depthBiasConstantFactor, + float depthBiasClamp, + float depthBiasSlopeFactor); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants( + VkCommandBuffer 
commandBuffer,
+    const float blendConstants[4]);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBounds(
+    VkCommandBuffer commandBuffer,
+    float minDepthBounds,
+    float maxDepthBounds);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilCompareMask(
+    VkCommandBuffer commandBuffer,
+    VkStencilFaceFlags faceMask,
+    uint32_t compareMask);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilWriteMask(
+    VkCommandBuffer commandBuffer,
+    VkStencilFaceFlags faceMask,
+    uint32_t writeMask);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilReference(
+    VkCommandBuffer commandBuffer,
+    VkStencilFaceFlags faceMask,
+    uint32_t reference);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer(
+    VkCommandBuffer commandBuffer,
+    VkBuffer buffer,
+    VkDeviceSize offset,
+    VkIndexType indexType);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(
+    VkCommandBuffer commandBuffer,
+    uint32_t firstBinding,
+    uint32_t bindingCount,
+    const VkBuffer* pBuffers,
+    const VkDeviceSize* pOffsets);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDraw(
+    VkCommandBuffer commandBuffer,
+    uint32_t vertexCount,
+    uint32_t instanceCount,
+    uint32_t firstVertex,
+    uint32_t firstInstance);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(
+    VkCommandBuffer commandBuffer,
+    uint32_t indexCount,
+    uint32_t instanceCount,
+    uint32_t firstIndex,
+    int32_t vertexOffset,
+    uint32_t firstInstance);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirect(
+    VkCommandBuffer commandBuffer,
+    VkBuffer buffer,
+    VkDeviceSize offset,
+    uint32_t drawCount,
+    uint32_t stride);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirect(
+    VkCommandBuffer commandBuffer,
+    VkBuffer buffer,
+    VkDeviceSize offset,
+    uint32_t drawCount,
+    uint32_t stride);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage(
+    VkCommandBuffer commandBuffer,
+    VkImage srcImage,
+    VkImageLayout srcImageLayout,
+    VkImage dstImage,
+    VkImageLayout dstImageLayout,
+    uint32_t regionCount,
+    const VkImageBlit* pRegions,
+    VkFilter filter);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdClearDepthStencilImage(
+    VkCommandBuffer commandBuffer,
+    VkImage image,
+    VkImageLayout imageLayout,
+    const VkClearDepthStencilValue* pDepthStencil,
+    uint32_t rangeCount,
+    const VkImageSubresourceRange* pRanges);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(
+    VkCommandBuffer commandBuffer,
+    uint32_t attachmentCount,
+    const VkClearAttachment* pAttachments,
+    uint32_t rectCount,
+    const VkClearRect* pRects);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage(
+    VkCommandBuffer commandBuffer,
+    VkImage srcImage,
+    VkImageLayout srcImageLayout,
+    VkImage dstImage,
+    VkImageLayout dstImageLayout,
+    uint32_t regionCount,
+    const VkImageResolve* pRegions);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass(
+    VkCommandBuffer commandBuffer,
+    const VkRenderPassBeginInfo* pRenderPassBegin,
+    VkSubpassContents contents);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(
+    VkCommandBuffer commandBuffer,
+    VkSubpassContents contents);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(
+    VkCommandBuffer commandBuffer);
+#endif
+
+
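Everything inside the VK_NO_PROTOTYPES guard above disappears when that macro is defined, which is the mode a renderer that loads the Vulkan library at runtime would build in: it declares its own pointers using the PFN_* typedefs earlier in this header and fills them through vkGetInstanceProcAddr. A minimal sketch of that bootstrap, with the three resolution stages collapsed into one function for brevity (the qvk* names and VK_GetProcAddresses are hypothetical, not identifiers from this patch):

/* Illustration only -- not part of the vendored header.
 * qvkGetInstanceProcAddr is assumed to have been obtained from the
 * dynamically loaded Vulkan library (e.g. via dlopen or SDL). */
static PFN_vkGetInstanceProcAddr qvkGetInstanceProcAddr;
static PFN_vkCreateInstance      qvkCreateInstance;
static PFN_vkGetDeviceProcAddr   qvkGetDeviceProcAddr;
static PFN_vkQueueSubmit         qvkQueueSubmit;

static void VK_GetProcAddresses( VkInstance instance, VkDevice device )
{
	/* Global commands are resolved with a NULL instance... */
	qvkCreateInstance = (PFN_vkCreateInstance)
		qvkGetInstanceProcAddr( NULL, "vkCreateInstance" );

	/* ...instance-level commands with the created instance... */
	qvkGetDeviceProcAddr = (PFN_vkGetDeviceProcAddr)
		qvkGetInstanceProcAddr( instance, "vkGetDeviceProcAddr" );

	/* ...and device-level commands through vkGetDeviceProcAddr,
	 * which bypasses the loader trampoline on every call. */
	qvkQueueSubmit = (PFN_vkQueueSubmit)
		qvkGetDeviceProcAddr( device, "vkQueueSubmit" );
}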
+// VK_VERSION_1_1 is a preprocessor guard. Do not pass it to API calls.
+#define VK_VERSION_1_1 1
+// Vulkan 1.1 version number
+#define VK_API_VERSION_1_1 VK_MAKE_API_VERSION(0, 1, 1, 0)// Patch version should always be set to 0
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorUpdateTemplate)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSamplerYcbcrConversion)
+#define VK_MAX_DEVICE_GROUP_SIZE 32U
+#define VK_LUID_SIZE 8U
+#define VK_QUEUE_FAMILY_EXTERNAL (~1U)
+
+typedef enum VkDescriptorUpdateTemplateType {
+    VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET = 0,
+    VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS = 1,
+    VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS,
+    VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET,
+    VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkDescriptorUpdateTemplateType;
+
+typedef enum VkSamplerYcbcrModelConversion {
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY = 0,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY = 1,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709 = 2,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601 = 3,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020 = 4,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_MAX_ENUM = 0x7FFFFFFF
+} VkSamplerYcbcrModelConversion;
+
+typedef enum VkSamplerYcbcrRange {
+    VK_SAMPLER_YCBCR_RANGE_ITU_FULL = 0,
+    VK_SAMPLER_YCBCR_RANGE_ITU_NARROW = 1,
+    VK_SAMPLER_YCBCR_RANGE_ITU_FULL_KHR = VK_SAMPLER_YCBCR_RANGE_ITU_FULL,
+    VK_SAMPLER_YCBCR_RANGE_ITU_NARROW_KHR = VK_SAMPLER_YCBCR_RANGE_ITU_NARROW,
+    VK_SAMPLER_YCBCR_RANGE_MAX_ENUM = 0x7FFFFFFF
+} VkSamplerYcbcrRange;
+
+typedef enum VkChromaLocation {
+    VK_CHROMA_LOCATION_COSITED_EVEN = 0,
+    VK_CHROMA_LOCATION_MIDPOINT = 1,
+    VK_CHROMA_LOCATION_COSITED_EVEN_KHR = VK_CHROMA_LOCATION_COSITED_EVEN,
+    VK_CHROMA_LOCATION_MIDPOINT_KHR = VK_CHROMA_LOCATION_MIDPOINT,
+    VK_CHROMA_LOCATION_MAX_ENUM = 0x7FFFFFFF
+} VkChromaLocation;
+
+typedef enum VkPointClippingBehavior {
+    VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES = 0,
+    VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY = 1,
+    VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES,
+    VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY_KHR = VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY,
+    VK_POINT_CLIPPING_BEHAVIOR_MAX_ENUM = 0x7FFFFFFF
+} VkPointClippingBehavior;
+
+typedef enum VkTessellationDomainOrigin {
+    VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT = 0,
+    VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT = 1,
+    VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT_KHR = VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT,
+    VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT_KHR = VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT,
+    VK_TESSELLATION_DOMAIN_ORIGIN_MAX_ENUM = 0x7FFFFFFF
+} VkTessellationDomainOrigin;
+
+typedef enum VkPeerMemoryFeatureFlagBits {
+    VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT = 0x00000001,
+    VK_PEER_MEMORY_FEATURE_COPY_DST_BIT = 0x00000002,
+    VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT = 0x00000004,
+    VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT = 0x00000008,
+
VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT_KHR = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT, + VK_PEER_MEMORY_FEATURE_COPY_DST_BIT_KHR = VK_PEER_MEMORY_FEATURE_COPY_DST_BIT, + VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT_KHR = VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT, + VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT_KHR = VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT, + VK_PEER_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPeerMemoryFeatureFlagBits; +typedef VkFlags VkPeerMemoryFeatureFlags; + +typedef enum VkMemoryAllocateFlagBits { + VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT = 0x00000001, + VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT = 0x00000002, + VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT = 0x00000004, + VK_MEMORY_ALLOCATE_ZERO_INITIALIZE_BIT_EXT = 0x00000008, + VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT_KHR = VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT, + VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT, + VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT, + VK_MEMORY_ALLOCATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkMemoryAllocateFlagBits; +typedef VkFlags VkMemoryAllocateFlags; +typedef VkFlags VkCommandPoolTrimFlags; + +typedef enum VkExternalMemoryHandleTypeFlagBits { + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT = 0x00000008, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT = 0x00000010, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT = 0x00000020, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT = 0x00000040, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT = 0x00000200, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID = 0x00000400, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT = 0x00000080, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT = 0x00000100, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA = 0x00000800, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_RDMA_ADDRESS_BIT_NV = 0x00001000, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OH_NATIVE_BUFFER_BIT_OHOS = 0x00008000, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_SCREEN_BUFFER_BIT_QNX = 0x00004000, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_MTLBUFFER_BIT_EXT = 0x00010000, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_MTLTEXTURE_BIT_EXT = 0x00020000, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_MTLHEAP_BIT_EXT = 0x00040000, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkExternalMemoryHandleTypeFlagBits; +typedef VkFlags VkExternalMemoryHandleTypeFlags; + +typedef enum VkExternalMemoryFeatureFlagBits { + VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT = 0x00000001, + VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT = 0x00000002, + 
VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT = 0x00000004,
+    VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT,
+    VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT,
+    VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT,
+    VK_EXTERNAL_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkExternalMemoryFeatureFlagBits;
+typedef VkFlags VkExternalMemoryFeatureFlags;
+
+typedef enum VkExternalFenceHandleTypeFlagBits {
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001,
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002,
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004,
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT = 0x00000008,
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT,
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT,
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT,
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkExternalFenceHandleTypeFlagBits;
+typedef VkFlags VkExternalFenceHandleTypeFlags;
+
+typedef enum VkExternalFenceFeatureFlagBits {
+    VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT = 0x00000001,
+    VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT = 0x00000002,
+    VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT,
+    VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT,
+    VK_EXTERNAL_FENCE_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkExternalFenceFeatureFlagBits;
+typedef VkFlags VkExternalFenceFeatureFlags;
+
+typedef enum VkFenceImportFlagBits {
+    VK_FENCE_IMPORT_TEMPORARY_BIT = 0x00000001,
+    VK_FENCE_IMPORT_TEMPORARY_BIT_KHR = VK_FENCE_IMPORT_TEMPORARY_BIT,
+    VK_FENCE_IMPORT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkFenceImportFlagBits;
+typedef VkFlags VkFenceImportFlags;
+
+typedef enum VkSemaphoreImportFlagBits {
+    VK_SEMAPHORE_IMPORT_TEMPORARY_BIT = 0x00000001,
+    VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT,
+    VK_SEMAPHORE_IMPORT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkSemaphoreImportFlagBits;
+typedef VkFlags VkSemaphoreImportFlags;
+
+typedef enum VkExternalSemaphoreHandleTypeFlagBits {
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT = 0x00000008,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT = 0x00000010,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA = 0x00000080,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE_BIT = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkExternalSemaphoreHandleTypeFlagBits;
+typedef VkFlags VkExternalSemaphoreHandleTypeFlags;
+
+typedef enum VkExternalSemaphoreFeatureFlagBits {
+    VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT = 0x00000001,
+    VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT = 0x00000002,
+    VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT,
+    VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT,
+    VK_EXTERNAL_SEMAPHORE_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkExternalSemaphoreFeatureFlagBits;
+typedef VkFlags VkExternalSemaphoreFeatureFlags;
+
+typedef enum VkSubgroupFeatureFlagBits {
+    VK_SUBGROUP_FEATURE_BASIC_BIT = 0x00000001,
+    VK_SUBGROUP_FEATURE_VOTE_BIT = 0x00000002,
+    VK_SUBGROUP_FEATURE_ARITHMETIC_BIT = 0x00000004,
+    VK_SUBGROUP_FEATURE_BALLOT_BIT = 0x00000008,
+    VK_SUBGROUP_FEATURE_SHUFFLE_BIT = 0x00000010,
+    VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT = 0x00000020,
+    VK_SUBGROUP_FEATURE_CLUSTERED_BIT = 0x00000040,
+    VK_SUBGROUP_FEATURE_QUAD_BIT = 0x00000080,
+    VK_SUBGROUP_FEATURE_ROTATE_BIT = 0x00000200,
+    VK_SUBGROUP_FEATURE_ROTATE_CLUSTERED_BIT = 0x00000400,
+    VK_SUBGROUP_FEATURE_PARTITIONED_BIT_EXT = 0x00000100,
+    VK_SUBGROUP_FEATURE_PARTITIONED_BIT_NV = VK_SUBGROUP_FEATURE_PARTITIONED_BIT_EXT,
+    VK_SUBGROUP_FEATURE_ROTATE_BIT_KHR = VK_SUBGROUP_FEATURE_ROTATE_BIT,
+    VK_SUBGROUP_FEATURE_ROTATE_CLUSTERED_BIT_KHR = VK_SUBGROUP_FEATURE_ROTATE_CLUSTERED_BIT,
+    VK_SUBGROUP_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkSubgroupFeatureFlagBits;
+typedef VkFlags VkSubgroupFeatureFlags;
+typedef VkFlags VkDescriptorUpdateTemplateCreateFlags;
+typedef struct VkBindBufferMemoryInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkBuffer buffer;
+    VkDeviceMemory memory;
+    VkDeviceSize memoryOffset;
+} VkBindBufferMemoryInfo;
+
+typedef struct VkBindImageMemoryInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkImage image;
+    VkDeviceMemory memory;
+    VkDeviceSize memoryOffset;
+} VkBindImageMemoryInfo;
+
+typedef struct VkMemoryDedicatedRequirements {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 prefersDedicatedAllocation;
+    VkBool32 requiresDedicatedAllocation;
+} VkMemoryDedicatedRequirements;
+
+typedef struct VkMemoryDedicatedAllocateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkImage image;
+    VkBuffer buffer;
+} VkMemoryDedicatedAllocateInfo;
+
+typedef struct VkMemoryAllocateFlagsInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkMemoryAllocateFlags flags;
+    uint32_t deviceMask;
+} VkMemoryAllocateFlagsInfo;
+
+typedef struct VkDeviceGroupCommandBufferBeginInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t deviceMask;
+} VkDeviceGroupCommandBufferBeginInfo;
+
+typedef struct VkDeviceGroupSubmitInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t waitSemaphoreCount;
+    const uint32_t* pWaitSemaphoreDeviceIndices;
+    uint32_t commandBufferCount;
+    const uint32_t* pCommandBufferDeviceMasks;
+    uint32_t signalSemaphoreCount;
+    const uint32_t* pSignalSemaphoreDeviceIndices;
+} VkDeviceGroupSubmitInfo;
+
+typedef struct VkDeviceGroupBindSparseInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t resourceDeviceIndex;
+    uint32_t memoryDeviceIndex;
+} VkDeviceGroupBindSparseInfo;
+
+typedef struct VkBindBufferMemoryDeviceGroupInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t deviceIndexCount;
+    const uint32_t* pDeviceIndices;
+} VkBindBufferMemoryDeviceGroupInfo;
+
+typedef struct VkBindImageMemoryDeviceGroupInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t deviceIndexCount;
+    const uint32_t* pDeviceIndices;
+    uint32_t splitInstanceBindRegionCount;
+    const VkRect2D* pSplitInstanceBindRegions;
+} VkBindImageMemoryDeviceGroupInfo;
+
+typedef struct VkPhysicalDeviceGroupProperties {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t physicalDeviceCount;
+    VkPhysicalDevice physicalDevices[VK_MAX_DEVICE_GROUP_SIZE];
+    VkBool32 subsetAllocation;
+} VkPhysicalDeviceGroupProperties;
+
+typedef struct VkDeviceGroupDeviceCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t physicalDeviceCount;
+    const VkPhysicalDevice* pPhysicalDevices;
+} VkDeviceGroupDeviceCreateInfo;
+
+typedef struct VkBufferMemoryRequirementsInfo2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkBuffer buffer;
+} VkBufferMemoryRequirementsInfo2;
+
+typedef struct VkImageMemoryRequirementsInfo2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkImage image;
+} VkImageMemoryRequirementsInfo2;
+
+typedef struct VkImageSparseMemoryRequirementsInfo2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkImage image;
+} VkImageSparseMemoryRequirementsInfo2;
+
+typedef struct VkMemoryRequirements2 {
+    VkStructureType sType;
+    void* pNext;
+    VkMemoryRequirements memoryRequirements;
+} VkMemoryRequirements2;
+
+typedef struct VkSparseImageMemoryRequirements2 {
+    VkStructureType sType;
+    void* pNext;
+    VkSparseImageMemoryRequirements memoryRequirements;
+} VkSparseImageMemoryRequirements2;
+
+typedef struct VkPhysicalDeviceFeatures2 {
+    VkStructureType sType;
+    void* pNext;
+    VkPhysicalDeviceFeatures features;
+} VkPhysicalDeviceFeatures2;
+
+typedef struct VkPhysicalDeviceProperties2 {
+    VkStructureType sType;
+    void* pNext;
+    VkPhysicalDeviceProperties properties;
+} VkPhysicalDeviceProperties2;
+
+typedef struct VkFormatProperties2 {
+    VkStructureType sType;
+    void* pNext;
+    VkFormatProperties formatProperties;
+} VkFormatProperties2;
+
+typedef struct VkImageFormatProperties2 {
+    VkStructureType sType;
+    void* pNext;
+    VkImageFormatProperties imageFormatProperties;
+} VkImageFormatProperties2;
+
+typedef struct VkPhysicalDeviceImageFormatInfo2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkFormat format;
+    VkImageType type;
+    VkImageTiling tiling;
+    VkImageUsageFlags usage;
+    VkImageCreateFlags flags;
+} VkPhysicalDeviceImageFormatInfo2;
+
+typedef struct VkQueueFamilyProperties2 {
+    VkStructureType sType;
+    void* pNext;
+    VkQueueFamilyProperties queueFamilyProperties;
+} VkQueueFamilyProperties2;
+
+typedef struct VkPhysicalDeviceMemoryProperties2 {
+    VkStructureType sType;
+    void* pNext;
+    VkPhysicalDeviceMemoryProperties memoryProperties;
+} VkPhysicalDeviceMemoryProperties2;
+
+typedef struct VkSparseImageFormatProperties2 {
+    VkStructureType sType;
+    void* pNext;
+    VkSparseImageFormatProperties properties;
+} VkSparseImageFormatProperties2;
+
+typedef struct VkPhysicalDeviceSparseImageFormatInfo2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkFormat format;
+    VkImageType type;
+    VkSampleCountFlagBits samples;
+    VkImageUsageFlags usage;
+    VkImageTiling tiling;
+} VkPhysicalDeviceSparseImageFormatInfo2;
+
+typedef struct VkImageViewUsageCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkImageUsageFlags usage;
+} VkImageViewUsageCreateInfo;
+
+typedef struct VkPhysicalDeviceProtectedMemoryFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 protectedMemory;
+} VkPhysicalDeviceProtectedMemoryFeatures;
+
+typedef struct VkPhysicalDeviceProtectedMemoryProperties {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 protectedNoFault;
+} VkPhysicalDeviceProtectedMemoryProperties;
+
+typedef struct VkDeviceQueueInfo2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkDeviceQueueCreateFlags flags;
+    uint32_t queueFamilyIndex;
+    uint32_t queueIndex;
+} VkDeviceQueueInfo2;
+
+typedef struct VkProtectedSubmitInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkBool32 protectedSubmit;
+} VkProtectedSubmitInfo;
+
+typedef struct VkBindImagePlaneMemoryInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkImageAspectFlagBits planeAspect;
+} VkBindImagePlaneMemoryInfo;
+
+typedef struct VkImagePlaneMemoryRequirementsInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkImageAspectFlagBits planeAspect;
+} VkImagePlaneMemoryRequirementsInfo;
+
+typedef struct VkExternalMemoryProperties {
+    VkExternalMemoryFeatureFlags externalMemoryFeatures;
+    VkExternalMemoryHandleTypeFlags exportFromImportedHandleTypes;
+    VkExternalMemoryHandleTypeFlags compatibleHandleTypes;
+} VkExternalMemoryProperties;
+
+typedef struct VkPhysicalDeviceExternalImageFormatInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalMemoryHandleTypeFlagBits handleType;
+} VkPhysicalDeviceExternalImageFormatInfo;
+
+typedef struct VkExternalImageFormatProperties {
+    VkStructureType sType;
+    void* pNext;
+    VkExternalMemoryProperties externalMemoryProperties;
+} VkExternalImageFormatProperties;
+
+typedef struct VkPhysicalDeviceExternalBufferInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkBufferCreateFlags flags;
+    VkBufferUsageFlags usage;
+    VkExternalMemoryHandleTypeFlagBits handleType;
+} VkPhysicalDeviceExternalBufferInfo;
+
+typedef struct VkExternalBufferProperties {
+    VkStructureType sType;
+    void* pNext;
+    VkExternalMemoryProperties externalMemoryProperties;
+} VkExternalBufferProperties;
+
+typedef struct VkPhysicalDeviceIDProperties {
+    VkStructureType sType;
+    void* pNext;
+    uint8_t deviceUUID[VK_UUID_SIZE];
+    uint8_t driverUUID[VK_UUID_SIZE];
+    uint8_t deviceLUID[VK_LUID_SIZE];
+    uint32_t deviceNodeMask;
+    VkBool32 deviceLUIDValid;
+} VkPhysicalDeviceIDProperties;
+
+typedef struct VkExternalMemoryImageCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalMemoryHandleTypeFlags handleTypes;
+} VkExternalMemoryImageCreateInfo;
+
+typedef struct VkExternalMemoryBufferCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalMemoryHandleTypeFlags handleTypes;
+} VkExternalMemoryBufferCreateInfo;
+
+typedef struct VkExportMemoryAllocateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalMemoryHandleTypeFlags handleTypes;
+} VkExportMemoryAllocateInfo;
+
+typedef struct VkPhysicalDeviceExternalFenceInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalFenceHandleTypeFlagBits handleType;
+} VkPhysicalDeviceExternalFenceInfo;
+
+typedef struct VkExternalFenceProperties {
+    VkStructureType sType;
+    void* pNext;
+    VkExternalFenceHandleTypeFlags exportFromImportedHandleTypes;
+    VkExternalFenceHandleTypeFlags compatibleHandleTypes;
+    VkExternalFenceFeatureFlags externalFenceFeatures;
+} VkExternalFenceProperties;
+
+typedef struct VkExportFenceCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalFenceHandleTypeFlags handleTypes;
+} VkExportFenceCreateInfo;
+
+typedef struct VkExportSemaphoreCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalSemaphoreHandleTypeFlags handleTypes;
+} VkExportSemaphoreCreateInfo;
+
+typedef struct VkPhysicalDeviceExternalSemaphoreInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalSemaphoreHandleTypeFlagBits handleType;
+} VkPhysicalDeviceExternalSemaphoreInfo;
+
+typedef struct VkExternalSemaphoreProperties {
+    VkStructureType sType;
+    void* pNext;
+    VkExternalSemaphoreHandleTypeFlags exportFromImportedHandleTypes;
+    VkExternalSemaphoreHandleTypeFlags compatibleHandleTypes;
+    VkExternalSemaphoreFeatureFlags externalSemaphoreFeatures;
+} VkExternalSemaphoreProperties;
+
+typedef struct VkPhysicalDeviceSubgroupProperties {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t subgroupSize;
+    VkShaderStageFlags supportedStages;
+    VkSubgroupFeatureFlags supportedOperations;
+    VkBool32 quadOperationsInAllStages;
+} VkPhysicalDeviceSubgroupProperties;
+
+typedef struct VkPhysicalDevice16BitStorageFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 storageBuffer16BitAccess;
+    VkBool32 uniformAndStorageBuffer16BitAccess;
+    VkBool32 storagePushConstant16;
+    VkBool32 storageInputOutput16;
+} VkPhysicalDevice16BitStorageFeatures;
+
+typedef struct VkPhysicalDeviceVariablePointersFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 variablePointersStorageBuffer;
+    VkBool32 variablePointers;
+} VkPhysicalDeviceVariablePointersFeatures;
+
+typedef VkPhysicalDeviceVariablePointersFeatures VkPhysicalDeviceVariablePointerFeatures;
+
+typedef struct VkDescriptorUpdateTemplateEntry {
+    uint32_t dstBinding;
+    uint32_t dstArrayElement;
+    uint32_t descriptorCount;
+    VkDescriptorType descriptorType;
+    size_t offset;
+    size_t stride;
+} VkDescriptorUpdateTemplateEntry;
+
+typedef struct VkDescriptorUpdateTemplateCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkDescriptorUpdateTemplateCreateFlags flags;
+    uint32_t descriptorUpdateEntryCount;
+    const VkDescriptorUpdateTemplateEntry* pDescriptorUpdateEntries;
+    VkDescriptorUpdateTemplateType templateType;
+    VkDescriptorSetLayout descriptorSetLayout;
+    VkPipelineBindPoint pipelineBindPoint;
+    VkPipelineLayout pipelineLayout;
+    uint32_t set;
+} VkDescriptorUpdateTemplateCreateInfo;
+
+typedef struct VkPhysicalDeviceMaintenance3Properties {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t maxPerSetDescriptors;
+    VkDeviceSize maxMemoryAllocationSize;
+} VkPhysicalDeviceMaintenance3Properties;
+
+typedef struct VkDescriptorSetLayoutSupport {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 supported;
+} VkDescriptorSetLayoutSupport;
+
+typedef struct VkSamplerYcbcrConversionCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkFormat format;
+    VkSamplerYcbcrModelConversion ycbcrModel;
+    VkSamplerYcbcrRange ycbcrRange;
+    VkComponentMapping components;
+    VkChromaLocation xChromaOffset;
+    VkChromaLocation yChromaOffset;
+    VkFilter chromaFilter;
+    VkBool32 forceExplicitReconstruction;
+} VkSamplerYcbcrConversionCreateInfo;
+
+typedef struct VkSamplerYcbcrConversionInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkSamplerYcbcrConversion conversion;
+} VkSamplerYcbcrConversionInfo;
+
+typedef struct VkPhysicalDeviceSamplerYcbcrConversionFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 samplerYcbcrConversion;
+} VkPhysicalDeviceSamplerYcbcrConversionFeatures;
+
+typedef struct VkSamplerYcbcrConversionImageFormatProperties {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t combinedImageSamplerDescriptorCount;
+} VkSamplerYcbcrConversionImageFormatProperties;
+
+typedef struct VkDeviceGroupRenderPassBeginInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t deviceMask;
+    uint32_t deviceRenderAreaCount;
+    const VkRect2D* pDeviceRenderAreas;
+} VkDeviceGroupRenderPassBeginInfo;
+
+typedef struct VkPhysicalDevicePointClippingProperties {
+    VkStructureType sType;
+    void* pNext;
+    VkPointClippingBehavior pointClippingBehavior;
+} VkPhysicalDevicePointClippingProperties;
+
+typedef struct VkInputAttachmentAspectReference {
+    uint32_t subpass;
+    uint32_t inputAttachmentIndex;
+    VkImageAspectFlags aspectMask;
+} VkInputAttachmentAspectReference;
+
+typedef struct VkRenderPassInputAttachmentAspectCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t aspectReferenceCount;
+    const VkInputAttachmentAspectReference* pAspectReferences;
+} VkRenderPassInputAttachmentAspectCreateInfo;
+
+typedef struct VkPipelineTessellationDomainOriginStateCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkTessellationDomainOrigin domainOrigin;
+} VkPipelineTessellationDomainOriginStateCreateInfo;
+
+typedef struct VkRenderPassMultiviewCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t subpassCount;
+    const uint32_t* pViewMasks;
+    uint32_t dependencyCount;
+    const int32_t* pViewOffsets;
+    uint32_t correlationMaskCount;
+    const uint32_t* pCorrelationMasks;
+} VkRenderPassMultiviewCreateInfo;
+
+typedef struct VkPhysicalDeviceMultiviewFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 multiview;
+    VkBool32 multiviewGeometryShader;
+    VkBool32 multiviewTessellationShader;
+} VkPhysicalDeviceMultiviewFeatures;
+
+typedef struct VkPhysicalDeviceMultiviewProperties {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t maxMultiviewViewCount;
+    uint32_t maxMultiviewInstanceIndex;
+} VkPhysicalDeviceMultiviewProperties;
+
+typedef struct VkPhysicalDeviceShaderDrawParametersFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 shaderDrawParameters;
+} VkPhysicalDeviceShaderDrawParametersFeatures;
+
+typedef VkPhysicalDeviceShaderDrawParametersFeatures VkPhysicalDeviceShaderDrawParameterFeatures;
+
+typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceVersion)(uint32_t* pApiVersion);
+typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory2)(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos);
+typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory2)(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos);
+typedef void (VKAPI_PTR *PFN_vkGetDeviceGroupPeerMemoryFeatures)(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDeviceMask)(VkCommandBuffer commandBuffer, uint32_t deviceMask);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceGroups)(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties);
+typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements2)(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements2)(VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements2)(VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties2)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties2)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties2)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* pMemoryProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties2)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2* pProperties);
+typedef void (VKAPI_PTR *PFN_vkTrimCommandPool)(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags);
+typedef void (VKAPI_PTR *PFN_vkGetDeviceQueue2)(VkDevice device, const VkDeviceQueueInfo2* pQueueInfo, VkQueue* pQueue);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalBufferProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalFenceProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalSemaphoreProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties);
+typedef void (VKAPI_PTR *PFN_vkCmdDispatchBase)(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorUpdateTemplate)(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate);
+typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorUpdateTemplate)(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator);
+typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSetWithTemplate)(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData);
+typedef void (VKAPI_PTR *PFN_vkGetDescriptorSetLayoutSupport)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateSamplerYcbcrConversion)(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion);
+typedef void (VKAPI_PTR *PFN_vkDestroySamplerYcbcrConversion)(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceVersion(
+    uint32_t* pApiVersion);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory2(
+    VkDevice device,
+    uint32_t bindInfoCount,
+    const VkBindBufferMemoryInfo* pBindInfos);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory2(
+    VkDevice device,
+    uint32_t bindInfoCount,
+    const VkBindImageMemoryInfo* pBindInfos);
+
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceGroupPeerMemoryFeatures(
+    VkDevice device,
+    uint32_t heapIndex,
+    uint32_t localDeviceIndex,
+    uint32_t remoteDeviceIndex,
+    VkPeerMemoryFeatureFlags* pPeerMemoryFeatures);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDeviceMask(
+    VkCommandBuffer commandBuffer,
+    uint32_t deviceMask);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceGroups(
+    VkInstance instance,
+    uint32_t* pPhysicalDeviceGroupCount,
+    VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements2(
+    VkDevice device,
+    const VkImageMemoryRequirementsInfo2* pInfo,
+    VkMemoryRequirements2* pMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements2(
+    VkDevice device,
+    const VkBufferMemoryRequirementsInfo2* pInfo,
+    VkMemoryRequirements2* pMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements2(
+    VkDevice device,
+    const VkImageSparseMemoryRequirementsInfo2* pInfo,
+    uint32_t* pSparseMemoryRequirementCount,
+    VkSparseImageMemoryRequirements2* pSparseMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures2(
+    VkPhysicalDevice physicalDevice,
+    VkPhysicalDeviceFeatures2* pFeatures);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties2(
+    VkPhysicalDevice physicalDevice,
+    VkPhysicalDeviceProperties2* pProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties2(
+    VkPhysicalDevice physicalDevice,
+    VkFormat format,
+    VkFormatProperties2* pFormatProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties2(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
+    VkImageFormatProperties2* pImageFormatProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties2(
+    VkPhysicalDevice physicalDevice,
+    uint32_t* pQueueFamilyPropertyCount,
+    VkQueueFamilyProperties2* pQueueFamilyProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties2(
+    VkPhysicalDevice physicalDevice,
+    VkPhysicalDeviceMemoryProperties2* pMemoryProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties2(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo,
+    uint32_t* pPropertyCount,
+    VkSparseImageFormatProperties2* pProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkTrimCommandPool(
+    VkDevice device,
+    VkCommandPool commandPool,
+    VkCommandPoolTrimFlags flags);
+
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue2(
+    VkDevice device,
+    const VkDeviceQueueInfo2* pQueueInfo,
+    VkQueue* pQueue);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalBufferProperties(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo,
+    VkExternalBufferProperties* pExternalBufferProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalFenceProperties(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
+    VkExternalFenceProperties* pExternalFenceProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalSemaphoreProperties(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
+    VkExternalSemaphoreProperties* pExternalSemaphoreProperties);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDispatchBase(
+    VkCommandBuffer commandBuffer,
+    uint32_t baseGroupX,
+    uint32_t baseGroupY,
+    uint32_t baseGroupZ,
+    uint32_t groupCountX,
+    uint32_t groupCountY,
+    uint32_t groupCountZ);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorUpdateTemplate(
+    VkDevice device,
+    const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorUpdateTemplate(
+    VkDevice device,
+    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
+    const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSetWithTemplate(
+    VkDevice device,
+    VkDescriptorSet descriptorSet,
+    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
+    const void* pData);
+
+VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutSupport(
+    VkDevice device,
+    const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
+    VkDescriptorSetLayoutSupport* pSupport);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateSamplerYcbcrConversion(
+    VkDevice device,
+    const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSamplerYcbcrConversion* pYcbcrConversion);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroySamplerYcbcrConversion(
+    VkDevice device,
+    VkSamplerYcbcrConversion ycbcrConversion,
+    const VkAllocationCallbacks* pAllocator);
+#endif
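+/*
+ * Editorial sketch (not part of the Khronos header): the Features2 query
+ * pattern declared above lets a renderer chain per-feature structs through
+ * pNext and fill them in one call. `phys` is a placeholder for a previously
+ * enumerated VkPhysicalDevice.
+ *
+ *     VkPhysicalDevice16BitStorageFeatures storage16 = {
+ *         .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES
+ *     };
+ *     VkPhysicalDeviceFeatures2 features2 = {
+ *         .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
+ *         .pNext = &storage16
+ *     };
+ *     vkGetPhysicalDeviceFeatures2(phys, &features2);
+ *     // storage16.storageBuffer16BitAccess is now populated by the driver.
+ */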
+
+
+// VK_VERSION_1_2 is a preprocessor guard. Do not pass it to API calls.
+#define VK_VERSION_1_2 1
+// Vulkan 1.2 version number
+#define VK_API_VERSION_1_2 VK_MAKE_API_VERSION(0, 1, 2, 0)// Patch version should always be set to 0
+
+#define VK_MAX_DRIVER_NAME_SIZE 256U
+#define VK_MAX_DRIVER_INFO_SIZE 256U
+
+typedef enum VkDriverId {
+    VK_DRIVER_ID_AMD_PROPRIETARY = 1,
+    VK_DRIVER_ID_AMD_OPEN_SOURCE = 2,
+    VK_DRIVER_ID_MESA_RADV = 3,
+    VK_DRIVER_ID_NVIDIA_PROPRIETARY = 4,
+    VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS = 5,
+    VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA = 6,
+    VK_DRIVER_ID_IMAGINATION_PROPRIETARY = 7,
+    VK_DRIVER_ID_QUALCOMM_PROPRIETARY = 8,
+    VK_DRIVER_ID_ARM_PROPRIETARY = 9,
+    VK_DRIVER_ID_GOOGLE_SWIFTSHADER = 10,
+    VK_DRIVER_ID_GGP_PROPRIETARY = 11,
+    VK_DRIVER_ID_BROADCOM_PROPRIETARY = 12,
+    VK_DRIVER_ID_MESA_LLVMPIPE = 13,
+    VK_DRIVER_ID_MOLTENVK = 14,
+    VK_DRIVER_ID_COREAVI_PROPRIETARY = 15,
+    VK_DRIVER_ID_JUICE_PROPRIETARY = 16,
+    VK_DRIVER_ID_VERISILICON_PROPRIETARY = 17,
+    VK_DRIVER_ID_MESA_TURNIP = 18,
+    VK_DRIVER_ID_MESA_V3DV = 19,
+    VK_DRIVER_ID_MESA_PANVK = 20,
+    VK_DRIVER_ID_SAMSUNG_PROPRIETARY = 21,
+    VK_DRIVER_ID_MESA_VENUS = 22,
+    VK_DRIVER_ID_MESA_DOZEN = 23,
+    VK_DRIVER_ID_MESA_NVK = 24,
+    VK_DRIVER_ID_IMAGINATION_OPEN_SOURCE_MESA = 25,
+    VK_DRIVER_ID_MESA_HONEYKRISP = 26,
+    VK_DRIVER_ID_VULKAN_SC_EMULATION_ON_VULKAN = 27,
+    VK_DRIVER_ID_MESA_KOSMICKRISP = 28,
+    VK_DRIVER_ID_AMD_PROPRIETARY_KHR = VK_DRIVER_ID_AMD_PROPRIETARY,
+    VK_DRIVER_ID_AMD_OPEN_SOURCE_KHR = VK_DRIVER_ID_AMD_OPEN_SOURCE,
+    VK_DRIVER_ID_MESA_RADV_KHR = VK_DRIVER_ID_MESA_RADV,
+    VK_DRIVER_ID_NVIDIA_PROPRIETARY_KHR = VK_DRIVER_ID_NVIDIA_PROPRIETARY,
+    VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS_KHR = VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS,
+    VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA_KHR = VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA,
+    VK_DRIVER_ID_IMAGINATION_PROPRIETARY_KHR = VK_DRIVER_ID_IMAGINATION_PROPRIETARY,
+    VK_DRIVER_ID_QUALCOMM_PROPRIETARY_KHR = VK_DRIVER_ID_QUALCOMM_PROPRIETARY,
+    VK_DRIVER_ID_ARM_PROPRIETARY_KHR = VK_DRIVER_ID_ARM_PROPRIETARY,
+    VK_DRIVER_ID_GOOGLE_SWIFTSHADER_KHR = VK_DRIVER_ID_GOOGLE_SWIFTSHADER,
+    VK_DRIVER_ID_GGP_PROPRIETARY_KHR = VK_DRIVER_ID_GGP_PROPRIETARY,
+    VK_DRIVER_ID_BROADCOM_PROPRIETARY_KHR = VK_DRIVER_ID_BROADCOM_PROPRIETARY,
+    VK_DRIVER_ID_MAX_ENUM = 0x7FFFFFFF
+} VkDriverId;
+
+typedef enum VkShaderFloatControlsIndependence {
+    VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY = 0,
+    VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL = 1,
+    VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE = 2,
+    VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY_KHR = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY,
+    VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL,
+    VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE_KHR = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE,
+    VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_MAX_ENUM = 0x7FFFFFFF
+} VkShaderFloatControlsIndependence;
+
+typedef enum VkSemaphoreType {
+    VK_SEMAPHORE_TYPE_BINARY = 0,
+    VK_SEMAPHORE_TYPE_TIMELINE = 1,
+    VK_SEMAPHORE_TYPE_BINARY_KHR = VK_SEMAPHORE_TYPE_BINARY,
+    VK_SEMAPHORE_TYPE_TIMELINE_KHR = VK_SEMAPHORE_TYPE_TIMELINE,
+    VK_SEMAPHORE_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkSemaphoreType;
+
+typedef enum VkSamplerReductionMode {
+    VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE = 0,
+    VK_SAMPLER_REDUCTION_MODE_MIN = 1,
+    VK_SAMPLER_REDUCTION_MODE_MAX = 2,
+    VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_RANGECLAMP_QCOM = 1000521000,
+    VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT = VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE,
+    VK_SAMPLER_REDUCTION_MODE_MIN_EXT = VK_SAMPLER_REDUCTION_MODE_MIN,
+    VK_SAMPLER_REDUCTION_MODE_MAX_EXT = VK_SAMPLER_REDUCTION_MODE_MAX,
+    VK_SAMPLER_REDUCTION_MODE_MAX_ENUM = 0x7FFFFFFF
+} VkSamplerReductionMode;
+
+typedef enum VkResolveModeFlagBits {
+    VK_RESOLVE_MODE_NONE = 0,
+    VK_RESOLVE_MODE_SAMPLE_ZERO_BIT = 0x00000001,
+    VK_RESOLVE_MODE_AVERAGE_BIT = 0x00000002,
+    VK_RESOLVE_MODE_MIN_BIT = 0x00000004,
+    VK_RESOLVE_MODE_MAX_BIT = 0x00000008,
+    VK_RESOLVE_MODE_EXTERNAL_FORMAT_DOWNSAMPLE_BIT_ANDROID = 0x00000010,
+    VK_RESOLVE_MODE_CUSTOM_BIT_EXT = 0x00000020,
+    VK_RESOLVE_MODE_NONE_KHR = VK_RESOLVE_MODE_NONE,
+    VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT,
+    VK_RESOLVE_MODE_AVERAGE_BIT_KHR = VK_RESOLVE_MODE_AVERAGE_BIT,
+    VK_RESOLVE_MODE_MIN_BIT_KHR = VK_RESOLVE_MODE_MIN_BIT,
+    VK_RESOLVE_MODE_MAX_BIT_KHR = VK_RESOLVE_MODE_MAX_BIT,
+    // VK_RESOLVE_MODE_EXTERNAL_FORMAT_DOWNSAMPLE_ANDROID is a legacy alias
+    VK_RESOLVE_MODE_EXTERNAL_FORMAT_DOWNSAMPLE_ANDROID = VK_RESOLVE_MODE_EXTERNAL_FORMAT_DOWNSAMPLE_BIT_ANDROID,
+    VK_RESOLVE_MODE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkResolveModeFlagBits;
+typedef VkFlags VkResolveModeFlags;
+
+typedef enum VkSemaphoreWaitFlagBits {
+    VK_SEMAPHORE_WAIT_ANY_BIT = 0x00000001,
+    VK_SEMAPHORE_WAIT_ANY_BIT_KHR = VK_SEMAPHORE_WAIT_ANY_BIT,
+    VK_SEMAPHORE_WAIT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkSemaphoreWaitFlagBits;
+typedef VkFlags VkSemaphoreWaitFlags;
+
+typedef enum VkDescriptorBindingFlagBits {
+    VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT = 0x00000001,
+    VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT = 0x00000002,
+    VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT = 0x00000004,
+    VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT = 0x00000008,
+    VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT = VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT,
+    VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT = VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT,
+    VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT = VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT,
+    VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT = VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT,
+    VK_DESCRIPTOR_BINDING_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkDescriptorBindingFlagBits;
+typedef VkFlags VkDescriptorBindingFlags;
+typedef struct VkPhysicalDeviceVulkan11Features {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 storageBuffer16BitAccess;
+    VkBool32 uniformAndStorageBuffer16BitAccess;
+    VkBool32 storagePushConstant16;
+    VkBool32 storageInputOutput16;
+    VkBool32 multiview;
+    VkBool32 multiviewGeometryShader;
+    VkBool32 multiviewTessellationShader;
+    VkBool32 variablePointersStorageBuffer;
+    VkBool32 variablePointers;
+    VkBool32 protectedMemory;
+    VkBool32 samplerYcbcrConversion;
+    VkBool32 shaderDrawParameters;
+} VkPhysicalDeviceVulkan11Features;
+
+typedef struct VkPhysicalDeviceVulkan11Properties {
+    VkStructureType sType;
+    void* pNext;
+    uint8_t deviceUUID[VK_UUID_SIZE];
+    uint8_t driverUUID[VK_UUID_SIZE];
+    uint8_t deviceLUID[VK_LUID_SIZE];
+    uint32_t deviceNodeMask;
+    VkBool32 deviceLUIDValid;
+    uint32_t subgroupSize;
+    VkShaderStageFlags subgroupSupportedStages;
+    VkSubgroupFeatureFlags subgroupSupportedOperations;
+    VkBool32 subgroupQuadOperationsInAllStages;
+    VkPointClippingBehavior pointClippingBehavior;
+    uint32_t maxMultiviewViewCount;
+    uint32_t maxMultiviewInstanceIndex;
+    VkBool32 protectedNoFault;
+    uint32_t maxPerSetDescriptors;
+    VkDeviceSize maxMemoryAllocationSize;
+} VkPhysicalDeviceVulkan11Properties;
+
+typedef struct VkPhysicalDeviceVulkan12Features {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 samplerMirrorClampToEdge;
+    VkBool32 drawIndirectCount;
+    VkBool32 storageBuffer8BitAccess;
+    VkBool32 uniformAndStorageBuffer8BitAccess;
+    VkBool32 storagePushConstant8;
+    VkBool32 shaderBufferInt64Atomics;
+    VkBool32 shaderSharedInt64Atomics;
+    VkBool32 shaderFloat16;
+    VkBool32 shaderInt8;
+    VkBool32 descriptorIndexing;
+    VkBool32 shaderInputAttachmentArrayDynamicIndexing;
+    VkBool32 shaderUniformTexelBufferArrayDynamicIndexing;
+    VkBool32 shaderStorageTexelBufferArrayDynamicIndexing;
+    VkBool32 shaderUniformBufferArrayNonUniformIndexing;
+    VkBool32 shaderSampledImageArrayNonUniformIndexing;
+    VkBool32 shaderStorageBufferArrayNonUniformIndexing;
+    VkBool32 shaderStorageImageArrayNonUniformIndexing;
+    VkBool32 shaderInputAttachmentArrayNonUniformIndexing;
+    VkBool32 shaderUniformTexelBufferArrayNonUniformIndexing;
+    VkBool32 shaderStorageTexelBufferArrayNonUniformIndexing;
+    VkBool32 descriptorBindingUniformBufferUpdateAfterBind;
+    VkBool32 descriptorBindingSampledImageUpdateAfterBind;
+    VkBool32 descriptorBindingStorageImageUpdateAfterBind;
+    VkBool32 descriptorBindingStorageBufferUpdateAfterBind;
+    VkBool32 descriptorBindingUniformTexelBufferUpdateAfterBind;
+    VkBool32 descriptorBindingStorageTexelBufferUpdateAfterBind;
+    VkBool32 descriptorBindingUpdateUnusedWhilePending;
+    VkBool32 descriptorBindingPartiallyBound;
+    VkBool32 descriptorBindingVariableDescriptorCount;
+    VkBool32 runtimeDescriptorArray;
+    VkBool32 samplerFilterMinmax;
+    VkBool32 scalarBlockLayout;
+    VkBool32 imagelessFramebuffer;
+    VkBool32 uniformBufferStandardLayout;
+    VkBool32 shaderSubgroupExtendedTypes;
+    VkBool32 separateDepthStencilLayouts;
+    VkBool32 hostQueryReset;
+    VkBool32 timelineSemaphore;
+    VkBool32 bufferDeviceAddress;
+    VkBool32 bufferDeviceAddressCaptureReplay;
+    VkBool32 bufferDeviceAddressMultiDevice;
+    VkBool32 vulkanMemoryModel;
+    VkBool32 vulkanMemoryModelDeviceScope;
+    VkBool32 vulkanMemoryModelAvailabilityVisibilityChains;
+    VkBool32 shaderOutputViewportIndex;
+    VkBool32 shaderOutputLayer;
+    VkBool32 subgroupBroadcastDynamicId;
+} VkPhysicalDeviceVulkan12Features;
+
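+/*
+ * Editorial sketch (not part of the Khronos header): the aggregate feature
+ * structs above are meant to be chained into VkDeviceCreateInfo at device
+ * creation. Assuming `deviceInfo` is an otherwise filled-in
+ * VkDeviceCreateInfo and `phys` a valid VkPhysicalDevice:
+ *
+ *     VkPhysicalDeviceVulkan12Features features12 = {
+ *         .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES,
+ *         .timelineSemaphore = VK_TRUE
+ *     };
+ *     deviceInfo.pNext = &features12;
+ *     // vkCreateDevice(phys, &deviceInfo, NULL, &device);
+ */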
+typedef struct VkConformanceVersion {
+    uint8_t major;
+    uint8_t minor;
+    uint8_t subminor;
+    uint8_t patch;
+} VkConformanceVersion;
+
+typedef struct VkPhysicalDeviceVulkan12Properties {
+    VkStructureType sType;
+    void* pNext;
+    VkDriverId driverID;
+    char driverName[VK_MAX_DRIVER_NAME_SIZE];
+    char driverInfo[VK_MAX_DRIVER_INFO_SIZE];
+    VkConformanceVersion conformanceVersion;
+    VkShaderFloatControlsIndependence denormBehaviorIndependence;
+    VkShaderFloatControlsIndependence roundingModeIndependence;
+    VkBool32 shaderSignedZeroInfNanPreserveFloat16;
+    VkBool32 shaderSignedZeroInfNanPreserveFloat32;
+    VkBool32 shaderSignedZeroInfNanPreserveFloat64;
+    VkBool32 shaderDenormPreserveFloat16;
+    VkBool32 shaderDenormPreserveFloat32;
+    VkBool32 shaderDenormPreserveFloat64;
+    VkBool32 shaderDenormFlushToZeroFloat16;
+    VkBool32 shaderDenormFlushToZeroFloat32;
+    VkBool32 shaderDenormFlushToZeroFloat64;
+    VkBool32 shaderRoundingModeRTEFloat16;
+    VkBool32 shaderRoundingModeRTEFloat32;
+    VkBool32 shaderRoundingModeRTEFloat64;
+    VkBool32 shaderRoundingModeRTZFloat16;
+    VkBool32 shaderRoundingModeRTZFloat32;
+    VkBool32 shaderRoundingModeRTZFloat64;
+    uint32_t maxUpdateAfterBindDescriptorsInAllPools;
+    VkBool32 shaderUniformBufferArrayNonUniformIndexingNative;
+    VkBool32 shaderSampledImageArrayNonUniformIndexingNative;
+    VkBool32 shaderStorageBufferArrayNonUniformIndexingNative;
+    VkBool32 shaderStorageImageArrayNonUniformIndexingNative;
+    VkBool32 shaderInputAttachmentArrayNonUniformIndexingNative;
+    VkBool32 robustBufferAccessUpdateAfterBind;
+    VkBool32 quadDivergentImplicitLod;
+    uint32_t maxPerStageDescriptorUpdateAfterBindSamplers;
+    uint32_t maxPerStageDescriptorUpdateAfterBindUniformBuffers;
+    uint32_t maxPerStageDescriptorUpdateAfterBindStorageBuffers;
+    uint32_t maxPerStageDescriptorUpdateAfterBindSampledImages;
+    uint32_t maxPerStageDescriptorUpdateAfterBindStorageImages;
+    uint32_t maxPerStageDescriptorUpdateAfterBindInputAttachments;
+    uint32_t maxPerStageUpdateAfterBindResources;
+    uint32_t maxDescriptorSetUpdateAfterBindSamplers;
+    uint32_t maxDescriptorSetUpdateAfterBindUniformBuffers;
+    uint32_t maxDescriptorSetUpdateAfterBindUniformBuffersDynamic;
+    uint32_t maxDescriptorSetUpdateAfterBindStorageBuffers;
+    uint32_t maxDescriptorSetUpdateAfterBindStorageBuffersDynamic;
+    uint32_t maxDescriptorSetUpdateAfterBindSampledImages;
+    uint32_t maxDescriptorSetUpdateAfterBindStorageImages;
+    uint32_t maxDescriptorSetUpdateAfterBindInputAttachments;
+    VkResolveModeFlags supportedDepthResolveModes;
+    VkResolveModeFlags supportedStencilResolveModes;
+    VkBool32 independentResolveNone;
+    VkBool32 independentResolve;
+    VkBool32 filterMinmaxSingleComponentFormats;
+    VkBool32 filterMinmaxImageComponentMapping;
+    uint64_t maxTimelineSemaphoreValueDifference;
+    VkSampleCountFlags framebufferIntegerColorSampleCounts;
+} VkPhysicalDeviceVulkan12Properties;
+
+typedef struct VkImageFormatListCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t viewFormatCount;
+    const VkFormat* pViewFormats;
+} VkImageFormatListCreateInfo;
+
+typedef struct VkPhysicalDeviceDriverProperties {
+    VkStructureType sType;
+    void* pNext;
+    VkDriverId driverID;
+    char driverName[VK_MAX_DRIVER_NAME_SIZE];
+    char driverInfo[VK_MAX_DRIVER_INFO_SIZE];
+    VkConformanceVersion conformanceVersion;
+} VkPhysicalDeviceDriverProperties;
+
+typedef struct VkPhysicalDeviceVulkanMemoryModelFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 vulkanMemoryModel;
+    VkBool32 vulkanMemoryModelDeviceScope;
+    VkBool32 vulkanMemoryModelAvailabilityVisibilityChains;
+} VkPhysicalDeviceVulkanMemoryModelFeatures;
+
+typedef struct VkPhysicalDeviceHostQueryResetFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 hostQueryReset;
+} VkPhysicalDeviceHostQueryResetFeatures;
+
+typedef struct VkPhysicalDeviceTimelineSemaphoreFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 timelineSemaphore;
+} VkPhysicalDeviceTimelineSemaphoreFeatures;
+
+typedef struct VkPhysicalDeviceTimelineSemaphoreProperties {
+    VkStructureType sType;
+    void* pNext;
+    uint64_t maxTimelineSemaphoreValueDifference;
+} VkPhysicalDeviceTimelineSemaphoreProperties;
+
+typedef struct VkSemaphoreTypeCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkSemaphoreType semaphoreType;
+    uint64_t initialValue;
+} VkSemaphoreTypeCreateInfo;
+
+typedef struct VkTimelineSemaphoreSubmitInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t waitSemaphoreValueCount;
+    const uint64_t* pWaitSemaphoreValues;
+    uint32_t signalSemaphoreValueCount;
+    const uint64_t* pSignalSemaphoreValues;
+} VkTimelineSemaphoreSubmitInfo;
+
+typedef struct VkSemaphoreWaitInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkSemaphoreWaitFlags flags;
+    uint32_t semaphoreCount;
+    const VkSemaphore* pSemaphores;
+    const uint64_t* pValues;
+} VkSemaphoreWaitInfo;
+
+typedef struct VkSemaphoreSignalInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkSemaphore semaphore;
+    uint64_t value;
+} VkSemaphoreSignalInfo;
+
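+/*
+ * Editorial sketch (not part of the Khronos header): creating and waiting on
+ * a timeline semaphore with the structs above; `device` is assumed valid and
+ * created with the timelineSemaphore feature enabled. vkCreateSemaphore is
+ * core 1.0; vkWaitSemaphores is declared further below.
+ *
+ *     VkSemaphoreTypeCreateInfo typeInfo = {
+ *         .sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO,
+ *         .semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE,
+ *         .initialValue = 0
+ *     };
+ *     VkSemaphoreCreateInfo createInfo = {
+ *         .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
+ *         .pNext = &typeInfo
+ *     };
+ *     VkSemaphore timeline;
+ *     vkCreateSemaphore(device, &createInfo, NULL, &timeline);
+ *
+ *     uint64_t waitValue = 1;
+ *     VkSemaphoreWaitInfo waitInfo = {
+ *         .sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,
+ *         .semaphoreCount = 1,
+ *         .pSemaphores = &timeline,
+ *         .pValues = &waitValue
+ *     };
+ *     vkWaitSemaphores(device, &waitInfo, UINT64_MAX);
+ */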
+typedef struct VkPhysicalDeviceBufferDeviceAddressFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 bufferDeviceAddress;
+    VkBool32 bufferDeviceAddressCaptureReplay;
+    VkBool32 bufferDeviceAddressMultiDevice;
+} VkPhysicalDeviceBufferDeviceAddressFeatures;
+
+typedef struct VkBufferDeviceAddressInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkBuffer buffer;
+} VkBufferDeviceAddressInfo;
+
+typedef struct VkBufferOpaqueCaptureAddressCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint64_t opaqueCaptureAddress;
+} VkBufferOpaqueCaptureAddressCreateInfo;
+
+typedef struct VkMemoryOpaqueCaptureAddressAllocateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint64_t opaqueCaptureAddress;
+} VkMemoryOpaqueCaptureAddressAllocateInfo;
+
+typedef struct VkDeviceMemoryOpaqueCaptureAddressInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkDeviceMemory memory;
+} VkDeviceMemoryOpaqueCaptureAddressInfo;
+
+typedef struct VkPhysicalDevice8BitStorageFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 storageBuffer8BitAccess;
+    VkBool32 uniformAndStorageBuffer8BitAccess;
+    VkBool32 storagePushConstant8;
+} VkPhysicalDevice8BitStorageFeatures;
+
+typedef struct VkPhysicalDeviceShaderAtomicInt64Features {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 shaderBufferInt64Atomics;
+    VkBool32 shaderSharedInt64Atomics;
+} VkPhysicalDeviceShaderAtomicInt64Features;
+
+typedef struct VkPhysicalDeviceShaderFloat16Int8Features {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 shaderFloat16;
+    VkBool32 shaderInt8;
+} VkPhysicalDeviceShaderFloat16Int8Features;
+
+typedef struct VkPhysicalDeviceFloatControlsProperties {
+    VkStructureType sType;
+    void* pNext;
+    VkShaderFloatControlsIndependence denormBehaviorIndependence;
+    VkShaderFloatControlsIndependence roundingModeIndependence;
+    VkBool32 shaderSignedZeroInfNanPreserveFloat16;
+    VkBool32 shaderSignedZeroInfNanPreserveFloat32;
+    VkBool32 shaderSignedZeroInfNanPreserveFloat64;
+    VkBool32 shaderDenormPreserveFloat16;
+    VkBool32 shaderDenormPreserveFloat32;
+    VkBool32 shaderDenormPreserveFloat64;
+    VkBool32 shaderDenormFlushToZeroFloat16;
+    VkBool32 shaderDenormFlushToZeroFloat32;
+    VkBool32 shaderDenormFlushToZeroFloat64;
+    VkBool32 shaderRoundingModeRTEFloat16;
+    VkBool32 shaderRoundingModeRTEFloat32;
+    VkBool32 shaderRoundingModeRTEFloat64;
+    VkBool32 shaderRoundingModeRTZFloat16;
+    VkBool32 shaderRoundingModeRTZFloat32;
+    VkBool32 shaderRoundingModeRTZFloat64;
+} VkPhysicalDeviceFloatControlsProperties;
+
+typedef struct VkDescriptorSetLayoutBindingFlagsCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t bindingCount;
+    const VkDescriptorBindingFlags* pBindingFlags;
+} VkDescriptorSetLayoutBindingFlagsCreateInfo;
+
+typedef struct VkPhysicalDeviceDescriptorIndexingFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 shaderInputAttachmentArrayDynamicIndexing;
+    VkBool32 shaderUniformTexelBufferArrayDynamicIndexing;
+    VkBool32 shaderStorageTexelBufferArrayDynamicIndexing;
+    VkBool32 shaderUniformBufferArrayNonUniformIndexing;
+    VkBool32 shaderSampledImageArrayNonUniformIndexing;
+    VkBool32 shaderStorageBufferArrayNonUniformIndexing;
+    VkBool32 shaderStorageImageArrayNonUniformIndexing;
+    VkBool32 shaderInputAttachmentArrayNonUniformIndexing;
+    VkBool32 shaderUniformTexelBufferArrayNonUniformIndexing;
+    VkBool32 shaderStorageTexelBufferArrayNonUniformIndexing;
+    VkBool32 descriptorBindingUniformBufferUpdateAfterBind;
+    VkBool32 descriptorBindingSampledImageUpdateAfterBind;
+    VkBool32 descriptorBindingStorageImageUpdateAfterBind;
+    VkBool32 descriptorBindingStorageBufferUpdateAfterBind;
+    VkBool32 descriptorBindingUniformTexelBufferUpdateAfterBind;
+    VkBool32 descriptorBindingStorageTexelBufferUpdateAfterBind;
+    VkBool32 descriptorBindingUpdateUnusedWhilePending;
+    VkBool32 descriptorBindingPartiallyBound;
+    VkBool32 descriptorBindingVariableDescriptorCount;
+    VkBool32 runtimeDescriptorArray;
+} VkPhysicalDeviceDescriptorIndexingFeatures;
+
+typedef struct VkPhysicalDeviceDescriptorIndexingProperties {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t maxUpdateAfterBindDescriptorsInAllPools;
+    VkBool32 shaderUniformBufferArrayNonUniformIndexingNative;
+    VkBool32 shaderSampledImageArrayNonUniformIndexingNative;
+    VkBool32 shaderStorageBufferArrayNonUniformIndexingNative;
+    VkBool32 shaderStorageImageArrayNonUniformIndexingNative;
+    VkBool32 shaderInputAttachmentArrayNonUniformIndexingNative;
+    VkBool32 robustBufferAccessUpdateAfterBind;
+    VkBool32 quadDivergentImplicitLod;
+    uint32_t maxPerStageDescriptorUpdateAfterBindSamplers;
+    uint32_t maxPerStageDescriptorUpdateAfterBindUniformBuffers;
+    uint32_t maxPerStageDescriptorUpdateAfterBindStorageBuffers;
+    uint32_t maxPerStageDescriptorUpdateAfterBindSampledImages;
+    uint32_t maxPerStageDescriptorUpdateAfterBindStorageImages;
+    uint32_t maxPerStageDescriptorUpdateAfterBindInputAttachments;
+    uint32_t maxPerStageUpdateAfterBindResources;
+    uint32_t maxDescriptorSetUpdateAfterBindSamplers;
+    uint32_t maxDescriptorSetUpdateAfterBindUniformBuffers;
+    uint32_t maxDescriptorSetUpdateAfterBindUniformBuffersDynamic;
+    uint32_t maxDescriptorSetUpdateAfterBindStorageBuffers;
+    uint32_t maxDescriptorSetUpdateAfterBindStorageBuffersDynamic;
+    uint32_t maxDescriptorSetUpdateAfterBindSampledImages;
+    uint32_t maxDescriptorSetUpdateAfterBindStorageImages;
+    uint32_t maxDescriptorSetUpdateAfterBindInputAttachments;
+} VkPhysicalDeviceDescriptorIndexingProperties;
+
+typedef struct VkDescriptorSetVariableDescriptorCountAllocateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t descriptorSetCount;
+    const uint32_t* pDescriptorCounts;
+} VkDescriptorSetVariableDescriptorCountAllocateInfo;
+
+typedef struct VkDescriptorSetVariableDescriptorCountLayoutSupport {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t maxVariableDescriptorCount;
+} VkDescriptorSetVariableDescriptorCountLayoutSupport;
+
+typedef struct VkPhysicalDeviceScalarBlockLayoutFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 scalarBlockLayout;
+} VkPhysicalDeviceScalarBlockLayoutFeatures;
+
+typedef struct VkSamplerReductionModeCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkSamplerReductionMode reductionMode;
+} VkSamplerReductionModeCreateInfo;
+
+typedef struct VkPhysicalDeviceSamplerFilterMinmaxProperties {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 filterMinmaxSingleComponentFormats;
+    VkBool32 filterMinmaxImageComponentMapping;
+} VkPhysicalDeviceSamplerFilterMinmaxProperties;
+
+typedef struct VkPhysicalDeviceUniformBufferStandardLayoutFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 uniformBufferStandardLayout;
+} VkPhysicalDeviceUniformBufferStandardLayoutFeatures;
+
+typedef struct VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 shaderSubgroupExtendedTypes;
+} VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures;
+
+typedef struct VkAttachmentDescription2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkAttachmentDescriptionFlags flags;
+    VkFormat format;
+    VkSampleCountFlagBits samples;
+    VkAttachmentLoadOp loadOp;
+    VkAttachmentStoreOp storeOp;
+    VkAttachmentLoadOp stencilLoadOp;
+    VkAttachmentStoreOp stencilStoreOp;
+    VkImageLayout initialLayout;
+    VkImageLayout finalLayout;
+} VkAttachmentDescription2;
+
+typedef struct VkAttachmentReference2 {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t attachment;
+    VkImageLayout layout;
+    VkImageAspectFlags aspectMask;
+} VkAttachmentReference2;
+
+typedef struct VkSubpassDescription2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkSubpassDescriptionFlags flags;
+    VkPipelineBindPoint pipelineBindPoint;
+    uint32_t viewMask;
+    uint32_t inputAttachmentCount;
+    const VkAttachmentReference2* pInputAttachments;
+    uint32_t colorAttachmentCount;
+    const VkAttachmentReference2* pColorAttachments;
+    const VkAttachmentReference2* pResolveAttachments;
+    const VkAttachmentReference2* pDepthStencilAttachment;
+    uint32_t preserveAttachmentCount;
+    const uint32_t* pPreserveAttachments;
+} VkSubpassDescription2;
+
+typedef struct VkSubpassDependency2 {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t srcSubpass;
+    uint32_t dstSubpass;
+    VkPipelineStageFlags srcStageMask;
+    VkPipelineStageFlags dstStageMask;
+    VkAccessFlags srcAccessMask;
+    VkAccessFlags dstAccessMask;
+    VkDependencyFlags dependencyFlags;
+    int32_t viewOffset;
+} VkSubpassDependency2;
+
+typedef struct VkRenderPassCreateInfo2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkRenderPassCreateFlags flags;
+    uint32_t attachmentCount;
+    const VkAttachmentDescription2* pAttachments;
+    uint32_t subpassCount;
+    const VkSubpassDescription2* pSubpasses;
+    uint32_t dependencyCount;
+    const VkSubpassDependency2* pDependencies;
+    uint32_t correlatedViewMaskCount;
+    const uint32_t* pCorrelatedViewMasks;
+} VkRenderPassCreateInfo2;
+
+typedef struct VkSubpassBeginInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkSubpassContents contents;
+} VkSubpassBeginInfo;
+
+typedef struct VkSubpassEndInfo {
+    VkStructureType sType;
+    const void* pNext;
+} VkSubpassEndInfo;
+
+typedef struct VkSubpassDescriptionDepthStencilResolve {
+    VkStructureType sType;
+    const void* pNext;
+    VkResolveModeFlagBits depthResolveMode;
+    VkResolveModeFlagBits stencilResolveMode;
+    const VkAttachmentReference2* pDepthStencilResolveAttachment;
+} VkSubpassDescriptionDepthStencilResolve;
+
+typedef struct VkPhysicalDeviceDepthStencilResolveProperties {
+    VkStructureType sType;
+    void* pNext;
+    VkResolveModeFlags supportedDepthResolveModes;
+    VkResolveModeFlags supportedStencilResolveModes;
+    VkBool32 independentResolveNone;
+    VkBool32 independentResolve;
+} VkPhysicalDeviceDepthStencilResolveProperties;
+
+typedef struct VkImageStencilUsageCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkImageUsageFlags stencilUsage;
+} VkImageStencilUsageCreateInfo;
+
+typedef struct VkPhysicalDeviceImagelessFramebufferFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 imagelessFramebuffer;
+} VkPhysicalDeviceImagelessFramebufferFeatures;
+
+typedef struct VkFramebufferAttachmentImageInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkImageCreateFlags flags;
+    VkImageUsageFlags usage;
+    uint32_t width;
+    uint32_t height;
+    uint32_t layerCount;
+    uint32_t viewFormatCount;
+    const VkFormat* pViewFormats;
+} VkFramebufferAttachmentImageInfo;
+
+typedef struct VkFramebufferAttachmentsCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t attachmentImageInfoCount;
+    const VkFramebufferAttachmentImageInfo* pAttachmentImageInfos;
+} VkFramebufferAttachmentsCreateInfo;
+
+typedef struct VkRenderPassAttachmentBeginInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t attachmentCount;
+    const VkImageView* pAttachments;
+} VkRenderPassAttachmentBeginInfo;
+
+typedef struct VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 separateDepthStencilLayouts;
+} VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures;
+
+typedef struct VkAttachmentReferenceStencilLayout {
+    VkStructureType sType;
+    void* pNext;
+    VkImageLayout stencilLayout;
+} VkAttachmentReferenceStencilLayout;
+
+typedef struct VkAttachmentDescriptionStencilLayout {
+    VkStructureType sType;
+    void* pNext;
+    VkImageLayout stencilInitialLayout;
+    VkImageLayout stencilFinalLayout;
+} VkAttachmentDescriptionStencilLayout;
+
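+/*
+ * Editorial sketch (not part of the Khronos header): vkCreateRenderPass2
+ * (declared below) consumes the extensible *2 structs above. A single
+ * color-attachment pass, with `colorFormat` and `device` as placeholders;
+ * fields left out are zero-initialized by the designated initializers.
+ *
+ *     VkAttachmentDescription2 color = {
+ *         .sType = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2,
+ *         .format = colorFormat,
+ *         .samples = VK_SAMPLE_COUNT_1_BIT,
+ *         .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
+ *         .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
+ *         .finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR
+ *     };
+ *     VkAttachmentReference2 ref = {
+ *         .sType = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2,
+ *         .attachment = 0,
+ *         .layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
+ *     };
+ *     VkSubpassDescription2 subpass = {
+ *         .sType = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2,
+ *         .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
+ *         .colorAttachmentCount = 1,
+ *         .pColorAttachments = &ref
+ *     };
+ *     VkRenderPassCreateInfo2 info = {
+ *         .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2,
+ *         .attachmentCount = 1,
+ *         .pAttachments = &color,
+ *         .subpassCount = 1,
+ *         .pSubpasses = &subpass
+ *     };
+ *     VkRenderPass renderPass;
+ *     vkCreateRenderPass2(device, &info, NULL, &renderPass);
+ */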
+typedef void (VKAPI_PTR *PFN_vkResetQueryPool)(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount);
+typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreCounterValue)(VkDevice device, VkSemaphore semaphore, uint64_t* pValue);
+typedef VkResult (VKAPI_PTR *PFN_vkWaitSemaphores)(VkDevice device, const VkSemaphoreWaitInfo* pWaitInfo, uint64_t timeout);
+typedef VkResult (VKAPI_PTR *PFN_vkSignalSemaphore)(VkDevice device, const VkSemaphoreSignalInfo* pSignalInfo);
+typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetBufferDeviceAddress)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo);
+typedef uint64_t (VKAPI_PTR *PFN_vkGetBufferOpaqueCaptureAddress)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo);
+typedef uint64_t (VKAPI_PTR *PFN_vkGetDeviceMemoryOpaqueCaptureAddress)(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCount)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride);
+typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCount)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass2)(VkDevice device, const VkRenderPassCreateInfo2* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass);
+typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass2)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, const VkSubpassBeginInfo* pSubpassBeginInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdNextSubpass2)(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo* pSubpassBeginInfo, const VkSubpassEndInfo* pSubpassEndInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass2)(VkCommandBuffer commandBuffer, const VkSubpassEndInfo* pSubpassEndInfo);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkResetQueryPool(
+    VkDevice device,
+    VkQueryPool queryPool,
+    uint32_t firstQuery,
+    uint32_t queryCount);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreCounterValue(
+    VkDevice device,
+    VkSemaphore semaphore,
+    uint64_t* pValue);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkWaitSemaphores(
+    VkDevice device,
+    const VkSemaphoreWaitInfo* pWaitInfo,
+    uint64_t timeout);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkSignalSemaphore(
+    VkDevice device,
+    const VkSemaphoreSignalInfo* pSignalInfo);
+
+VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetBufferDeviceAddress(
+    VkDevice device,
+    const VkBufferDeviceAddressInfo* pInfo);
+
+VKAPI_ATTR uint64_t VKAPI_CALL vkGetBufferOpaqueCaptureAddress(
+    VkDevice device,
+    const VkBufferDeviceAddressInfo* pInfo);
+
+VKAPI_ATTR uint64_t VKAPI_CALL vkGetDeviceMemoryOpaqueCaptureAddress(
+    VkDevice device,
+    const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCount(
+    VkCommandBuffer commandBuffer,
+    VkBuffer buffer,
+    VkDeviceSize offset,
+    VkBuffer countBuffer,
+    VkDeviceSize countBufferOffset,
+    uint32_t maxDrawCount,
+    uint32_t stride);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCount(
+    VkCommandBuffer commandBuffer,
+    VkBuffer buffer,
+    VkDeviceSize offset,
+    VkBuffer countBuffer,
+    VkDeviceSize countBufferOffset,
+    uint32_t maxDrawCount,
+    uint32_t stride);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass2(
+    VkDevice device,
+    const VkRenderPassCreateInfo2* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkRenderPass* pRenderPass);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass2(
+    VkCommandBuffer commandBuffer,
+    const VkRenderPassBeginInfo* pRenderPassBegin,
+    const VkSubpassBeginInfo* pSubpassBeginInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass2(
+    VkCommandBuffer commandBuffer,
+    const VkSubpassBeginInfo* pSubpassBeginInfo,
+    const VkSubpassEndInfo* pSubpassEndInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass2(
+    VkCommandBuffer commandBuffer,
+    const VkSubpassEndInfo* pSubpassEndInfo);
+#endif
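+/*
+ * Editorial sketch (not part of the Khronos header): retrieving a GPU
+ * virtual address for a buffer via the 1.2 entry point declared above. The
+ * buffer is assumed to have been created with
+ * VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT and with the bufferDeviceAddress
+ * feature enabled on `device`.
+ *
+ *     VkBufferDeviceAddressInfo addrInfo = {
+ *         .sType = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO,
+ *         .buffer = buffer
+ *     };
+ *     VkDeviceAddress addr = vkGetBufferDeviceAddress(device, &addrInfo);
+ */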
+
+
+// VK_VERSION_1_3 is a preprocessor guard. Do not pass it to API calls.
+#define VK_VERSION_1_3 1
+// Vulkan 1.3 version number
+#define VK_API_VERSION_1_3 VK_MAKE_API_VERSION(0, 1, 3, 0)// Patch version should always be set to 0
+
+typedef uint64_t VkFlags64;
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPrivateDataSlot)
+
+typedef enum VkToolPurposeFlagBits {
+    VK_TOOL_PURPOSE_VALIDATION_BIT = 0x00000001,
+    VK_TOOL_PURPOSE_PROFILING_BIT = 0x00000002,
+    VK_TOOL_PURPOSE_TRACING_BIT = 0x00000004,
+    VK_TOOL_PURPOSE_ADDITIONAL_FEATURES_BIT = 0x00000008,
+    VK_TOOL_PURPOSE_MODIFYING_FEATURES_BIT = 0x00000010,
+    VK_TOOL_PURPOSE_DEBUG_REPORTING_BIT_EXT = 0x00000020,
+    VK_TOOL_PURPOSE_DEBUG_MARKERS_BIT_EXT = 0x00000040,
+    VK_TOOL_PURPOSE_VALIDATION_BIT_EXT = VK_TOOL_PURPOSE_VALIDATION_BIT,
+    VK_TOOL_PURPOSE_PROFILING_BIT_EXT = VK_TOOL_PURPOSE_PROFILING_BIT,
+    VK_TOOL_PURPOSE_TRACING_BIT_EXT = VK_TOOL_PURPOSE_TRACING_BIT,
+    VK_TOOL_PURPOSE_ADDITIONAL_FEATURES_BIT_EXT = VK_TOOL_PURPOSE_ADDITIONAL_FEATURES_BIT,
+    VK_TOOL_PURPOSE_MODIFYING_FEATURES_BIT_EXT = VK_TOOL_PURPOSE_MODIFYING_FEATURES_BIT,
+    VK_TOOL_PURPOSE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkToolPurposeFlagBits;
+typedef VkFlags VkToolPurposeFlags;
+typedef VkFlags VkPrivateDataSlotCreateFlags;
+typedef VkFlags64 VkPipelineStageFlags2;
+
+// Flag bits for VkPipelineStageFlagBits2
+typedef VkFlags64 VkPipelineStageFlagBits2;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_NONE = 0ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT = 0x00000001ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT = 0x00000002ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT = 0x00000004ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT = 0x00000008ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT = 0x00000010ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT = 0x00000020ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT = 0x00000040ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT = 0x00000080ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT = 0x00000100ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT = 0x00000200ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT = 0x00000400ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT = 0x00000800ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT = 0x00001000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TRANSFER_BIT = 0x00001000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT = 0x00002000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_HOST_BIT = 0x00004000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT = 0x00008000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT = 0x00010000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COPY_BIT = 0x100000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_RESOLVE_BIT = 0x200000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_BLIT_BIT = 0x400000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_CLEAR_BIT = 0x800000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_INDEX_INPUT_BIT = 0x1000000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_ATTRIBUTE_INPUT_BIT = 0x2000000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_PRE_RASTERIZATION_SHADERS_BIT = 0x4000000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VIDEO_DECODE_BIT_KHR = 0x04000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VIDEO_ENCODE_BIT_KHR = 0x08000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_NONE_KHR = 0ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR = 0x00000001ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT_KHR = 0x00000002ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT_KHR = 0x00000004ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT_KHR = 0x00000008ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT_KHR = 0x00000010ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT_KHR = 0x00000020ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT_KHR = 0x00000040ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR = 0x00000080ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT_KHR = 0x00000100ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT_KHR = 0x00000200ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT_KHR = 0x00000400ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT_KHR = 0x00000800ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT_KHR = 0x00001000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TRANSFER_BIT_KHR = 0x00001000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR = 0x00002000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_HOST_BIT_KHR = 0x00004000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT_KHR = 0x00008000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR = 0x00010000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COPY_BIT_KHR = 0x100000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_RESOLVE_BIT_KHR = 0x200000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_BLIT_BIT_KHR = 0x400000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_CLEAR_BIT_KHR = 0x800000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_INDEX_INPUT_BIT_KHR = 0x1000000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_VERTEX_ATTRIBUTE_INPUT_BIT_KHR = 0x2000000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_PRE_RASTERIZATION_SHADERS_BIT_KHR = 0x4000000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TRANSFORM_FEEDBACK_BIT_EXT = 0x01000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_CONDITIONAL_RENDERING_BIT_EXT = 0x00040000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COMMAND_PREPROCESS_BIT_NV = 0x00020000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COMMAND_PREPROCESS_BIT_EXT = 0x00020000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x00400000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_SHADING_RATE_IMAGE_BIT_NV = 0x00400000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR = 0x02000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_KHR = 0x00200000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_NV = 0x00200000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_NV = 0x02000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_FRAGMENT_DENSITY_PROCESS_BIT_EXT = 0x00800000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_NV = 0x00080000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_NV = 0x00100000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_EXT = 0x00080000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_EXT = 0x00100000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_SUBPASS_SHADER_BIT_HUAWEI = 0x8000000000ULL;
+// VK_PIPELINE_STAGE_2_SUBPASS_SHADING_BIT_HUAWEI is a legacy alias
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_SUBPASS_SHADING_BIT_HUAWEI = 0x8000000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_INVOCATION_MASK_BIT_HUAWEI = 0x10000000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_COPY_BIT_KHR = 0x10000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_MICROMAP_BUILD_BIT_EXT = 0x40000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_CLUSTER_CULLING_SHADER_BIT_HUAWEI = 0x20000000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_OPTICAL_FLOW_BIT_NV = 0x20000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_CONVERT_COOPERATIVE_VECTOR_MATRIX_BIT_NV = 0x100000000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_DATA_GRAPH_BIT_ARM = 0x40000000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_COPY_INDIRECT_BIT_KHR = 0x400000000000ULL;
+static const VkPipelineStageFlagBits2 VK_PIPELINE_STAGE_2_MEMORY_DECOMPRESSION_BIT_EXT = 0x200000000000ULL;
VK_ACCESS_2_HOST_READ_BIT = 0x00002000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_HOST_WRITE_BIT = 0x00004000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_MEMORY_READ_BIT = 0x00008000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_MEMORY_WRITE_BIT = 0x00010000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_SAMPLED_READ_BIT = 0x100000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_STORAGE_READ_BIT = 0x200000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT = 0x400000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_VIDEO_DECODE_READ_BIT_KHR = 0x800000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_VIDEO_DECODE_WRITE_BIT_KHR = 0x1000000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_SAMPLER_HEAP_READ_BIT_EXT = 0x200000000000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_RESOURCE_HEAP_READ_BIT_EXT = 0x400000000000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_VIDEO_ENCODE_READ_BIT_KHR = 0x2000000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_VIDEO_ENCODE_WRITE_BIT_KHR = 0x4000000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_TILE_ATTACHMENT_READ_BIT_QCOM = 0x8000000000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_TILE_ATTACHMENT_WRITE_BIT_QCOM = 0x10000000000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_NONE_KHR = 0ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_INDIRECT_COMMAND_READ_BIT_KHR = 0x00000001ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_INDEX_READ_BIT_KHR = 0x00000002ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_VERTEX_ATTRIBUTE_READ_BIT_KHR = 0x00000004ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_UNIFORM_READ_BIT_KHR = 0x00000008ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_INPUT_ATTACHMENT_READ_BIT_KHR = 0x00000010ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_READ_BIT_KHR = 0x00000020ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_WRITE_BIT_KHR = 0x00000040ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT_KHR = 0x00000080ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT_KHR = 0x00000100ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT_KHR = 0x00000200ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT_KHR = 0x00000400ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFER_READ_BIT_KHR = 0x00000800ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFER_WRITE_BIT_KHR = 0x00001000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_HOST_READ_BIT_KHR = 0x00002000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_HOST_WRITE_BIT_KHR = 0x00004000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_MEMORY_READ_BIT_KHR = 0x00008000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_MEMORY_WRITE_BIT_KHR = 0x00010000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_SAMPLED_READ_BIT_KHR = 0x100000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_STORAGE_READ_BIT_KHR = 0x200000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT_KHR = 0x400000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFORM_FEEDBACK_WRITE_BIT_EXT = 0x02000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT = 0x04000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT = 0x08000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_CONDITIONAL_RENDERING_READ_BIT_EXT = 0x00100000ULL; +static const VkAccessFlagBits2 
VK_ACCESS_2_COMMAND_PREPROCESS_READ_BIT_NV = 0x00020000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_COMMAND_PREPROCESS_WRITE_BIT_NV = 0x00040000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_COMMAND_PREPROCESS_READ_BIT_EXT = 0x00020000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_COMMAND_PREPROCESS_WRITE_BIT_EXT = 0x00040000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR = 0x00800000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_SHADING_RATE_IMAGE_READ_BIT_NV = 0x00800000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_KHR = 0x00200000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_ACCELERATION_STRUCTURE_WRITE_BIT_KHR = 0x00400000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_ACCELERATION_STRUCTURE_READ_BIT_NV = 0x00200000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_ACCELERATION_STRUCTURE_WRITE_BIT_NV = 0x00400000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_FRAGMENT_DENSITY_MAP_READ_BIT_EXT = 0x01000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT = 0x00080000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_DESCRIPTOR_BUFFER_READ_BIT_EXT = 0x20000000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_INVOCATION_MASK_READ_BIT_HUAWEI = 0x8000000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_SHADER_BINDING_TABLE_READ_BIT_KHR = 0x10000000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_MICROMAP_READ_BIT_EXT = 0x100000000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_MICROMAP_WRITE_BIT_EXT = 0x200000000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_OPTICAL_FLOW_READ_BIT_NV = 0x40000000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_OPTICAL_FLOW_WRITE_BIT_NV = 0x80000000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_DATA_GRAPH_READ_BIT_ARM = 0x800000000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_DATA_GRAPH_WRITE_BIT_ARM = 0x1000000000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_MEMORY_DECOMPRESSION_READ_BIT_EXT = 0x80000000000000ULL; +static const VkAccessFlagBits2 VK_ACCESS_2_MEMORY_DECOMPRESSION_WRITE_BIT_EXT = 0x100000000000000ULL; + + +typedef enum VkSubmitFlagBits { + VK_SUBMIT_PROTECTED_BIT = 0x00000001, + VK_SUBMIT_PROTECTED_BIT_KHR = VK_SUBMIT_PROTECTED_BIT, + VK_SUBMIT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSubmitFlagBits; +typedef VkFlags VkSubmitFlags; +typedef VkFlags64 VkFormatFeatureFlags2; + +// Flag bits for VkFormatFeatureFlagBits2 +typedef VkFlags64 VkFormatFeatureFlagBits2; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_BIT = 0x00000001ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_IMAGE_BIT = 0x00000002ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_IMAGE_ATOMIC_BIT = 0x00000004ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_UNIFORM_TEXEL_BUFFER_BIT = 0x00000008ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_BIT = 0x00000010ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_ATOMIC_BIT = 0x00000020ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VERTEX_BUFFER_BIT = 0x00000040ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BIT = 0x00000080ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BLEND_BIT = 0x00000100ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000200ULL; +static const VkFormatFeatureFlagBits2 
VK_FORMAT_FEATURE_2_BLIT_SRC_BIT = 0x00000400ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BLIT_DST_BIT = 0x00000800ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_LINEAR_BIT = 0x00001000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_TRANSFER_SRC_BIT = 0x00004000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_TRANSFER_DST_BIT = 0x00008000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_MINMAX_BIT = 0x00010000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_MIDPOINT_CHROMA_SAMPLES_BIT = 0x00020000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT = 0x00040000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT = 0x00080000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT = 0x00100000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT = 0x00200000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_DISJOINT_BIT = 0x00400000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COSITED_CHROMA_SAMPLES_BIT = 0x00800000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT = 0x80000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT = 0x100000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_DEPTH_COMPARISON_BIT = 0x200000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_CUBIC_BIT = 0x00002000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_HOST_IMAGE_TRANSFER_BIT = 0x400000000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VIDEO_DECODE_OUTPUT_BIT_KHR = 0x02000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VIDEO_DECODE_DPB_BIT_KHR = 0x04000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_ACCELERATION_STRUCTURE_VERTEX_BUFFER_BIT_KHR = 0x20000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_FRAGMENT_DENSITY_MAP_BIT_EXT = 0x01000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x40000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_HOST_IMAGE_TRANSFER_BIT_EXT = 0x400000000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VIDEO_ENCODE_INPUT_BIT_KHR = 0x08000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VIDEO_ENCODE_DPB_BIT_KHR = 0x10000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_BIT_KHR = 0x00000001ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_IMAGE_BIT_KHR = 0x00000002ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_IMAGE_ATOMIC_BIT_KHR = 0x00000004ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_UNIFORM_TEXEL_BUFFER_BIT_KHR = 0x00000008ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_BIT_KHR = 0x00000010ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_ATOMIC_BIT_KHR = 0x00000020ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VERTEX_BUFFER_BIT_KHR = 0x00000040ULL; +static const 
VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BIT_KHR = 0x00000080ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COLOR_ATTACHMENT_BLEND_BIT_KHR = 0x00000100ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_DEPTH_STENCIL_ATTACHMENT_BIT_KHR = 0x00000200ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BLIT_SRC_BIT_KHR = 0x00000400ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BLIT_DST_BIT_KHR = 0x00000800ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_LINEAR_BIT_KHR = 0x00001000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_TRANSFER_SRC_BIT_KHR = 0x00004000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_TRANSFER_DST_BIT_KHR = 0x00008000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_MIDPOINT_CHROMA_SAMPLES_BIT_KHR = 0x00020000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT_KHR = 0x00040000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT_KHR = 0x00080000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT_KHR = 0x00100000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT_KHR = 0x00200000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_DISJOINT_BIT_KHR = 0x00400000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COSITED_CHROMA_SAMPLES_BIT_KHR = 0x00800000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR = 0x80000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR = 0x100000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_DEPTH_COMPARISON_BIT_KHR = 0x200000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_MINMAX_BIT_KHR = 0x00010000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT = 0x00002000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_ACCELERATION_STRUCTURE_RADIUS_BUFFER_BIT_NV = 0x8000000000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_LINEAR_COLOR_ATTACHMENT_BIT_NV = 0x4000000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_WEIGHT_IMAGE_BIT_QCOM = 0x400000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_WEIGHT_SAMPLED_IMAGE_BIT_QCOM = 0x800000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BLOCK_MATCHING_BIT_QCOM = 0x1000000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_BOX_FILTER_SAMPLED_BIT_QCOM = 0x2000000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_TENSOR_SHADER_BIT_ARM = 0x8000000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_TENSOR_IMAGE_ALIASING_BIT_ARM = 0x80000000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_OPTICAL_FLOW_IMAGE_BIT_NV = 0x10000000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_OPTICAL_FLOW_VECTOR_BIT_NV = 0x20000000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_OPTICAL_FLOW_COST_BIT_NV = 0x40000000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_TENSOR_DATA_GRAPH_BIT_ARM = 0x1000000000000ULL; +static const 
VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_COPY_IMAGE_INDIRECT_DST_BIT_KHR = 0x800000000000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VIDEO_ENCODE_QUANTIZATION_DELTA_MAP_BIT_KHR = 0x2000000000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_VIDEO_ENCODE_EMPHASIS_MAP_BIT_KHR = 0x4000000000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_DEPTH_COPY_ON_COMPUTE_QUEUE_BIT_KHR = 0x10000000000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_DEPTH_COPY_ON_TRANSFER_QUEUE_BIT_KHR = 0x20000000000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STENCIL_COPY_ON_COMPUTE_QUEUE_BIT_KHR = 0x40000000000000ULL; +static const VkFormatFeatureFlagBits2 VK_FORMAT_FEATURE_2_STENCIL_COPY_ON_TRANSFER_QUEUE_BIT_KHR = 0x80000000000000ULL; + + +typedef enum VkPipelineCreationFeedbackFlagBits { + VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT = 0x00000001, + VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT = 0x00000002, + VK_PIPELINE_CREATION_FEEDBACK_BASE_PIPELINE_ACCELERATION_BIT = 0x00000004, + VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT, + VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT = VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT, + VK_PIPELINE_CREATION_FEEDBACK_BASE_PIPELINE_ACCELERATION_BIT_EXT = VK_PIPELINE_CREATION_FEEDBACK_BASE_PIPELINE_ACCELERATION_BIT, + VK_PIPELINE_CREATION_FEEDBACK_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPipelineCreationFeedbackFlagBits; +typedef VkFlags VkPipelineCreationFeedbackFlags; + +typedef enum VkRenderingFlagBits { + VK_RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS_BIT = 0x00000001, + VK_RENDERING_SUSPENDING_BIT = 0x00000002, + VK_RENDERING_RESUMING_BIT = 0x00000004, + VK_RENDERING_ENABLE_LEGACY_DITHERING_BIT_EXT = 0x00000008, + VK_RENDERING_CONTENTS_INLINE_BIT_KHR = 0x00000010, + VK_RENDERING_PER_LAYER_FRAGMENT_DENSITY_BIT_VALVE = 0x00000020, + VK_RENDERING_FRAGMENT_REGION_BIT_EXT = 0x00000040, + VK_RENDERING_CUSTOM_RESOLVE_BIT_EXT = 0x00000080, + VK_RENDERING_LOCAL_READ_CONCURRENT_ACCESS_CONTROL_BIT_KHR = 0x00000100, + VK_RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS_BIT_KHR = VK_RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS_BIT, + VK_RENDERING_SUSPENDING_BIT_KHR = VK_RENDERING_SUSPENDING_BIT, + VK_RENDERING_RESUMING_BIT_KHR = VK_RENDERING_RESUMING_BIT, + VK_RENDERING_CONTENTS_INLINE_BIT_EXT = VK_RENDERING_CONTENTS_INLINE_BIT_KHR, + VK_RENDERING_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkRenderingFlagBits; +typedef VkFlags VkRenderingFlags; +typedef struct VkPhysicalDeviceVulkan13Features { + VkStructureType sType; + void* pNext; + VkBool32 robustImageAccess; + VkBool32 inlineUniformBlock; + VkBool32 descriptorBindingInlineUniformBlockUpdateAfterBind; + VkBool32 pipelineCreationCacheControl; + VkBool32 privateData; + VkBool32 shaderDemoteToHelperInvocation; + VkBool32 shaderTerminateInvocation; + VkBool32 subgroupSizeControl; + VkBool32 computeFullSubgroups; + VkBool32 synchronization2; + VkBool32 textureCompressionASTC_HDR; + VkBool32 shaderZeroInitializeWorkgroupMemory; + VkBool32 dynamicRendering; + VkBool32 shaderIntegerDotProduct; + VkBool32 maintenance4; +} VkPhysicalDeviceVulkan13Features; + +typedef struct VkPhysicalDeviceVulkan13Properties { + VkStructureType sType; + void* pNext; + uint32_t minSubgroupSize; + uint32_t maxSubgroupSize; + uint32_t maxComputeWorkgroupSubgroups; + VkShaderStageFlags requiredSubgroupSizeStages; + uint32_t maxInlineUniformBlockSize; + uint32_t 
maxPerStageDescriptorInlineUniformBlocks; + uint32_t maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks; + uint32_t maxDescriptorSetInlineUniformBlocks; + uint32_t maxDescriptorSetUpdateAfterBindInlineUniformBlocks; + uint32_t maxInlineUniformTotalSize; + VkBool32 integerDotProduct8BitUnsignedAccelerated; + VkBool32 integerDotProduct8BitSignedAccelerated; + VkBool32 integerDotProduct8BitMixedSignednessAccelerated; + VkBool32 integerDotProduct4x8BitPackedUnsignedAccelerated; + VkBool32 integerDotProduct4x8BitPackedSignedAccelerated; + VkBool32 integerDotProduct4x8BitPackedMixedSignednessAccelerated; + VkBool32 integerDotProduct16BitUnsignedAccelerated; + VkBool32 integerDotProduct16BitSignedAccelerated; + VkBool32 integerDotProduct16BitMixedSignednessAccelerated; + VkBool32 integerDotProduct32BitUnsignedAccelerated; + VkBool32 integerDotProduct32BitSignedAccelerated; + VkBool32 integerDotProduct32BitMixedSignednessAccelerated; + VkBool32 integerDotProduct64BitUnsignedAccelerated; + VkBool32 integerDotProduct64BitSignedAccelerated; + VkBool32 integerDotProduct64BitMixedSignednessAccelerated; + VkBool32 integerDotProductAccumulatingSaturating8BitUnsignedAccelerated; + VkBool32 integerDotProductAccumulatingSaturating8BitSignedAccelerated; + VkBool32 integerDotProductAccumulatingSaturating8BitMixedSignednessAccelerated; + VkBool32 integerDotProductAccumulatingSaturating4x8BitPackedUnsignedAccelerated; + VkBool32 integerDotProductAccumulatingSaturating4x8BitPackedSignedAccelerated; + VkBool32 integerDotProductAccumulatingSaturating4x8BitPackedMixedSignednessAccelerated; + VkBool32 integerDotProductAccumulatingSaturating16BitUnsignedAccelerated; + VkBool32 integerDotProductAccumulatingSaturating16BitSignedAccelerated; + VkBool32 integerDotProductAccumulatingSaturating16BitMixedSignednessAccelerated; + VkBool32 integerDotProductAccumulatingSaturating32BitUnsignedAccelerated; + VkBool32 integerDotProductAccumulatingSaturating32BitSignedAccelerated; + VkBool32 integerDotProductAccumulatingSaturating32BitMixedSignednessAccelerated; + VkBool32 integerDotProductAccumulatingSaturating64BitUnsignedAccelerated; + VkBool32 integerDotProductAccumulatingSaturating64BitSignedAccelerated; + VkBool32 integerDotProductAccumulatingSaturating64BitMixedSignednessAccelerated; + VkDeviceSize storageTexelBufferOffsetAlignmentBytes; + VkBool32 storageTexelBufferOffsetSingleTexelAlignment; + VkDeviceSize uniformTexelBufferOffsetAlignmentBytes; + VkBool32 uniformTexelBufferOffsetSingleTexelAlignment; + VkDeviceSize maxBufferSize; +} VkPhysicalDeviceVulkan13Properties; + +typedef struct VkPhysicalDeviceToolProperties { + VkStructureType sType; + void* pNext; + char name[VK_MAX_EXTENSION_NAME_SIZE]; + char version[VK_MAX_EXTENSION_NAME_SIZE]; + VkToolPurposeFlags purposes; + char description[VK_MAX_DESCRIPTION_SIZE]; + char layer[VK_MAX_EXTENSION_NAME_SIZE]; +} VkPhysicalDeviceToolProperties; + +typedef struct VkPhysicalDevicePrivateDataFeatures { + VkStructureType sType; + void* pNext; + VkBool32 privateData; +} VkPhysicalDevicePrivateDataFeatures; + +typedef struct VkDevicePrivateDataCreateInfo { + VkStructureType sType; + const void* pNext; + uint32_t privateDataSlotRequestCount; +} VkDevicePrivateDataCreateInfo; + +typedef struct VkPrivateDataSlotCreateInfo { + VkStructureType sType; + const void* pNext; + VkPrivateDataSlotCreateFlags flags; +} VkPrivateDataSlotCreateInfo; + +typedef struct VkMemoryBarrier2 { + VkStructureType sType; + const void* pNext; + VkPipelineStageFlags2 srcStageMask; + VkAccessFlags2 
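The private data structs above pair with the `vkCreatePrivateDataSlot`, `vkSetPrivateData`, and `vkGetPrivateData` entry points declared later in this hunk. A minimal sketch of attaching a 64-bit payload to a Vulkan object (error handling omitted; `tag_image` and the literal payload are illustrative, and the handle cast assumes the usual 64-bit non-dispatchable handle definition):

```c
#include <vulkan/vulkan.h>

/* Associate a uint64_t payload with a VkImage through a private data
 * slot; objects are keyed by (object type, handle, slot). */
static uint64_t tag_image(VkDevice device, VkImage image)
{
    VkPrivateDataSlotCreateInfo slotInfo = {
        .sType = VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO,
    };
    VkPrivateDataSlot slot;
    vkCreatePrivateDataSlot(device, &slotInfo, NULL, &slot);

    vkSetPrivateData(device, VK_OBJECT_TYPE_IMAGE, (uint64_t)image, slot, 42u);

    uint64_t value = 0;
    vkGetPrivateData(device, VK_OBJECT_TYPE_IMAGE, (uint64_t)image, slot, &value);

    vkDestroyPrivateDataSlot(device, slot, NULL);
    return value; /* 42 */
}
```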
+typedef struct VkMemoryBarrier2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineStageFlags2 srcStageMask;
+    VkAccessFlags2 srcAccessMask;
+    VkPipelineStageFlags2 dstStageMask;
+    VkAccessFlags2 dstAccessMask;
+} VkMemoryBarrier2;
+
+typedef struct VkBufferMemoryBarrier2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineStageFlags2 srcStageMask;
+    VkAccessFlags2 srcAccessMask;
+    VkPipelineStageFlags2 dstStageMask;
+    VkAccessFlags2 dstAccessMask;
+    uint32_t srcQueueFamilyIndex;
+    uint32_t dstQueueFamilyIndex;
+    VkBuffer buffer;
+    VkDeviceSize offset;
+    VkDeviceSize size;
+} VkBufferMemoryBarrier2;
+
+typedef struct VkImageMemoryBarrier2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineStageFlags2 srcStageMask;
+    VkAccessFlags2 srcAccessMask;
+    VkPipelineStageFlags2 dstStageMask;
+    VkAccessFlags2 dstAccessMask;
+    VkImageLayout oldLayout;
+    VkImageLayout newLayout;
+    uint32_t srcQueueFamilyIndex;
+    uint32_t dstQueueFamilyIndex;
+    VkImage image;
+    VkImageSubresourceRange subresourceRange;
+} VkImageMemoryBarrier2;
+
+typedef struct VkDependencyInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkDependencyFlags dependencyFlags;
+    uint32_t memoryBarrierCount;
+    const VkMemoryBarrier2* pMemoryBarriers;
+    uint32_t bufferMemoryBarrierCount;
+    const VkBufferMemoryBarrier2* pBufferMemoryBarriers;
+    uint32_t imageMemoryBarrierCount;
+    const VkImageMemoryBarrier2* pImageMemoryBarriers;
+} VkDependencyInfo;
+
+typedef struct VkSemaphoreSubmitInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkSemaphore semaphore;
+    uint64_t value;
+    VkPipelineStageFlags2 stageMask;
+    uint32_t deviceIndex;
+} VkSemaphoreSubmitInfo;
+
+typedef struct VkCommandBufferSubmitInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkCommandBuffer commandBuffer;
+    uint32_t deviceMask;
+} VkCommandBufferSubmitInfo;
+
+typedef struct VkSubmitInfo2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkSubmitFlags flags;
+    uint32_t waitSemaphoreInfoCount;
+    const VkSemaphoreSubmitInfo* pWaitSemaphoreInfos;
+    uint32_t commandBufferInfoCount;
+    const VkCommandBufferSubmitInfo* pCommandBufferInfos;
+    uint32_t signalSemaphoreInfoCount;
+    const VkSemaphoreSubmitInfo* pSignalSemaphoreInfos;
+} VkSubmitInfo2;
+
+typedef struct VkPhysicalDeviceSynchronization2Features {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 synchronization2;
+} VkPhysicalDeviceSynchronization2Features;
+
+typedef struct VkBufferCopy2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkDeviceSize srcOffset;
+    VkDeviceSize dstOffset;
+    VkDeviceSize size;
+} VkBufferCopy2;
+
+typedef struct VkCopyBufferInfo2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkBuffer srcBuffer;
+    VkBuffer dstBuffer;
+    uint32_t regionCount;
+    const VkBufferCopy2* pRegions;
+} VkCopyBufferInfo2;
+
+typedef struct VkImageCopy2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkImageSubresourceLayers srcSubresource;
+    VkOffset3D srcOffset;
+    VkImageSubresourceLayers dstSubresource;
+    VkOffset3D dstOffset;
+    VkExtent3D extent;
+} VkImageCopy2;
+
+typedef struct VkCopyImageInfo2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkImage srcImage;
+    VkImageLayout srcImageLayout;
+    VkImage dstImage;
+    VkImageLayout dstImageLayout;
+    uint32_t regionCount;
+    const VkImageCopy2* pRegions;
+} VkCopyImageInfo2;
+
+typedef struct VkBufferImageCopy2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkDeviceSize bufferOffset;
+    uint32_t bufferRowLength;
+    uint32_t bufferImageHeight;
+    VkImageSubresourceLayers imageSubresource;
+    VkOffset3D imageOffset;
+    VkExtent3D imageExtent;
+} VkBufferImageCopy2;
+
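The synchronization2 barrier types above replace the separate stage/access arguments of `vkCmdPipelineBarrier` with per-barrier 64-bit stage and access masks. A minimal sketch, assuming the caller supplies a recording command buffer and a color image (`transition_to_transfer_dst` is an illustrative name):

```c
#include <vulkan/vulkan.h>

/* Transition an image to TRANSFER_DST_OPTIMAL using the 2-variant
 * barrier API: one VkImageMemoryBarrier2 wrapped in a VkDependencyInfo. */
static void transition_to_transfer_dst(VkCommandBuffer cmd, VkImage image)
{
    VkImageMemoryBarrier2 barrier = {
        .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
        .srcStageMask = VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT,
        .srcAccessMask = VK_ACCESS_2_NONE,
        .dstStageMask = VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT,
        .dstAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
        .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
        .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
        .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .image = image,
        .subresourceRange = {
            .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
            .levelCount = 1,
            .layerCount = 1,
        },
    };
    VkDependencyInfo dep = {
        .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
        .imageMemoryBarrierCount = 1,
        .pImageMemoryBarriers = &barrier,
    };
    vkCmdPipelineBarrier2(cmd, &dep);
}
```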
+typedef struct VkCopyBufferToImageInfo2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkBuffer srcBuffer;
+    VkImage dstImage;
+    VkImageLayout dstImageLayout;
+    uint32_t regionCount;
+    const VkBufferImageCopy2* pRegions;
+} VkCopyBufferToImageInfo2;
+
+typedef struct VkCopyImageToBufferInfo2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkImage srcImage;
+    VkImageLayout srcImageLayout;
+    VkBuffer dstBuffer;
+    uint32_t regionCount;
+    const VkBufferImageCopy2* pRegions;
+} VkCopyImageToBufferInfo2;
+
+typedef struct VkPhysicalDeviceTextureCompressionASTCHDRFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 textureCompressionASTC_HDR;
+} VkPhysicalDeviceTextureCompressionASTCHDRFeatures;
+
+typedef struct VkFormatProperties3 {
+    VkStructureType sType;
+    void* pNext;
+    VkFormatFeatureFlags2 linearTilingFeatures;
+    VkFormatFeatureFlags2 optimalTilingFeatures;
+    VkFormatFeatureFlags2 bufferFeatures;
+} VkFormatProperties3;
+
+typedef struct VkPhysicalDeviceMaintenance4Features {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 maintenance4;
+} VkPhysicalDeviceMaintenance4Features;
+
+typedef struct VkPhysicalDeviceMaintenance4Properties {
+    VkStructureType sType;
+    void* pNext;
+    VkDeviceSize maxBufferSize;
+} VkPhysicalDeviceMaintenance4Properties;
+
+typedef struct VkDeviceBufferMemoryRequirements {
+    VkStructureType sType;
+    const void* pNext;
+    const VkBufferCreateInfo* pCreateInfo;
+} VkDeviceBufferMemoryRequirements;
+
+typedef struct VkDeviceImageMemoryRequirements {
+    VkStructureType sType;
+    const void* pNext;
+    const VkImageCreateInfo* pCreateInfo;
+    VkImageAspectFlagBits planeAspect;
+} VkDeviceImageMemoryRequirements;
+
+typedef struct VkPipelineCreationFeedback {
+    VkPipelineCreationFeedbackFlags flags;
+    uint64_t duration;
+} VkPipelineCreationFeedback;
+
+typedef struct VkPipelineCreationFeedbackCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineCreationFeedback* pPipelineCreationFeedback;
+    uint32_t pipelineStageCreationFeedbackCount;
+    VkPipelineCreationFeedback* pPipelineStageCreationFeedbacks;
+} VkPipelineCreationFeedbackCreateInfo;
+
+typedef struct VkPhysicalDeviceShaderTerminateInvocationFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 shaderTerminateInvocation;
+} VkPhysicalDeviceShaderTerminateInvocationFeatures;
+
+typedef struct VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 shaderDemoteToHelperInvocation;
+} VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures;
+
+typedef struct VkPhysicalDevicePipelineCreationCacheControlFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 pipelineCreationCacheControl;
+} VkPhysicalDevicePipelineCreationCacheControlFeatures;
+
+typedef struct VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 shaderZeroInitializeWorkgroupMemory;
+} VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures;
+
+typedef struct VkPhysicalDeviceImageRobustnessFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 robustImageAccess;
+} VkPhysicalDeviceImageRobustnessFeatures;
+
+typedef struct VkPhysicalDeviceSubgroupSizeControlFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 subgroupSizeControl;
+    VkBool32 computeFullSubgroups;
+} VkPhysicalDeviceSubgroupSizeControlFeatures;
+
+typedef struct VkPhysicalDeviceSubgroupSizeControlProperties {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t minSubgroupSize;
+    uint32_t maxSubgroupSize;
+    uint32_t maxComputeWorkgroupSubgroups;
+    VkShaderStageFlags requiredSubgroupSizeStages;
+} VkPhysicalDeviceSubgroupSizeControlProperties;
+
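With maintenance4, `VkDeviceBufferMemoryRequirements` (above) lets a renderer query memory requirements from a `VkBufferCreateInfo` alone, without creating a throwaway buffer first. A small sketch under that assumption (`query_buffer_alloc_size` is an illustrative name):

```c
#include <vulkan/vulkan.h>

/* Return the allocation size a buffer of the given usage would need,
 * using vkGetDeviceBufferMemoryRequirements instead of a dummy buffer. */
static VkDeviceSize query_buffer_alloc_size(VkDevice device, VkDeviceSize bytes)
{
    VkBufferCreateInfo info = {
        .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
        .size = bytes,
        .usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
        .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
    };
    VkDeviceBufferMemoryRequirements query = {
        .sType = VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS,
        .pCreateInfo = &info,
    };
    VkMemoryRequirements2 reqs = {
        .sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
    };
    vkGetDeviceBufferMemoryRequirements(device, &query, &reqs);
    return reqs.memoryRequirements.size;
}
```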
+typedef struct VkPipelineShaderStageRequiredSubgroupSizeCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t requiredSubgroupSize;
+} VkPipelineShaderStageRequiredSubgroupSizeCreateInfo;
+
+typedef struct VkPhysicalDeviceInlineUniformBlockFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 inlineUniformBlock;
+    VkBool32 descriptorBindingInlineUniformBlockUpdateAfterBind;
+} VkPhysicalDeviceInlineUniformBlockFeatures;
+
+typedef struct VkPhysicalDeviceInlineUniformBlockProperties {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t maxInlineUniformBlockSize;
+    uint32_t maxPerStageDescriptorInlineUniformBlocks;
+    uint32_t maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks;
+    uint32_t maxDescriptorSetInlineUniformBlocks;
+    uint32_t maxDescriptorSetUpdateAfterBindInlineUniformBlocks;
+} VkPhysicalDeviceInlineUniformBlockProperties;
+
+typedef struct VkWriteDescriptorSetInlineUniformBlock {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t dataSize;
+    const void* pData;
+} VkWriteDescriptorSetInlineUniformBlock;
+
+typedef struct VkDescriptorPoolInlineUniformBlockCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t maxInlineUniformBlockBindings;
+} VkDescriptorPoolInlineUniformBlockCreateInfo;
+
+typedef struct VkPhysicalDeviceShaderIntegerDotProductFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 shaderIntegerDotProduct;
+} VkPhysicalDeviceShaderIntegerDotProductFeatures;
+
+typedef struct VkPhysicalDeviceShaderIntegerDotProductProperties {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 integerDotProduct8BitUnsignedAccelerated;
+    VkBool32 integerDotProduct8BitSignedAccelerated;
+    VkBool32 integerDotProduct8BitMixedSignednessAccelerated;
+    VkBool32 integerDotProduct4x8BitPackedUnsignedAccelerated;
+    VkBool32 integerDotProduct4x8BitPackedSignedAccelerated;
+    VkBool32 integerDotProduct4x8BitPackedMixedSignednessAccelerated;
+    VkBool32 integerDotProduct16BitUnsignedAccelerated;
+    VkBool32 integerDotProduct16BitSignedAccelerated;
+    VkBool32 integerDotProduct16BitMixedSignednessAccelerated;
+    VkBool32 integerDotProduct32BitUnsignedAccelerated;
+    VkBool32 integerDotProduct32BitSignedAccelerated;
+    VkBool32 integerDotProduct32BitMixedSignednessAccelerated;
+    VkBool32 integerDotProduct64BitUnsignedAccelerated;
+    VkBool32 integerDotProduct64BitSignedAccelerated;
+    VkBool32 integerDotProduct64BitMixedSignednessAccelerated;
+    VkBool32 integerDotProductAccumulatingSaturating8BitUnsignedAccelerated;
+    VkBool32 integerDotProductAccumulatingSaturating8BitSignedAccelerated;
+    VkBool32 integerDotProductAccumulatingSaturating8BitMixedSignednessAccelerated;
+    VkBool32 integerDotProductAccumulatingSaturating4x8BitPackedUnsignedAccelerated;
+    VkBool32 integerDotProductAccumulatingSaturating4x8BitPackedSignedAccelerated;
+    VkBool32 integerDotProductAccumulatingSaturating4x8BitPackedMixedSignednessAccelerated;
+    VkBool32 integerDotProductAccumulatingSaturating16BitUnsignedAccelerated;
+    VkBool32 integerDotProductAccumulatingSaturating16BitSignedAccelerated;
+    VkBool32 integerDotProductAccumulatingSaturating16BitMixedSignednessAccelerated;
+    VkBool32 integerDotProductAccumulatingSaturating32BitUnsignedAccelerated;
+    VkBool32 integerDotProductAccumulatingSaturating32BitSignedAccelerated;
+    VkBool32 integerDotProductAccumulatingSaturating32BitMixedSignednessAccelerated;
+    VkBool32 integerDotProductAccumulatingSaturating64BitUnsignedAccelerated;
+    VkBool32 integerDotProductAccumulatingSaturating64BitSignedAccelerated;
+    VkBool32 integerDotProductAccumulatingSaturating64BitMixedSignednessAccelerated;
+} VkPhysicalDeviceShaderIntegerDotProductProperties;
+
+typedef struct VkPhysicalDeviceTexelBufferAlignmentProperties {
+    VkStructureType sType;
+    void* pNext;
+    VkDeviceSize storageTexelBufferOffsetAlignmentBytes;
+    VkBool32 storageTexelBufferOffsetSingleTexelAlignment;
+    VkDeviceSize uniformTexelBufferOffsetAlignmentBytes;
+    VkBool32 uniformTexelBufferOffsetSingleTexelAlignment;
+} VkPhysicalDeviceTexelBufferAlignmentProperties;
+
+typedef struct VkImageBlit2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkImageSubresourceLayers srcSubresource;
+    VkOffset3D srcOffsets[2];
+    VkImageSubresourceLayers dstSubresource;
+    VkOffset3D dstOffsets[2];
+} VkImageBlit2;
+
+typedef struct VkBlitImageInfo2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkImage srcImage;
+    VkImageLayout srcImageLayout;
+    VkImage dstImage;
+    VkImageLayout dstImageLayout;
+    uint32_t regionCount;
+    const VkImageBlit2* pRegions;
+    VkFilter filter;
+} VkBlitImageInfo2;
+
+typedef struct VkImageResolve2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkImageSubresourceLayers srcSubresource;
+    VkOffset3D srcOffset;
+    VkImageSubresourceLayers dstSubresource;
+    VkOffset3D dstOffset;
+    VkExtent3D extent;
+} VkImageResolve2;
+
+typedef struct VkResolveImageInfo2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkImage srcImage;
+    VkImageLayout srcImageLayout;
+    VkImage dstImage;
+    VkImageLayout dstImageLayout;
+    uint32_t regionCount;
+    const VkImageResolve2* pRegions;
+} VkResolveImageInfo2;
+
+typedef struct VkRenderingAttachmentInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkImageView imageView;
+    VkImageLayout imageLayout;
+    VkResolveModeFlagBits resolveMode;
+    VkImageView resolveImageView;
+    VkImageLayout resolveImageLayout;
+    VkAttachmentLoadOp loadOp;
+    VkAttachmentStoreOp storeOp;
+    VkClearValue clearValue;
+} VkRenderingAttachmentInfo;
+
+typedef struct VkRenderingInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkRenderingFlags flags;
+    VkRect2D renderArea;
+    uint32_t layerCount;
+    uint32_t viewMask;
+    uint32_t colorAttachmentCount;
+    const VkRenderingAttachmentInfo* pColorAttachments;
+    const VkRenderingAttachmentInfo* pDepthAttachment;
+    const VkRenderingAttachmentInfo* pStencilAttachment;
+} VkRenderingInfo;
+
+typedef struct VkPipelineRenderingCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t viewMask;
+    uint32_t colorAttachmentCount;
+    const VkFormat* pColorAttachmentFormats;
+    VkFormat depthAttachmentFormat;
+    VkFormat stencilAttachmentFormat;
+} VkPipelineRenderingCreateInfo;
+
+typedef struct VkPhysicalDeviceDynamicRenderingFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 dynamicRendering;
+} VkPhysicalDeviceDynamicRenderingFeatures;
+
+typedef struct VkCommandBufferInheritanceRenderingInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkRenderingFlags flags;
+    uint32_t viewMask;
+    uint32_t colorAttachmentCount;
+    const VkFormat* pColorAttachmentFormats;
+    VkFormat depthAttachmentFormat;
+    VkFormat stencilAttachmentFormat;
+    VkSampleCountFlagBits rasterizationSamples;
+} VkCommandBufferInheritanceRenderingInfo;
+
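The dynamic rendering structs above allow recording a pass without a VkRenderPass or VkFramebuffer: fill a `VkRenderingAttachmentInfo` per attachment plus a `VkRenderingInfo`, then bracket the draw calls with `vkCmdBeginRendering`/`vkCmdEndRendering`. A minimal sketch, assuming the image view is already in COLOR_ATTACHMENT_OPTIMAL layout (`begin_color_pass` is an illustrative name):

```c
#include <vulkan/vulkan.h>

/* Record a single-layer color-only pass that clears to opaque black. */
static void begin_color_pass(VkCommandBuffer cmd, VkImageView color, VkExtent2D extent)
{
    VkRenderingAttachmentInfo colorAtt = {
        .sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
        .imageView = color,
        .imageLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
        .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
        .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
        .clearValue = { .color = { .float32 = { 0.0f, 0.0f, 0.0f, 1.0f } } },
    };
    VkRenderingInfo rendering = {
        .sType = VK_STRUCTURE_TYPE_RENDERING_INFO,
        .renderArea = { .extent = extent },
        .layerCount = 1,
        .colorAttachmentCount = 1,
        .pColorAttachments = &colorAtt,
    };
    vkCmdBeginRendering(cmd, &rendering);
    /* ... bind pipeline and record draws here ... */
    vkCmdEndRendering(cmd);
}
```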
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceToolProperties)(VkPhysicalDevice physicalDevice, uint32_t* pToolCount, VkPhysicalDeviceToolProperties* pToolProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkCreatePrivateDataSlot)(VkDevice device, const VkPrivateDataSlotCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPrivateDataSlot* pPrivateDataSlot);
+typedef void (VKAPI_PTR *PFN_vkDestroyPrivateDataSlot)(VkDevice device, VkPrivateDataSlot privateDataSlot, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkSetPrivateData)(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t data);
+typedef void (VKAPI_PTR *PFN_vkGetPrivateData)(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t* pData);
+typedef void (VKAPI_PTR *PFN_vkCmdPipelineBarrier2)(VkCommandBuffer commandBuffer, const VkDependencyInfo* pDependencyInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdWriteTimestamp2)(VkCommandBuffer commandBuffer, VkPipelineStageFlags2 stage, VkQueryPool queryPool, uint32_t query);
+typedef VkResult (VKAPI_PTR *PFN_vkQueueSubmit2)(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2* pSubmits, VkFence fence);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyBuffer2)(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2* pCopyBufferInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyImage2)(VkCommandBuffer commandBuffer, const VkCopyImageInfo2* pCopyImageInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyBufferToImage2)(VkCommandBuffer commandBuffer, const VkCopyBufferToImageInfo2* pCopyBufferToImageInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyImageToBuffer2)(VkCommandBuffer commandBuffer, const VkCopyImageToBufferInfo2* pCopyImageToBufferInfo);
+typedef void (VKAPI_PTR *PFN_vkGetDeviceBufferMemoryRequirements)(VkDevice device, const VkDeviceBufferMemoryRequirements* pInfo, VkMemoryRequirements2* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetDeviceImageMemoryRequirements)(VkDevice device, const VkDeviceImageMemoryRequirements* pInfo, VkMemoryRequirements2* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetDeviceImageSparseMemoryRequirements)(VkDevice device, const VkDeviceImageMemoryRequirements* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkCmdSetEvent2)(VkCommandBuffer commandBuffer, VkEvent event, const VkDependencyInfo* pDependencyInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdResetEvent2)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags2 stageMask);
+typedef void (VKAPI_PTR *PFN_vkCmdWaitEvents2)(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, const VkDependencyInfo* pDependencyInfos);
+typedef void (VKAPI_PTR *PFN_vkCmdBlitImage2)(VkCommandBuffer commandBuffer, const VkBlitImageInfo2* pBlitImageInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdResolveImage2)(VkCommandBuffer commandBuffer, const VkResolveImageInfo2* pResolveImageInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdBeginRendering)(VkCommandBuffer commandBuffer, const VkRenderingInfo* pRenderingInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdEndRendering)(VkCommandBuffer commandBuffer);
+typedef void (VKAPI_PTR *PFN_vkCmdSetCullMode)(VkCommandBuffer commandBuffer, VkCullModeFlags cullMode);
+typedef void (VKAPI_PTR *PFN_vkCmdSetFrontFace)(VkCommandBuffer commandBuffer, VkFrontFace frontFace);
+typedef void (VKAPI_PTR *PFN_vkCmdSetPrimitiveTopology)(VkCommandBuffer commandBuffer, VkPrimitiveTopology primitiveTopology);
+typedef void (VKAPI_PTR *PFN_vkCmdSetViewportWithCount)(VkCommandBuffer commandBuffer, uint32_t viewportCount, const VkViewport* pViewports);
+typedef void (VKAPI_PTR *PFN_vkCmdSetScissorWithCount)(VkCommandBuffer commandBuffer, uint32_t scissorCount, const VkRect2D* pScissors);
+typedef void (VKAPI_PTR *PFN_vkCmdBindVertexBuffers2)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets, const VkDeviceSize* pSizes, const VkDeviceSize* pStrides);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDepthTestEnable)(VkCommandBuffer commandBuffer, VkBool32 depthTestEnable);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDepthWriteEnable)(VkCommandBuffer commandBuffer, VkBool32 depthWriteEnable);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDepthCompareOp)(VkCommandBuffer commandBuffer, VkCompareOp depthCompareOp);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBoundsTestEnable)(VkCommandBuffer commandBuffer, VkBool32 depthBoundsTestEnable);
+typedef void (VKAPI_PTR *PFN_vkCmdSetStencilTestEnable)(VkCommandBuffer commandBuffer, VkBool32 stencilTestEnable);
+typedef void (VKAPI_PTR *PFN_vkCmdSetStencilOp)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, VkStencilOp failOp, VkStencilOp passOp, VkStencilOp depthFailOp, VkCompareOp compareOp);
+typedef void (VKAPI_PTR *PFN_vkCmdSetRasterizerDiscardEnable)(VkCommandBuffer commandBuffer, VkBool32 rasterizerDiscardEnable);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBiasEnable)(VkCommandBuffer commandBuffer, VkBool32 depthBiasEnable);
+typedef void (VKAPI_PTR *PFN_vkCmdSetPrimitiveRestartEnable)(VkCommandBuffer commandBuffer, VkBool32 primitiveRestartEnable);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceToolProperties(
+    VkPhysicalDevice physicalDevice,
+    uint32_t* pToolCount,
+    VkPhysicalDeviceToolProperties* pToolProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkCreatePrivateDataSlot(
+    VkDevice device,
+    const VkPrivateDataSlotCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkPrivateDataSlot* pPrivateDataSlot);
+
+VKAPI_ATTR void VKAPI_CALL vkDestroyPrivateDataSlot(
+    VkDevice device,
+    VkPrivateDataSlot privateDataSlot,
+    const VkAllocationCallbacks* pAllocator);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkSetPrivateData(
+    VkDevice device,
+    VkObjectType objectType,
+    uint64_t objectHandle,
+    VkPrivateDataSlot privateDataSlot,
+    uint64_t data);
+
+VKAPI_ATTR void VKAPI_CALL vkGetPrivateData(
+    VkDevice device,
+    VkObjectType objectType,
+    uint64_t objectHandle,
+    VkPrivateDataSlot privateDataSlot,
+    uint64_t* pData);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier2(
+    VkCommandBuffer commandBuffer,
+    const VkDependencyInfo* pDependencyInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp2(
+    VkCommandBuffer commandBuffer,
+    VkPipelineStageFlags2 stage,
+    VkQueryPool queryPool,
+    uint32_t query);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit2(
+    VkQueue queue,
+    uint32_t submitCount,
+    const VkSubmitInfo2* pSubmits,
+    VkFence fence);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer2(
+    VkCommandBuffer commandBuffer,
+    const VkCopyBufferInfo2* pCopyBufferInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage2(
+    VkCommandBuffer commandBuffer,
+    const VkCopyImageInfo2* pCopyImageInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage2(
+    VkCommandBuffer commandBuffer,
+    const VkCopyBufferToImageInfo2* pCopyBufferToImageInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer2(
+    VkCommandBuffer commandBuffer,
+    const VkCopyImageToBufferInfo2* pCopyImageToBufferInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceBufferMemoryRequirements(
+    VkDevice device,
+    const VkDeviceBufferMemoryRequirements* pInfo,
+    VkMemoryRequirements2* pMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceImageMemoryRequirements(
+    VkDevice device,
+    const VkDeviceImageMemoryRequirements* pInfo,
+    VkMemoryRequirements2* pMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceImageSparseMemoryRequirements(
+    VkDevice device,
+    const VkDeviceImageMemoryRequirements* pInfo,
+    uint32_t* pSparseMemoryRequirementCount,
+    VkSparseImageMemoryRequirements2* pSparseMemoryRequirements);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent2(
+    VkCommandBuffer commandBuffer,
+    VkEvent event,
+    const VkDependencyInfo* pDependencyInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent2(
+    VkCommandBuffer commandBuffer,
+    VkEvent event,
+    VkPipelineStageFlags2 stageMask);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents2(
+    VkCommandBuffer commandBuffer,
+    uint32_t eventCount,
+    const VkEvent* pEvents,
+    const VkDependencyInfo* pDependencyInfos);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage2(
+    VkCommandBuffer commandBuffer,
+    const VkBlitImageInfo2* pBlitImageInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage2(
+    VkCommandBuffer commandBuffer,
+    const VkResolveImageInfo2* pResolveImageInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBeginRendering(
+    VkCommandBuffer commandBuffer,
+    const VkRenderingInfo* pRenderingInfo);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdEndRendering(
+    VkCommandBuffer commandBuffer);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetCullMode(
+    VkCommandBuffer commandBuffer,
+    VkCullModeFlags cullMode);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetFrontFace(
+    VkCommandBuffer commandBuffer,
+    VkFrontFace frontFace);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetPrimitiveTopology(
+    VkCommandBuffer commandBuffer,
+    VkPrimitiveTopology primitiveTopology);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetViewportWithCount(
+    VkCommandBuffer commandBuffer,
+    uint32_t viewportCount,
+    const VkViewport* pViewports);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetScissorWithCount(
+    VkCommandBuffer commandBuffer,
+    uint32_t scissorCount,
+    const VkRect2D* pScissors);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers2(
+    VkCommandBuffer commandBuffer,
+    uint32_t firstBinding,
+    uint32_t bindingCount,
+    const VkBuffer* pBuffers,
+    const VkDeviceSize* pOffsets,
+    const VkDeviceSize* pSizes,
+    const VkDeviceSize* pStrides);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthTestEnable(
+    VkCommandBuffer commandBuffer,
+    VkBool32 depthTestEnable);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthWriteEnable(
+    VkCommandBuffer commandBuffer,
+    VkBool32 depthWriteEnable);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthCompareOp(
+    VkCommandBuffer commandBuffer,
+    VkCompareOp depthCompareOp);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBoundsTestEnable(
+    VkCommandBuffer commandBuffer,
+    VkBool32 depthBoundsTestEnable);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilTestEnable(
+    VkCommandBuffer commandBuffer,
+    VkBool32 stencilTestEnable);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilOp(
+    VkCommandBuffer commandBuffer,
+    VkStencilFaceFlags faceMask,
+    VkStencilOp failOp,
+    VkStencilOp passOp,
+    VkStencilOp depthFailOp,
+    VkCompareOp compareOp);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetRasterizerDiscardEnable(
+    VkCommandBuffer commandBuffer,
+    VkBool32 rasterizerDiscardEnable);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBiasEnable(
+    VkCommandBuffer commandBuffer,
+    VkBool32 depthBiasEnable);
+
+VKAPI_ATTR void VKAPI_CALL vkCmdSetPrimitiveRestartEnable(
+    VkCommandBuffer commandBuffer,
+    VkBool32 primitiveRestartEnable);
+#endif
+
+
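A renderer that compiles with `VK_NO_PROTOTYPES` (common when the loader is dlopen'd, as ioquake3 does for GL entry points) resolves these commands at runtime through the `PFN_*` typedefs above. A sketch of that pattern; the `qvk`-prefixed names mirror the project's `qgl` convention but are hypothetical here:

```c
#include <vulkan/vulkan.h>

/* Function pointers filled in once after device creation. */
static PFN_vkCmdPipelineBarrier2 qvkCmdPipelineBarrier2;
static PFN_vkQueueSubmit2        qvkQueueSubmit2;
static PFN_vkCmdBeginRendering   qvkCmdBeginRendering;
static PFN_vkCmdEndRendering     qvkCmdEndRendering;

static void load_vk13_commands(VkDevice device)
{
    qvkCmdPipelineBarrier2 = (PFN_vkCmdPipelineBarrier2)
        vkGetDeviceProcAddr(device, "vkCmdPipelineBarrier2");
    qvkQueueSubmit2 = (PFN_vkQueueSubmit2)
        vkGetDeviceProcAddr(device, "vkQueueSubmit2");
    qvkCmdBeginRendering = (PFN_vkCmdBeginRendering)
        vkGetDeviceProcAddr(device, "vkCmdBeginRendering");
    qvkCmdEndRendering = (PFN_vkCmdEndRendering)
        vkGetDeviceProcAddr(device, "vkCmdEndRendering");
}
```

Device-level addresses avoid the loader's per-call dispatch, which is why renderers usually prefer `vkGetDeviceProcAddr` over `vkGetInstanceProcAddr` for command-buffer functions.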
+// VK_VERSION_1_4 is a preprocessor guard. Do not pass it to API calls.
+#define VK_VERSION_1_4 1
+// Vulkan 1.4 version number
+#define VK_API_VERSION_1_4 VK_MAKE_API_VERSION(0, 1, 4, 0)// Patch version should always be set to 0
+
+#define VK_MAX_GLOBAL_PRIORITY_SIZE 16U
+
+typedef enum VkPipelineRobustnessBufferBehavior {
+    VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT = 0,
+    VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED = 1,
+    VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS = 2,
+    VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2 = 3,
+    VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT_EXT = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT,
+    VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED_EXT = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED,
+    VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS,
+    VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2,
+    VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_MAX_ENUM = 0x7FFFFFFF
+} VkPipelineRobustnessBufferBehavior;
+
+typedef enum VkPipelineRobustnessImageBehavior {
+    VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DEVICE_DEFAULT = 0,
+    VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DISABLED = 1,
+    VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS = 2,
+    VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_2 = 3,
+    VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DEVICE_DEFAULT_EXT = VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DEVICE_DEFAULT,
+    VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DISABLED_EXT = VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DISABLED,
+    VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_EXT = VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS,
+    VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_2_EXT = VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_2,
+    VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_MAX_ENUM = 0x7FFFFFFF
+} VkPipelineRobustnessImageBehavior;
+
+typedef enum VkQueueGlobalPriority {
+    VK_QUEUE_GLOBAL_PRIORITY_LOW = 128,
+    VK_QUEUE_GLOBAL_PRIORITY_MEDIUM = 256,
+    VK_QUEUE_GLOBAL_PRIORITY_HIGH = 512,
+    VK_QUEUE_GLOBAL_PRIORITY_REALTIME = 1024,
+    VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT = VK_QUEUE_GLOBAL_PRIORITY_LOW,
+    VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT = VK_QUEUE_GLOBAL_PRIORITY_MEDIUM,
+    VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT = VK_QUEUE_GLOBAL_PRIORITY_HIGH,
+    VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT = VK_QUEUE_GLOBAL_PRIORITY_REALTIME,
+    VK_QUEUE_GLOBAL_PRIORITY_LOW_KHR = VK_QUEUE_GLOBAL_PRIORITY_LOW,
+    VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR = VK_QUEUE_GLOBAL_PRIORITY_MEDIUM,
+    VK_QUEUE_GLOBAL_PRIORITY_HIGH_KHR = VK_QUEUE_GLOBAL_PRIORITY_HIGH,
+    VK_QUEUE_GLOBAL_PRIORITY_REALTIME_KHR = VK_QUEUE_GLOBAL_PRIORITY_REALTIME,
+    VK_QUEUE_GLOBAL_PRIORITY_MAX_ENUM = 0x7FFFFFFF
+} VkQueueGlobalPriority;
+
+typedef enum VkLineRasterizationMode {
+    VK_LINE_RASTERIZATION_MODE_DEFAULT = 0,
+    VK_LINE_RASTERIZATION_MODE_RECTANGULAR = 1,
+    VK_LINE_RASTERIZATION_MODE_BRESENHAM = 2,
+    VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH = 3,
+    VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT = VK_LINE_RASTERIZATION_MODE_DEFAULT,
+    VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT = VK_LINE_RASTERIZATION_MODE_RECTANGULAR,
+    VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT = VK_LINE_RASTERIZATION_MODE_BRESENHAM,
+    VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT = VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH,
+    VK_LINE_RASTERIZATION_MODE_DEFAULT_KHR = VK_LINE_RASTERIZATION_MODE_DEFAULT,
+    VK_LINE_RASTERIZATION_MODE_RECTANGULAR_KHR = VK_LINE_RASTERIZATION_MODE_RECTANGULAR,
+    VK_LINE_RASTERIZATION_MODE_BRESENHAM_KHR = VK_LINE_RASTERIZATION_MODE_BRESENHAM,
+    VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_KHR = VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH,
+    VK_LINE_RASTERIZATION_MODE_MAX_ENUM = 0x7FFFFFFF
+} VkLineRasterizationMode;
+
+typedef enum VkMemoryUnmapFlagBits {
+    VK_MEMORY_UNMAP_RESERVE_BIT_EXT = 0x00000001,
+    VK_MEMORY_UNMAP_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkMemoryUnmapFlagBits;
+typedef VkFlags VkMemoryUnmapFlags;
+typedef VkFlags64 VkBufferUsageFlags2;
+
+// Flag bits for VkBufferUsageFlagBits2
+typedef VkFlags64 VkBufferUsageFlagBits2;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_TRANSFER_SRC_BIT = 0x00000001ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_TRANSFER_DST_BIT = 0x00000002ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_UNIFORM_TEXEL_BUFFER_BIT = 0x00000004ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_STORAGE_TEXEL_BUFFER_BIT = 0x00000008ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_UNIFORM_BUFFER_BIT = 0x00000010ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_STORAGE_BUFFER_BIT = 0x00000020ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_INDEX_BUFFER_BIT = 0x00000040ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_VERTEX_BUFFER_BIT = 0x00000080ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_INDIRECT_BUFFER_BIT = 0x00000100ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_SHADER_DEVICE_ADDRESS_BIT = 0x00020000ULL;
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_EXECUTION_GRAPH_SCRATCH_BIT_AMDX = 0x02000000ULL;
+#endif
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_DESCRIPTOR_HEAP_BIT_EXT = 0x10000000ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_TRANSFER_SRC_BIT_KHR = 0x00000001ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_TRANSFER_DST_BIT_KHR = 0x00000002ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_UNIFORM_TEXEL_BUFFER_BIT_KHR = 0x00000004ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_STORAGE_TEXEL_BUFFER_BIT_KHR = 0x00000008ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_UNIFORM_BUFFER_BIT_KHR = 0x00000010ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_STORAGE_BUFFER_BIT_KHR = 0x00000020ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_INDEX_BUFFER_BIT_KHR = 0x00000040ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_VERTEX_BUFFER_BIT_KHR = 0x00000080ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_INDIRECT_BUFFER_BIT_KHR = 0x00000100ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_CONDITIONAL_RENDERING_BIT_EXT = 0x00000200ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_SHADER_BINDING_TABLE_BIT_KHR = 0x00000400ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_RAY_TRACING_BIT_NV = 0x00000400ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT = 0x00000800ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT = 0x00001000ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_VIDEO_DECODE_SRC_BIT_KHR = 0x00002000ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_VIDEO_DECODE_DST_BIT_KHR = 0x00004000ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_VIDEO_ENCODE_DST_BIT_KHR = 0x00008000ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_VIDEO_ENCODE_SRC_BIT_KHR = 0x00010000ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_SHADER_DEVICE_ADDRESS_BIT_KHR = 0x00020000ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR = 0x00080000ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR = 0x00100000ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_SAMPLER_DESCRIPTOR_BUFFER_BIT_EXT = 0x00200000ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_RESOURCE_DESCRIPTOR_BUFFER_BIT_EXT = 0x00400000ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_PUSH_DESCRIPTORS_DESCRIPTOR_BUFFER_BIT_EXT = 0x04000000ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_MICROMAP_BUILD_INPUT_READ_ONLY_BIT_EXT = 0x00800000ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_MICROMAP_STORAGE_BIT_EXT = 0x01000000ULL;
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_COMPRESSED_DATA_DGF1_BIT_AMDX = 0x200000000ULL;
+#endif
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_DATA_GRAPH_FOREIGN_DESCRIPTOR_BIT_ARM = 0x20000000ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_TILE_MEMORY_BIT_QCOM = 0x08000000ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_MEMORY_DECOMPRESSION_BIT_EXT = 0x100000000ULL;
+static const VkBufferUsageFlagBits2 VK_BUFFER_USAGE_2_PREPROCESS_BUFFER_BIT_EXT = 0x80000000ULL;
+
+
+typedef enum VkHostImageCopyFlagBits {
+    VK_HOST_IMAGE_COPY_MEMCPY_BIT = 0x00000001,
+    // VK_HOST_IMAGE_COPY_MEMCPY is a legacy alias
+    VK_HOST_IMAGE_COPY_MEMCPY = VK_HOST_IMAGE_COPY_MEMCPY_BIT,
+    VK_HOST_IMAGE_COPY_MEMCPY_BIT_EXT = VK_HOST_IMAGE_COPY_MEMCPY_BIT,
+    // VK_HOST_IMAGE_COPY_MEMCPY_EXT is a legacy alias
+    VK_HOST_IMAGE_COPY_MEMCPY_EXT = VK_HOST_IMAGE_COPY_MEMCPY_BIT,
+    VK_HOST_IMAGE_COPY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkHostImageCopyFlagBits;
+typedef VkFlags VkHostImageCopyFlags;
+typedef VkFlags64 VkPipelineCreateFlags2;
+
+// Flag bits for VkPipelineCreateFlagBits2
+typedef VkFlags64 VkPipelineCreateFlagBits2;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_DISABLE_OPTIMIZATION_BIT = 0x00000001ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_ALLOW_DERIVATIVES_BIT = 0x00000002ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_DERIVATIVE_BIT = 0x00000004ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_VIEW_INDEX_FROM_DEVICE_INDEX_BIT = 0x00000008ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_DISPATCH_BASE_BIT = 0x00000010ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT = 0x00000100ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_EARLY_RETURN_ON_FAILURE_BIT = 0x00000200ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_NO_PROTECTED_ACCESS_BIT = 0x08000000ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_PROTECTED_ACCESS_ONLY_BIT = 0x40000000ULL;
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_EXECUTION_GRAPH_BIT_AMDX = 0x100000000ULL;
+#endif
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_DESCRIPTOR_HEAP_BIT_EXT = 0x1000000000ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_RAY_TRACING_SKIP_BUILT_IN_PRIMITIVES_BIT_KHR = 0x00001000ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_RAY_TRACING_ALLOW_SPHERES_AND_LINEAR_SWEPT_SPHERES_BIT_NV = 0x200000000ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_ENABLE_LEGACY_DITHERING_BIT_EXT = 0x400000000ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_DISABLE_OPTIMIZATION_BIT_KHR = 0x00000001ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_ALLOW_DERIVATIVES_BIT_KHR = 0x00000002ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_DERIVATIVE_BIT_KHR = 0x00000004ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHR = 0x00000008ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_DISPATCH_BASE_BIT_KHR = 0x00000010ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_DEFER_COMPILE_BIT_NV = 0x00000020ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_CAPTURE_STATISTICS_BIT_KHR = 0x00000040ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR = 0x00000080ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_KHR = 0x00000100ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_EARLY_RETURN_ON_FAILURE_BIT_KHR = 0x00000200ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_LINK_TIME_OPTIMIZATION_BIT_EXT = 0x00000400ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT = 0x00800000ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_LIBRARY_BIT_KHR = 0x00000800ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR = 0x00001000ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_RAY_TRACING_SKIP_AABBS_BIT_KHR = 0x00002000ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR = 0x00004000ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR = 0x00008000ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR = 0x00010000ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR = 0x00020000ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR = 0x00080000ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_INDIRECT_BINDABLE_BIT_NV = 0x00040000ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_RAY_TRACING_ALLOW_MOTION_BIT_NV = 0x00100000ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x00200000ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_RENDERING_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT = 0x00400000ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_RAY_TRACING_OPACITY_MICROMAP_BIT_EXT = 0x01000000ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_COLOR_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT = 0x02000000ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_DEPTH_STENCIL_ATTACHMENT_FEEDBACK_LOOP_BIT_EXT = 0x04000000ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_NO_PROTECTED_ACCESS_BIT_EXT = 0x08000000ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_PROTECTED_ACCESS_ONLY_BIT_EXT = 0x40000000ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_RAY_TRACING_DISPLACEMENT_MICROMAP_BIT_NV = 0x10000000ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_DESCRIPTOR_BUFFER_BIT_EXT = 0x20000000ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_DISALLOW_OPACITY_MICROMAP_BIT_ARM = 0x2000000000ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_CAPTURE_DATA_BIT_KHR = 0x80000000ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_INDIRECT_BINDABLE_BIT_EXT = 0x4000000000ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_PER_LAYER_FRAGMENT_DENSITY_BIT_VALVE = 0x10000000000ULL;
+static const VkPipelineCreateFlagBits2 VK_PIPELINE_CREATE_2_64_BIT_INDEXING_BIT_EXT = 0x80000000000ULL;
+
+typedef struct VkPhysicalDeviceVulkan14Features {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 globalPriorityQuery;
+    VkBool32 shaderSubgroupRotate;
+    VkBool32 shaderSubgroupRotateClustered;
+    VkBool32 shaderFloatControls2;
+    VkBool32 shaderExpectAssume;
+    VkBool32 rectangularLines;
+    VkBool32 bresenhamLines;
+    VkBool32 smoothLines;
+    VkBool32 stippledRectangularLines;
+    VkBool32 stippledBresenhamLines;
+    VkBool32 stippledSmoothLines;
+    VkBool32 vertexAttributeInstanceRateDivisor;
+    VkBool32 vertexAttributeInstanceRateZeroDivisor;
+    VkBool32 indexTypeUint8;
+    VkBool32 dynamicRenderingLocalRead;
+    VkBool32 maintenance5;
+    VkBool32 maintenance6;
+    VkBool32 pipelineProtectedAccess;
+    VkBool32 pipelineRobustness;
+    VkBool32 hostImageCopy;
+    VkBool32 pushDescriptor;
+} VkPhysicalDeviceVulkan14Features;
+
+typedef struct VkPhysicalDeviceVulkan14Properties {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t lineSubPixelPrecisionBits;
+    uint32_t maxVertexAttribDivisor;
+    VkBool32 supportsNonZeroFirstInstance;
+    uint32_t maxPushDescriptors;
+    VkBool32 dynamicRenderingLocalReadDepthStencilAttachments;
+    VkBool32 dynamicRenderingLocalReadMultisampledAttachments;
+    VkBool32 earlyFragmentMultisampleCoverageAfterSampleCounting;
+    VkBool32 earlyFragmentSampleMaskTestBeforeSampleCounting;
+    VkBool32 depthStencilSwizzleOneSupport;
+    VkBool32 polygonModePointSize;
+    VkBool32 nonStrictSinglePixelWideLinesUseParallelogram;
+    VkBool32 nonStrictWideLinesUseParallelogram;
+    VkBool32 blockTexelViewCompatibleMultipleLayers;
+    uint32_t maxCombinedImageSamplerDescriptorCount;
+    VkBool32 fragmentShadingRateClampCombinerInputs;
+    VkPipelineRobustnessBufferBehavior defaultRobustnessStorageBuffers;
+    VkPipelineRobustnessBufferBehavior defaultRobustnessUniformBuffers;
+    VkPipelineRobustnessBufferBehavior defaultRobustnessVertexInputs;
+    VkPipelineRobustnessImageBehavior defaultRobustnessImages;
+    uint32_t copySrcLayoutCount;
+    VkImageLayout* pCopySrcLayouts;
+    uint32_t copyDstLayoutCount;
+    VkImageLayout* pCopyDstLayouts;
+    uint8_t optimalTilingLayoutUUID[VK_UUID_SIZE];
+    VkBool32 identicalMemoryTypeRequirements;
+} VkPhysicalDeviceVulkan14Properties;
+
+typedef struct VkDeviceQueueGlobalPriorityCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkQueueGlobalPriority globalPriority;
+} VkDeviceQueueGlobalPriorityCreateInfo;
+
+typedef struct VkPhysicalDeviceGlobalPriorityQueryFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 globalPriorityQuery;
+} VkPhysicalDeviceGlobalPriorityQueryFeatures;
+
+typedef struct VkQueueFamilyGlobalPriorityProperties {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t priorityCount;
+    VkQueueGlobalPriority priorities[VK_MAX_GLOBAL_PRIORITY_SIZE];
+} VkQueueFamilyGlobalPriorityProperties;
+
+typedef struct VkPhysicalDeviceIndexTypeUint8Features {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 indexTypeUint8;
+} VkPhysicalDeviceIndexTypeUint8Features;
+
+typedef
struct VkMemoryMapInfo { + VkStructureType sType; + const void* pNext; + VkMemoryMapFlags flags; + VkDeviceMemory memory; + VkDeviceSize offset; + VkDeviceSize size; +} VkMemoryMapInfo; + +typedef struct VkMemoryUnmapInfo { + VkStructureType sType; + const void* pNext; + VkMemoryUnmapFlags flags; + VkDeviceMemory memory; +} VkMemoryUnmapInfo; + +typedef struct VkPhysicalDeviceMaintenance5Features { + VkStructureType sType; + void* pNext; + VkBool32 maintenance5; +} VkPhysicalDeviceMaintenance5Features; + +typedef struct VkPhysicalDeviceMaintenance5Properties { + VkStructureType sType; + void* pNext; + VkBool32 earlyFragmentMultisampleCoverageAfterSampleCounting; + VkBool32 earlyFragmentSampleMaskTestBeforeSampleCounting; + VkBool32 depthStencilSwizzleOneSupport; + VkBool32 polygonModePointSize; + VkBool32 nonStrictSinglePixelWideLinesUseParallelogram; + VkBool32 nonStrictWideLinesUseParallelogram; +} VkPhysicalDeviceMaintenance5Properties; + +typedef struct VkImageSubresource2 { + VkStructureType sType; + void* pNext; + VkImageSubresource imageSubresource; +} VkImageSubresource2; + +typedef struct VkDeviceImageSubresourceInfo { + VkStructureType sType; + const void* pNext; + const VkImageCreateInfo* pCreateInfo; + const VkImageSubresource2* pSubresource; +} VkDeviceImageSubresourceInfo; + +typedef struct VkSubresourceLayout2 { + VkStructureType sType; + void* pNext; + VkSubresourceLayout subresourceLayout; +} VkSubresourceLayout2; + +typedef struct VkBufferUsageFlags2CreateInfo { + VkStructureType sType; + const void* pNext; + VkBufferUsageFlags2 usage; +} VkBufferUsageFlags2CreateInfo; + +typedef struct VkPhysicalDeviceMaintenance6Features { + VkStructureType sType; + void* pNext; + VkBool32 maintenance6; +} VkPhysicalDeviceMaintenance6Features; + +typedef struct VkPhysicalDeviceMaintenance6Properties { + VkStructureType sType; + void* pNext; + VkBool32 blockTexelViewCompatibleMultipleLayers; + uint32_t maxCombinedImageSamplerDescriptorCount; + VkBool32 fragmentShadingRateClampCombinerInputs; +} VkPhysicalDeviceMaintenance6Properties; + +typedef struct VkBindMemoryStatus { + VkStructureType sType; + const void* pNext; + VkResult* pResult; +} VkBindMemoryStatus; + +typedef struct VkPhysicalDeviceHostImageCopyFeatures { + VkStructureType sType; + void* pNext; + VkBool32 hostImageCopy; +} VkPhysicalDeviceHostImageCopyFeatures; + +typedef struct VkPhysicalDeviceHostImageCopyProperties { + VkStructureType sType; + void* pNext; + uint32_t copySrcLayoutCount; + VkImageLayout* pCopySrcLayouts; + uint32_t copyDstLayoutCount; + VkImageLayout* pCopyDstLayouts; + uint8_t optimalTilingLayoutUUID[VK_UUID_SIZE]; + VkBool32 identicalMemoryTypeRequirements; +} VkPhysicalDeviceHostImageCopyProperties; + +typedef struct VkMemoryToImageCopy { + VkStructureType sType; + const void* pNext; + const void* pHostPointer; + uint32_t memoryRowLength; + uint32_t memoryImageHeight; + VkImageSubresourceLayers imageSubresource; + VkOffset3D imageOffset; + VkExtent3D imageExtent; +} VkMemoryToImageCopy; + +typedef struct VkImageToMemoryCopy { + VkStructureType sType; + const void* pNext; + void* pHostPointer; + uint32_t memoryRowLength; + uint32_t memoryImageHeight; + VkImageSubresourceLayers imageSubresource; + VkOffset3D imageOffset; + VkExtent3D imageExtent; +} VkImageToMemoryCopy; + +typedef struct VkCopyMemoryToImageInfo { + VkStructureType sType; + const void* pNext; + VkHostImageCopyFlags flags; + VkImage dstImage; + VkImageLayout dstImageLayout; + uint32_t regionCount; + const VkMemoryToImageCopy* pRegions; +} 
VkCopyMemoryToImageInfo; + +typedef struct VkCopyImageToMemoryInfo { + VkStructureType sType; + const void* pNext; + VkHostImageCopyFlags flags; + VkImage srcImage; + VkImageLayout srcImageLayout; + uint32_t regionCount; + const VkImageToMemoryCopy* pRegions; +} VkCopyImageToMemoryInfo; + +typedef struct VkCopyImageToImageInfo { + VkStructureType sType; + const void* pNext; + VkHostImageCopyFlags flags; + VkImage srcImage; + VkImageLayout srcImageLayout; + VkImage dstImage; + VkImageLayout dstImageLayout; + uint32_t regionCount; + const VkImageCopy2* pRegions; +} VkCopyImageToImageInfo; + +typedef struct VkHostImageLayoutTransitionInfo { + VkStructureType sType; + const void* pNext; + VkImage image; + VkImageLayout oldLayout; + VkImageLayout newLayout; + VkImageSubresourceRange subresourceRange; +} VkHostImageLayoutTransitionInfo; + +typedef struct VkSubresourceHostMemcpySize { + VkStructureType sType; + void* pNext; + VkDeviceSize size; +} VkSubresourceHostMemcpySize; + +typedef struct VkHostImageCopyDevicePerformanceQuery { + VkStructureType sType; + void* pNext; + VkBool32 optimalDeviceAccess; + VkBool32 identicalMemoryLayout; +} VkHostImageCopyDevicePerformanceQuery; + +typedef struct VkPhysicalDeviceShaderSubgroupRotateFeatures { + VkStructureType sType; + void* pNext; + VkBool32 shaderSubgroupRotate; + VkBool32 shaderSubgroupRotateClustered; +} VkPhysicalDeviceShaderSubgroupRotateFeatures; + +typedef struct VkPhysicalDeviceShaderFloatControls2Features { + VkStructureType sType; + void* pNext; + VkBool32 shaderFloatControls2; +} VkPhysicalDeviceShaderFloatControls2Features; + +typedef struct VkPhysicalDeviceShaderExpectAssumeFeatures { + VkStructureType sType; + void* pNext; + VkBool32 shaderExpectAssume; +} VkPhysicalDeviceShaderExpectAssumeFeatures; + +typedef struct VkPipelineCreateFlags2CreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineCreateFlags2 flags; +} VkPipelineCreateFlags2CreateInfo; + +typedef struct VkPhysicalDevicePushDescriptorProperties { + VkStructureType sType; + void* pNext; + uint32_t maxPushDescriptors; +} VkPhysicalDevicePushDescriptorProperties; + +typedef struct VkBindDescriptorSetsInfo { + VkStructureType sType; + const void* pNext; + VkShaderStageFlags stageFlags; + VkPipelineLayout layout; + uint32_t firstSet; + uint32_t descriptorSetCount; + const VkDescriptorSet* pDescriptorSets; + uint32_t dynamicOffsetCount; + const uint32_t* pDynamicOffsets; +} VkBindDescriptorSetsInfo; + +typedef struct VkPushConstantsInfo { + VkStructureType sType; + const void* pNext; + VkPipelineLayout layout; + VkShaderStageFlags stageFlags; + uint32_t offset; + uint32_t size; + const void* pValues; +} VkPushConstantsInfo; + +typedef struct VkPushDescriptorSetInfo { + VkStructureType sType; + const void* pNext; + VkShaderStageFlags stageFlags; + VkPipelineLayout layout; + uint32_t set; + uint32_t descriptorWriteCount; + const VkWriteDescriptorSet* pDescriptorWrites; +} VkPushDescriptorSetInfo; + +typedef struct VkPushDescriptorSetWithTemplateInfo { + VkStructureType sType; + const void* pNext; + VkDescriptorUpdateTemplate descriptorUpdateTemplate; + VkPipelineLayout layout; + uint32_t set; + const void* pData; +} VkPushDescriptorSetWithTemplateInfo; + +typedef struct VkPhysicalDevicePipelineProtectedAccessFeatures { + VkStructureType sType; + void* pNext; + VkBool32 pipelineProtectedAccess; +} VkPhysicalDevicePipelineProtectedAccessFeatures; + +typedef struct VkPhysicalDevicePipelineRobustnessFeatures { + VkStructureType sType; + void* pNext; + VkBool32 
pipelineRobustness; +} VkPhysicalDevicePipelineRobustnessFeatures; + +typedef struct VkPhysicalDevicePipelineRobustnessProperties { + VkStructureType sType; + void* pNext; + VkPipelineRobustnessBufferBehavior defaultRobustnessStorageBuffers; + VkPipelineRobustnessBufferBehavior defaultRobustnessUniformBuffers; + VkPipelineRobustnessBufferBehavior defaultRobustnessVertexInputs; + VkPipelineRobustnessImageBehavior defaultRobustnessImages; +} VkPhysicalDevicePipelineRobustnessProperties; + +typedef struct VkPipelineRobustnessCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineRobustnessBufferBehavior storageBuffers; + VkPipelineRobustnessBufferBehavior uniformBuffers; + VkPipelineRobustnessBufferBehavior vertexInputs; + VkPipelineRobustnessImageBehavior images; +} VkPipelineRobustnessCreateInfo; + +typedef struct VkPhysicalDeviceLineRasterizationFeatures { + VkStructureType sType; + void* pNext; + VkBool32 rectangularLines; + VkBool32 bresenhamLines; + VkBool32 smoothLines; + VkBool32 stippledRectangularLines; + VkBool32 stippledBresenhamLines; + VkBool32 stippledSmoothLines; +} VkPhysicalDeviceLineRasterizationFeatures; + +typedef struct VkPhysicalDeviceLineRasterizationProperties { + VkStructureType sType; + void* pNext; + uint32_t lineSubPixelPrecisionBits; +} VkPhysicalDeviceLineRasterizationProperties; + +typedef struct VkPipelineRasterizationLineStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkLineRasterizationMode lineRasterizationMode; + VkBool32 stippledLineEnable; + uint32_t lineStippleFactor; + uint16_t lineStipplePattern; +} VkPipelineRasterizationLineStateCreateInfo; + +typedef struct VkPhysicalDeviceVertexAttributeDivisorProperties { + VkStructureType sType; + void* pNext; + uint32_t maxVertexAttribDivisor; + VkBool32 supportsNonZeroFirstInstance; +} VkPhysicalDeviceVertexAttributeDivisorProperties; + +typedef struct VkVertexInputBindingDivisorDescription { + uint32_t binding; + uint32_t divisor; +} VkVertexInputBindingDivisorDescription; + +typedef struct VkPipelineVertexInputDivisorStateCreateInfo { + VkStructureType sType; + const void* pNext; + uint32_t vertexBindingDivisorCount; + const VkVertexInputBindingDivisorDescription* pVertexBindingDivisors; +} VkPipelineVertexInputDivisorStateCreateInfo; + +typedef struct VkPhysicalDeviceVertexAttributeDivisorFeatures { + VkStructureType sType; + void* pNext; + VkBool32 vertexAttributeInstanceRateDivisor; + VkBool32 vertexAttributeInstanceRateZeroDivisor; +} VkPhysicalDeviceVertexAttributeDivisorFeatures; + +typedef struct VkRenderingAreaInfo { + VkStructureType sType; + const void* pNext; + uint32_t viewMask; + uint32_t colorAttachmentCount; + const VkFormat* pColorAttachmentFormats; + VkFormat depthAttachmentFormat; + VkFormat stencilAttachmentFormat; +} VkRenderingAreaInfo; + +typedef struct VkPhysicalDeviceDynamicRenderingLocalReadFeatures { + VkStructureType sType; + void* pNext; + VkBool32 dynamicRenderingLocalRead; +} VkPhysicalDeviceDynamicRenderingLocalReadFeatures; + +typedef struct VkRenderingAttachmentLocationInfo { + VkStructureType sType; + const void* pNext; + uint32_t colorAttachmentCount; + const uint32_t* pColorAttachmentLocations; +} VkRenderingAttachmentLocationInfo; + +typedef struct VkRenderingInputAttachmentIndexInfo { + VkStructureType sType; + const void* pNext; + uint32_t colorAttachmentCount; + const uint32_t* pColorAttachmentInputIndices; + const uint32_t* pDepthInputAttachmentIndex; + const uint32_t* pStencilInputAttachmentIndex; +} 
VkRenderingInputAttachmentIndexInfo; + +typedef VkResult (VKAPI_PTR *PFN_vkMapMemory2)(VkDevice device, const VkMemoryMapInfo* pMemoryMapInfo, void** ppData); +typedef VkResult (VKAPI_PTR *PFN_vkUnmapMemory2)(VkDevice device, const VkMemoryUnmapInfo* pMemoryUnmapInfo); +typedef void (VKAPI_PTR *PFN_vkGetDeviceImageSubresourceLayout)(VkDevice device, const VkDeviceImageSubresourceInfo* pInfo, VkSubresourceLayout2* pLayout); +typedef void (VKAPI_PTR *PFN_vkGetImageSubresourceLayout2)(VkDevice device, VkImage image, const VkImageSubresource2* pSubresource, VkSubresourceLayout2* pLayout); +typedef VkResult (VKAPI_PTR *PFN_vkCopyMemoryToImage)(VkDevice device, const VkCopyMemoryToImageInfo* pCopyMemoryToImageInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCopyImageToMemory)(VkDevice device, const VkCopyImageToMemoryInfo* pCopyImageToMemoryInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCopyImageToImage)(VkDevice device, const VkCopyImageToImageInfo* pCopyImageToImageInfo); +typedef VkResult (VKAPI_PTR *PFN_vkTransitionImageLayout)(VkDevice device, uint32_t transitionCount, const VkHostImageLayoutTransitionInfo* pTransitions); +typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSet)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites); +typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetWithTemplate)(VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplate descriptorUpdateTemplate, VkPipelineLayout layout, uint32_t set, const void* pData); +typedef void (VKAPI_PTR *PFN_vkCmdBindDescriptorSets2)(VkCommandBuffer commandBuffer, const VkBindDescriptorSetsInfo* pBindDescriptorSetsInfo); +typedef void (VKAPI_PTR *PFN_vkCmdPushConstants2)(VkCommandBuffer commandBuffer, const VkPushConstantsInfo* pPushConstantsInfo); +typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSet2)(VkCommandBuffer commandBuffer, const VkPushDescriptorSetInfo* pPushDescriptorSetInfo); +typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetWithTemplate2)(VkCommandBuffer commandBuffer, const VkPushDescriptorSetWithTemplateInfo* pPushDescriptorSetWithTemplateInfo); +typedef void (VKAPI_PTR *PFN_vkCmdSetLineStipple)(VkCommandBuffer commandBuffer, uint32_t lineStippleFactor, uint16_t lineStipplePattern); +typedef void (VKAPI_PTR *PFN_vkCmdBindIndexBuffer2)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkDeviceSize size, VkIndexType indexType); +typedef void (VKAPI_PTR *PFN_vkGetRenderingAreaGranularity)(VkDevice device, const VkRenderingAreaInfo* pRenderingAreaInfo, VkExtent2D* pGranularity); +typedef void (VKAPI_PTR *PFN_vkCmdSetRenderingAttachmentLocations)(VkCommandBuffer commandBuffer, const VkRenderingAttachmentLocationInfo* pLocationInfo); +typedef void (VKAPI_PTR *PFN_vkCmdSetRenderingInputAttachmentIndices)(VkCommandBuffer commandBuffer, const VkRenderingInputAttachmentIndexInfo* pInputAttachmentIndexInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkMapMemory2( + VkDevice device, + const VkMemoryMapInfo* pMemoryMapInfo, + void** ppData); + +VKAPI_ATTR VkResult VKAPI_CALL vkUnmapMemory2( + VkDevice device, + const VkMemoryUnmapInfo* pMemoryUnmapInfo); + +VKAPI_ATTR void VKAPI_CALL vkGetDeviceImageSubresourceLayout( + VkDevice device, + const VkDeviceImageSubresourceInfo* pInfo, + VkSubresourceLayout2* pLayout); + +VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout2( + VkDevice device, + VkImage image, + const VkImageSubresource2* pSubresource, + VkSubresourceLayout2* 
pLayout); + +VKAPI_ATTR VkResult VKAPI_CALL vkCopyMemoryToImage( + VkDevice device, + const VkCopyMemoryToImageInfo* pCopyMemoryToImageInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCopyImageToMemory( + VkDevice device, + const VkCopyImageToMemoryInfo* pCopyImageToMemoryInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCopyImageToImage( + VkDevice device, + const VkCopyImageToImageInfo* pCopyImageToImageInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkTransitionImageLayout( + VkDevice device, + uint32_t transitionCount, + const VkHostImageLayoutTransitionInfo* pTransitions); + +VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSet( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipelineLayout layout, + uint32_t set, + uint32_t descriptorWriteCount, + const VkWriteDescriptorSet* pDescriptorWrites); + +VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetWithTemplate( + VkCommandBuffer commandBuffer, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + VkPipelineLayout layout, + uint32_t set, + const void* pData); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets2( + VkCommandBuffer commandBuffer, + const VkBindDescriptorSetsInfo* pBindDescriptorSetsInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants2( + VkCommandBuffer commandBuffer, + const VkPushConstantsInfo* pPushConstantsInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSet2( + VkCommandBuffer commandBuffer, + const VkPushDescriptorSetInfo* pPushDescriptorSetInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetWithTemplate2( + VkCommandBuffer commandBuffer, + const VkPushDescriptorSetWithTemplateInfo* pPushDescriptorSetWithTemplateInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetLineStipple( + VkCommandBuffer commandBuffer, + uint32_t lineStippleFactor, + uint16_t lineStipplePattern); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer2( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkDeviceSize size, + VkIndexType indexType); + +VKAPI_ATTR void VKAPI_CALL vkGetRenderingAreaGranularity( + VkDevice device, + const VkRenderingAreaInfo* pRenderingAreaInfo, + VkExtent2D* pGranularity); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetRenderingAttachmentLocations( + VkCommandBuffer commandBuffer, + const VkRenderingAttachmentLocationInfo* pLocationInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetRenderingInputAttachmentIndices( + VkCommandBuffer commandBuffer, + const VkRenderingInputAttachmentIndexInfo* pInputAttachmentIndexInfo); +#endif + + +// VK_KHR_surface is a preprocessor guard. Do not pass it to API calls. 
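+
+// Illustrative usage sketch (not part of the upstream Vulkan headers): a
+// minimal map/unmap round trip through the Vulkan 1.4 vkMapMemory2 and
+// vkUnmapMemory2 entry points declared above. The vk_sketch_* helpers are
+// hypothetical; `device` and `memory` are assumed to be a valid VkDevice and
+// a host-visible VkDeviceMemory, and prototypes are assumed to be enabled
+// (VK_NO_PROTOTYPES not defined). The VK_STRUCTURE_TYPE_MEMORY_MAP_INFO and
+// VK_STRUCTURE_TYPE_MEMORY_UNMAP_INFO values come from earlier in this header.
+static void* vk_sketch_map_whole_allocation(VkDevice device, VkDeviceMemory memory)
+{
+    VkMemoryMapInfo mapInfo = {0};
+    mapInfo.sType  = VK_STRUCTURE_TYPE_MEMORY_MAP_INFO;
+    mapInfo.memory = memory;
+    mapInfo.offset = 0;
+    mapInfo.size   = VK_WHOLE_SIZE;            // map the entire allocation
+
+    void* data = NULL;
+    if (vkMapMemory2(device, &mapInfo, &data) != VK_SUCCESS)
+        return NULL;
+    return data;
+}
+
+static void vk_sketch_unmap_allocation(VkDevice device, VkDeviceMemory memory)
+{
+    VkMemoryUnmapInfo unmapInfo = {0};
+    unmapInfo.sType  = VK_STRUCTURE_TYPE_MEMORY_UNMAP_INFO;
+    unmapInfo.memory = memory;
+    (void)vkUnmapMemory2(device, &unmapInfo); // result ignored for brevity
+}
+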
+#define VK_KHR_surface 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSurfaceKHR) +#define VK_KHR_SURFACE_SPEC_VERSION 25 +#define VK_KHR_SURFACE_EXTENSION_NAME "VK_KHR_surface" + +typedef enum VkPresentModeKHR { + VK_PRESENT_MODE_IMMEDIATE_KHR = 0, + VK_PRESENT_MODE_MAILBOX_KHR = 1, + VK_PRESENT_MODE_FIFO_KHR = 2, + VK_PRESENT_MODE_FIFO_RELAXED_KHR = 3, + VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR = 1000111000, + VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR = 1000111001, + VK_PRESENT_MODE_FIFO_LATEST_READY_KHR = 1000361000, + VK_PRESENT_MODE_FIFO_LATEST_READY_EXT = VK_PRESENT_MODE_FIFO_LATEST_READY_KHR, + VK_PRESENT_MODE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPresentModeKHR; + +typedef enum VkColorSpaceKHR { + VK_COLOR_SPACE_SRGB_NONLINEAR_KHR = 0, + VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT = 1000104001, + VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT = 1000104002, + VK_COLOR_SPACE_DISPLAY_P3_LINEAR_EXT = 1000104003, + VK_COLOR_SPACE_DCI_P3_NONLINEAR_EXT = 1000104004, + VK_COLOR_SPACE_BT709_LINEAR_EXT = 1000104005, + VK_COLOR_SPACE_BT709_NONLINEAR_EXT = 1000104006, + VK_COLOR_SPACE_BT2020_LINEAR_EXT = 1000104007, + VK_COLOR_SPACE_HDR10_ST2084_EXT = 1000104008, + // VK_COLOR_SPACE_DOLBYVISION_EXT is legacy, but no reason was given in the API XML + VK_COLOR_SPACE_DOLBYVISION_EXT = 1000104009, + VK_COLOR_SPACE_HDR10_HLG_EXT = 1000104010, + VK_COLOR_SPACE_ADOBERGB_LINEAR_EXT = 1000104011, + VK_COLOR_SPACE_ADOBERGB_NONLINEAR_EXT = 1000104012, + VK_COLOR_SPACE_PASS_THROUGH_EXT = 1000104013, + VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT = 1000104014, + VK_COLOR_SPACE_DISPLAY_NATIVE_AMD = 1000213000, + // VK_COLORSPACE_SRGB_NONLINEAR_KHR is a legacy alias + VK_COLORSPACE_SRGB_NONLINEAR_KHR = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, + // VK_COLOR_SPACE_DCI_P3_LINEAR_EXT is a legacy alias + VK_COLOR_SPACE_DCI_P3_LINEAR_EXT = VK_COLOR_SPACE_DISPLAY_P3_LINEAR_EXT, + VK_COLOR_SPACE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkColorSpaceKHR; + +typedef enum VkSurfaceTransformFlagBitsKHR { + VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR = 0x00000001, + VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR = 0x00000002, + VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR = 0x00000004, + VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR = 0x00000008, + VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR = 0x00000010, + VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR = 0x00000020, + VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR = 0x00000040, + VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR = 0x00000080, + VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR = 0x00000100, + VK_SURFACE_TRANSFORM_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkSurfaceTransformFlagBitsKHR; + +typedef enum VkCompositeAlphaFlagBitsKHR { + VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR = 0x00000001, + VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR = 0x00000002, + VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR = 0x00000004, + VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR = 0x00000008, + VK_COMPOSITE_ALPHA_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkCompositeAlphaFlagBitsKHR; +typedef VkFlags VkCompositeAlphaFlagsKHR; +typedef VkFlags VkSurfaceTransformFlagsKHR; +typedef struct VkSurfaceCapabilitiesKHR { + uint32_t minImageCount; + uint32_t maxImageCount; + VkExtent2D currentExtent; + VkExtent2D minImageExtent; + VkExtent2D maxImageExtent; + uint32_t maxImageArrayLayers; + VkSurfaceTransformFlagsKHR supportedTransforms; + VkSurfaceTransformFlagBitsKHR currentTransform; + VkCompositeAlphaFlagsKHR supportedCompositeAlpha; + VkImageUsageFlags supportedUsageFlags; +} VkSurfaceCapabilitiesKHR; + +typedef struct VkSurfaceFormatKHR { + VkFormat format; + 
VkColorSpaceKHR colorSpace; +} VkSurfaceFormatKHR; + +typedef void (VKAPI_PTR *PFN_vkDestroySurfaceKHR)(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32* pSupported); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR* pSurfaceCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pSurfaceFormatCount, VkSurfaceFormatKHR* pSurfaceFormats); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkDestroySurfaceKHR( + VkInstance instance, + VkSurfaceKHR surface, + const VkAllocationCallbacks* pAllocator); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + VkSurfaceKHR surface, + VkBool32* pSupported); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilitiesKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + VkSurfaceCapabilitiesKHR* pSurfaceCapabilities); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormatsKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + uint32_t* pSurfaceFormatCount, + VkSurfaceFormatKHR* pSurfaceFormats); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModesKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + uint32_t* pPresentModeCount, + VkPresentModeKHR* pPresentModes); +#endif +#endif + + +// VK_KHR_swapchain is a preprocessor guard. Do not pass it to API calls. 
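+
+// Illustrative usage sketch (not part of the upstream Vulkan headers): the
+// standard two-call enumeration pattern for the surface queries declared
+// above: pass NULL first to obtain the count, then fetch the data. The
+// vk_sketch_* helper is hypothetical; `physicalDevice` and `surface` are
+// assumed to be valid handles, and a supported surface is assumed to report
+// at least one format.
+static VkSurfaceFormatKHR vk_sketch_pick_surface_format(VkPhysicalDevice physicalDevice,
+                                                        VkSurfaceKHR surface)
+{
+    VkSurfaceFormatKHR formats[64];
+    uint32_t count = 0;
+    vkGetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, &count, NULL);
+    if (count > 64)
+        count = 64;  // clamped; VK_INCOMPLETE is acceptable for this sketch
+    vkGetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, &count, formats);
+
+    // Prefer an sRGB format; otherwise fall back to the first one reported.
+    for (uint32_t i = 0; i < count; ++i) {
+        if (formats[i].format == VK_FORMAT_B8G8R8A8_SRGB &&
+            formats[i].colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR)
+            return formats[i];
+    }
+    return formats[0];
+}
+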
+#define VK_KHR_swapchain 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSwapchainKHR) +#define VK_KHR_SWAPCHAIN_SPEC_VERSION 70 +#define VK_KHR_SWAPCHAIN_EXTENSION_NAME "VK_KHR_swapchain" + +typedef enum VkSwapchainCreateFlagBitsKHR { + VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR = 0x00000001, + VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR = 0x00000002, + VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR = 0x00000004, + VK_SWAPCHAIN_CREATE_PRESENT_TIMING_BIT_EXT = 0x00000200, + VK_SWAPCHAIN_CREATE_PRESENT_ID_2_BIT_KHR = 0x00000040, + VK_SWAPCHAIN_CREATE_PRESENT_WAIT_2_BIT_KHR = 0x00000080, + VK_SWAPCHAIN_CREATE_DEFERRED_MEMORY_ALLOCATION_BIT_KHR = 0x00000008, + VK_SWAPCHAIN_CREATE_DEFERRED_MEMORY_ALLOCATION_BIT_EXT = VK_SWAPCHAIN_CREATE_DEFERRED_MEMORY_ALLOCATION_BIT_KHR, + VK_SWAPCHAIN_CREATE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkSwapchainCreateFlagBitsKHR; +typedef VkFlags VkSwapchainCreateFlagsKHR; + +typedef enum VkDeviceGroupPresentModeFlagBitsKHR { + VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR = 0x00000001, + VK_DEVICE_GROUP_PRESENT_MODE_REMOTE_BIT_KHR = 0x00000002, + VK_DEVICE_GROUP_PRESENT_MODE_SUM_BIT_KHR = 0x00000004, + VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_MULTI_DEVICE_BIT_KHR = 0x00000008, + VK_DEVICE_GROUP_PRESENT_MODE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkDeviceGroupPresentModeFlagBitsKHR; +typedef VkFlags VkDeviceGroupPresentModeFlagsKHR; +typedef struct VkSwapchainCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainCreateFlagsKHR flags; + VkSurfaceKHR surface; + uint32_t minImageCount; + VkFormat imageFormat; + VkColorSpaceKHR imageColorSpace; + VkExtent2D imageExtent; + uint32_t imageArrayLayers; + VkImageUsageFlags imageUsage; + VkSharingMode imageSharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; + VkSurfaceTransformFlagBitsKHR preTransform; + VkCompositeAlphaFlagBitsKHR compositeAlpha; + VkPresentModeKHR presentMode; + VkBool32 clipped; + VkSwapchainKHR oldSwapchain; +} VkSwapchainCreateInfoKHR; + +typedef struct VkPresentInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreCount; + const VkSemaphore* pWaitSemaphores; + uint32_t swapchainCount; + const VkSwapchainKHR* pSwapchains; + const uint32_t* pImageIndices; + VkResult* pResults; +} VkPresentInfoKHR; + +typedef struct VkImageSwapchainCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainKHR swapchain; +} VkImageSwapchainCreateInfoKHR; + +typedef struct VkBindImageMemorySwapchainInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainKHR swapchain; + uint32_t imageIndex; +} VkBindImageMemorySwapchainInfoKHR; + +typedef struct VkAcquireNextImageInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainKHR swapchain; + uint64_t timeout; + VkSemaphore semaphore; + VkFence fence; + uint32_t deviceMask; +} VkAcquireNextImageInfoKHR; + +typedef struct VkDeviceGroupPresentCapabilitiesKHR { + VkStructureType sType; + void* pNext; + uint32_t presentMask[VK_MAX_DEVICE_GROUP_SIZE]; + VkDeviceGroupPresentModeFlagsKHR modes; +} VkDeviceGroupPresentCapabilitiesKHR; + +typedef struct VkDeviceGroupPresentInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t swapchainCount; + const uint32_t* pDeviceMasks; + VkDeviceGroupPresentModeFlagBitsKHR mode; +} VkDeviceGroupPresentInfoKHR; + +typedef struct VkDeviceGroupSwapchainCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeviceGroupPresentModeFlagsKHR modes; +} VkDeviceGroupSwapchainCreateInfoKHR; + +typedef VkResult (VKAPI_PTR 
*PFN_vkCreateSwapchainKHR)(VkDevice device, const VkSwapchainCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchain); +typedef void (VKAPI_PTR *PFN_vkDestroySwapchainKHR)(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainImagesKHR)(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VkImage* pSwapchainImages); +typedef VkResult (VKAPI_PTR *PFN_vkAcquireNextImageKHR)(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t* pImageIndex); +typedef VkResult (VKAPI_PTR *PFN_vkQueuePresentKHR)(VkQueue queue, const VkPresentInfoKHR* pPresentInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupPresentCapabilitiesKHR)(VkDevice device, VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupSurfacePresentModesKHR)(VkDevice device, VkSurfaceKHR surface, VkDeviceGroupPresentModeFlagsKHR* pModes); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDevicePresentRectanglesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pRectCount, VkRect2D* pRects); +typedef VkResult (VKAPI_PTR *PFN_vkAcquireNextImage2KHR)(VkDevice device, const VkAcquireNextImageInfoKHR* pAcquireInfo, uint32_t* pImageIndex); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR( + VkDevice device, + const VkSwapchainCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSwapchainKHR* pSwapchain); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkDestroySwapchainKHR( + VkDevice device, + VkSwapchainKHR swapchain, + const VkAllocationCallbacks* pAllocator); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainImagesKHR( + VkDevice device, + VkSwapchainKHR swapchain, + uint32_t* pSwapchainImageCount, + VkImage* pSwapchainImages); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR( + VkDevice device, + VkSwapchainKHR swapchain, + uint64_t timeout, + VkSemaphore semaphore, + VkFence fence, + uint32_t* pImageIndex); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR( + VkQueue queue, + const VkPresentInfoKHR* pPresentInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupPresentCapabilitiesKHR( + VkDevice device, + VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupSurfacePresentModesKHR( + VkDevice device, + VkSurfaceKHR surface, + VkDeviceGroupPresentModeFlagsKHR* pModes); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDevicePresentRectanglesKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + uint32_t* pRectCount, + VkRect2D* pRects); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImage2KHR( + VkDevice device, + const VkAcquireNextImageInfoKHR* pAcquireInfo, + uint32_t* pImageIndex); +#endif +#endif + + +// VK_KHR_display is a preprocessor guard. Do not pass it to API calls. 
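+
+// Illustrative usage sketch (not part of the upstream Vulkan headers): minimal
+// swapchain creation plus the per-frame acquire/present handshake using the
+// entry points declared above. The vk_sketch_* helpers are hypothetical;
+// `caps` and `fmt` are assumed to come from
+// vkGetPhysicalDeviceSurfaceCapabilitiesKHR and a surface format query, and
+// the queue and semaphores are assumed to be created elsewhere.
+static VkSwapchainKHR vk_sketch_create_swapchain(VkDevice device, VkSurfaceKHR surface,
+                                                 const VkSurfaceCapabilitiesKHR* caps,
+                                                 VkSurfaceFormatKHR fmt)
+{
+    VkSwapchainCreateInfoKHR ci = {0};
+    ci.sType            = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
+    ci.surface          = surface;
+    ci.minImageCount    = caps->minImageCount + 1;  // may need clamping to maxImageCount
+    ci.imageFormat      = fmt.format;
+    ci.imageColorSpace  = fmt.colorSpace;
+    ci.imageExtent      = caps->currentExtent;
+    ci.imageArrayLayers = 1;
+    ci.imageUsage       = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+    ci.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
+    ci.preTransform     = caps->currentTransform;
+    ci.compositeAlpha   = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
+    ci.presentMode      = VK_PRESENT_MODE_FIFO_KHR;  // FIFO is always supported
+    ci.clipped          = VK_TRUE;
+
+    VkSwapchainKHR swapchain = VK_NULL_HANDLE;
+    (void)vkCreateSwapchainKHR(device, &ci, NULL, &swapchain);
+    return swapchain;
+}
+
+static void vk_sketch_present_frame(VkDevice device, VkQueue queue,
+                                    VkSwapchainKHR swapchain,
+                                    VkSemaphore imageReady, VkSemaphore renderDone)
+{
+    uint32_t imageIndex = 0;
+    (void)vkAcquireNextImageKHR(device, swapchain, UINT64_MAX, imageReady,
+                                VK_NULL_HANDLE, &imageIndex);
+
+    // ... submit rendering that waits on imageReady and signals renderDone ...
+
+    VkPresentInfoKHR present = {0};
+    present.sType              = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
+    present.waitSemaphoreCount = 1;
+    present.pWaitSemaphores    = &renderDone;
+    present.swapchainCount     = 1;
+    present.pSwapchains        = &swapchain;
+    present.pImageIndices      = &imageIndex;
+    (void)vkQueuePresentKHR(queue, &present);
+}
+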
+#define VK_KHR_display 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayKHR) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayModeKHR) +#define VK_KHR_DISPLAY_SPEC_VERSION 23 +#define VK_KHR_DISPLAY_EXTENSION_NAME "VK_KHR_display" +typedef VkFlags VkDisplayModeCreateFlagsKHR; + +typedef enum VkDisplayPlaneAlphaFlagBitsKHR { + VK_DISPLAY_PLANE_ALPHA_OPAQUE_BIT_KHR = 0x00000001, + VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR = 0x00000002, + VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_BIT_KHR = 0x00000004, + VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_PREMULTIPLIED_BIT_KHR = 0x00000008, + VK_DISPLAY_PLANE_ALPHA_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkDisplayPlaneAlphaFlagBitsKHR; +typedef VkFlags VkDisplayPlaneAlphaFlagsKHR; +typedef VkFlags VkDisplaySurfaceCreateFlagsKHR; +typedef struct VkDisplayModeParametersKHR { + VkExtent2D visibleRegion; + uint32_t refreshRate; +} VkDisplayModeParametersKHR; + +typedef struct VkDisplayModeCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkDisplayModeCreateFlagsKHR flags; + VkDisplayModeParametersKHR parameters; +} VkDisplayModeCreateInfoKHR; + +typedef struct VkDisplayModePropertiesKHR { + VkDisplayModeKHR displayMode; + VkDisplayModeParametersKHR parameters; +} VkDisplayModePropertiesKHR; + +typedef struct VkDisplayPlaneCapabilitiesKHR { + VkDisplayPlaneAlphaFlagsKHR supportedAlpha; + VkOffset2D minSrcPosition; + VkOffset2D maxSrcPosition; + VkExtent2D minSrcExtent; + VkExtent2D maxSrcExtent; + VkOffset2D minDstPosition; + VkOffset2D maxDstPosition; + VkExtent2D minDstExtent; + VkExtent2D maxDstExtent; +} VkDisplayPlaneCapabilitiesKHR; + +typedef struct VkDisplayPlanePropertiesKHR { + VkDisplayKHR currentDisplay; + uint32_t currentStackIndex; +} VkDisplayPlanePropertiesKHR; + +typedef struct VkDisplayPropertiesKHR { + VkDisplayKHR display; + const char* displayName; + VkExtent2D physicalDimensions; + VkExtent2D physicalResolution; + VkSurfaceTransformFlagsKHR supportedTransforms; + VkBool32 planeReorderPossible; + VkBool32 persistentContent; +} VkDisplayPropertiesKHR; + +typedef struct VkDisplaySurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkDisplaySurfaceCreateFlagsKHR flags; + VkDisplayModeKHR displayMode; + uint32_t planeIndex; + uint32_t planeStackIndex; + VkSurfaceTransformFlagBitsKHR transform; + float globalAlpha; + VkDisplayPlaneAlphaFlagBitsKHR alphaMode; + VkExtent2D imageExtent; +} VkDisplaySurfaceCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlanePropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneSupportedDisplaysKHR)(VkPhysicalDevice physicalDevice, uint32_t planeIndex, uint32_t* pDisplayCount, VkDisplayKHR* pDisplays); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayModePropertiesKHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModePropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDisplayModeKHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, const VkDisplayModeCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDisplayModeKHR* pMode); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR* pCapabilities); 
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDisplayPlaneSurfaceKHR)(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPropertiesKHR( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkDisplayPropertiesKHR* pProperties); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPlanePropertiesKHR( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkDisplayPlanePropertiesKHR* pProperties); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneSupportedDisplaysKHR( + VkPhysicalDevice physicalDevice, + uint32_t planeIndex, + uint32_t* pDisplayCount, + VkDisplayKHR* pDisplays); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayModePropertiesKHR( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display, + uint32_t* pPropertyCount, + VkDisplayModePropertiesKHR* pProperties); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayModeKHR( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display, + const VkDisplayModeCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDisplayModeKHR* pMode); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneCapabilitiesKHR( + VkPhysicalDevice physicalDevice, + VkDisplayModeKHR mode, + uint32_t planeIndex, + VkDisplayPlaneCapabilitiesKHR* pCapabilities); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayPlaneSurfaceKHR( + VkInstance instance, + const VkDisplaySurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif +#endif + + +// VK_KHR_display_swapchain is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_display_swapchain 1 +#define VK_KHR_DISPLAY_SWAPCHAIN_SPEC_VERSION 10 +#define VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME "VK_KHR_display_swapchain" +typedef struct VkDisplayPresentInfoKHR { + VkStructureType sType; + const void* pNext; + VkRect2D srcRect; + VkRect2D dstRect; + VkBool32 persistent; +} VkDisplayPresentInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateSharedSwapchainsKHR)(VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchains); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSharedSwapchainsKHR( + VkDevice device, + uint32_t swapchainCount, + const VkSwapchainCreateInfoKHR* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkSwapchainKHR* pSwapchains); +#endif +#endif + + +// VK_KHR_sampler_mirror_clamp_to_edge is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_sampler_mirror_clamp_to_edge 1 +#define VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_SPEC_VERSION 3 +#define VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME "VK_KHR_sampler_mirror_clamp_to_edge" + + +// VK_KHR_video_queue is a preprocessor guard. Do not pass it to API calls. 
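+
+// Illustrative usage sketch (not part of the upstream Vulkan headers):
+// enumerating the displays attached to a physical device with the
+// VK_KHR_display query declared above, using the same count-then-fetch
+// pattern. The vk_sketch_* helper is hypothetical, and the fixed cap of 8
+// keeps the sketch self-contained.
+static uint32_t vk_sketch_list_displays(VkPhysicalDevice physicalDevice,
+                                        VkDisplayPropertiesKHR props[8])
+{
+    uint32_t count = 0;
+    vkGetPhysicalDeviceDisplayPropertiesKHR(physicalDevice, &count, NULL);
+    if (count > 8)
+        count = 8;  // clamped; VK_INCOMPLETE is acceptable for this sketch
+    vkGetPhysicalDeviceDisplayPropertiesKHR(physicalDevice, &count, props);
+    return count;
+}
+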
+#define VK_KHR_video_queue 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkVideoSessionKHR) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkVideoSessionParametersKHR) +#define VK_KHR_VIDEO_QUEUE_SPEC_VERSION 8 +#define VK_KHR_VIDEO_QUEUE_EXTENSION_NAME "VK_KHR_video_queue" + +typedef enum VkQueryResultStatusKHR { + VK_QUERY_RESULT_STATUS_ERROR_KHR = -1, + VK_QUERY_RESULT_STATUS_NOT_READY_KHR = 0, + VK_QUERY_RESULT_STATUS_COMPLETE_KHR = 1, + VK_QUERY_RESULT_STATUS_INSUFFICIENT_BITSTREAM_BUFFER_RANGE_KHR = -1000299000, + VK_QUERY_RESULT_STATUS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkQueryResultStatusKHR; + +typedef enum VkVideoCodecOperationFlagBitsKHR { + VK_VIDEO_CODEC_OPERATION_NONE_KHR = 0, + VK_VIDEO_CODEC_OPERATION_ENCODE_H264_BIT_KHR = 0x00010000, + VK_VIDEO_CODEC_OPERATION_ENCODE_H265_BIT_KHR = 0x00020000, + VK_VIDEO_CODEC_OPERATION_DECODE_H264_BIT_KHR = 0x00000001, + VK_VIDEO_CODEC_OPERATION_DECODE_H265_BIT_KHR = 0x00000002, + VK_VIDEO_CODEC_OPERATION_DECODE_AV1_BIT_KHR = 0x00000004, + VK_VIDEO_CODEC_OPERATION_ENCODE_AV1_BIT_KHR = 0x00040000, + VK_VIDEO_CODEC_OPERATION_DECODE_VP9_BIT_KHR = 0x00000008, + VK_VIDEO_CODEC_OPERATION_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoCodecOperationFlagBitsKHR; +typedef VkFlags VkVideoCodecOperationFlagsKHR; + +typedef enum VkVideoChromaSubsamplingFlagBitsKHR { + VK_VIDEO_CHROMA_SUBSAMPLING_INVALID_KHR = 0, + VK_VIDEO_CHROMA_SUBSAMPLING_MONOCHROME_BIT_KHR = 0x00000001, + VK_VIDEO_CHROMA_SUBSAMPLING_420_BIT_KHR = 0x00000002, + VK_VIDEO_CHROMA_SUBSAMPLING_422_BIT_KHR = 0x00000004, + VK_VIDEO_CHROMA_SUBSAMPLING_444_BIT_KHR = 0x00000008, + VK_VIDEO_CHROMA_SUBSAMPLING_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoChromaSubsamplingFlagBitsKHR; +typedef VkFlags VkVideoChromaSubsamplingFlagsKHR; + +typedef enum VkVideoComponentBitDepthFlagBitsKHR { + VK_VIDEO_COMPONENT_BIT_DEPTH_INVALID_KHR = 0, + VK_VIDEO_COMPONENT_BIT_DEPTH_8_BIT_KHR = 0x00000001, + VK_VIDEO_COMPONENT_BIT_DEPTH_10_BIT_KHR = 0x00000004, + VK_VIDEO_COMPONENT_BIT_DEPTH_12_BIT_KHR = 0x00000010, + VK_VIDEO_COMPONENT_BIT_DEPTH_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoComponentBitDepthFlagBitsKHR; +typedef VkFlags VkVideoComponentBitDepthFlagsKHR; + +typedef enum VkVideoCapabilityFlagBitsKHR { + VK_VIDEO_CAPABILITY_PROTECTED_CONTENT_BIT_KHR = 0x00000001, + VK_VIDEO_CAPABILITY_SEPARATE_REFERENCE_IMAGES_BIT_KHR = 0x00000002, + VK_VIDEO_CAPABILITY_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoCapabilityFlagBitsKHR; +typedef VkFlags VkVideoCapabilityFlagsKHR; + +typedef enum VkVideoSessionCreateFlagBitsKHR { + VK_VIDEO_SESSION_CREATE_PROTECTED_CONTENT_BIT_KHR = 0x00000001, + VK_VIDEO_SESSION_CREATE_ALLOW_ENCODE_PARAMETER_OPTIMIZATIONS_BIT_KHR = 0x00000002, + VK_VIDEO_SESSION_CREATE_INLINE_QUERIES_BIT_KHR = 0x00000004, + VK_VIDEO_SESSION_CREATE_ALLOW_ENCODE_QUANTIZATION_DELTA_MAP_BIT_KHR = 0x00000008, + VK_VIDEO_SESSION_CREATE_ALLOW_ENCODE_EMPHASIS_MAP_BIT_KHR = 0x00000010, + VK_VIDEO_SESSION_CREATE_INLINE_SESSION_PARAMETERS_BIT_KHR = 0x00000020, + VK_VIDEO_SESSION_CREATE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoSessionCreateFlagBitsKHR; +typedef VkFlags VkVideoSessionCreateFlagsKHR; + +typedef enum VkVideoSessionParametersCreateFlagBitsKHR { + VK_VIDEO_SESSION_PARAMETERS_CREATE_QUANTIZATION_MAP_COMPATIBLE_BIT_KHR = 0x00000001, + VK_VIDEO_SESSION_PARAMETERS_CREATE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoSessionParametersCreateFlagBitsKHR; +typedef VkFlags VkVideoSessionParametersCreateFlagsKHR; +typedef VkFlags VkVideoBeginCodingFlagsKHR; +typedef VkFlags VkVideoEndCodingFlagsKHR; + +typedef enum 
VkVideoCodingControlFlagBitsKHR { + VK_VIDEO_CODING_CONTROL_RESET_BIT_KHR = 0x00000001, + VK_VIDEO_CODING_CONTROL_ENCODE_RATE_CONTROL_BIT_KHR = 0x00000002, + VK_VIDEO_CODING_CONTROL_ENCODE_QUALITY_LEVEL_BIT_KHR = 0x00000004, + VK_VIDEO_CODING_CONTROL_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoCodingControlFlagBitsKHR; +typedef VkFlags VkVideoCodingControlFlagsKHR; +typedef struct VkQueueFamilyQueryResultStatusPropertiesKHR { + VkStructureType sType; + void* pNext; + VkBool32 queryResultStatusSupport; +} VkQueueFamilyQueryResultStatusPropertiesKHR; + +typedef struct VkQueueFamilyVideoPropertiesKHR { + VkStructureType sType; + void* pNext; + VkVideoCodecOperationFlagsKHR videoCodecOperations; +} VkQueueFamilyVideoPropertiesKHR; + +typedef struct VkVideoProfileInfoKHR { + VkStructureType sType; + const void* pNext; + VkVideoCodecOperationFlagBitsKHR videoCodecOperation; + VkVideoChromaSubsamplingFlagsKHR chromaSubsampling; + VkVideoComponentBitDepthFlagsKHR lumaBitDepth; + VkVideoComponentBitDepthFlagsKHR chromaBitDepth; +} VkVideoProfileInfoKHR; + +typedef struct VkVideoProfileListInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t profileCount; + const VkVideoProfileInfoKHR* pProfiles; +} VkVideoProfileListInfoKHR; + +typedef struct VkVideoCapabilitiesKHR { + VkStructureType sType; + void* pNext; + VkVideoCapabilityFlagsKHR flags; + VkDeviceSize minBitstreamBufferOffsetAlignment; + VkDeviceSize minBitstreamBufferSizeAlignment; + VkExtent2D pictureAccessGranularity; + VkExtent2D minCodedExtent; + VkExtent2D maxCodedExtent; + uint32_t maxDpbSlots; + uint32_t maxActiveReferencePictures; + VkExtensionProperties stdHeaderVersion; +} VkVideoCapabilitiesKHR; + +typedef struct VkPhysicalDeviceVideoFormatInfoKHR { + VkStructureType sType; + const void* pNext; + VkImageUsageFlags imageUsage; +} VkPhysicalDeviceVideoFormatInfoKHR; + +typedef struct VkVideoFormatPropertiesKHR { + VkStructureType sType; + void* pNext; + VkFormat format; + VkComponentMapping componentMapping; + VkImageCreateFlags imageCreateFlags; + VkImageType imageType; + VkImageTiling imageTiling; + VkImageUsageFlags imageUsageFlags; +} VkVideoFormatPropertiesKHR; + +typedef struct VkVideoPictureResourceInfoKHR { + VkStructureType sType; + const void* pNext; + VkOffset2D codedOffset; + VkExtent2D codedExtent; + uint32_t baseArrayLayer; + VkImageView imageViewBinding; +} VkVideoPictureResourceInfoKHR; + +typedef struct VkVideoReferenceSlotInfoKHR { + VkStructureType sType; + const void* pNext; + int32_t slotIndex; + const VkVideoPictureResourceInfoKHR* pPictureResource; +} VkVideoReferenceSlotInfoKHR; + +typedef struct VkVideoSessionMemoryRequirementsKHR { + VkStructureType sType; + void* pNext; + uint32_t memoryBindIndex; + VkMemoryRequirements memoryRequirements; +} VkVideoSessionMemoryRequirementsKHR; + +typedef struct VkBindVideoSessionMemoryInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t memoryBindIndex; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; + VkDeviceSize memorySize; +} VkBindVideoSessionMemoryInfoKHR; + +typedef struct VkVideoSessionCreateInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t queueFamilyIndex; + VkVideoSessionCreateFlagsKHR flags; + const VkVideoProfileInfoKHR* pVideoProfile; + VkFormat pictureFormat; + VkExtent2D maxCodedExtent; + VkFormat referencePictureFormat; + uint32_t maxDpbSlots; + uint32_t maxActiveReferencePictures; + const VkExtensionProperties* pStdHeaderVersion; +} VkVideoSessionCreateInfoKHR; + +typedef struct 
VkVideoSessionParametersCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkVideoSessionParametersCreateFlagsKHR flags; + VkVideoSessionParametersKHR videoSessionParametersTemplate; + VkVideoSessionKHR videoSession; +} VkVideoSessionParametersCreateInfoKHR; + +typedef struct VkVideoSessionParametersUpdateInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t updateSequenceCount; +} VkVideoSessionParametersUpdateInfoKHR; + +typedef struct VkVideoBeginCodingInfoKHR { + VkStructureType sType; + const void* pNext; + VkVideoBeginCodingFlagsKHR flags; + VkVideoSessionKHR videoSession; + VkVideoSessionParametersKHR videoSessionParameters; + uint32_t referenceSlotCount; + const VkVideoReferenceSlotInfoKHR* pReferenceSlots; +} VkVideoBeginCodingInfoKHR; + +typedef struct VkVideoEndCodingInfoKHR { + VkStructureType sType; + const void* pNext; + VkVideoEndCodingFlagsKHR flags; +} VkVideoEndCodingInfoKHR; + +typedef struct VkVideoCodingControlInfoKHR { + VkStructureType sType; + const void* pNext; + VkVideoCodingControlFlagsKHR flags; +} VkVideoCodingControlInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceVideoCapabilitiesKHR)(VkPhysicalDevice physicalDevice, const VkVideoProfileInfoKHR* pVideoProfile, VkVideoCapabilitiesKHR* pCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceVideoFormatPropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceVideoFormatInfoKHR* pVideoFormatInfo, uint32_t* pVideoFormatPropertyCount, VkVideoFormatPropertiesKHR* pVideoFormatProperties); +typedef VkResult (VKAPI_PTR *PFN_vkCreateVideoSessionKHR)(VkDevice device, const VkVideoSessionCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkVideoSessionKHR* pVideoSession); +typedef void (VKAPI_PTR *PFN_vkDestroyVideoSessionKHR)(VkDevice device, VkVideoSessionKHR videoSession, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetVideoSessionMemoryRequirementsKHR)(VkDevice device, VkVideoSessionKHR videoSession, uint32_t* pMemoryRequirementsCount, VkVideoSessionMemoryRequirementsKHR* pMemoryRequirements); +typedef VkResult (VKAPI_PTR *PFN_vkBindVideoSessionMemoryKHR)(VkDevice device, VkVideoSessionKHR videoSession, uint32_t bindSessionMemoryInfoCount, const VkBindVideoSessionMemoryInfoKHR* pBindSessionMemoryInfos); +typedef VkResult (VKAPI_PTR *PFN_vkCreateVideoSessionParametersKHR)(VkDevice device, const VkVideoSessionParametersCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkVideoSessionParametersKHR* pVideoSessionParameters); +typedef VkResult (VKAPI_PTR *PFN_vkUpdateVideoSessionParametersKHR)(VkDevice device, VkVideoSessionParametersKHR videoSessionParameters, const VkVideoSessionParametersUpdateInfoKHR* pUpdateInfo); +typedef void (VKAPI_PTR *PFN_vkDestroyVideoSessionParametersKHR)(VkDevice device, VkVideoSessionParametersKHR videoSessionParameters, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkCmdBeginVideoCodingKHR)(VkCommandBuffer commandBuffer, const VkVideoBeginCodingInfoKHR* pBeginInfo); +typedef void (VKAPI_PTR *PFN_vkCmdEndVideoCodingKHR)(VkCommandBuffer commandBuffer, const VkVideoEndCodingInfoKHR* pEndCodingInfo); +typedef void (VKAPI_PTR *PFN_vkCmdControlVideoCodingKHR)(VkCommandBuffer commandBuffer, const VkVideoCodingControlInfoKHR* pCodingControlInfo); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceVideoCapabilitiesKHR( + VkPhysicalDevice physicalDevice, + const 
VkVideoProfileInfoKHR* pVideoProfile, + VkVideoCapabilitiesKHR* pCapabilities); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceVideoFormatPropertiesKHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceVideoFormatInfoKHR* pVideoFormatInfo, + uint32_t* pVideoFormatPropertyCount, + VkVideoFormatPropertiesKHR* pVideoFormatProperties); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateVideoSessionKHR( + VkDevice device, + const VkVideoSessionCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkVideoSessionKHR* pVideoSession); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkDestroyVideoSessionKHR( + VkDevice device, + VkVideoSessionKHR videoSession, + const VkAllocationCallbacks* pAllocator); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetVideoSessionMemoryRequirementsKHR( + VkDevice device, + VkVideoSessionKHR videoSession, + uint32_t* pMemoryRequirementsCount, + VkVideoSessionMemoryRequirementsKHR* pMemoryRequirements); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkBindVideoSessionMemoryKHR( + VkDevice device, + VkVideoSessionKHR videoSession, + uint32_t bindSessionMemoryInfoCount, + const VkBindVideoSessionMemoryInfoKHR* pBindSessionMemoryInfos); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateVideoSessionParametersKHR( + VkDevice device, + const VkVideoSessionParametersCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkVideoSessionParametersKHR* pVideoSessionParameters); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkUpdateVideoSessionParametersKHR( + VkDevice device, + VkVideoSessionParametersKHR videoSessionParameters, + const VkVideoSessionParametersUpdateInfoKHR* pUpdateInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkDestroyVideoSessionParametersKHR( + VkDevice device, + VkVideoSessionParametersKHR videoSessionParameters, + const VkAllocationCallbacks* pAllocator); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBeginVideoCodingKHR( + VkCommandBuffer commandBuffer, + const VkVideoBeginCodingInfoKHR* pBeginInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdEndVideoCodingKHR( + VkCommandBuffer commandBuffer, + const VkVideoEndCodingInfoKHR* pEndCodingInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdControlVideoCodingKHR( + VkCommandBuffer commandBuffer, + const VkVideoCodingControlInfoKHR* pCodingControlInfo); +#endif +#endif + + +// VK_KHR_video_decode_queue is a preprocessor guard. Do not pass it to API calls. 
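+
+// Illustrative usage sketch (not part of the upstream Vulkan headers):
+// querying H.264 decode capabilities through the profile and capabilities
+// structs declared above. The vk_sketch_* helper is hypothetical, the
+// VkStructureType values are assumed from earlier in this header, and real
+// use must also chain the codec-specific profile struct (for example
+// VkVideoDecodeH264ProfileInfoKHR) through pNext, which is omitted here.
+static VkResult vk_sketch_query_h264_decode_caps(VkPhysicalDevice physicalDevice,
+                                                 VkVideoCapabilitiesKHR* caps)
+{
+    VkVideoProfileInfoKHR profile = {0};
+    profile.sType               = VK_STRUCTURE_TYPE_VIDEO_PROFILE_INFO_KHR;
+    profile.videoCodecOperation = VK_VIDEO_CODEC_OPERATION_DECODE_H264_BIT_KHR;
+    profile.chromaSubsampling   = VK_VIDEO_CHROMA_SUBSAMPLING_420_BIT_KHR;
+    profile.lumaBitDepth        = VK_VIDEO_COMPONENT_BIT_DEPTH_8_BIT_KHR;
+    profile.chromaBitDepth      = VK_VIDEO_COMPONENT_BIT_DEPTH_8_BIT_KHR;
+
+    caps->sType = VK_STRUCTURE_TYPE_VIDEO_CAPABILITIES_KHR;
+    caps->pNext = NULL;
+    return vkGetPhysicalDeviceVideoCapabilitiesKHR(physicalDevice, &profile, caps);
+}
+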
+#define VK_KHR_video_decode_queue 1 +#define VK_KHR_VIDEO_DECODE_QUEUE_SPEC_VERSION 8 +#define VK_KHR_VIDEO_DECODE_QUEUE_EXTENSION_NAME "VK_KHR_video_decode_queue" + +typedef enum VkVideoDecodeCapabilityFlagBitsKHR { + VK_VIDEO_DECODE_CAPABILITY_DPB_AND_OUTPUT_COINCIDE_BIT_KHR = 0x00000001, + VK_VIDEO_DECODE_CAPABILITY_DPB_AND_OUTPUT_DISTINCT_BIT_KHR = 0x00000002, + VK_VIDEO_DECODE_CAPABILITY_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoDecodeCapabilityFlagBitsKHR; +typedef VkFlags VkVideoDecodeCapabilityFlagsKHR; + +typedef enum VkVideoDecodeUsageFlagBitsKHR { + VK_VIDEO_DECODE_USAGE_DEFAULT_KHR = 0, + VK_VIDEO_DECODE_USAGE_TRANSCODING_BIT_KHR = 0x00000001, + VK_VIDEO_DECODE_USAGE_OFFLINE_BIT_KHR = 0x00000002, + VK_VIDEO_DECODE_USAGE_STREAMING_BIT_KHR = 0x00000004, + VK_VIDEO_DECODE_USAGE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoDecodeUsageFlagBitsKHR; +typedef VkFlags VkVideoDecodeUsageFlagsKHR; +typedef VkFlags VkVideoDecodeFlagsKHR; +typedef struct VkVideoDecodeCapabilitiesKHR { + VkStructureType sType; + void* pNext; + VkVideoDecodeCapabilityFlagsKHR flags; +} VkVideoDecodeCapabilitiesKHR; + +typedef struct VkVideoDecodeUsageInfoKHR { + VkStructureType sType; + const void* pNext; + VkVideoDecodeUsageFlagsKHR videoUsageHints; +} VkVideoDecodeUsageInfoKHR; + +typedef struct VkVideoDecodeInfoKHR { + VkStructureType sType; + const void* pNext; + VkVideoDecodeFlagsKHR flags; + VkBuffer srcBuffer; + VkDeviceSize srcBufferOffset; + VkDeviceSize srcBufferRange; + VkVideoPictureResourceInfoKHR dstPictureResource; + const VkVideoReferenceSlotInfoKHR* pSetupReferenceSlot; + uint32_t referenceSlotCount; + const VkVideoReferenceSlotInfoKHR* pReferenceSlots; +} VkVideoDecodeInfoKHR; + +typedef void (VKAPI_PTR *PFN_vkCmdDecodeVideoKHR)(VkCommandBuffer commandBuffer, const VkVideoDecodeInfoKHR* pDecodeInfo); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDecodeVideoKHR( + VkCommandBuffer commandBuffer, + const VkVideoDecodeInfoKHR* pDecodeInfo); +#endif +#endif + + +// VK_KHR_video_encode_h264 is a preprocessor guard. Do not pass it to API calls. 
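+
+// Illustrative usage sketch (not part of the upstream Vulkan headers):
+// recording a single-picture decode with vkCmdDecodeVideoKHR, declared above.
+// The vk_sketch_* helper is hypothetical; the command buffer is assumed to be
+// inside a video coding scope (between vkCmdBeginVideoCodingKHR and
+// vkCmdEndVideoCodingKHR), and the codec-specific picture parameters that
+// real use must chain through pNext are omitted.
+static void vk_sketch_record_decode(VkCommandBuffer cmd, VkBuffer bitstream,
+                                    VkDeviceSize offset, VkDeviceSize range,
+                                    const VkVideoPictureResourceInfoKHR* dstPicture)
+{
+    VkVideoDecodeInfoKHR info = {0};
+    info.sType              = VK_STRUCTURE_TYPE_VIDEO_DECODE_INFO_KHR;
+    info.srcBuffer          = bitstream;
+    info.srcBufferOffset    = offset;
+    info.srcBufferRange     = range;
+    info.dstPictureResource = *dstPicture;
+    vkCmdDecodeVideoKHR(cmd, &info);
+}
+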
+#define VK_KHR_video_encode_h264 1 +#include "vk_video/vulkan_video_codec_h264std.h" +#include "vk_video/vulkan_video_codec_h264std_encode.h" +#define VK_KHR_VIDEO_ENCODE_H264_SPEC_VERSION 14 +#define VK_KHR_VIDEO_ENCODE_H264_EXTENSION_NAME "VK_KHR_video_encode_h264" + +typedef enum VkVideoEncodeH264CapabilityFlagBitsKHR { + VK_VIDEO_ENCODE_H264_CAPABILITY_HRD_COMPLIANCE_BIT_KHR = 0x00000001, + VK_VIDEO_ENCODE_H264_CAPABILITY_PREDICTION_WEIGHT_TABLE_GENERATED_BIT_KHR = 0x00000002, + VK_VIDEO_ENCODE_H264_CAPABILITY_ROW_UNALIGNED_SLICE_BIT_KHR = 0x00000004, + VK_VIDEO_ENCODE_H264_CAPABILITY_DIFFERENT_SLICE_TYPE_BIT_KHR = 0x00000008, + VK_VIDEO_ENCODE_H264_CAPABILITY_B_FRAME_IN_L0_LIST_BIT_KHR = 0x00000010, + VK_VIDEO_ENCODE_H264_CAPABILITY_B_FRAME_IN_L1_LIST_BIT_KHR = 0x00000020, + VK_VIDEO_ENCODE_H264_CAPABILITY_PER_PICTURE_TYPE_MIN_MAX_QP_BIT_KHR = 0x00000040, + VK_VIDEO_ENCODE_H264_CAPABILITY_PER_SLICE_CONSTANT_QP_BIT_KHR = 0x00000080, + VK_VIDEO_ENCODE_H264_CAPABILITY_GENERATE_PREFIX_NALU_BIT_KHR = 0x00000100, + VK_VIDEO_ENCODE_H264_CAPABILITY_B_PICTURE_INTRA_REFRESH_BIT_KHR = 0x00000400, + VK_VIDEO_ENCODE_H264_CAPABILITY_MB_QP_DIFF_WRAPAROUND_BIT_KHR = 0x00000200, + VK_VIDEO_ENCODE_H264_CAPABILITY_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoEncodeH264CapabilityFlagBitsKHR; +typedef VkFlags VkVideoEncodeH264CapabilityFlagsKHR; + +typedef enum VkVideoEncodeH264StdFlagBitsKHR { + VK_VIDEO_ENCODE_H264_STD_SEPARATE_COLOR_PLANE_FLAG_SET_BIT_KHR = 0x00000001, + VK_VIDEO_ENCODE_H264_STD_QPPRIME_Y_ZERO_TRANSFORM_BYPASS_FLAG_SET_BIT_KHR = 0x00000002, + VK_VIDEO_ENCODE_H264_STD_SCALING_MATRIX_PRESENT_FLAG_SET_BIT_KHR = 0x00000004, + VK_VIDEO_ENCODE_H264_STD_CHROMA_QP_INDEX_OFFSET_BIT_KHR = 0x00000008, + VK_VIDEO_ENCODE_H264_STD_SECOND_CHROMA_QP_INDEX_OFFSET_BIT_KHR = 0x00000010, + VK_VIDEO_ENCODE_H264_STD_PIC_INIT_QP_MINUS26_BIT_KHR = 0x00000020, + VK_VIDEO_ENCODE_H264_STD_WEIGHTED_PRED_FLAG_SET_BIT_KHR = 0x00000040, + VK_VIDEO_ENCODE_H264_STD_WEIGHTED_BIPRED_IDC_EXPLICIT_BIT_KHR = 0x00000080, + VK_VIDEO_ENCODE_H264_STD_WEIGHTED_BIPRED_IDC_IMPLICIT_BIT_KHR = 0x00000100, + VK_VIDEO_ENCODE_H264_STD_TRANSFORM_8X8_MODE_FLAG_SET_BIT_KHR = 0x00000200, + VK_VIDEO_ENCODE_H264_STD_DIRECT_SPATIAL_MV_PRED_FLAG_UNSET_BIT_KHR = 0x00000400, + VK_VIDEO_ENCODE_H264_STD_ENTROPY_CODING_MODE_FLAG_UNSET_BIT_KHR = 0x00000800, + VK_VIDEO_ENCODE_H264_STD_ENTROPY_CODING_MODE_FLAG_SET_BIT_KHR = 0x00001000, + VK_VIDEO_ENCODE_H264_STD_DIRECT_8X8_INFERENCE_FLAG_UNSET_BIT_KHR = 0x00002000, + VK_VIDEO_ENCODE_H264_STD_CONSTRAINED_INTRA_PRED_FLAG_SET_BIT_KHR = 0x00004000, + VK_VIDEO_ENCODE_H264_STD_DEBLOCKING_FILTER_DISABLED_BIT_KHR = 0x00008000, + VK_VIDEO_ENCODE_H264_STD_DEBLOCKING_FILTER_ENABLED_BIT_KHR = 0x00010000, + VK_VIDEO_ENCODE_H264_STD_DEBLOCKING_FILTER_PARTIAL_BIT_KHR = 0x00020000, + VK_VIDEO_ENCODE_H264_STD_SLICE_QP_DELTA_BIT_KHR = 0x00080000, + VK_VIDEO_ENCODE_H264_STD_DIFFERENT_SLICE_QP_DELTA_BIT_KHR = 0x00100000, + VK_VIDEO_ENCODE_H264_STD_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoEncodeH264StdFlagBitsKHR; +typedef VkFlags VkVideoEncodeH264StdFlagsKHR; + +typedef enum VkVideoEncodeH264RateControlFlagBitsKHR { + VK_VIDEO_ENCODE_H264_RATE_CONTROL_ATTEMPT_HRD_COMPLIANCE_BIT_KHR = 0x00000001, + VK_VIDEO_ENCODE_H264_RATE_CONTROL_REGULAR_GOP_BIT_KHR = 0x00000002, + VK_VIDEO_ENCODE_H264_RATE_CONTROL_REFERENCE_PATTERN_FLAT_BIT_KHR = 0x00000004, + VK_VIDEO_ENCODE_H264_RATE_CONTROL_REFERENCE_PATTERN_DYADIC_BIT_KHR = 0x00000008, + VK_VIDEO_ENCODE_H264_RATE_CONTROL_TEMPORAL_LAYER_PATTERN_DYADIC_BIT_KHR = 0x00000010, + 
VK_VIDEO_ENCODE_H264_RATE_CONTROL_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoEncodeH264RateControlFlagBitsKHR; +typedef VkFlags VkVideoEncodeH264RateControlFlagsKHR; +typedef struct VkVideoEncodeH264CapabilitiesKHR { + VkStructureType sType; + void* pNext; + VkVideoEncodeH264CapabilityFlagsKHR flags; + StdVideoH264LevelIdc maxLevelIdc; + uint32_t maxSliceCount; + uint32_t maxPPictureL0ReferenceCount; + uint32_t maxBPictureL0ReferenceCount; + uint32_t maxL1ReferenceCount; + uint32_t maxTemporalLayerCount; + VkBool32 expectDyadicTemporalLayerPattern; + int32_t minQp; + int32_t maxQp; + VkBool32 prefersGopRemainingFrames; + VkBool32 requiresGopRemainingFrames; + VkVideoEncodeH264StdFlagsKHR stdSyntaxFlags; +} VkVideoEncodeH264CapabilitiesKHR; + +typedef struct VkVideoEncodeH264QpKHR { + int32_t qpI; + int32_t qpP; + int32_t qpB; +} VkVideoEncodeH264QpKHR; + +typedef struct VkVideoEncodeH264QualityLevelPropertiesKHR { + VkStructureType sType; + void* pNext; + VkVideoEncodeH264RateControlFlagsKHR preferredRateControlFlags; + uint32_t preferredGopFrameCount; + uint32_t preferredIdrPeriod; + uint32_t preferredConsecutiveBFrameCount; + uint32_t preferredTemporalLayerCount; + VkVideoEncodeH264QpKHR preferredConstantQp; + uint32_t preferredMaxL0ReferenceCount; + uint32_t preferredMaxL1ReferenceCount; + VkBool32 preferredStdEntropyCodingModeFlag; +} VkVideoEncodeH264QualityLevelPropertiesKHR; + +typedef struct VkVideoEncodeH264SessionCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkBool32 useMaxLevelIdc; + StdVideoH264LevelIdc maxLevelIdc; +} VkVideoEncodeH264SessionCreateInfoKHR; + +typedef struct VkVideoEncodeH264SessionParametersAddInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t stdSPSCount; + const StdVideoH264SequenceParameterSet* pStdSPSs; + uint32_t stdPPSCount; + const StdVideoH264PictureParameterSet* pStdPPSs; +} VkVideoEncodeH264SessionParametersAddInfoKHR; + +typedef struct VkVideoEncodeH264SessionParametersCreateInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t maxStdSPSCount; + uint32_t maxStdPPSCount; + const VkVideoEncodeH264SessionParametersAddInfoKHR* pParametersAddInfo; +} VkVideoEncodeH264SessionParametersCreateInfoKHR; + +typedef struct VkVideoEncodeH264SessionParametersGetInfoKHR { + VkStructureType sType; + const void* pNext; + VkBool32 writeStdSPS; + VkBool32 writeStdPPS; + uint32_t stdSPSId; + uint32_t stdPPSId; +} VkVideoEncodeH264SessionParametersGetInfoKHR; + +typedef struct VkVideoEncodeH264SessionParametersFeedbackInfoKHR { + VkStructureType sType; + void* pNext; + VkBool32 hasStdSPSOverrides; + VkBool32 hasStdPPSOverrides; +} VkVideoEncodeH264SessionParametersFeedbackInfoKHR; + +typedef struct VkVideoEncodeH264NaluSliceInfoKHR { + VkStructureType sType; + const void* pNext; + int32_t constantQp; + const StdVideoEncodeH264SliceHeader* pStdSliceHeader; +} VkVideoEncodeH264NaluSliceInfoKHR; + +typedef struct VkVideoEncodeH264PictureInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t naluSliceEntryCount; + const VkVideoEncodeH264NaluSliceInfoKHR* pNaluSliceEntries; + const StdVideoEncodeH264PictureInfo* pStdPictureInfo; + VkBool32 generatePrefixNalu; +} VkVideoEncodeH264PictureInfoKHR; + +typedef struct VkVideoEncodeH264DpbSlotInfoKHR { + VkStructureType sType; + const void* pNext; + const StdVideoEncodeH264ReferenceInfo* pStdReferenceInfo; +} VkVideoEncodeH264DpbSlotInfoKHR; + +typedef struct VkVideoEncodeH264ProfileInfoKHR { + VkStructureType sType; + const void* pNext; + StdVideoH264ProfileIdc 
stdProfileIdc; +} VkVideoEncodeH264ProfileInfoKHR; + +typedef struct VkVideoEncodeH264RateControlInfoKHR { + VkStructureType sType; + const void* pNext; + VkVideoEncodeH264RateControlFlagsKHR flags; + uint32_t gopFrameCount; + uint32_t idrPeriod; + uint32_t consecutiveBFrameCount; + uint32_t temporalLayerCount; +} VkVideoEncodeH264RateControlInfoKHR; + +typedef struct VkVideoEncodeH264FrameSizeKHR { + uint32_t frameISize; + uint32_t framePSize; + uint32_t frameBSize; +} VkVideoEncodeH264FrameSizeKHR; + +typedef struct VkVideoEncodeH264RateControlLayerInfoKHR { + VkStructureType sType; + const void* pNext; + VkBool32 useMinQp; + VkVideoEncodeH264QpKHR minQp; + VkBool32 useMaxQp; + VkVideoEncodeH264QpKHR maxQp; + VkBool32 useMaxFrameSize; + VkVideoEncodeH264FrameSizeKHR maxFrameSize; +} VkVideoEncodeH264RateControlLayerInfoKHR; + +typedef struct VkVideoEncodeH264GopRemainingFrameInfoKHR { + VkStructureType sType; + const void* pNext; + VkBool32 useGopRemainingFrames; + uint32_t gopRemainingI; + uint32_t gopRemainingP; + uint32_t gopRemainingB; +} VkVideoEncodeH264GopRemainingFrameInfoKHR; + + + +// VK_KHR_video_encode_h265 is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_video_encode_h265 1 +#include "vk_video/vulkan_video_codec_h265std.h" +#include "vk_video/vulkan_video_codec_h265std_encode.h" +#define VK_KHR_VIDEO_ENCODE_H265_SPEC_VERSION 14 +#define VK_KHR_VIDEO_ENCODE_H265_EXTENSION_NAME "VK_KHR_video_encode_h265" + +typedef enum VkVideoEncodeH265CapabilityFlagBitsKHR { + VK_VIDEO_ENCODE_H265_CAPABILITY_HRD_COMPLIANCE_BIT_KHR = 0x00000001, + VK_VIDEO_ENCODE_H265_CAPABILITY_PREDICTION_WEIGHT_TABLE_GENERATED_BIT_KHR = 0x00000002, + VK_VIDEO_ENCODE_H265_CAPABILITY_ROW_UNALIGNED_SLICE_SEGMENT_BIT_KHR = 0x00000004, + VK_VIDEO_ENCODE_H265_CAPABILITY_DIFFERENT_SLICE_SEGMENT_TYPE_BIT_KHR = 0x00000008, + VK_VIDEO_ENCODE_H265_CAPABILITY_B_FRAME_IN_L0_LIST_BIT_KHR = 0x00000010, + VK_VIDEO_ENCODE_H265_CAPABILITY_B_FRAME_IN_L1_LIST_BIT_KHR = 0x00000020, + VK_VIDEO_ENCODE_H265_CAPABILITY_PER_PICTURE_TYPE_MIN_MAX_QP_BIT_KHR = 0x00000040, + VK_VIDEO_ENCODE_H265_CAPABILITY_PER_SLICE_SEGMENT_CONSTANT_QP_BIT_KHR = 0x00000080, + VK_VIDEO_ENCODE_H265_CAPABILITY_MULTIPLE_TILES_PER_SLICE_SEGMENT_BIT_KHR = 0x00000100, + VK_VIDEO_ENCODE_H265_CAPABILITY_MULTIPLE_SLICE_SEGMENTS_PER_TILE_BIT_KHR = 0x00000200, + VK_VIDEO_ENCODE_H265_CAPABILITY_B_PICTURE_INTRA_REFRESH_BIT_KHR = 0x00000800, + VK_VIDEO_ENCODE_H265_CAPABILITY_CU_QP_DIFF_WRAPAROUND_BIT_KHR = 0x00000400, + VK_VIDEO_ENCODE_H265_CAPABILITY_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoEncodeH265CapabilityFlagBitsKHR; +typedef VkFlags VkVideoEncodeH265CapabilityFlagsKHR; + +typedef enum VkVideoEncodeH265StdFlagBitsKHR { + VK_VIDEO_ENCODE_H265_STD_SEPARATE_COLOR_PLANE_FLAG_SET_BIT_KHR = 0x00000001, + VK_VIDEO_ENCODE_H265_STD_SAMPLE_ADAPTIVE_OFFSET_ENABLED_FLAG_SET_BIT_KHR = 0x00000002, + VK_VIDEO_ENCODE_H265_STD_SCALING_LIST_DATA_PRESENT_FLAG_SET_BIT_KHR = 0x00000004, + VK_VIDEO_ENCODE_H265_STD_PCM_ENABLED_FLAG_SET_BIT_KHR = 0x00000008, + VK_VIDEO_ENCODE_H265_STD_SPS_TEMPORAL_MVP_ENABLED_FLAG_SET_BIT_KHR = 0x00000010, + VK_VIDEO_ENCODE_H265_STD_INIT_QP_MINUS26_BIT_KHR = 0x00000020, + VK_VIDEO_ENCODE_H265_STD_WEIGHTED_PRED_FLAG_SET_BIT_KHR = 0x00000040, + VK_VIDEO_ENCODE_H265_STD_WEIGHTED_BIPRED_FLAG_SET_BIT_KHR = 0x00000080, + VK_VIDEO_ENCODE_H265_STD_LOG2_PARALLEL_MERGE_LEVEL_MINUS2_BIT_KHR = 0x00000100, + VK_VIDEO_ENCODE_H265_STD_SIGN_DATA_HIDING_ENABLED_FLAG_SET_BIT_KHR = 0x00000200, + 
VK_VIDEO_ENCODE_H265_STD_TRANSFORM_SKIP_ENABLED_FLAG_SET_BIT_KHR = 0x00000400, + VK_VIDEO_ENCODE_H265_STD_TRANSFORM_SKIP_ENABLED_FLAG_UNSET_BIT_KHR = 0x00000800, + VK_VIDEO_ENCODE_H265_STD_PPS_SLICE_CHROMA_QP_OFFSETS_PRESENT_FLAG_SET_BIT_KHR = 0x00001000, + VK_VIDEO_ENCODE_H265_STD_TRANSQUANT_BYPASS_ENABLED_FLAG_SET_BIT_KHR = 0x00002000, + VK_VIDEO_ENCODE_H265_STD_CONSTRAINED_INTRA_PRED_FLAG_SET_BIT_KHR = 0x00004000, + VK_VIDEO_ENCODE_H265_STD_ENTROPY_CODING_SYNC_ENABLED_FLAG_SET_BIT_KHR = 0x00008000, + VK_VIDEO_ENCODE_H265_STD_DEBLOCKING_FILTER_OVERRIDE_ENABLED_FLAG_SET_BIT_KHR = 0x00010000, + VK_VIDEO_ENCODE_H265_STD_DEPENDENT_SLICE_SEGMENTS_ENABLED_FLAG_SET_BIT_KHR = 0x00020000, + VK_VIDEO_ENCODE_H265_STD_DEPENDENT_SLICE_SEGMENT_FLAG_SET_BIT_KHR = 0x00040000, + VK_VIDEO_ENCODE_H265_STD_SLICE_QP_DELTA_BIT_KHR = 0x00080000, + VK_VIDEO_ENCODE_H265_STD_DIFFERENT_SLICE_QP_DELTA_BIT_KHR = 0x00100000, + VK_VIDEO_ENCODE_H265_STD_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoEncodeH265StdFlagBitsKHR; +typedef VkFlags VkVideoEncodeH265StdFlagsKHR; + +typedef enum VkVideoEncodeH265CtbSizeFlagBitsKHR { + VK_VIDEO_ENCODE_H265_CTB_SIZE_16_BIT_KHR = 0x00000001, + VK_VIDEO_ENCODE_H265_CTB_SIZE_32_BIT_KHR = 0x00000002, + VK_VIDEO_ENCODE_H265_CTB_SIZE_64_BIT_KHR = 0x00000004, + VK_VIDEO_ENCODE_H265_CTB_SIZE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoEncodeH265CtbSizeFlagBitsKHR; +typedef VkFlags VkVideoEncodeH265CtbSizeFlagsKHR; + +typedef enum VkVideoEncodeH265TransformBlockSizeFlagBitsKHR { + VK_VIDEO_ENCODE_H265_TRANSFORM_BLOCK_SIZE_4_BIT_KHR = 0x00000001, + VK_VIDEO_ENCODE_H265_TRANSFORM_BLOCK_SIZE_8_BIT_KHR = 0x00000002, + VK_VIDEO_ENCODE_H265_TRANSFORM_BLOCK_SIZE_16_BIT_KHR = 0x00000004, + VK_VIDEO_ENCODE_H265_TRANSFORM_BLOCK_SIZE_32_BIT_KHR = 0x00000008, + VK_VIDEO_ENCODE_H265_TRANSFORM_BLOCK_SIZE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoEncodeH265TransformBlockSizeFlagBitsKHR; +typedef VkFlags VkVideoEncodeH265TransformBlockSizeFlagsKHR; + +typedef enum VkVideoEncodeH265RateControlFlagBitsKHR { + VK_VIDEO_ENCODE_H265_RATE_CONTROL_ATTEMPT_HRD_COMPLIANCE_BIT_KHR = 0x00000001, + VK_VIDEO_ENCODE_H265_RATE_CONTROL_REGULAR_GOP_BIT_KHR = 0x00000002, + VK_VIDEO_ENCODE_H265_RATE_CONTROL_REFERENCE_PATTERN_FLAT_BIT_KHR = 0x00000004, + VK_VIDEO_ENCODE_H265_RATE_CONTROL_REFERENCE_PATTERN_DYADIC_BIT_KHR = 0x00000008, + VK_VIDEO_ENCODE_H265_RATE_CONTROL_TEMPORAL_SUB_LAYER_PATTERN_DYADIC_BIT_KHR = 0x00000010, + VK_VIDEO_ENCODE_H265_RATE_CONTROL_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoEncodeH265RateControlFlagBitsKHR; +typedef VkFlags VkVideoEncodeH265RateControlFlagsKHR; +typedef struct VkVideoEncodeH265CapabilitiesKHR { + VkStructureType sType; + void* pNext; + VkVideoEncodeH265CapabilityFlagsKHR flags; + StdVideoH265LevelIdc maxLevelIdc; + uint32_t maxSliceSegmentCount; + VkExtent2D maxTiles; + VkVideoEncodeH265CtbSizeFlagsKHR ctbSizes; + VkVideoEncodeH265TransformBlockSizeFlagsKHR transformBlockSizes; + uint32_t maxPPictureL0ReferenceCount; + uint32_t maxBPictureL0ReferenceCount; + uint32_t maxL1ReferenceCount; + uint32_t maxSubLayerCount; + VkBool32 expectDyadicTemporalSubLayerPattern; + int32_t minQp; + int32_t maxQp; + VkBool32 prefersGopRemainingFrames; + VkBool32 requiresGopRemainingFrames; + VkVideoEncodeH265StdFlagsKHR stdSyntaxFlags; +} VkVideoEncodeH265CapabilitiesKHR; + +typedef struct VkVideoEncodeH265SessionCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkBool32 useMaxLevelIdc; + StdVideoH265LevelIdc maxLevelIdc; +} VkVideoEncodeH265SessionCreateInfoKHR; + +typedef 
struct VkVideoEncodeH265QpKHR { + int32_t qpI; + int32_t qpP; + int32_t qpB; +} VkVideoEncodeH265QpKHR; + +typedef struct VkVideoEncodeH265QualityLevelPropertiesKHR { + VkStructureType sType; + void* pNext; + VkVideoEncodeH265RateControlFlagsKHR preferredRateControlFlags; + uint32_t preferredGopFrameCount; + uint32_t preferredIdrPeriod; + uint32_t preferredConsecutiveBFrameCount; + uint32_t preferredSubLayerCount; + VkVideoEncodeH265QpKHR preferredConstantQp; + uint32_t preferredMaxL0ReferenceCount; + uint32_t preferredMaxL1ReferenceCount; +} VkVideoEncodeH265QualityLevelPropertiesKHR; + +typedef struct VkVideoEncodeH265SessionParametersAddInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t stdVPSCount; + const StdVideoH265VideoParameterSet* pStdVPSs; + uint32_t stdSPSCount; + const StdVideoH265SequenceParameterSet* pStdSPSs; + uint32_t stdPPSCount; + const StdVideoH265PictureParameterSet* pStdPPSs; +} VkVideoEncodeH265SessionParametersAddInfoKHR; + +typedef struct VkVideoEncodeH265SessionParametersCreateInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t maxStdVPSCount; + uint32_t maxStdSPSCount; + uint32_t maxStdPPSCount; + const VkVideoEncodeH265SessionParametersAddInfoKHR* pParametersAddInfo; +} VkVideoEncodeH265SessionParametersCreateInfoKHR; + +typedef struct VkVideoEncodeH265SessionParametersGetInfoKHR { + VkStructureType sType; + const void* pNext; + VkBool32 writeStdVPS; + VkBool32 writeStdSPS; + VkBool32 writeStdPPS; + uint32_t stdVPSId; + uint32_t stdSPSId; + uint32_t stdPPSId; +} VkVideoEncodeH265SessionParametersGetInfoKHR; + +typedef struct VkVideoEncodeH265SessionParametersFeedbackInfoKHR { + VkStructureType sType; + void* pNext; + VkBool32 hasStdVPSOverrides; + VkBool32 hasStdSPSOverrides; + VkBool32 hasStdPPSOverrides; +} VkVideoEncodeH265SessionParametersFeedbackInfoKHR; + +typedef struct VkVideoEncodeH265NaluSliceSegmentInfoKHR { + VkStructureType sType; + const void* pNext; + int32_t constantQp; + const StdVideoEncodeH265SliceSegmentHeader* pStdSliceSegmentHeader; +} VkVideoEncodeH265NaluSliceSegmentInfoKHR; + +typedef struct VkVideoEncodeH265PictureInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t naluSliceSegmentEntryCount; + const VkVideoEncodeH265NaluSliceSegmentInfoKHR* pNaluSliceSegmentEntries; + const StdVideoEncodeH265PictureInfo* pStdPictureInfo; +} VkVideoEncodeH265PictureInfoKHR; + +typedef struct VkVideoEncodeH265DpbSlotInfoKHR { + VkStructureType sType; + const void* pNext; + const StdVideoEncodeH265ReferenceInfo* pStdReferenceInfo; +} VkVideoEncodeH265DpbSlotInfoKHR; + +typedef struct VkVideoEncodeH265ProfileInfoKHR { + VkStructureType sType; + const void* pNext; + StdVideoH265ProfileIdc stdProfileIdc; +} VkVideoEncodeH265ProfileInfoKHR; + +typedef struct VkVideoEncodeH265RateControlInfoKHR { + VkStructureType sType; + const void* pNext; + VkVideoEncodeH265RateControlFlagsKHR flags; + uint32_t gopFrameCount; + uint32_t idrPeriod; + uint32_t consecutiveBFrameCount; + uint32_t subLayerCount; +} VkVideoEncodeH265RateControlInfoKHR; + +typedef struct VkVideoEncodeH265FrameSizeKHR { + uint32_t frameISize; + uint32_t framePSize; + uint32_t frameBSize; +} VkVideoEncodeH265FrameSizeKHR; + +typedef struct VkVideoEncodeH265RateControlLayerInfoKHR { + VkStructureType sType; + const void* pNext; + VkBool32 useMinQp; + VkVideoEncodeH265QpKHR minQp; + VkBool32 useMaxQp; + VkVideoEncodeH265QpKHR maxQp; + VkBool32 useMaxFrameSize; + VkVideoEncodeH265FrameSizeKHR maxFrameSize; +} VkVideoEncodeH265RateControlLayerInfoKHR; + 
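// ---------------------------------------------------------------------------
// Editor's note -- illustrative sketch only, not part of the vendored header.
// A per-layer QP clamp using the struct just defined; the sType name here is
// an assumption based on the standard KHR naming pattern:
//
//     VkVideoEncodeH265RateControlLayerInfoKHR layer = {0};
//     layer.sType    = VK_STRUCTURE_TYPE_VIDEO_ENCODE_H265_RATE_CONTROL_LAYER_INFO_KHR;
//     layer.useMinQp = VK_TRUE;
//     layer.minQp    = (VkVideoEncodeH265QpKHR){ 18, 20, 22 };  /* I, P, B */
//     layer.useMaxQp = VK_TRUE;
//     layer.maxQp    = (VkVideoEncodeH265QpKHR){ 34, 38, 42 };
// ---------------------------------------------------------------------------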
+typedef struct VkVideoEncodeH265GopRemainingFrameInfoKHR { + VkStructureType sType; + const void* pNext; + VkBool32 useGopRemainingFrames; + uint32_t gopRemainingI; + uint32_t gopRemainingP; + uint32_t gopRemainingB; +} VkVideoEncodeH265GopRemainingFrameInfoKHR; + + + +// VK_KHR_video_decode_h264 is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_video_decode_h264 1 +#include "vk_video/vulkan_video_codec_h264std_decode.h" +#define VK_KHR_VIDEO_DECODE_H264_SPEC_VERSION 9 +#define VK_KHR_VIDEO_DECODE_H264_EXTENSION_NAME "VK_KHR_video_decode_h264" + +typedef enum VkVideoDecodeH264PictureLayoutFlagBitsKHR { + VK_VIDEO_DECODE_H264_PICTURE_LAYOUT_PROGRESSIVE_KHR = 0, + VK_VIDEO_DECODE_H264_PICTURE_LAYOUT_INTERLACED_INTERLEAVED_LINES_BIT_KHR = 0x00000001, + VK_VIDEO_DECODE_H264_PICTURE_LAYOUT_INTERLACED_SEPARATE_PLANES_BIT_KHR = 0x00000002, + VK_VIDEO_DECODE_H264_PICTURE_LAYOUT_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoDecodeH264PictureLayoutFlagBitsKHR; +typedef VkFlags VkVideoDecodeH264PictureLayoutFlagsKHR; +typedef struct VkVideoDecodeH264ProfileInfoKHR { + VkStructureType sType; + const void* pNext; + StdVideoH264ProfileIdc stdProfileIdc; + VkVideoDecodeH264PictureLayoutFlagBitsKHR pictureLayout; +} VkVideoDecodeH264ProfileInfoKHR; + +typedef struct VkVideoDecodeH264CapabilitiesKHR { + VkStructureType sType; + void* pNext; + StdVideoH264LevelIdc maxLevelIdc; + VkOffset2D fieldOffsetGranularity; +} VkVideoDecodeH264CapabilitiesKHR; + +typedef struct VkVideoDecodeH264SessionParametersAddInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t stdSPSCount; + const StdVideoH264SequenceParameterSet* pStdSPSs; + uint32_t stdPPSCount; + const StdVideoH264PictureParameterSet* pStdPPSs; +} VkVideoDecodeH264SessionParametersAddInfoKHR; + +typedef struct VkVideoDecodeH264SessionParametersCreateInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t maxStdSPSCount; + uint32_t maxStdPPSCount; + const VkVideoDecodeH264SessionParametersAddInfoKHR* pParametersAddInfo; +} VkVideoDecodeH264SessionParametersCreateInfoKHR; + +typedef struct VkVideoDecodeH264PictureInfoKHR { + VkStructureType sType; + const void* pNext; + const StdVideoDecodeH264PictureInfo* pStdPictureInfo; + uint32_t sliceCount; + const uint32_t* pSliceOffsets; +} VkVideoDecodeH264PictureInfoKHR; + +typedef struct VkVideoDecodeH264DpbSlotInfoKHR { + VkStructureType sType; + const void* pNext; + const StdVideoDecodeH264ReferenceInfo* pStdReferenceInfo; +} VkVideoDecodeH264DpbSlotInfoKHR; + + + +// VK_KHR_dynamic_rendering is a preprocessor guard. Do not pass it to API calls. 
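// ---------------------------------------------------------------------------
// Editor's note -- illustrative sketch only, not part of the vendored header.
// A decode profile for progressive H.264 High-profile content
// (VK_KHR_video_decode_h264, above); this struct is chained into
// VkVideoProfileInfoKHR::pNext when querying capabilities:
//
//     VkVideoDecodeH264ProfileInfoKHR h264Profile = {0};
//     h264Profile.sType         = VK_STRUCTURE_TYPE_VIDEO_DECODE_H264_PROFILE_INFO_KHR;
//     h264Profile.stdProfileIdc = STD_VIDEO_H264_PROFILE_IDC_HIGH;
//     h264Profile.pictureLayout = VK_VIDEO_DECODE_H264_PICTURE_LAYOUT_PROGRESSIVE_KHR;
// ---------------------------------------------------------------------------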
+#define VK_KHR_dynamic_rendering 1 +#define VK_KHR_DYNAMIC_RENDERING_SPEC_VERSION 1 +#define VK_KHR_DYNAMIC_RENDERING_EXTENSION_NAME "VK_KHR_dynamic_rendering" +typedef VkRenderingFlags VkRenderingFlagsKHR; + +typedef VkRenderingFlagBits VkRenderingFlagBitsKHR; + +typedef VkRenderingInfo VkRenderingInfoKHR; + +typedef VkRenderingAttachmentInfo VkRenderingAttachmentInfoKHR; + +typedef VkPipelineRenderingCreateInfo VkPipelineRenderingCreateInfoKHR; + +typedef VkPhysicalDeviceDynamicRenderingFeatures VkPhysicalDeviceDynamicRenderingFeaturesKHR; + +typedef VkCommandBufferInheritanceRenderingInfo VkCommandBufferInheritanceRenderingInfoKHR; + +typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderingKHR)(VkCommandBuffer commandBuffer, const VkRenderingInfo* pRenderingInfo); +typedef void (VKAPI_PTR *PFN_vkCmdEndRenderingKHR)(VkCommandBuffer commandBuffer); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderingKHR( + VkCommandBuffer commandBuffer, + const VkRenderingInfo* pRenderingInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderingKHR( + VkCommandBuffer commandBuffer); +#endif +#endif + + +// VK_KHR_multiview is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_multiview 1 +#define VK_KHR_MULTIVIEW_SPEC_VERSION 1 +#define VK_KHR_MULTIVIEW_EXTENSION_NAME "VK_KHR_multiview" +typedef VkRenderPassMultiviewCreateInfo VkRenderPassMultiviewCreateInfoKHR; + +typedef VkPhysicalDeviceMultiviewFeatures VkPhysicalDeviceMultiviewFeaturesKHR; + +typedef VkPhysicalDeviceMultiviewProperties VkPhysicalDeviceMultiviewPropertiesKHR; + + + +// VK_KHR_get_physical_device_properties2 is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_get_physical_device_properties2 1 +#define VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION 2 +#define VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME "VK_KHR_get_physical_device_properties2" +typedef VkPhysicalDeviceFeatures2 VkPhysicalDeviceFeatures2KHR; + +typedef VkPhysicalDeviceProperties2 VkPhysicalDeviceProperties2KHR; + +typedef VkFormatProperties2 VkFormatProperties2KHR; + +typedef VkImageFormatProperties2 VkImageFormatProperties2KHR; + +typedef VkPhysicalDeviceImageFormatInfo2 VkPhysicalDeviceImageFormatInfo2KHR; + +typedef VkQueueFamilyProperties2 VkQueueFamilyProperties2KHR; + +typedef VkPhysicalDeviceMemoryProperties2 VkPhysicalDeviceMemoryProperties2KHR; + +typedef VkSparseImageFormatProperties2 VkSparseImageFormatProperties2KHR; + +typedef VkPhysicalDeviceSparseImageFormatInfo2 VkPhysicalDeviceSparseImageFormatInfo2KHR; + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties2KHR)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties); +typedef void (VKAPI_PTR 
*PFN_vkGetPhysicalDeviceMemoryProperties2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* pMemoryProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2* pProperties); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures2KHR( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceFeatures2* pFeatures); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties2KHR( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceProperties2* pProperties); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties2KHR( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkFormatProperties2* pFormatProperties); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, + VkImageFormatProperties2* pImageFormatProperties); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties2KHR( + VkPhysicalDevice physicalDevice, + uint32_t* pQueueFamilyPropertyCount, + VkQueueFamilyProperties2* pQueueFamilyProperties); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties2KHR( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceMemoryProperties2* pMemoryProperties); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, + uint32_t* pPropertyCount, + VkSparseImageFormatProperties2* pProperties); +#endif +#endif + + +// VK_KHR_device_group is a preprocessor guard. Do not pass it to API calls. 
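// ---------------------------------------------------------------------------
// Editor's note -- illustrative sketch only, not part of the vendored header.
// The pNext-chaining pattern that VK_KHR_get_physical_device_properties2
// (above) enables: query an extension's feature struct through
// vkGetPhysicalDeviceFeatures2KHR. `physicalDevice` is a hypothetical handle,
// and on a pre-1.1 instance the entry point must be loaded with
// vkGetInstanceProcAddr:
//
//     VkPhysicalDeviceDynamicRenderingFeaturesKHR dynRender = {0};
//     dynRender.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES;
//     VkPhysicalDeviceFeatures2KHR feats = {0};
//     feats.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
//     feats.pNext = &dynRender;
//     vkGetPhysicalDeviceFeatures2KHR(physicalDevice, &feats);
//     /* dynRender.dynamicRendering now reports support. */
// ---------------------------------------------------------------------------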
+#define VK_KHR_device_group 1 +#define VK_KHR_DEVICE_GROUP_SPEC_VERSION 4 +#define VK_KHR_DEVICE_GROUP_EXTENSION_NAME "VK_KHR_device_group" +typedef VkPeerMemoryFeatureFlags VkPeerMemoryFeatureFlagsKHR; + +typedef VkPeerMemoryFeatureFlagBits VkPeerMemoryFeatureFlagBitsKHR; + +typedef VkMemoryAllocateFlags VkMemoryAllocateFlagsKHR; + +typedef VkMemoryAllocateFlagBits VkMemoryAllocateFlagBitsKHR; + +typedef VkMemoryAllocateFlagsInfo VkMemoryAllocateFlagsInfoKHR; + +typedef VkDeviceGroupRenderPassBeginInfo VkDeviceGroupRenderPassBeginInfoKHR; + +typedef VkDeviceGroupCommandBufferBeginInfo VkDeviceGroupCommandBufferBeginInfoKHR; + +typedef VkDeviceGroupSubmitInfo VkDeviceGroupSubmitInfoKHR; + +typedef VkDeviceGroupBindSparseInfo VkDeviceGroupBindSparseInfoKHR; + +typedef VkBindBufferMemoryDeviceGroupInfo VkBindBufferMemoryDeviceGroupInfoKHR; + +typedef VkBindImageMemoryDeviceGroupInfo VkBindImageMemoryDeviceGroupInfoKHR; + +typedef void (VKAPI_PTR *PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR)(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures); +typedef void (VKAPI_PTR *PFN_vkCmdSetDeviceMaskKHR)(VkCommandBuffer commandBuffer, uint32_t deviceMask); +typedef void (VKAPI_PTR *PFN_vkCmdDispatchBaseKHR)(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetDeviceGroupPeerMemoryFeaturesKHR( + VkDevice device, + uint32_t heapIndex, + uint32_t localDeviceIndex, + uint32_t remoteDeviceIndex, + VkPeerMemoryFeatureFlags* pPeerMemoryFeatures); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetDeviceMaskKHR( + VkCommandBuffer commandBuffer, + uint32_t deviceMask); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDispatchBaseKHR( + VkCommandBuffer commandBuffer, + uint32_t baseGroupX, + uint32_t baseGroupY, + uint32_t baseGroupZ, + uint32_t groupCountX, + uint32_t groupCountY, + uint32_t groupCountZ); +#endif +#endif + + +// VK_KHR_shader_draw_parameters is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_shader_draw_parameters 1 +#define VK_KHR_SHADER_DRAW_PARAMETERS_SPEC_VERSION 1 +#define VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME "VK_KHR_shader_draw_parameters" + + +// VK_KHR_maintenance1 is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_maintenance1 1 +#define VK_KHR_MAINTENANCE_1_SPEC_VERSION 2 +#define VK_KHR_MAINTENANCE_1_EXTENSION_NAME "VK_KHR_maintenance1" +// VK_KHR_MAINTENANCE1_SPEC_VERSION is a legacy alias +#define VK_KHR_MAINTENANCE1_SPEC_VERSION VK_KHR_MAINTENANCE_1_SPEC_VERSION +// VK_KHR_MAINTENANCE1_EXTENSION_NAME is a legacy alias +#define VK_KHR_MAINTENANCE1_EXTENSION_NAME VK_KHR_MAINTENANCE_1_EXTENSION_NAME +typedef VkCommandPoolTrimFlags VkCommandPoolTrimFlagsKHR; + +typedef void (VKAPI_PTR *PFN_vkTrimCommandPoolKHR)(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkTrimCommandPoolKHR( + VkDevice device, + VkCommandPool commandPool, + VkCommandPoolTrimFlags flags); +#endif +#endif + + +// VK_KHR_device_group_creation is a preprocessor guard. Do not pass it to API calls. 
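// ---------------------------------------------------------------------------
// Editor's note -- illustrative sketch only, not part of the vendored header.
// vkTrimCommandPoolKHR (VK_KHR_maintenance1, above) returns unused
// command-pool memory to the implementation without resetting recorded
// buffers; `device` and `commandPool` are hypothetical handles, and the flags
// parameter is reserved for future use:
//
//     vkTrimCommandPoolKHR(device, commandPool, 0);
// ---------------------------------------------------------------------------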
+#define VK_KHR_device_group_creation 1 +#define VK_KHR_DEVICE_GROUP_CREATION_SPEC_VERSION 1 +#define VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME "VK_KHR_device_group_creation" +#define VK_MAX_DEVICE_GROUP_SIZE_KHR VK_MAX_DEVICE_GROUP_SIZE +typedef VkPhysicalDeviceGroupProperties VkPhysicalDeviceGroupPropertiesKHR; + +typedef VkDeviceGroupDeviceCreateInfo VkDeviceGroupDeviceCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceGroupsKHR)(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceGroupsKHR( + VkInstance instance, + uint32_t* pPhysicalDeviceGroupCount, + VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties); +#endif +#endif + + +// VK_KHR_external_memory_capabilities is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_external_memory_capabilities 1 +#define VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_memory_capabilities" +#define VK_LUID_SIZE_KHR VK_LUID_SIZE +typedef VkExternalMemoryHandleTypeFlags VkExternalMemoryHandleTypeFlagsKHR; + +typedef VkExternalMemoryHandleTypeFlagBits VkExternalMemoryHandleTypeFlagBitsKHR; + +typedef VkExternalMemoryFeatureFlags VkExternalMemoryFeatureFlagsKHR; + +typedef VkExternalMemoryFeatureFlagBits VkExternalMemoryFeatureFlagBitsKHR; + +typedef VkExternalMemoryProperties VkExternalMemoryPropertiesKHR; + +typedef VkPhysicalDeviceExternalImageFormatInfo VkPhysicalDeviceExternalImageFormatInfoKHR; + +typedef VkExternalImageFormatProperties VkExternalImageFormatPropertiesKHR; + +typedef VkPhysicalDeviceExternalBufferInfo VkPhysicalDeviceExternalBufferInfoKHR; + +typedef VkExternalBufferProperties VkExternalBufferPropertiesKHR; + +typedef VkPhysicalDeviceIDProperties VkPhysicalDeviceIDPropertiesKHR; + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalBufferPropertiesKHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, + VkExternalBufferProperties* pExternalBufferProperties); +#endif +#endif + + +// VK_KHR_external_memory is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_external_memory 1 +#define VK_KHR_EXTERNAL_MEMORY_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME "VK_KHR_external_memory" +#define VK_QUEUE_FAMILY_EXTERNAL_KHR VK_QUEUE_FAMILY_EXTERNAL +typedef VkExternalMemoryImageCreateInfo VkExternalMemoryImageCreateInfoKHR; + +typedef VkExternalMemoryBufferCreateInfo VkExternalMemoryBufferCreateInfoKHR; + +typedef VkExportMemoryAllocateInfo VkExportMemoryAllocateInfoKHR; + + + +// VK_KHR_external_memory_fd is a preprocessor guard. Do not pass it to API calls. 
+#define VK_KHR_external_memory_fd 1 +#define VK_KHR_EXTERNAL_MEMORY_FD_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME "VK_KHR_external_memory_fd" +typedef struct VkImportMemoryFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagBits handleType; + int fd; +} VkImportMemoryFdInfoKHR; + +typedef struct VkMemoryFdPropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t memoryTypeBits; +} VkMemoryFdPropertiesKHR; + +typedef struct VkMemoryGetFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; + VkExternalMemoryHandleTypeFlagBits handleType; +} VkMemoryGetFdInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryFdKHR)(VkDevice device, const VkMemoryGetFdInfoKHR* pGetFdInfo, int* pFd); +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryFdPropertiesKHR)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, int fd, VkMemoryFdPropertiesKHR* pMemoryFdProperties); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryFdKHR( + VkDevice device, + const VkMemoryGetFdInfoKHR* pGetFdInfo, + int* pFd); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryFdPropertiesKHR( + VkDevice device, + VkExternalMemoryHandleTypeFlagBits handleType, + int fd, + VkMemoryFdPropertiesKHR* pMemoryFdProperties); +#endif +#endif + + +// VK_KHR_external_semaphore_capabilities is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_external_semaphore_capabilities 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_semaphore_capabilities" +typedef VkExternalSemaphoreHandleTypeFlags VkExternalSemaphoreHandleTypeFlagsKHR; + +typedef VkExternalSemaphoreHandleTypeFlagBits VkExternalSemaphoreHandleTypeFlagBitsKHR; + +typedef VkExternalSemaphoreFeatureFlags VkExternalSemaphoreFeatureFlagsKHR; + +typedef VkExternalSemaphoreFeatureFlagBits VkExternalSemaphoreFeatureFlagBitsKHR; + +typedef VkPhysicalDeviceExternalSemaphoreInfo VkPhysicalDeviceExternalSemaphoreInfoKHR; + +typedef VkExternalSemaphoreProperties VkExternalSemaphorePropertiesKHR; + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalSemaphorePropertiesKHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, + VkExternalSemaphoreProperties* pExternalSemaphoreProperties); +#endif +#endif + + +// VK_KHR_external_semaphore is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_external_semaphore 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME "VK_KHR_external_semaphore" +typedef VkSemaphoreImportFlags VkSemaphoreImportFlagsKHR; + +typedef VkSemaphoreImportFlagBits VkSemaphoreImportFlagBitsKHR; + +typedef VkExportSemaphoreCreateInfo VkExportSemaphoreCreateInfoKHR; + + + +// VK_KHR_external_semaphore_fd is a preprocessor guard. Do not pass it to API calls. 
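// ---------------------------------------------------------------------------
// Editor's note -- illustrative sketch only, not part of the vendored header.
// Exporting a POSIX fd (VK_KHR_external_memory_fd, above) for a
// VkDeviceMemory that was allocated with VkExportMemoryAllocateInfoKHR in its
// pNext chain. `device` and `memory` are hypothetical handles; the returned
// fd is owned by the caller and must eventually be closed:
//
//     VkMemoryGetFdInfoKHR getFd = {0};
//     getFd.sType      = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR;
//     getFd.memory     = memory;
//     getFd.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
//     int fd = -1;
//     VkResult res = vkGetMemoryFdKHR(device, &getFd, &fd);
// ---------------------------------------------------------------------------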
+#define VK_KHR_external_semaphore_fd 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_FD_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME "VK_KHR_external_semaphore_fd" +typedef struct VkImportSemaphoreFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + VkSemaphoreImportFlags flags; + VkExternalSemaphoreHandleTypeFlagBits handleType; + int fd; +} VkImportSemaphoreFdInfoKHR; + +typedef struct VkSemaphoreGetFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + VkExternalSemaphoreHandleTypeFlagBits handleType; +} VkSemaphoreGetFdInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkImportSemaphoreFdKHR)(VkDevice device, const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreFdKHR)(VkDevice device, const VkSemaphoreGetFdInfoKHR* pGetFdInfo, int* pFd); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreFdKHR( + VkDevice device, + const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreFdKHR( + VkDevice device, + const VkSemaphoreGetFdInfoKHR* pGetFdInfo, + int* pFd); +#endif +#endif + + +// VK_KHR_push_descriptor is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_push_descriptor 1 +#define VK_KHR_PUSH_DESCRIPTOR_SPEC_VERSION 2 +#define VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME "VK_KHR_push_descriptor" +typedef VkPhysicalDevicePushDescriptorProperties VkPhysicalDevicePushDescriptorPropertiesKHR; + +typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetKHR)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites); +typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetWithTemplateKHR)(VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplate descriptorUpdateTemplate, VkPipelineLayout layout, uint32_t set, const void* pData); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetKHR( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipelineLayout layout, + uint32_t set, + uint32_t descriptorWriteCount, + const VkWriteDescriptorSet* pDescriptorWrites); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetWithTemplateKHR( + VkCommandBuffer commandBuffer, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + VkPipelineLayout layout, + uint32_t set, + const void* pData); +#endif +#endif + + +// VK_KHR_shader_float16_int8 is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_shader_float16_int8 1 +#define VK_KHR_SHADER_FLOAT16_INT8_SPEC_VERSION 1 +#define VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME "VK_KHR_shader_float16_int8" +typedef VkPhysicalDeviceShaderFloat16Int8Features VkPhysicalDeviceShaderFloat16Int8FeaturesKHR; + +typedef VkPhysicalDeviceShaderFloat16Int8Features VkPhysicalDeviceFloat16Int8FeaturesKHR; + + + +// VK_KHR_16bit_storage is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_16bit_storage 1 +#define VK_KHR_16BIT_STORAGE_SPEC_VERSION 1 +#define VK_KHR_16BIT_STORAGE_EXTENSION_NAME "VK_KHR_16bit_storage" +typedef VkPhysicalDevice16BitStorageFeatures VkPhysicalDevice16BitStorageFeaturesKHR; + + + +// VK_KHR_incremental_present is a preprocessor guard. Do not pass it to API calls. 
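// ---------------------------------------------------------------------------
// Editor's note -- illustrative sketch only, not part of the vendored header.
// With VK_KHR_push_descriptor (above), VkWriteDescriptorSet::dstSet stays
// VK_NULL_HANDLE and the write is recorded straight into the command buffer.
// `cmd`, `layout`, and `buffer` are hypothetical; the target set layout must
// be created with VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR:
//
//     VkDescriptorBufferInfo bufInfo = { buffer, 0, VK_WHOLE_SIZE };
//     VkWriteDescriptorSet write = {0};
//     write.sType           = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
//     write.dstBinding      = 0;
//     write.descriptorCount = 1;
//     write.descriptorType  = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
//     write.pBufferInfo     = &bufInfo;
//     vkCmdPushDescriptorSetKHR(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS,
//                               layout, 0 /* set */, 1, &write);
// ---------------------------------------------------------------------------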
+#define VK_KHR_incremental_present 1 +#define VK_KHR_INCREMENTAL_PRESENT_SPEC_VERSION 2 +#define VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME "VK_KHR_incremental_present" +typedef struct VkRectLayerKHR { + VkOffset2D offset; + VkExtent2D extent; + uint32_t layer; +} VkRectLayerKHR; + +typedef struct VkPresentRegionKHR { + uint32_t rectangleCount; + const VkRectLayerKHR* pRectangles; +} VkPresentRegionKHR; + +typedef struct VkPresentRegionsKHR { + VkStructureType sType; + const void* pNext; + uint32_t swapchainCount; + const VkPresentRegionKHR* pRegions; +} VkPresentRegionsKHR; + + + +// VK_KHR_descriptor_update_template is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_descriptor_update_template 1 +typedef VkDescriptorUpdateTemplate VkDescriptorUpdateTemplateKHR; + +#define VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_SPEC_VERSION 1 +#define VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME "VK_KHR_descriptor_update_template" +typedef VkDescriptorUpdateTemplateType VkDescriptorUpdateTemplateTypeKHR; + +typedef VkDescriptorUpdateTemplateCreateFlags VkDescriptorUpdateTemplateCreateFlagsKHR; + +typedef VkDescriptorUpdateTemplateEntry VkDescriptorUpdateTemplateEntryKHR; + +typedef VkDescriptorUpdateTemplateCreateInfo VkDescriptorUpdateTemplateCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorUpdateTemplateKHR)(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate); +typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorUpdateTemplateKHR)(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSetWithTemplateKHR)(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorUpdateTemplateKHR( + VkDevice device, + const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorUpdateTemplateKHR( + VkDevice device, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + const VkAllocationCallbacks* pAllocator); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSetWithTemplateKHR( + VkDevice device, + VkDescriptorSet descriptorSet, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + const void* pData); +#endif +#endif + + +// VK_KHR_imageless_framebuffer is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_imageless_framebuffer 1 +#define VK_KHR_IMAGELESS_FRAMEBUFFER_SPEC_VERSION 1 +#define VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME "VK_KHR_imageless_framebuffer" +typedef VkPhysicalDeviceImagelessFramebufferFeatures VkPhysicalDeviceImagelessFramebufferFeaturesKHR; + +typedef VkFramebufferAttachmentsCreateInfo VkFramebufferAttachmentsCreateInfoKHR; + +typedef VkFramebufferAttachmentImageInfo VkFramebufferAttachmentImageInfoKHR; + +typedef VkRenderPassAttachmentBeginInfo VkRenderPassAttachmentBeginInfoKHR; + + + +// VK_KHR_create_renderpass2 is a preprocessor guard. Do not pass it to API calls. 
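// ---------------------------------------------------------------------------
// Editor's note -- illustrative sketch only, not part of the vendored header.
// Marking a single dirty rectangle for one swapchain by chaining the
// VK_KHR_incremental_present structs (above) into VkPresentInfoKHR;
// `presentInfo` and the rectangle values are hypothetical:
//
//     VkRectLayerKHR dirty = { { 0, 0 }, { 256, 256 }, 0 /* layer */ };
//     VkPresentRegionKHR region = { 1, &dirty };
//     VkPresentRegionsKHR regions = {0};
//     regions.sType          = VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR;
//     regions.swapchainCount = 1;   /* must match presentInfo.swapchainCount */
//     regions.pRegions       = &region;
//     presentInfo.pNext = &regions;
// ---------------------------------------------------------------------------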
+#define VK_KHR_create_renderpass2 1 +#define VK_KHR_CREATE_RENDERPASS_2_SPEC_VERSION 1 +#define VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME "VK_KHR_create_renderpass2" +typedef VkRenderPassCreateInfo2 VkRenderPassCreateInfo2KHR; + +typedef VkAttachmentDescription2 VkAttachmentDescription2KHR; + +typedef VkAttachmentReference2 VkAttachmentReference2KHR; + +typedef VkSubpassDescription2 VkSubpassDescription2KHR; + +typedef VkSubpassDependency2 VkSubpassDependency2KHR; + +typedef VkSubpassBeginInfo VkSubpassBeginInfoKHR; + +typedef VkSubpassEndInfo VkSubpassEndInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass2KHR)(VkDevice device, const VkRenderPassCreateInfo2* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass); +typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass2KHR)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, const VkSubpassBeginInfo* pSubpassBeginInfo); +typedef void (VKAPI_PTR *PFN_vkCmdNextSubpass2KHR)(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo* pSubpassBeginInfo, const VkSubpassEndInfo* pSubpassEndInfo); +typedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass2KHR)(VkCommandBuffer commandBuffer, const VkSubpassEndInfo* pSubpassEndInfo); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass2KHR( + VkDevice device, + const VkRenderPassCreateInfo2* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkRenderPass* pRenderPass); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass2KHR( + VkCommandBuffer commandBuffer, + const VkRenderPassBeginInfo* pRenderPassBegin, + const VkSubpassBeginInfo* pSubpassBeginInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass2KHR( + VkCommandBuffer commandBuffer, + const VkSubpassBeginInfo* pSubpassBeginInfo, + const VkSubpassEndInfo* pSubpassEndInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass2KHR( + VkCommandBuffer commandBuffer, + const VkSubpassEndInfo* pSubpassEndInfo); +#endif +#endif + + +// VK_KHR_shared_presentable_image is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_shared_presentable_image 1 +#define VK_KHR_SHARED_PRESENTABLE_IMAGE_SPEC_VERSION 1 +#define VK_KHR_SHARED_PRESENTABLE_IMAGE_EXTENSION_NAME "VK_KHR_shared_presentable_image" +typedef struct VkSharedPresentSurfaceCapabilitiesKHR { + VkStructureType sType; + void* pNext; + VkImageUsageFlags sharedPresentSupportedUsageFlags; +} VkSharedPresentSurfaceCapabilitiesKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainStatusKHR)(VkDevice device, VkSwapchainKHR swapchain); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainStatusKHR( + VkDevice device, + VkSwapchainKHR swapchain); +#endif +#endif + + +// VK_KHR_external_fence_capabilities is a preprocessor guard. Do not pass it to API calls. 
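// ---------------------------------------------------------------------------
// Editor's note -- illustrative sketch only, not part of the vendored header.
// The *2KHR entry points of VK_KHR_create_renderpass2 (above) move the
// subpass parameters into extensible structs; `cmd` and `renderPassBegin`
// (a filled VkRenderPassBeginInfo) are assumed:
//
//     VkSubpassBeginInfoKHR subpassBegin = {0};
//     subpassBegin.sType    = VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO;
//     subpassBegin.contents = VK_SUBPASS_CONTENTS_INLINE;
//     VkSubpassEndInfoKHR subpassEnd = {0};
//     subpassEnd.sType = VK_STRUCTURE_TYPE_SUBPASS_END_INFO;
//     vkCmdBeginRenderPass2KHR(cmd, &renderPassBegin, &subpassBegin);
//     /* ... record draws ... */
//     vkCmdEndRenderPass2KHR(cmd, &subpassEnd);
// ---------------------------------------------------------------------------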
+#define VK_KHR_external_fence_capabilities 1 +#define VK_KHR_EXTERNAL_FENCE_CAPABILITIES_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_fence_capabilities" +typedef VkExternalFenceHandleTypeFlags VkExternalFenceHandleTypeFlagsKHR; + +typedef VkExternalFenceHandleTypeFlagBits VkExternalFenceHandleTypeFlagBitsKHR; + +typedef VkExternalFenceFeatureFlags VkExternalFenceFeatureFlagsKHR; + +typedef VkExternalFenceFeatureFlagBits VkExternalFenceFeatureFlagBitsKHR; + +typedef VkPhysicalDeviceExternalFenceInfo VkPhysicalDeviceExternalFenceInfoKHR; + +typedef VkExternalFenceProperties VkExternalFencePropertiesKHR; + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalFencePropertiesKHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, + VkExternalFenceProperties* pExternalFenceProperties); +#endif +#endif + + +// VK_KHR_external_fence is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_external_fence 1 +#define VK_KHR_EXTERNAL_FENCE_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_FENCE_EXTENSION_NAME "VK_KHR_external_fence" +typedef VkFenceImportFlags VkFenceImportFlagsKHR; + +typedef VkFenceImportFlagBits VkFenceImportFlagBitsKHR; + +typedef VkExportFenceCreateInfo VkExportFenceCreateInfoKHR; + + + +// VK_KHR_external_fence_fd is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_external_fence_fd 1 +#define VK_KHR_EXTERNAL_FENCE_FD_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME "VK_KHR_external_fence_fd" +typedef struct VkImportFenceFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkFence fence; + VkFenceImportFlags flags; + VkExternalFenceHandleTypeFlagBits handleType; + int fd; +} VkImportFenceFdInfoKHR; + +typedef struct VkFenceGetFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkFence fence; + VkExternalFenceHandleTypeFlagBits handleType; +} VkFenceGetFdInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkImportFenceFdKHR)(VkDevice device, const VkImportFenceFdInfoKHR* pImportFenceFdInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetFenceFdKHR)(VkDevice device, const VkFenceGetFdInfoKHR* pGetFdInfo, int* pFd); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkImportFenceFdKHR( + VkDevice device, + const VkImportFenceFdInfoKHR* pImportFenceFdInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceFdKHR( + VkDevice device, + const VkFenceGetFdInfoKHR* pGetFdInfo, + int* pFd); +#endif +#endif + + +// VK_KHR_performance_query is a preprocessor guard. Do not pass it to API calls. 
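// ---------------------------------------------------------------------------
// Editor's note -- illustrative sketch only, not part of the vendored header.
// Exporting a sync fd (VK_KHR_external_fence_fd, above) for a fence created
// with VkExportFenceCreateInfoKHR in its pNext chain; `device` and `fence`
// are hypothetical handles:
//
//     VkFenceGetFdInfoKHR getFd = {0};
//     getFd.sType      = VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR;
//     getFd.fence      = fence;
//     getFd.handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
//     int fd = -1;
//     VkResult res = vkGetFenceFdKHR(device, &getFd, &fd);
// ---------------------------------------------------------------------------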
+#define VK_KHR_performance_query 1 +#define VK_KHR_PERFORMANCE_QUERY_SPEC_VERSION 1 +#define VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME "VK_KHR_performance_query" + +typedef enum VkPerformanceCounterUnitKHR { + VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR = 0, + VK_PERFORMANCE_COUNTER_UNIT_PERCENTAGE_KHR = 1, + VK_PERFORMANCE_COUNTER_UNIT_NANOSECONDS_KHR = 2, + VK_PERFORMANCE_COUNTER_UNIT_BYTES_KHR = 3, + VK_PERFORMANCE_COUNTER_UNIT_BYTES_PER_SECOND_KHR = 4, + VK_PERFORMANCE_COUNTER_UNIT_KELVIN_KHR = 5, + VK_PERFORMANCE_COUNTER_UNIT_WATTS_KHR = 6, + VK_PERFORMANCE_COUNTER_UNIT_VOLTS_KHR = 7, + VK_PERFORMANCE_COUNTER_UNIT_AMPS_KHR = 8, + VK_PERFORMANCE_COUNTER_UNIT_HERTZ_KHR = 9, + VK_PERFORMANCE_COUNTER_UNIT_CYCLES_KHR = 10, + VK_PERFORMANCE_COUNTER_UNIT_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPerformanceCounterUnitKHR; + +typedef enum VkPerformanceCounterScopeKHR { + VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_BUFFER_KHR = 0, + VK_PERFORMANCE_COUNTER_SCOPE_RENDER_PASS_KHR = 1, + VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR = 2, + // VK_QUERY_SCOPE_COMMAND_BUFFER_KHR is a legacy alias + VK_QUERY_SCOPE_COMMAND_BUFFER_KHR = VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_BUFFER_KHR, + // VK_QUERY_SCOPE_RENDER_PASS_KHR is a legacy alias + VK_QUERY_SCOPE_RENDER_PASS_KHR = VK_PERFORMANCE_COUNTER_SCOPE_RENDER_PASS_KHR, + // VK_QUERY_SCOPE_COMMAND_KHR is a legacy alias + VK_QUERY_SCOPE_COMMAND_KHR = VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR, + VK_PERFORMANCE_COUNTER_SCOPE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPerformanceCounterScopeKHR; + +typedef enum VkPerformanceCounterStorageKHR { + VK_PERFORMANCE_COUNTER_STORAGE_INT32_KHR = 0, + VK_PERFORMANCE_COUNTER_STORAGE_INT64_KHR = 1, + VK_PERFORMANCE_COUNTER_STORAGE_UINT32_KHR = 2, + VK_PERFORMANCE_COUNTER_STORAGE_UINT64_KHR = 3, + VK_PERFORMANCE_COUNTER_STORAGE_FLOAT32_KHR = 4, + VK_PERFORMANCE_COUNTER_STORAGE_FLOAT64_KHR = 5, + VK_PERFORMANCE_COUNTER_STORAGE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPerformanceCounterStorageKHR; + +typedef enum VkPerformanceCounterDescriptionFlagBitsKHR { + VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_BIT_KHR = 0x00000001, + VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_BIT_KHR = 0x00000002, + // VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_KHR is a legacy alias + VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_KHR = VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_BIT_KHR, + // VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_KHR is a legacy alias + VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_KHR = VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_BIT_KHR, + VK_PERFORMANCE_COUNTER_DESCRIPTION_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPerformanceCounterDescriptionFlagBitsKHR; +typedef VkFlags VkPerformanceCounterDescriptionFlagsKHR; + +typedef enum VkAcquireProfilingLockFlagBitsKHR { + VK_ACQUIRE_PROFILING_LOCK_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkAcquireProfilingLockFlagBitsKHR; +typedef VkFlags VkAcquireProfilingLockFlagsKHR; +typedef struct VkPhysicalDevicePerformanceQueryFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 performanceCounterQueryPools; + VkBool32 performanceCounterMultipleQueryPools; +} VkPhysicalDevicePerformanceQueryFeaturesKHR; + +typedef struct VkPhysicalDevicePerformanceQueryPropertiesKHR { + VkStructureType sType; + void* pNext; + VkBool32 allowCommandBufferQueryCopies; +} VkPhysicalDevicePerformanceQueryPropertiesKHR; + +typedef struct VkPerformanceCounterKHR { + VkStructureType sType; + void* pNext; + VkPerformanceCounterUnitKHR unit; + 
VkPerformanceCounterScopeKHR scope; + VkPerformanceCounterStorageKHR storage; + uint8_t uuid[VK_UUID_SIZE]; +} VkPerformanceCounterKHR; + +typedef struct VkPerformanceCounterDescriptionKHR { + VkStructureType sType; + void* pNext; + VkPerformanceCounterDescriptionFlagsKHR flags; + char name[VK_MAX_DESCRIPTION_SIZE]; + char category[VK_MAX_DESCRIPTION_SIZE]; + char description[VK_MAX_DESCRIPTION_SIZE]; +} VkPerformanceCounterDescriptionKHR; + +typedef struct VkQueryPoolPerformanceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t queueFamilyIndex; + uint32_t counterIndexCount; + const uint32_t* pCounterIndices; +} VkQueryPoolPerformanceCreateInfoKHR; + +typedef union VkPerformanceCounterResultKHR { + int32_t int32; + int64_t int64; + uint32_t uint32; + uint64_t uint64; + float float32; + double float64; +} VkPerformanceCounterResultKHR; + +typedef struct VkAcquireProfilingLockInfoKHR { + VkStructureType sType; + const void* pNext; + VkAcquireProfilingLockFlagsKHR flags; + uint64_t timeout; +} VkAcquireProfilingLockInfoKHR; + +typedef struct VkPerformanceQuerySubmitInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t counterPassIndex; +} VkPerformanceQuerySubmitInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, uint32_t* pCounterCount, VkPerformanceCounterKHR* pCounters, VkPerformanceCounterDescriptionKHR* pCounterDescriptions); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR)(VkPhysicalDevice physicalDevice, const VkQueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo, uint32_t* pNumPasses); +typedef VkResult (VKAPI_PTR *PFN_vkAcquireProfilingLockKHR)(VkDevice device, const VkAcquireProfilingLockInfoKHR* pInfo); +typedef void (VKAPI_PTR *PFN_vkReleaseProfilingLockKHR)(VkDevice device); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + uint32_t* pCounterCount, + VkPerformanceCounterKHR* pCounters, + VkPerformanceCounterDescriptionKHR* pCounterDescriptions); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR( + VkPhysicalDevice physicalDevice, + const VkQueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo, + uint32_t* pNumPasses); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkAcquireProfilingLockKHR( + VkDevice device, + const VkAcquireProfilingLockInfoKHR* pInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkReleaseProfilingLockKHR( + VkDevice device); +#endif +#endif + + +// VK_KHR_maintenance2 is a preprocessor guard. Do not pass it to API calls. 
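// ---------------------------------------------------------------------------
// Editor's note -- illustrative sketch only, not part of the vendored header.
// Counter discovery in VK_KHR_performance_query (above) is the usual two-call
// enumeration, with both output arrays sType-tagged. A fixed-size array keeps
// the sketch allocation-free (`physicalDevice` and `queueFamilyIndex` are
// hypothetical):
//
//     uint32_t n = 0;
//     vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(
//         physicalDevice, queueFamilyIndex, &n, NULL, NULL);
//     VkPerformanceCounterKHR counters[64] = {0};           /* n <= 64 assumed */
//     VkPerformanceCounterDescriptionKHR descs[64] = {0};
//     for (uint32_t i = 0; i < n; i++) {
//         counters[i].sType = VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_KHR;
//         descs[i].sType    = VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_DESCRIPTION_KHR;
//     }
//     vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(
//         physicalDevice, queueFamilyIndex, &n, counters, descs);
// ---------------------------------------------------------------------------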
+#define VK_KHR_maintenance2 1 +#define VK_KHR_MAINTENANCE_2_SPEC_VERSION 1 +#define VK_KHR_MAINTENANCE_2_EXTENSION_NAME "VK_KHR_maintenance2" +// VK_KHR_MAINTENANCE2_SPEC_VERSION is a legacy alias +#define VK_KHR_MAINTENANCE2_SPEC_VERSION VK_KHR_MAINTENANCE_2_SPEC_VERSION +// VK_KHR_MAINTENANCE2_EXTENSION_NAME is a legacy alias +#define VK_KHR_MAINTENANCE2_EXTENSION_NAME VK_KHR_MAINTENANCE_2_EXTENSION_NAME +typedef VkPointClippingBehavior VkPointClippingBehaviorKHR; + +typedef VkTessellationDomainOrigin VkTessellationDomainOriginKHR; + +typedef VkPhysicalDevicePointClippingProperties VkPhysicalDevicePointClippingPropertiesKHR; + +typedef VkRenderPassInputAttachmentAspectCreateInfo VkRenderPassInputAttachmentAspectCreateInfoKHR; + +typedef VkInputAttachmentAspectReference VkInputAttachmentAspectReferenceKHR; + +typedef VkImageViewUsageCreateInfo VkImageViewUsageCreateInfoKHR; + +typedef VkPipelineTessellationDomainOriginStateCreateInfo VkPipelineTessellationDomainOriginStateCreateInfoKHR; + + + +// VK_KHR_get_surface_capabilities2 is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_get_surface_capabilities2 1 +#define VK_KHR_GET_SURFACE_CAPABILITIES_2_SPEC_VERSION 1 +#define VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME "VK_KHR_get_surface_capabilities2" +typedef struct VkPhysicalDeviceSurfaceInfo2KHR { + VkStructureType sType; + const void* pNext; + VkSurfaceKHR surface; +} VkPhysicalDeviceSurfaceInfo2KHR; + +typedef struct VkSurfaceCapabilities2KHR { + VkStructureType sType; + void* pNext; + VkSurfaceCapabilitiesKHR surfaceCapabilities; +} VkSurfaceCapabilities2KHR; + +typedef struct VkSurfaceFormat2KHR { + VkStructureType sType; + void* pNext; + VkSurfaceFormatKHR surfaceFormat; +} VkSurfaceFormat2KHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkSurfaceCapabilities2KHR* pSurfaceCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceFormats2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pSurfaceFormatCount, VkSurfaceFormat2KHR* pSurfaceFormats); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilities2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, + VkSurfaceCapabilities2KHR* pSurfaceCapabilities); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormats2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, + uint32_t* pSurfaceFormatCount, + VkSurfaceFormat2KHR* pSurfaceFormats); +#endif +#endif + + +// VK_KHR_variable_pointers is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_variable_pointers 1 +#define VK_KHR_VARIABLE_POINTERS_SPEC_VERSION 1 +#define VK_KHR_VARIABLE_POINTERS_EXTENSION_NAME "VK_KHR_variable_pointers" +typedef VkPhysicalDeviceVariablePointersFeatures VkPhysicalDeviceVariablePointerFeaturesKHR; + +typedef VkPhysicalDeviceVariablePointersFeatures VkPhysicalDeviceVariablePointersFeaturesKHR; + + + +// VK_KHR_get_display_properties2 is a preprocessor guard. Do not pass it to API calls. 
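// ---------------------------------------------------------------------------
// Editor's note -- illustrative sketch only, not part of the vendored header.
// The "2" variants in VK_KHR_get_surface_capabilities2 (above) wrap the query
// in extensible structs; `physicalDevice` and `surface` are hypothetical
// handles:
//
//     VkPhysicalDeviceSurfaceInfo2KHR surfInfo = {0};
//     surfInfo.sType   = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR;
//     surfInfo.surface = surface;
//     VkSurfaceCapabilities2KHR caps = {0};
//     caps.sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR;
//     vkGetPhysicalDeviceSurfaceCapabilities2KHR(physicalDevice, &surfInfo, &caps);
// ---------------------------------------------------------------------------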
+#define VK_KHR_get_display_properties2 1 +#define VK_KHR_GET_DISPLAY_PROPERTIES_2_SPEC_VERSION 1 +#define VK_KHR_GET_DISPLAY_PROPERTIES_2_EXTENSION_NAME "VK_KHR_get_display_properties2" +typedef struct VkDisplayProperties2KHR { + VkStructureType sType; + void* pNext; + VkDisplayPropertiesKHR displayProperties; +} VkDisplayProperties2KHR; + +typedef struct VkDisplayPlaneProperties2KHR { + VkStructureType sType; + void* pNext; + VkDisplayPlanePropertiesKHR displayPlaneProperties; +} VkDisplayPlaneProperties2KHR; + +typedef struct VkDisplayModeProperties2KHR { + VkStructureType sType; + void* pNext; + VkDisplayModePropertiesKHR displayModeProperties; +} VkDisplayModeProperties2KHR; + +typedef struct VkDisplayPlaneInfo2KHR { + VkStructureType sType; + const void* pNext; + VkDisplayModeKHR mode; + uint32_t planeIndex; +} VkDisplayPlaneInfo2KHR; + +typedef struct VkDisplayPlaneCapabilities2KHR { + VkStructureType sType; + void* pNext; + VkDisplayPlaneCapabilitiesKHR capabilities; +} VkDisplayPlaneCapabilities2KHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayProperties2KHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlaneProperties2KHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayModeProperties2KHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModeProperties2KHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneCapabilities2KHR)(VkPhysicalDevice physicalDevice, const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo, VkDisplayPlaneCapabilities2KHR* pCapabilities); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayProperties2KHR( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkDisplayProperties2KHR* pProperties); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPlaneProperties2KHR( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkDisplayPlaneProperties2KHR* pProperties); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayModeProperties2KHR( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display, + uint32_t* pPropertyCount, + VkDisplayModeProperties2KHR* pProperties); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneCapabilities2KHR( + VkPhysicalDevice physicalDevice, + const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo, + VkDisplayPlaneCapabilities2KHR* pCapabilities); +#endif +#endif + + +// VK_KHR_dedicated_allocation is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_dedicated_allocation 1 +#define VK_KHR_DEDICATED_ALLOCATION_SPEC_VERSION 3 +#define VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME "VK_KHR_dedicated_allocation" +typedef VkMemoryDedicatedRequirements VkMemoryDedicatedRequirementsKHR; + +typedef VkMemoryDedicatedAllocateInfo VkMemoryDedicatedAllocateInfoKHR; + + + +// VK_KHR_storage_buffer_storage_class is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_storage_buffer_storage_class 1 +#define VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_SPEC_VERSION 1 +#define VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_EXTENSION_NAME "VK_KHR_storage_buffer_storage_class" + + +// VK_KHR_shader_bfloat16 is a preprocessor guard. 
Do not pass it to API calls. +#define VK_KHR_shader_bfloat16 1 +#define VK_KHR_SHADER_BFLOAT16_SPEC_VERSION 1 +#define VK_KHR_SHADER_BFLOAT16_EXTENSION_NAME "VK_KHR_shader_bfloat16" +typedef struct VkPhysicalDeviceShaderBfloat16FeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 shaderBFloat16Type; + VkBool32 shaderBFloat16DotProduct; + VkBool32 shaderBFloat16CooperativeMatrix; +} VkPhysicalDeviceShaderBfloat16FeaturesKHR; + + + +// VK_KHR_relaxed_block_layout is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_relaxed_block_layout 1 +#define VK_KHR_RELAXED_BLOCK_LAYOUT_SPEC_VERSION 1 +#define VK_KHR_RELAXED_BLOCK_LAYOUT_EXTENSION_NAME "VK_KHR_relaxed_block_layout" + + +// VK_KHR_get_memory_requirements2 is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_get_memory_requirements2 1 +#define VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION 1 +#define VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME "VK_KHR_get_memory_requirements2" +typedef VkBufferMemoryRequirementsInfo2 VkBufferMemoryRequirementsInfo2KHR; + +typedef VkImageMemoryRequirementsInfo2 VkImageMemoryRequirementsInfo2KHR; + +typedef VkImageSparseMemoryRequirementsInfo2 VkImageSparseMemoryRequirementsInfo2KHR; + +typedef VkMemoryRequirements2 VkMemoryRequirements2KHR; + +typedef VkSparseImageMemoryRequirements2 VkSparseImageMemoryRequirements2KHR; + +typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements2KHR)(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements2KHR)(VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements2KHR)(VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements2KHR( + VkDevice device, + const VkImageMemoryRequirementsInfo2* pInfo, + VkMemoryRequirements2* pMemoryRequirements); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements2KHR( + VkDevice device, + const VkBufferMemoryRequirementsInfo2* pInfo, + VkMemoryRequirements2* pMemoryRequirements); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements2KHR( + VkDevice device, + const VkImageSparseMemoryRequirementsInfo2* pInfo, + uint32_t* pSparseMemoryRequirementCount, + VkSparseImageMemoryRequirements2* pSparseMemoryRequirements); +#endif +#endif + + +// VK_KHR_image_format_list is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_image_format_list 1 +#define VK_KHR_IMAGE_FORMAT_LIST_SPEC_VERSION 1 +#define VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME "VK_KHR_image_format_list" +typedef VkImageFormatListCreateInfo VkImageFormatListCreateInfoKHR; + + + +// VK_KHR_sampler_ycbcr_conversion is a preprocessor guard. Do not pass it to API calls. 
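// --- Editorial usage sketch combining VK_KHR_get_memory_requirements2 with
// VK_KHR_dedicated_allocation (both aliased above); not part of the vendored
// header. Handles and the helper name are illustrative assumptions.
static void sketch_image_memory_reqs2(VkDevice device, VkImage image)
{
    /* Chain the dedicated-allocation query into the output struct. */
    VkMemoryDedicatedRequirements dedicated = {
        .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS,
    };
    VkMemoryRequirements2 reqs = {
        .sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
        .pNext = &dedicated,
    };
    VkImageMemoryRequirementsInfo2 info = {
        .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2,
        .image = image,
    };
    vkGetImageMemoryRequirements2KHR(device, &info, &reqs);
    if (dedicated.prefersDedicatedAllocation) {
        /* allocate with a VkMemoryDedicatedAllocateInfo chained into
           VkMemoryAllocateInfo */
    }
}
// --- end sketch ---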
+#define VK_KHR_sampler_ycbcr_conversion 1 +typedef VkSamplerYcbcrConversion VkSamplerYcbcrConversionKHR; + +#define VK_KHR_SAMPLER_YCBCR_CONVERSION_SPEC_VERSION 14 +#define VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME "VK_KHR_sampler_ycbcr_conversion" +typedef VkSamplerYcbcrModelConversion VkSamplerYcbcrModelConversionKHR; + +typedef VkSamplerYcbcrRange VkSamplerYcbcrRangeKHR; + +typedef VkChromaLocation VkChromaLocationKHR; + +typedef VkSamplerYcbcrConversionCreateInfo VkSamplerYcbcrConversionCreateInfoKHR; + +typedef VkSamplerYcbcrConversionInfo VkSamplerYcbcrConversionInfoKHR; + +typedef VkBindImagePlaneMemoryInfo VkBindImagePlaneMemoryInfoKHR; + +typedef VkImagePlaneMemoryRequirementsInfo VkImagePlaneMemoryRequirementsInfoKHR; + +typedef VkPhysicalDeviceSamplerYcbcrConversionFeatures VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR; + +typedef VkSamplerYcbcrConversionImageFormatProperties VkSamplerYcbcrConversionImageFormatPropertiesKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateSamplerYcbcrConversionKHR)(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion); +typedef void (VKAPI_PTR *PFN_vkDestroySamplerYcbcrConversionKHR)(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSamplerYcbcrConversionKHR( + VkDevice device, + const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSamplerYcbcrConversion* pYcbcrConversion); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkDestroySamplerYcbcrConversionKHR( + VkDevice device, + VkSamplerYcbcrConversion ycbcrConversion, + const VkAllocationCallbacks* pAllocator); +#endif +#endif + + +// VK_KHR_bind_memory2 is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_bind_memory2 1 +#define VK_KHR_BIND_MEMORY_2_SPEC_VERSION 1 +#define VK_KHR_BIND_MEMORY_2_EXTENSION_NAME "VK_KHR_bind_memory2" +typedef VkBindBufferMemoryInfo VkBindBufferMemoryInfoKHR; + +typedef VkBindImageMemoryInfo VkBindImageMemoryInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory2KHR)(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos); +typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory2KHR)(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory2KHR( + VkDevice device, + uint32_t bindInfoCount, + const VkBindBufferMemoryInfo* pBindInfos); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory2KHR( + VkDevice device, + uint32_t bindInfoCount, + const VkBindImageMemoryInfo* pBindInfos); +#endif +#endif + + +// VK_KHR_maintenance3 is a preprocessor guard. Do not pass it to API calls. 
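// --- Editorial usage sketch for VK_KHR_bind_memory2 (declared above); not
// part of the vendored header. Handles and the helper name are illustrative
// assumptions.
static void sketch_bind_image_memory2(VkDevice device, VkImage image, VkDeviceMemory memory)
{
    VkBindImageMemoryInfo bind = {
        .sType        = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
        .image        = image,
        .memory       = memory,
        .memoryOffset = 0,
    };
    vkBindImageMemory2KHR(device, 1, &bind);  /* batched form of vkBindImageMemory */
}
// --- end sketch ---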
+#define VK_KHR_maintenance3 1 +#define VK_KHR_MAINTENANCE_3_SPEC_VERSION 1 +#define VK_KHR_MAINTENANCE_3_EXTENSION_NAME "VK_KHR_maintenance3" +// VK_KHR_MAINTENANCE3_SPEC_VERSION is a legacy alias +#define VK_KHR_MAINTENANCE3_SPEC_VERSION VK_KHR_MAINTENANCE_3_SPEC_VERSION +// VK_KHR_MAINTENANCE3_EXTENSION_NAME is a legacy alias +#define VK_KHR_MAINTENANCE3_EXTENSION_NAME VK_KHR_MAINTENANCE_3_EXTENSION_NAME +typedef VkPhysicalDeviceMaintenance3Properties VkPhysicalDeviceMaintenance3PropertiesKHR; + +typedef VkDescriptorSetLayoutSupport VkDescriptorSetLayoutSupportKHR; + +typedef void (VKAPI_PTR *PFN_vkGetDescriptorSetLayoutSupportKHR)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutSupportKHR( + VkDevice device, + const VkDescriptorSetLayoutCreateInfo* pCreateInfo, + VkDescriptorSetLayoutSupport* pSupport); +#endif +#endif + + +// VK_KHR_draw_indirect_count is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_draw_indirect_count 1 +#define VK_KHR_DRAW_INDIRECT_COUNT_SPEC_VERSION 1 +#define VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME "VK_KHR_draw_indirect_count" +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCountKHR)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCountKHR)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCountKHR( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCountKHR( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); +#endif +#endif + + +// VK_KHR_shader_subgroup_extended_types is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_shader_subgroup_extended_types 1 +#define VK_KHR_SHADER_SUBGROUP_EXTENDED_TYPES_SPEC_VERSION 1 +#define VK_KHR_SHADER_SUBGROUP_EXTENDED_TYPES_EXTENSION_NAME "VK_KHR_shader_subgroup_extended_types" +typedef VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR; + + + +// VK_KHR_8bit_storage is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_8bit_storage 1 +#define VK_KHR_8BIT_STORAGE_SPEC_VERSION 1 +#define VK_KHR_8BIT_STORAGE_EXTENSION_NAME "VK_KHR_8bit_storage" +typedef VkPhysicalDevice8BitStorageFeatures VkPhysicalDevice8BitStorageFeaturesKHR; + + + +// VK_KHR_shader_atomic_int64 is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_shader_atomic_int64 1 +#define VK_KHR_SHADER_ATOMIC_INT64_SPEC_VERSION 1 +#define VK_KHR_SHADER_ATOMIC_INT64_EXTENSION_NAME "VK_KHR_shader_atomic_int64" +typedef VkPhysicalDeviceShaderAtomicInt64Features VkPhysicalDeviceShaderAtomicInt64FeaturesKHR; + + + +// VK_KHR_shader_clock is a preprocessor guard. Do not pass it to API calls. 
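// --- Editorial usage sketch for VK_KHR_draw_indirect_count (declared above);
// not part of the vendored header. Buffers, the draw-count cap, and the
// helper name are illustrative assumptions.
static void sketch_draw_indirect_count(VkCommandBuffer cmd, VkBuffer argBuffer, VkBuffer countBuffer)
{
    /* Draw parameters live in argBuffer; the actual draw count is read by
       the GPU from a uint32_t at offset 0 of countBuffer, clamped to the
       CPU-side maxDrawCount. */
    vkCmdDrawIndexedIndirectCountKHR(cmd,
        argBuffer,   0,
        countBuffer, 0,
        1024,                                   /* maxDrawCount (hypothetical) */
        sizeof(VkDrawIndexedIndirectCommand));  /* stride between records */
}
// --- end sketch ---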
+#define VK_KHR_shader_clock 1 +#define VK_KHR_SHADER_CLOCK_SPEC_VERSION 1 +#define VK_KHR_SHADER_CLOCK_EXTENSION_NAME "VK_KHR_shader_clock" +typedef struct VkPhysicalDeviceShaderClockFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 shaderSubgroupClock; + VkBool32 shaderDeviceClock; +} VkPhysicalDeviceShaderClockFeaturesKHR; + + + +// VK_KHR_video_decode_h265 is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_video_decode_h265 1 +#include "vk_video/vulkan_video_codec_h265std_decode.h" +#define VK_KHR_VIDEO_DECODE_H265_SPEC_VERSION 8 +#define VK_KHR_VIDEO_DECODE_H265_EXTENSION_NAME "VK_KHR_video_decode_h265" +typedef struct VkVideoDecodeH265ProfileInfoKHR { + VkStructureType sType; + const void* pNext; + StdVideoH265ProfileIdc stdProfileIdc; +} VkVideoDecodeH265ProfileInfoKHR; + +typedef struct VkVideoDecodeH265CapabilitiesKHR { + VkStructureType sType; + void* pNext; + StdVideoH265LevelIdc maxLevelIdc; +} VkVideoDecodeH265CapabilitiesKHR; + +typedef struct VkVideoDecodeH265SessionParametersAddInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t stdVPSCount; + const StdVideoH265VideoParameterSet* pStdVPSs; + uint32_t stdSPSCount; + const StdVideoH265SequenceParameterSet* pStdSPSs; + uint32_t stdPPSCount; + const StdVideoH265PictureParameterSet* pStdPPSs; +} VkVideoDecodeH265SessionParametersAddInfoKHR; + +typedef struct VkVideoDecodeH265SessionParametersCreateInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t maxStdVPSCount; + uint32_t maxStdSPSCount; + uint32_t maxStdPPSCount; + const VkVideoDecodeH265SessionParametersAddInfoKHR* pParametersAddInfo; +} VkVideoDecodeH265SessionParametersCreateInfoKHR; + +typedef struct VkVideoDecodeH265PictureInfoKHR { + VkStructureType sType; + const void* pNext; + const StdVideoDecodeH265PictureInfo* pStdPictureInfo; + uint32_t sliceSegmentCount; + const uint32_t* pSliceSegmentOffsets; +} VkVideoDecodeH265PictureInfoKHR; + +typedef struct VkVideoDecodeH265DpbSlotInfoKHR { + VkStructureType sType; + const void* pNext; + const StdVideoDecodeH265ReferenceInfo* pStdReferenceInfo; +} VkVideoDecodeH265DpbSlotInfoKHR; + + + +// VK_KHR_global_priority is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_global_priority 1 +#define VK_KHR_GLOBAL_PRIORITY_SPEC_VERSION 1 +#define VK_KHR_GLOBAL_PRIORITY_EXTENSION_NAME "VK_KHR_global_priority" +#define VK_MAX_GLOBAL_PRIORITY_SIZE_KHR VK_MAX_GLOBAL_PRIORITY_SIZE +typedef VkQueueGlobalPriority VkQueueGlobalPriorityKHR; + +typedef VkDeviceQueueGlobalPriorityCreateInfo VkDeviceQueueGlobalPriorityCreateInfoKHR; + +typedef VkPhysicalDeviceGlobalPriorityQueryFeatures VkPhysicalDeviceGlobalPriorityQueryFeaturesKHR; + +typedef VkQueueFamilyGlobalPriorityProperties VkQueueFamilyGlobalPriorityPropertiesKHR; + + + +// VK_KHR_driver_properties is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_driver_properties 1 +#define VK_KHR_DRIVER_PROPERTIES_SPEC_VERSION 1 +#define VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME "VK_KHR_driver_properties" +#define VK_MAX_DRIVER_NAME_SIZE_KHR VK_MAX_DRIVER_NAME_SIZE +#define VK_MAX_DRIVER_INFO_SIZE_KHR VK_MAX_DRIVER_INFO_SIZE +typedef VkDriverId VkDriverIdKHR; + +typedef VkConformanceVersion VkConformanceVersionKHR; + +typedef VkPhysicalDeviceDriverProperties VkPhysicalDeviceDriverPropertiesKHR; + + + +// VK_KHR_shader_float_controls is a preprocessor guard. Do not pass it to API calls. 
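// --- Editorial usage sketch for VK_KHR_driver_properties (aliased above);
// not part of the vendored header. It uses the core 1.1
// vkGetPhysicalDeviceProperties2 entry point (the KHR-suffixed alias works
// on 1.0 with VK_KHR_get_physical_device_properties2). The helper name is
// an illustrative assumption.
static void sketch_query_driver_props(VkPhysicalDevice physicalDevice)
{
    VkPhysicalDeviceDriverProperties driverProps = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES,
    };
    VkPhysicalDeviceProperties2 props2 = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
        .pNext = &driverProps,
    };
    vkGetPhysicalDeviceProperties2(physicalDevice, &props2);
    /* driverProps.driverID, .driverName and .driverInfo now identify the
       driver, e.g. for logging or workaround selection */
}
// --- end sketch ---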
+#define VK_KHR_shader_float_controls 1 +#define VK_KHR_SHADER_FLOAT_CONTROLS_SPEC_VERSION 4 +#define VK_KHR_SHADER_FLOAT_CONTROLS_EXTENSION_NAME "VK_KHR_shader_float_controls" +typedef VkShaderFloatControlsIndependence VkShaderFloatControlsIndependenceKHR; + +typedef VkPhysicalDeviceFloatControlsProperties VkPhysicalDeviceFloatControlsPropertiesKHR; + + + +// VK_KHR_depth_stencil_resolve is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_depth_stencil_resolve 1 +#define VK_KHR_DEPTH_STENCIL_RESOLVE_SPEC_VERSION 1 +#define VK_KHR_DEPTH_STENCIL_RESOLVE_EXTENSION_NAME "VK_KHR_depth_stencil_resolve" +typedef VkResolveModeFlagBits VkResolveModeFlagBitsKHR; + +typedef VkResolveModeFlags VkResolveModeFlagsKHR; + +typedef VkSubpassDescriptionDepthStencilResolve VkSubpassDescriptionDepthStencilResolveKHR; + +typedef VkPhysicalDeviceDepthStencilResolveProperties VkPhysicalDeviceDepthStencilResolvePropertiesKHR; + + + +// VK_KHR_swapchain_mutable_format is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_swapchain_mutable_format 1 +#define VK_KHR_SWAPCHAIN_MUTABLE_FORMAT_SPEC_VERSION 1 +#define VK_KHR_SWAPCHAIN_MUTABLE_FORMAT_EXTENSION_NAME "VK_KHR_swapchain_mutable_format" + + +// VK_KHR_timeline_semaphore is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_timeline_semaphore 1 +#define VK_KHR_TIMELINE_SEMAPHORE_SPEC_VERSION 2 +#define VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME "VK_KHR_timeline_semaphore" +typedef VkSemaphoreType VkSemaphoreTypeKHR; + +typedef VkSemaphoreWaitFlagBits VkSemaphoreWaitFlagBitsKHR; + +typedef VkSemaphoreWaitFlags VkSemaphoreWaitFlagsKHR; + +typedef VkPhysicalDeviceTimelineSemaphoreFeatures VkPhysicalDeviceTimelineSemaphoreFeaturesKHR; + +typedef VkPhysicalDeviceTimelineSemaphoreProperties VkPhysicalDeviceTimelineSemaphorePropertiesKHR; + +typedef VkSemaphoreTypeCreateInfo VkSemaphoreTypeCreateInfoKHR; + +typedef VkTimelineSemaphoreSubmitInfo VkTimelineSemaphoreSubmitInfoKHR; + +typedef VkSemaphoreWaitInfo VkSemaphoreWaitInfoKHR; + +typedef VkSemaphoreSignalInfo VkSemaphoreSignalInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreCounterValueKHR)(VkDevice device, VkSemaphore semaphore, uint64_t* pValue); +typedef VkResult (VKAPI_PTR *PFN_vkWaitSemaphoresKHR)(VkDevice device, const VkSemaphoreWaitInfo* pWaitInfo, uint64_t timeout); +typedef VkResult (VKAPI_PTR *PFN_vkSignalSemaphoreKHR)(VkDevice device, const VkSemaphoreSignalInfo* pSignalInfo); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreCounterValueKHR( + VkDevice device, + VkSemaphore semaphore, + uint64_t* pValue); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkWaitSemaphoresKHR( + VkDevice device, + const VkSemaphoreWaitInfo* pWaitInfo, + uint64_t timeout); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkSignalSemaphoreKHR( + VkDevice device, + const VkSemaphoreSignalInfo* pSignalInfo); +#endif +#endif + + +// VK_KHR_vulkan_memory_model is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_vulkan_memory_model 1 +#define VK_KHR_VULKAN_MEMORY_MODEL_SPEC_VERSION 3 +#define VK_KHR_VULKAN_MEMORY_MODEL_EXTENSION_NAME "VK_KHR_vulkan_memory_model" +typedef VkPhysicalDeviceVulkanMemoryModelFeatures VkPhysicalDeviceVulkanMemoryModelFeaturesKHR; + + + +// VK_KHR_shader_terminate_invocation is a preprocessor guard. Do not pass it to API calls. 
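// --- Editorial usage sketch for VK_KHR_timeline_semaphore (aliased above);
// not part of the vendored header. Handles and the helper name are
// illustrative assumptions.
static void sketch_timeline_semaphore(VkDevice device)
{
    VkSemaphoreTypeCreateInfo typeInfo = {
        .sType         = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO,
        .semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE,
        .initialValue  = 0,
    };
    VkSemaphoreCreateInfo createInfo = {
        .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
        .pNext = &typeInfo,
    };
    VkSemaphore timeline;
    vkCreateSemaphore(device, &createInfo, NULL, &timeline);

    /* Host-side wait until the timeline value reaches 1 (signaled by a queue
       submit carrying a VkTimelineSemaphoreSubmitInfo). */
    uint64_t waitValue = 1;
    VkSemaphoreWaitInfo waitInfo = {
        .sType          = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,
        .semaphoreCount = 1,
        .pSemaphores    = &timeline,
        .pValues        = &waitValue,
    };
    vkWaitSemaphoresKHR(device, &waitInfo, UINT64_MAX);
}
// --- end sketch ---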
+#define VK_KHR_shader_terminate_invocation 1 +#define VK_KHR_SHADER_TERMINATE_INVOCATION_SPEC_VERSION 1 +#define VK_KHR_SHADER_TERMINATE_INVOCATION_EXTENSION_NAME "VK_KHR_shader_terminate_invocation" +typedef VkPhysicalDeviceShaderTerminateInvocationFeatures VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR; + + + +// VK_KHR_fragment_shading_rate is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_fragment_shading_rate 1 +#define VK_KHR_FRAGMENT_SHADING_RATE_SPEC_VERSION 2 +#define VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME "VK_KHR_fragment_shading_rate" + +typedef enum VkFragmentShadingRateCombinerOpKHR { + VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR = 0, + VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR = 1, + VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MIN_KHR = 2, + VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MAX_KHR = 3, + VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MUL_KHR = 4, + VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MAX_ENUM_KHR = 0x7FFFFFFF +} VkFragmentShadingRateCombinerOpKHR; +typedef struct VkFragmentShadingRateAttachmentInfoKHR { + VkStructureType sType; + const void* pNext; + const VkAttachmentReference2* pFragmentShadingRateAttachment; + VkExtent2D shadingRateAttachmentTexelSize; +} VkFragmentShadingRateAttachmentInfoKHR; + +typedef struct VkPipelineFragmentShadingRateStateCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkExtent2D fragmentSize; + VkFragmentShadingRateCombinerOpKHR combinerOps[2]; +} VkPipelineFragmentShadingRateStateCreateInfoKHR; + +typedef struct VkPhysicalDeviceFragmentShadingRateFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 pipelineFragmentShadingRate; + VkBool32 primitiveFragmentShadingRate; + VkBool32 attachmentFragmentShadingRate; +} VkPhysicalDeviceFragmentShadingRateFeaturesKHR; + +typedef struct VkPhysicalDeviceFragmentShadingRatePropertiesKHR { + VkStructureType sType; + void* pNext; + VkExtent2D minFragmentShadingRateAttachmentTexelSize; + VkExtent2D maxFragmentShadingRateAttachmentTexelSize; + uint32_t maxFragmentShadingRateAttachmentTexelSizeAspectRatio; + VkBool32 primitiveFragmentShadingRateWithMultipleViewports; + VkBool32 layeredShadingRateAttachments; + VkBool32 fragmentShadingRateNonTrivialCombinerOps; + VkExtent2D maxFragmentSize; + uint32_t maxFragmentSizeAspectRatio; + uint32_t maxFragmentShadingRateCoverageSamples; + VkSampleCountFlagBits maxFragmentShadingRateRasterizationSamples; + VkBool32 fragmentShadingRateWithShaderDepthStencilWrites; + VkBool32 fragmentShadingRateWithSampleMask; + VkBool32 fragmentShadingRateWithShaderSampleMask; + VkBool32 fragmentShadingRateWithConservativeRasterization; + VkBool32 fragmentShadingRateWithFragmentShaderInterlock; + VkBool32 fragmentShadingRateWithCustomSampleLocations; + VkBool32 fragmentShadingRateStrictMultiplyCombiner; +} VkPhysicalDeviceFragmentShadingRatePropertiesKHR; + +typedef struct VkPhysicalDeviceFragmentShadingRateKHR { + VkStructureType sType; + void* pNext; + VkSampleCountFlags sampleCounts; + VkExtent2D fragmentSize; +} VkPhysicalDeviceFragmentShadingRateKHR; + +typedef struct VkRenderingFragmentShadingRateAttachmentInfoKHR { + VkStructureType sType; + const void* pNext; + VkImageView imageView; + VkImageLayout imageLayout; + VkExtent2D shadingRateAttachmentTexelSize; +} VkRenderingFragmentShadingRateAttachmentInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pFragmentShadingRateCount, VkPhysicalDeviceFragmentShadingRateKHR* pFragmentShadingRates); +typedef 
void (VKAPI_PTR *PFN_vkCmdSetFragmentShadingRateKHR)(VkCommandBuffer commandBuffer, const VkExtent2D* pFragmentSize, const VkFragmentShadingRateCombinerOpKHR combinerOps[2]); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceFragmentShadingRatesKHR( + VkPhysicalDevice physicalDevice, + uint32_t* pFragmentShadingRateCount, + VkPhysicalDeviceFragmentShadingRateKHR* pFragmentShadingRates); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetFragmentShadingRateKHR( + VkCommandBuffer commandBuffer, + const VkExtent2D* pFragmentSize, + const VkFragmentShadingRateCombinerOpKHR combinerOps[2]); +#endif +#endif + + +// VK_KHR_dynamic_rendering_local_read is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_dynamic_rendering_local_read 1 +#define VK_KHR_DYNAMIC_RENDERING_LOCAL_READ_SPEC_VERSION 1 +#define VK_KHR_DYNAMIC_RENDERING_LOCAL_READ_EXTENSION_NAME "VK_KHR_dynamic_rendering_local_read" +typedef VkPhysicalDeviceDynamicRenderingLocalReadFeatures VkPhysicalDeviceDynamicRenderingLocalReadFeaturesKHR; + +typedef VkRenderingAttachmentLocationInfo VkRenderingAttachmentLocationInfoKHR; + +typedef VkRenderingInputAttachmentIndexInfo VkRenderingInputAttachmentIndexInfoKHR; + +typedef void (VKAPI_PTR *PFN_vkCmdSetRenderingAttachmentLocationsKHR)(VkCommandBuffer commandBuffer, const VkRenderingAttachmentLocationInfo* pLocationInfo); +typedef void (VKAPI_PTR *PFN_vkCmdSetRenderingInputAttachmentIndicesKHR)(VkCommandBuffer commandBuffer, const VkRenderingInputAttachmentIndexInfo* pInputAttachmentIndexInfo); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetRenderingAttachmentLocationsKHR( + VkCommandBuffer commandBuffer, + const VkRenderingAttachmentLocationInfo* pLocationInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetRenderingInputAttachmentIndicesKHR( + VkCommandBuffer commandBuffer, + const VkRenderingInputAttachmentIndexInfo* pInputAttachmentIndexInfo); +#endif +#endif + + +// VK_KHR_shader_quad_control is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_shader_quad_control 1 +#define VK_KHR_SHADER_QUAD_CONTROL_SPEC_VERSION 1 +#define VK_KHR_SHADER_QUAD_CONTROL_EXTENSION_NAME "VK_KHR_shader_quad_control" +typedef struct VkPhysicalDeviceShaderQuadControlFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 shaderQuadControl; +} VkPhysicalDeviceShaderQuadControlFeaturesKHR; + + + +// VK_KHR_spirv_1_4 is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_spirv_1_4 1 +#define VK_KHR_SPIRV_1_4_SPEC_VERSION 1 +#define VK_KHR_SPIRV_1_4_EXTENSION_NAME "VK_KHR_spirv_1_4" + + +// VK_KHR_surface_protected_capabilities is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_surface_protected_capabilities 1 +#define VK_KHR_SURFACE_PROTECTED_CAPABILITIES_SPEC_VERSION 1 +#define VK_KHR_SURFACE_PROTECTED_CAPABILITIES_EXTENSION_NAME "VK_KHR_surface_protected_capabilities" +typedef struct VkSurfaceProtectedCapabilitiesKHR { + VkStructureType sType; + void* pNext; + VkBool32 supportsProtected; +} VkSurfaceProtectedCapabilitiesKHR; + + + +// VK_KHR_separate_depth_stencil_layouts is a preprocessor guard. Do not pass it to API calls. 
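// --- Editorial usage sketch for VK_KHR_fragment_shading_rate (declared
// above); not part of the vendored header. The rate chosen and the helper
// name are illustrative assumptions.
static void sketch_set_shading_rate(VkCommandBuffer cmd)
{
    VkExtent2D fragmentSize = { 2, 2 };  /* shade once per 2x2 pixel block */
    const VkFragmentShadingRateCombinerOpKHR ops[2] = {
        VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR,  /* vs. primitive rate  */
        VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR,  /* vs. attachment rate */
    };
    vkCmdSetFragmentShadingRateKHR(cmd, &fragmentSize, ops);
}
// --- end sketch ---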
+#define VK_KHR_separate_depth_stencil_layouts 1 +#define VK_KHR_SEPARATE_DEPTH_STENCIL_LAYOUTS_SPEC_VERSION 1 +#define VK_KHR_SEPARATE_DEPTH_STENCIL_LAYOUTS_EXTENSION_NAME "VK_KHR_separate_depth_stencil_layouts" +typedef VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR; + +typedef VkAttachmentReferenceStencilLayout VkAttachmentReferenceStencilLayoutKHR; + +typedef VkAttachmentDescriptionStencilLayout VkAttachmentDescriptionStencilLayoutKHR; + + + +// VK_KHR_present_wait is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_present_wait 1 +#define VK_KHR_PRESENT_WAIT_SPEC_VERSION 1 +#define VK_KHR_PRESENT_WAIT_EXTENSION_NAME "VK_KHR_present_wait" +typedef struct VkPhysicalDevicePresentWaitFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 presentWait; +} VkPhysicalDevicePresentWaitFeaturesKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkWaitForPresentKHR)(VkDevice device, VkSwapchainKHR swapchain, uint64_t presentId, uint64_t timeout); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkWaitForPresentKHR( + VkDevice device, + VkSwapchainKHR swapchain, + uint64_t presentId, + uint64_t timeout); +#endif +#endif + + +// VK_KHR_uniform_buffer_standard_layout is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_uniform_buffer_standard_layout 1 +#define VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_SPEC_VERSION 1 +#define VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME "VK_KHR_uniform_buffer_standard_layout" +typedef VkPhysicalDeviceUniformBufferStandardLayoutFeatures VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR; + + + +// VK_KHR_buffer_device_address is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_buffer_device_address 1 +#define VK_KHR_BUFFER_DEVICE_ADDRESS_SPEC_VERSION 1 +#define VK_KHR_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME "VK_KHR_buffer_device_address" +typedef VkPhysicalDeviceBufferDeviceAddressFeatures VkPhysicalDeviceBufferDeviceAddressFeaturesKHR; + +typedef VkBufferDeviceAddressInfo VkBufferDeviceAddressInfoKHR; + +typedef VkBufferOpaqueCaptureAddressCreateInfo VkBufferOpaqueCaptureAddressCreateInfoKHR; + +typedef VkMemoryOpaqueCaptureAddressAllocateInfo VkMemoryOpaqueCaptureAddressAllocateInfoKHR; + +typedef VkDeviceMemoryOpaqueCaptureAddressInfo VkDeviceMemoryOpaqueCaptureAddressInfoKHR; + +typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetBufferDeviceAddressKHR)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo); +typedef uint64_t (VKAPI_PTR *PFN_vkGetBufferOpaqueCaptureAddressKHR)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo); +typedef uint64_t (VKAPI_PTR *PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR)(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetBufferDeviceAddressKHR( + VkDevice device, + const VkBufferDeviceAddressInfo* pInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR uint64_t VKAPI_CALL vkGetBufferOpaqueCaptureAddressKHR( + VkDevice device, + const VkBufferDeviceAddressInfo* pInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR uint64_t VKAPI_CALL vkGetDeviceMemoryOpaqueCaptureAddressKHR( + VkDevice device, + const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo); +#endif +#endif + + +// VK_KHR_deferred_host_operations is a preprocessor guard. Do not pass it to API calls. 
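// --- Editorial usage sketch for VK_KHR_buffer_device_address (aliased
// above); not part of the vendored header. Handles and the helper name are
// illustrative assumptions.
static void sketch_buffer_device_address(VkDevice device, VkBuffer buffer)
{
    /* `buffer` must have been created with
       VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT. */
    VkBufferDeviceAddressInfo addrInfo = {
        .sType  = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO,
        .buffer = buffer,
    };
    VkDeviceAddress addr = vkGetBufferDeviceAddressKHR(device, &addrInfo);
    (void)addr;  /* typically written to a push constant or UBO and
                    dereferenced in shaders */
}
// --- end sketch ---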
+#define VK_KHR_deferred_host_operations 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDeferredOperationKHR) +#define VK_KHR_DEFERRED_HOST_OPERATIONS_SPEC_VERSION 4 +#define VK_KHR_DEFERRED_HOST_OPERATIONS_EXTENSION_NAME "VK_KHR_deferred_host_operations" +typedef VkResult (VKAPI_PTR *PFN_vkCreateDeferredOperationKHR)(VkDevice device, const VkAllocationCallbacks* pAllocator, VkDeferredOperationKHR* pDeferredOperation); +typedef void (VKAPI_PTR *PFN_vkDestroyDeferredOperationKHR)(VkDevice device, VkDeferredOperationKHR operation, const VkAllocationCallbacks* pAllocator); +typedef uint32_t (VKAPI_PTR *PFN_vkGetDeferredOperationMaxConcurrencyKHR)(VkDevice device, VkDeferredOperationKHR operation); +typedef VkResult (VKAPI_PTR *PFN_vkGetDeferredOperationResultKHR)(VkDevice device, VkDeferredOperationKHR operation); +typedef VkResult (VKAPI_PTR *PFN_vkDeferredOperationJoinKHR)(VkDevice device, VkDeferredOperationKHR operation); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDeferredOperationKHR( + VkDevice device, + const VkAllocationCallbacks* pAllocator, + VkDeferredOperationKHR* pDeferredOperation); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkDestroyDeferredOperationKHR( + VkDevice device, + VkDeferredOperationKHR operation, + const VkAllocationCallbacks* pAllocator); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR uint32_t VKAPI_CALL vkGetDeferredOperationMaxConcurrencyKHR( + VkDevice device, + VkDeferredOperationKHR operation); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetDeferredOperationResultKHR( + VkDevice device, + VkDeferredOperationKHR operation); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkDeferredOperationJoinKHR( + VkDevice device, + VkDeferredOperationKHR operation); +#endif +#endif + + +// VK_KHR_pipeline_executable_properties is a preprocessor guard. Do not pass it to API calls. 
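// --- Editorial usage sketch for VK_KHR_deferred_host_operations (declared
// above); not part of the vendored header. A single-threaded completion loop
// under simplifying assumptions: real multi-threaded code would also handle
// VK_THREAD_DONE_KHR by waiting for the remaining joiners. The helper name
// is an illustrative assumption.
static void sketch_deferred_operation(VkDevice device)
{
    VkDeferredOperationKHR op;
    vkCreateDeferredOperationKHR(device, NULL, &op);

    /* Pass `op` to a deferrable call such as vkBuildAccelerationStructuresKHR;
       if that call returns VK_OPERATION_DEFERRED_KHR, threads then join: */
    VkResult r;
    do {
        r = vkDeferredOperationJoinKHR(device, op);  /* lend this thread */
    } while (r == VK_THREAD_IDLE_KHR);

    if (r == VK_SUCCESS) {
        VkResult opResult = vkGetDeferredOperationResultKHR(device, op);
        (void)opResult;  /* result of the deferred call itself */
    }
    vkDestroyDeferredOperationKHR(device, op, NULL);
}
// --- end sketch ---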
+#define VK_KHR_pipeline_executable_properties 1 +#define VK_KHR_PIPELINE_EXECUTABLE_PROPERTIES_SPEC_VERSION 1 +#define VK_KHR_PIPELINE_EXECUTABLE_PROPERTIES_EXTENSION_NAME "VK_KHR_pipeline_executable_properties" + +typedef enum VkPipelineExecutableStatisticFormatKHR { + VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_BOOL32_KHR = 0, + VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_INT64_KHR = 1, + VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR = 2, + VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_FLOAT64_KHR = 3, + VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPipelineExecutableStatisticFormatKHR; +typedef struct VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 pipelineExecutableInfo; +} VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR; + +typedef struct VkPipelineInfoKHR { + VkStructureType sType; + const void* pNext; + VkPipeline pipeline; +} VkPipelineInfoKHR; + +typedef struct VkPipelineExecutablePropertiesKHR { + VkStructureType sType; + void* pNext; + VkShaderStageFlags stages; + char name[VK_MAX_DESCRIPTION_SIZE]; + char description[VK_MAX_DESCRIPTION_SIZE]; + uint32_t subgroupSize; +} VkPipelineExecutablePropertiesKHR; + +typedef struct VkPipelineExecutableInfoKHR { + VkStructureType sType; + const void* pNext; + VkPipeline pipeline; + uint32_t executableIndex; +} VkPipelineExecutableInfoKHR; + +typedef union VkPipelineExecutableStatisticValueKHR { + VkBool32 b32; + int64_t i64; + uint64_t u64; + double f64; +} VkPipelineExecutableStatisticValueKHR; + +typedef struct VkPipelineExecutableStatisticKHR { + VkStructureType sType; + void* pNext; + char name[VK_MAX_DESCRIPTION_SIZE]; + char description[VK_MAX_DESCRIPTION_SIZE]; + VkPipelineExecutableStatisticFormatKHR format; + VkPipelineExecutableStatisticValueKHR value; +} VkPipelineExecutableStatisticKHR; + +typedef struct VkPipelineExecutableInternalRepresentationKHR { + VkStructureType sType; + void* pNext; + char name[VK_MAX_DESCRIPTION_SIZE]; + char description[VK_MAX_DESCRIPTION_SIZE]; + VkBool32 isText; + size_t dataSize; + void* pData; +} VkPipelineExecutableInternalRepresentationKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineExecutablePropertiesKHR)(VkDevice device, const VkPipelineInfoKHR* pPipelineInfo, uint32_t* pExecutableCount, VkPipelineExecutablePropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineExecutableStatisticsKHR)(VkDevice device, const VkPipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pStatisticCount, VkPipelineExecutableStatisticKHR* pStatistics); +typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineExecutableInternalRepresentationsKHR)(VkDevice device, const VkPipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pInternalRepresentationCount, VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineExecutablePropertiesKHR( + VkDevice device, + const VkPipelineInfoKHR* pPipelineInfo, + uint32_t* pExecutableCount, + VkPipelineExecutablePropertiesKHR* pProperties); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineExecutableStatisticsKHR( + VkDevice device, + const VkPipelineExecutableInfoKHR* pExecutableInfo, + uint32_t* pStatisticCount, + VkPipelineExecutableStatisticKHR* pStatistics); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineExecutableInternalRepresentationsKHR( + VkDevice device, + const 
VkPipelineExecutableInfoKHR* pExecutableInfo, + uint32_t* pInternalRepresentationCount, + VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations); +#endif +#endif + + +// VK_KHR_map_memory2 is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_map_memory2 1 +#define VK_KHR_MAP_MEMORY_2_SPEC_VERSION 1 +#define VK_KHR_MAP_MEMORY_2_EXTENSION_NAME "VK_KHR_map_memory2" +typedef VkMemoryUnmapFlagBits VkMemoryUnmapFlagBitsKHR; + +typedef VkMemoryUnmapFlags VkMemoryUnmapFlagsKHR; + +typedef VkMemoryMapInfo VkMemoryMapInfoKHR; + +typedef VkMemoryUnmapInfo VkMemoryUnmapInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkMapMemory2KHR)(VkDevice device, const VkMemoryMapInfo* pMemoryMapInfo, void** ppData); +typedef VkResult (VKAPI_PTR *PFN_vkUnmapMemory2KHR)(VkDevice device, const VkMemoryUnmapInfo* pMemoryUnmapInfo); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkMapMemory2KHR( + VkDevice device, + const VkMemoryMapInfo* pMemoryMapInfo, + void** ppData); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkUnmapMemory2KHR( + VkDevice device, + const VkMemoryUnmapInfo* pMemoryUnmapInfo); +#endif +#endif + + +// VK_KHR_shader_integer_dot_product is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_shader_integer_dot_product 1 +#define VK_KHR_SHADER_INTEGER_DOT_PRODUCT_SPEC_VERSION 1 +#define VK_KHR_SHADER_INTEGER_DOT_PRODUCT_EXTENSION_NAME "VK_KHR_shader_integer_dot_product" +typedef VkPhysicalDeviceShaderIntegerDotProductFeatures VkPhysicalDeviceShaderIntegerDotProductFeaturesKHR; + +typedef VkPhysicalDeviceShaderIntegerDotProductProperties VkPhysicalDeviceShaderIntegerDotProductPropertiesKHR; + + + +// VK_KHR_pipeline_library is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_pipeline_library 1 +#define VK_KHR_PIPELINE_LIBRARY_SPEC_VERSION 1 +#define VK_KHR_PIPELINE_LIBRARY_EXTENSION_NAME "VK_KHR_pipeline_library" +typedef struct VkPipelineLibraryCreateInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t libraryCount; + const VkPipeline* pLibraries; +} VkPipelineLibraryCreateInfoKHR; + + + +// VK_KHR_shader_non_semantic_info is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_shader_non_semantic_info 1 +#define VK_KHR_SHADER_NON_SEMANTIC_INFO_SPEC_VERSION 1 +#define VK_KHR_SHADER_NON_SEMANTIC_INFO_EXTENSION_NAME "VK_KHR_shader_non_semantic_info" + + +// VK_KHR_present_id is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_present_id 1 +#define VK_KHR_PRESENT_ID_SPEC_VERSION 1 +#define VK_KHR_PRESENT_ID_EXTENSION_NAME "VK_KHR_present_id" +typedef struct VkPresentIdKHR { + VkStructureType sType; + const void* pNext; + uint32_t swapchainCount; + const uint64_t* pPresentIds; +} VkPresentIdKHR; + +typedef struct VkPhysicalDevicePresentIdFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 presentId; +} VkPhysicalDevicePresentIdFeaturesKHR; + + + +// VK_KHR_video_encode_queue is a preprocessor guard. Do not pass it to API calls. 
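// --- Editorial usage sketch for VK_KHR_map_memory2 (aliased above); not
// part of the vendored header. Handles and the helper name are illustrative
// assumptions.
static void sketch_map_memory2(VkDevice device, VkDeviceMemory memory)
{
    VkMemoryMapInfo mapInfo = {
        .sType  = VK_STRUCTURE_TYPE_MEMORY_MAP_INFO_KHR,
        .memory = memory,
        .offset = 0,
        .size   = VK_WHOLE_SIZE,
    };
    void *ptr = NULL;
    vkMapMemory2KHR(device, &mapInfo, &ptr);
    /* ... write through ptr ... */
    VkMemoryUnmapInfo unmapInfo = {
        .sType  = VK_STRUCTURE_TYPE_MEMORY_UNMAP_INFO_KHR,
        .memory = memory,
    };
    vkUnmapMemory2KHR(device, &unmapInfo);
}
// --- end sketch ---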
+#define VK_KHR_video_encode_queue 1 +#define VK_KHR_VIDEO_ENCODE_QUEUE_SPEC_VERSION 12 +#define VK_KHR_VIDEO_ENCODE_QUEUE_EXTENSION_NAME "VK_KHR_video_encode_queue" + +typedef enum VkVideoEncodeTuningModeKHR { + VK_VIDEO_ENCODE_TUNING_MODE_DEFAULT_KHR = 0, + VK_VIDEO_ENCODE_TUNING_MODE_HIGH_QUALITY_KHR = 1, + VK_VIDEO_ENCODE_TUNING_MODE_LOW_LATENCY_KHR = 2, + VK_VIDEO_ENCODE_TUNING_MODE_ULTRA_LOW_LATENCY_KHR = 3, + VK_VIDEO_ENCODE_TUNING_MODE_LOSSLESS_KHR = 4, + VK_VIDEO_ENCODE_TUNING_MODE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoEncodeTuningModeKHR; + +typedef enum VkVideoEncodeFlagBitsKHR { + VK_VIDEO_ENCODE_INTRA_REFRESH_BIT_KHR = 0x00000004, + VK_VIDEO_ENCODE_WITH_QUANTIZATION_DELTA_MAP_BIT_KHR = 0x00000001, + VK_VIDEO_ENCODE_WITH_EMPHASIS_MAP_BIT_KHR = 0x00000002, + VK_VIDEO_ENCODE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoEncodeFlagBitsKHR; +typedef VkFlags VkVideoEncodeFlagsKHR; + +typedef enum VkVideoEncodeCapabilityFlagBitsKHR { + VK_VIDEO_ENCODE_CAPABILITY_PRECEDING_EXTERNALLY_ENCODED_BYTES_BIT_KHR = 0x00000001, + VK_VIDEO_ENCODE_CAPABILITY_INSUFFICIENT_BITSTREAM_BUFFER_RANGE_DETECTION_BIT_KHR = 0x00000002, + VK_VIDEO_ENCODE_CAPABILITY_QUANTIZATION_DELTA_MAP_BIT_KHR = 0x00000004, + VK_VIDEO_ENCODE_CAPABILITY_EMPHASIS_MAP_BIT_KHR = 0x00000008, + VK_VIDEO_ENCODE_CAPABILITY_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoEncodeCapabilityFlagBitsKHR; +typedef VkFlags VkVideoEncodeCapabilityFlagsKHR; + +typedef enum VkVideoEncodeRateControlModeFlagBitsKHR { + VK_VIDEO_ENCODE_RATE_CONTROL_MODE_DEFAULT_KHR = 0, + VK_VIDEO_ENCODE_RATE_CONTROL_MODE_DISABLED_BIT_KHR = 0x00000001, + VK_VIDEO_ENCODE_RATE_CONTROL_MODE_CBR_BIT_KHR = 0x00000002, + VK_VIDEO_ENCODE_RATE_CONTROL_MODE_VBR_BIT_KHR = 0x00000004, + VK_VIDEO_ENCODE_RATE_CONTROL_MODE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoEncodeRateControlModeFlagBitsKHR; +typedef VkFlags VkVideoEncodeRateControlModeFlagsKHR; + +typedef enum VkVideoEncodeFeedbackFlagBitsKHR { + VK_VIDEO_ENCODE_FEEDBACK_BITSTREAM_BUFFER_OFFSET_BIT_KHR = 0x00000001, + VK_VIDEO_ENCODE_FEEDBACK_BITSTREAM_BYTES_WRITTEN_BIT_KHR = 0x00000002, + VK_VIDEO_ENCODE_FEEDBACK_BITSTREAM_HAS_OVERRIDES_BIT_KHR = 0x00000004, + VK_VIDEO_ENCODE_FEEDBACK_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoEncodeFeedbackFlagBitsKHR; +typedef VkFlags VkVideoEncodeFeedbackFlagsKHR; + +typedef enum VkVideoEncodeUsageFlagBitsKHR { + VK_VIDEO_ENCODE_USAGE_DEFAULT_KHR = 0, + VK_VIDEO_ENCODE_USAGE_TRANSCODING_BIT_KHR = 0x00000001, + VK_VIDEO_ENCODE_USAGE_STREAMING_BIT_KHR = 0x00000002, + VK_VIDEO_ENCODE_USAGE_RECORDING_BIT_KHR = 0x00000004, + VK_VIDEO_ENCODE_USAGE_CONFERENCING_BIT_KHR = 0x00000008, + VK_VIDEO_ENCODE_USAGE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoEncodeUsageFlagBitsKHR; +typedef VkFlags VkVideoEncodeUsageFlagsKHR; + +typedef enum VkVideoEncodeContentFlagBitsKHR { + VK_VIDEO_ENCODE_CONTENT_DEFAULT_KHR = 0, + VK_VIDEO_ENCODE_CONTENT_CAMERA_BIT_KHR = 0x00000001, + VK_VIDEO_ENCODE_CONTENT_DESKTOP_BIT_KHR = 0x00000002, + VK_VIDEO_ENCODE_CONTENT_RENDERED_BIT_KHR = 0x00000004, + VK_VIDEO_ENCODE_CONTENT_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoEncodeContentFlagBitsKHR; +typedef VkFlags VkVideoEncodeContentFlagsKHR; +typedef VkFlags VkVideoEncodeRateControlFlagsKHR; +typedef struct VkVideoEncodeInfoKHR { + VkStructureType sType; + const void* pNext; + VkVideoEncodeFlagsKHR flags; + VkBuffer dstBuffer; + VkDeviceSize dstBufferOffset; + VkDeviceSize dstBufferRange; + VkVideoPictureResourceInfoKHR srcPictureResource; + const VkVideoReferenceSlotInfoKHR* pSetupReferenceSlot; + uint32_t 
referenceSlotCount; + const VkVideoReferenceSlotInfoKHR* pReferenceSlots; + uint32_t precedingExternallyEncodedBytes; +} VkVideoEncodeInfoKHR; + +typedef struct VkVideoEncodeCapabilitiesKHR { + VkStructureType sType; + void* pNext; + VkVideoEncodeCapabilityFlagsKHR flags; + VkVideoEncodeRateControlModeFlagsKHR rateControlModes; + uint32_t maxRateControlLayers; + uint64_t maxBitrate; + uint32_t maxQualityLevels; + VkExtent2D encodeInputPictureGranularity; + VkVideoEncodeFeedbackFlagsKHR supportedEncodeFeedbackFlags; +} VkVideoEncodeCapabilitiesKHR; + +typedef struct VkQueryPoolVideoEncodeFeedbackCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkVideoEncodeFeedbackFlagsKHR encodeFeedbackFlags; +} VkQueryPoolVideoEncodeFeedbackCreateInfoKHR; + +typedef struct VkVideoEncodeUsageInfoKHR { + VkStructureType sType; + const void* pNext; + VkVideoEncodeUsageFlagsKHR videoUsageHints; + VkVideoEncodeContentFlagsKHR videoContentHints; + VkVideoEncodeTuningModeKHR tuningMode; +} VkVideoEncodeUsageInfoKHR; + +typedef struct VkVideoEncodeRateControlLayerInfoKHR { + VkStructureType sType; + const void* pNext; + uint64_t averageBitrate; + uint64_t maxBitrate; + uint32_t frameRateNumerator; + uint32_t frameRateDenominator; +} VkVideoEncodeRateControlLayerInfoKHR; + +typedef struct VkVideoEncodeRateControlInfoKHR { + VkStructureType sType; + const void* pNext; + VkVideoEncodeRateControlFlagsKHR flags; + VkVideoEncodeRateControlModeFlagBitsKHR rateControlMode; + uint32_t layerCount; + const VkVideoEncodeRateControlLayerInfoKHR* pLayers; + uint32_t virtualBufferSizeInMs; + uint32_t initialVirtualBufferSizeInMs; +} VkVideoEncodeRateControlInfoKHR; + +typedef struct VkPhysicalDeviceVideoEncodeQualityLevelInfoKHR { + VkStructureType sType; + const void* pNext; + const VkVideoProfileInfoKHR* pVideoProfile; + uint32_t qualityLevel; +} VkPhysicalDeviceVideoEncodeQualityLevelInfoKHR; + +typedef struct VkVideoEncodeQualityLevelPropertiesKHR { + VkStructureType sType; + void* pNext; + VkVideoEncodeRateControlModeFlagBitsKHR preferredRateControlMode; + uint32_t preferredRateControlLayerCount; +} VkVideoEncodeQualityLevelPropertiesKHR; + +typedef struct VkVideoEncodeQualityLevelInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t qualityLevel; +} VkVideoEncodeQualityLevelInfoKHR; + +typedef struct VkVideoEncodeSessionParametersGetInfoKHR { + VkStructureType sType; + const void* pNext; + VkVideoSessionParametersKHR videoSessionParameters; +} VkVideoEncodeSessionParametersGetInfoKHR; + +typedef struct VkVideoEncodeSessionParametersFeedbackInfoKHR { + VkStructureType sType; + void* pNext; + VkBool32 hasOverrides; +} VkVideoEncodeSessionParametersFeedbackInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceVideoEncodeQualityLevelInfoKHR* pQualityLevelInfo, VkVideoEncodeQualityLevelPropertiesKHR* pQualityLevelProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetEncodedVideoSessionParametersKHR)(VkDevice device, const VkVideoEncodeSessionParametersGetInfoKHR* pVideoSessionParametersInfo, VkVideoEncodeSessionParametersFeedbackInfoKHR* pFeedbackInfo, size_t* pDataSize, void* pData); +typedef void (VKAPI_PTR *PFN_vkCmdEncodeVideoKHR)(VkCommandBuffer commandBuffer, const VkVideoEncodeInfoKHR* pEncodeInfo); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR( + VkPhysicalDevice physicalDevice, + const 
VkPhysicalDeviceVideoEncodeQualityLevelInfoKHR* pQualityLevelInfo, + VkVideoEncodeQualityLevelPropertiesKHR* pQualityLevelProperties); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetEncodedVideoSessionParametersKHR( + VkDevice device, + const VkVideoEncodeSessionParametersGetInfoKHR* pVideoSessionParametersInfo, + VkVideoEncodeSessionParametersFeedbackInfoKHR* pFeedbackInfo, + size_t* pDataSize, + void* pData); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdEncodeVideoKHR( + VkCommandBuffer commandBuffer, + const VkVideoEncodeInfoKHR* pEncodeInfo); +#endif +#endif + + +// VK_KHR_synchronization2 is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_synchronization2 1 +#define VK_KHR_SYNCHRONIZATION_2_SPEC_VERSION 1 +#define VK_KHR_SYNCHRONIZATION_2_EXTENSION_NAME "VK_KHR_synchronization2" +typedef VkPipelineStageFlags2 VkPipelineStageFlags2KHR; + +typedef VkPipelineStageFlagBits2 VkPipelineStageFlagBits2KHR; + +typedef VkAccessFlags2 VkAccessFlags2KHR; + +typedef VkAccessFlagBits2 VkAccessFlagBits2KHR; + +typedef VkSubmitFlagBits VkSubmitFlagBitsKHR; + +typedef VkSubmitFlags VkSubmitFlagsKHR; + +typedef VkMemoryBarrier2 VkMemoryBarrier2KHR; + +typedef VkBufferMemoryBarrier2 VkBufferMemoryBarrier2KHR; + +typedef VkImageMemoryBarrier2 VkImageMemoryBarrier2KHR; + +typedef VkDependencyInfo VkDependencyInfoKHR; + +typedef VkSubmitInfo2 VkSubmitInfo2KHR; + +typedef VkSemaphoreSubmitInfo VkSemaphoreSubmitInfoKHR; + +typedef VkCommandBufferSubmitInfo VkCommandBufferSubmitInfoKHR; + +typedef VkPhysicalDeviceSynchronization2Features VkPhysicalDeviceSynchronization2FeaturesKHR; + +typedef void (VKAPI_PTR *PFN_vkCmdSetEvent2KHR)(VkCommandBuffer commandBuffer, VkEvent event, const VkDependencyInfo* pDependencyInfo); +typedef void (VKAPI_PTR *PFN_vkCmdResetEvent2KHR)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags2 stageMask); +typedef void (VKAPI_PTR *PFN_vkCmdWaitEvents2KHR)(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, const VkDependencyInfo* pDependencyInfos); +typedef void (VKAPI_PTR *PFN_vkCmdPipelineBarrier2KHR)(VkCommandBuffer commandBuffer, const VkDependencyInfo* pDependencyInfo); +typedef void (VKAPI_PTR *PFN_vkCmdWriteTimestamp2KHR)(VkCommandBuffer commandBuffer, VkPipelineStageFlags2 stage, VkQueryPool queryPool, uint32_t query); +typedef VkResult (VKAPI_PTR *PFN_vkQueueSubmit2KHR)(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2* pSubmits, VkFence fence); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent2KHR( + VkCommandBuffer commandBuffer, + VkEvent event, + const VkDependencyInfo* pDependencyInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent2KHR( + VkCommandBuffer commandBuffer, + VkEvent event, + VkPipelineStageFlags2 stageMask); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents2KHR( + VkCommandBuffer commandBuffer, + uint32_t eventCount, + const VkEvent* pEvents, + const VkDependencyInfo* pDependencyInfos); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier2KHR( + VkCommandBuffer commandBuffer, + const VkDependencyInfo* pDependencyInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp2KHR( + VkCommandBuffer commandBuffer, + VkPipelineStageFlags2 stage, + VkQueryPool queryPool, + uint32_t query); +#endif + 
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit2KHR( + VkQueue queue, + uint32_t submitCount, + const VkSubmitInfo2* pSubmits, + VkFence fence); +#endif +#endif + + +// VK_KHR_fragment_shader_barycentric is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_fragment_shader_barycentric 1 +#define VK_KHR_FRAGMENT_SHADER_BARYCENTRIC_SPEC_VERSION 1 +#define VK_KHR_FRAGMENT_SHADER_BARYCENTRIC_EXTENSION_NAME "VK_KHR_fragment_shader_barycentric" +typedef struct VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 fragmentShaderBarycentric; +} VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR; + +typedef struct VkPhysicalDeviceFragmentShaderBarycentricPropertiesKHR { + VkStructureType sType; + void* pNext; + VkBool32 triStripVertexOrderIndependentOfProvokingVertex; +} VkPhysicalDeviceFragmentShaderBarycentricPropertiesKHR; + + + +// VK_KHR_shader_subgroup_uniform_control_flow is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_shader_subgroup_uniform_control_flow 1 +#define VK_KHR_SHADER_SUBGROUP_UNIFORM_CONTROL_FLOW_SPEC_VERSION 1 +#define VK_KHR_SHADER_SUBGROUP_UNIFORM_CONTROL_FLOW_EXTENSION_NAME "VK_KHR_shader_subgroup_uniform_control_flow" +typedef struct VkPhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 shaderSubgroupUniformControlFlow; +} VkPhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKHR; + + + +// VK_KHR_zero_initialize_workgroup_memory is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_zero_initialize_workgroup_memory 1 +#define VK_KHR_ZERO_INITIALIZE_WORKGROUP_MEMORY_SPEC_VERSION 1 +#define VK_KHR_ZERO_INITIALIZE_WORKGROUP_MEMORY_EXTENSION_NAME "VK_KHR_zero_initialize_workgroup_memory" +typedef VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeatures VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR; + + + +// VK_KHR_workgroup_memory_explicit_layout is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_workgroup_memory_explicit_layout 1 +#define VK_KHR_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_SPEC_VERSION 1 +#define VK_KHR_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_EXTENSION_NAME "VK_KHR_workgroup_memory_explicit_layout" +typedef struct VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 workgroupMemoryExplicitLayout; + VkBool32 workgroupMemoryExplicitLayoutScalarBlockLayout; + VkBool32 workgroupMemoryExplicitLayout8BitAccess; + VkBool32 workgroupMemoryExplicitLayout16BitAccess; +} VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR; + + + +// VK_KHR_copy_commands2 is a preprocessor guard. Do not pass it to API calls. 
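// --- Editorial usage sketch for VK_KHR_synchronization2 (aliased above);
// not part of the vendored header. It transitions a color attachment for
// sampling with the precise stage/access scopes that replace the coarse
// vkCmdPipelineBarrier masks. Handles and the helper name are illustrative
// assumptions.
static void sketch_image_barrier2(VkCommandBuffer cmd, VkImage image)
{
    VkImageMemoryBarrier2 barrier = {
        .sType               = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
        .srcStageMask        = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT,
        .srcAccessMask       = VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT,
        .dstStageMask        = VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,
        .dstAccessMask       = VK_ACCESS_2_SHADER_SAMPLED_READ_BIT,
        .oldLayout           = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
        .newLayout           = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
        .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .image               = image,
        .subresourceRange    = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 },
    };
    VkDependencyInfo dep = {
        .sType                   = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
        .imageMemoryBarrierCount = 1,
        .pImageMemoryBarriers    = &barrier,
    };
    vkCmdPipelineBarrier2KHR(cmd, &dep);
}
// --- end sketch ---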
+#define VK_KHR_copy_commands2 1 +#define VK_KHR_COPY_COMMANDS_2_SPEC_VERSION 1 +#define VK_KHR_COPY_COMMANDS_2_EXTENSION_NAME "VK_KHR_copy_commands2" +typedef VkCopyBufferInfo2 VkCopyBufferInfo2KHR; + +typedef VkCopyImageInfo2 VkCopyImageInfo2KHR; + +typedef VkCopyBufferToImageInfo2 VkCopyBufferToImageInfo2KHR; + +typedef VkCopyImageToBufferInfo2 VkCopyImageToBufferInfo2KHR; + +typedef VkBlitImageInfo2 VkBlitImageInfo2KHR; + +typedef VkResolveImageInfo2 VkResolveImageInfo2KHR; + +typedef VkBufferCopy2 VkBufferCopy2KHR; + +typedef VkImageCopy2 VkImageCopy2KHR; + +typedef VkImageBlit2 VkImageBlit2KHR; + +typedef VkBufferImageCopy2 VkBufferImageCopy2KHR; + +typedef VkImageResolve2 VkImageResolve2KHR; + +typedef void (VKAPI_PTR *PFN_vkCmdCopyBuffer2KHR)(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2* pCopyBufferInfo); +typedef void (VKAPI_PTR *PFN_vkCmdCopyImage2KHR)(VkCommandBuffer commandBuffer, const VkCopyImageInfo2* pCopyImageInfo); +typedef void (VKAPI_PTR *PFN_vkCmdCopyBufferToImage2KHR)(VkCommandBuffer commandBuffer, const VkCopyBufferToImageInfo2* pCopyBufferToImageInfo); +typedef void (VKAPI_PTR *PFN_vkCmdCopyImageToBuffer2KHR)(VkCommandBuffer commandBuffer, const VkCopyImageToBufferInfo2* pCopyImageToBufferInfo); +typedef void (VKAPI_PTR *PFN_vkCmdBlitImage2KHR)(VkCommandBuffer commandBuffer, const VkBlitImageInfo2* pBlitImageInfo); +typedef void (VKAPI_PTR *PFN_vkCmdResolveImage2KHR)(VkCommandBuffer commandBuffer, const VkResolveImageInfo2* pResolveImageInfo); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer2KHR( + VkCommandBuffer commandBuffer, + const VkCopyBufferInfo2* pCopyBufferInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage2KHR( + VkCommandBuffer commandBuffer, + const VkCopyImageInfo2* pCopyImageInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage2KHR( + VkCommandBuffer commandBuffer, + const VkCopyBufferToImageInfo2* pCopyBufferToImageInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer2KHR( + VkCommandBuffer commandBuffer, + const VkCopyImageToBufferInfo2* pCopyImageToBufferInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage2KHR( + VkCommandBuffer commandBuffer, + const VkBlitImageInfo2* pBlitImageInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage2KHR( + VkCommandBuffer commandBuffer, + const VkResolveImageInfo2* pResolveImageInfo); +#endif +#endif + + +// VK_KHR_format_feature_flags2 is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_format_feature_flags2 1 +#define VK_KHR_FORMAT_FEATURE_FLAGS_2_SPEC_VERSION 2 +#define VK_KHR_FORMAT_FEATURE_FLAGS_2_EXTENSION_NAME "VK_KHR_format_feature_flags2" +typedef VkFormatFeatureFlags2 VkFormatFeatureFlags2KHR; + +typedef VkFormatFeatureFlagBits2 VkFormatFeatureFlagBits2KHR; + +typedef VkFormatProperties3 VkFormatProperties3KHR; + + + +// VK_KHR_ray_tracing_maintenance1 is a preprocessor guard. Do not pass it to API calls. 
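// --- Editorial usage sketch for VK_KHR_copy_commands2 (aliased above); not
// part of the vendored header. Buffers, the byte count, and the helper name
// are illustrative assumptions.
static void sketch_copy_buffer2(VkCommandBuffer cmd, VkBuffer src, VkBuffer dst)
{
    VkBufferCopy2 region = {
        .sType     = VK_STRUCTURE_TYPE_BUFFER_COPY_2,
        .srcOffset = 0,
        .dstOffset = 0,
        .size      = 256,  /* hypothetical byte count */
    };
    VkCopyBufferInfo2 copyInfo = {
        .sType       = VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2,
        .srcBuffer   = src,
        .dstBuffer   = dst,
        .regionCount = 1,
        .pRegions    = &region,
    };
    vkCmdCopyBuffer2KHR(cmd, &copyInfo);  /* extensible form of vkCmdCopyBuffer */
}
// --- end sketch ---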
+#define VK_KHR_ray_tracing_maintenance1 1 +#define VK_KHR_RAY_TRACING_MAINTENANCE_1_SPEC_VERSION 1 +#define VK_KHR_RAY_TRACING_MAINTENANCE_1_EXTENSION_NAME "VK_KHR_ray_tracing_maintenance1" +typedef struct VkPhysicalDeviceRayTracingMaintenance1FeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 rayTracingMaintenance1; + VkBool32 rayTracingPipelineTraceRaysIndirect2; +} VkPhysicalDeviceRayTracingMaintenance1FeaturesKHR; + +typedef struct VkTraceRaysIndirectCommand2KHR { + VkDeviceAddress raygenShaderRecordAddress; + VkDeviceSize raygenShaderRecordSize; + VkDeviceAddress missShaderBindingTableAddress; + VkDeviceSize missShaderBindingTableSize; + VkDeviceSize missShaderBindingTableStride; + VkDeviceAddress hitShaderBindingTableAddress; + VkDeviceSize hitShaderBindingTableSize; + VkDeviceSize hitShaderBindingTableStride; + VkDeviceAddress callableShaderBindingTableAddress; + VkDeviceSize callableShaderBindingTableSize; + VkDeviceSize callableShaderBindingTableStride; + uint32_t width; + uint32_t height; + uint32_t depth; +} VkTraceRaysIndirectCommand2KHR; + +typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysIndirect2KHR)(VkCommandBuffer commandBuffer, VkDeviceAddress indirectDeviceAddress); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysIndirect2KHR( + VkCommandBuffer commandBuffer, + VkDeviceAddress indirectDeviceAddress); +#endif +#endif + + +// VK_KHR_shader_untyped_pointers is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_shader_untyped_pointers 1 +#define VK_KHR_SHADER_UNTYPED_POINTERS_SPEC_VERSION 1 +#define VK_KHR_SHADER_UNTYPED_POINTERS_EXTENSION_NAME "VK_KHR_shader_untyped_pointers" +typedef struct VkPhysicalDeviceShaderUntypedPointersFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 shaderUntypedPointers; +} VkPhysicalDeviceShaderUntypedPointersFeaturesKHR; + + + +// VK_KHR_portability_enumeration is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_portability_enumeration 1 +#define VK_KHR_PORTABILITY_ENUMERATION_SPEC_VERSION 1 +#define VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME "VK_KHR_portability_enumeration" + + +// VK_KHR_maintenance4 is a preprocessor guard. Do not pass it to API calls. 
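// --- Editorial usage sketch for VK_KHR_portability_enumeration (defined
// above); not part of the vendored header. Without the flag, a conformant
// loader hides portability-conformant implementations such as MoltenVK from
// vkEnumeratePhysicalDevices. The surface extensions are elided and the
// helper name is an illustrative assumption.
static void sketch_portability_instance(void)
{
    const char *instExts[] = {
        VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME,
        /* ... plus the platform surface extensions ... */
    };
    VkInstanceCreateInfo ci = {
        .sType                   = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
        .flags                   = VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR,
        .enabledExtensionCount   = 1,
        .ppEnabledExtensionNames = instExts,
    };
    VkInstance instance;
    vkCreateInstance(&ci, NULL, &instance);
}
// --- end sketch ---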
+#define VK_KHR_maintenance4 1 +#define VK_KHR_MAINTENANCE_4_SPEC_VERSION 2 +#define VK_KHR_MAINTENANCE_4_EXTENSION_NAME "VK_KHR_maintenance4" +typedef VkPhysicalDeviceMaintenance4Features VkPhysicalDeviceMaintenance4FeaturesKHR; + +typedef VkPhysicalDeviceMaintenance4Properties VkPhysicalDeviceMaintenance4PropertiesKHR; + +typedef VkDeviceBufferMemoryRequirements VkDeviceBufferMemoryRequirementsKHR; + +typedef VkDeviceImageMemoryRequirements VkDeviceImageMemoryRequirementsKHR; + +typedef void (VKAPI_PTR *PFN_vkGetDeviceBufferMemoryRequirementsKHR)(VkDevice device, const VkDeviceBufferMemoryRequirements* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetDeviceImageMemoryRequirementsKHR)(VkDevice device, const VkDeviceImageMemoryRequirements* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetDeviceImageSparseMemoryRequirementsKHR)(VkDevice device, const VkDeviceImageMemoryRequirements* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetDeviceBufferMemoryRequirementsKHR( + VkDevice device, + const VkDeviceBufferMemoryRequirements* pInfo, + VkMemoryRequirements2* pMemoryRequirements); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetDeviceImageMemoryRequirementsKHR( + VkDevice device, + const VkDeviceImageMemoryRequirements* pInfo, + VkMemoryRequirements2* pMemoryRequirements); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetDeviceImageSparseMemoryRequirementsKHR( + VkDevice device, + const VkDeviceImageMemoryRequirements* pInfo, + uint32_t* pSparseMemoryRequirementCount, + VkSparseImageMemoryRequirements2* pSparseMemoryRequirements); +#endif +#endif + + +// VK_KHR_shader_subgroup_rotate is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_shader_subgroup_rotate 1 +#define VK_KHR_SHADER_SUBGROUP_ROTATE_SPEC_VERSION 2 +#define VK_KHR_SHADER_SUBGROUP_ROTATE_EXTENSION_NAME "VK_KHR_shader_subgroup_rotate" +typedef VkPhysicalDeviceShaderSubgroupRotateFeatures VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR; + + + +// VK_KHR_shader_maximal_reconvergence is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_shader_maximal_reconvergence 1 +#define VK_KHR_SHADER_MAXIMAL_RECONVERGENCE_SPEC_VERSION 1 +#define VK_KHR_SHADER_MAXIMAL_RECONVERGENCE_EXTENSION_NAME "VK_KHR_shader_maximal_reconvergence" +typedef struct VkPhysicalDeviceShaderMaximalReconvergenceFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 shaderMaximalReconvergence; +} VkPhysicalDeviceShaderMaximalReconvergenceFeaturesKHR; + + + +// VK_KHR_maintenance5 is a preprocessor guard. Do not pass it to API calls. 
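The maintenance4 queries above take a create-info instead of a live object, so sizing an allocation no longer requires creating a throwaway buffer or image first. A sketch, assuming the extension (or Vulkan 1.3) is enabled:

#include <vulkan/vulkan.h>

/* Sketch: memory size for a buffer described only by its create info. */
static VkDeviceSize buffer_alloc_size(VkDevice device, const VkBufferCreateInfo* bci)
{
    const VkDeviceBufferMemoryRequirements query = {
        .sType = VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS,
        .pCreateInfo = bci,
    };
    VkMemoryRequirements2 reqs = { .sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2 };
    vkGetDeviceBufferMemoryRequirementsKHR(device, &query, &reqs);
    return reqs.memoryRequirements.size;
}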
+#define VK_KHR_maintenance5 1 +#define VK_KHR_MAINTENANCE_5_SPEC_VERSION 1 +#define VK_KHR_MAINTENANCE_5_EXTENSION_NAME "VK_KHR_maintenance5" +typedef VkPipelineCreateFlags2 VkPipelineCreateFlags2KHR; + +typedef VkPipelineCreateFlagBits2 VkPipelineCreateFlagBits2KHR; + +typedef VkBufferUsageFlags2 VkBufferUsageFlags2KHR; + +typedef VkBufferUsageFlagBits2 VkBufferUsageFlagBits2KHR; + +typedef VkPhysicalDeviceMaintenance5Features VkPhysicalDeviceMaintenance5FeaturesKHR; + +typedef VkPhysicalDeviceMaintenance5Properties VkPhysicalDeviceMaintenance5PropertiesKHR; + +typedef VkRenderingAreaInfo VkRenderingAreaInfoKHR; + +typedef VkDeviceImageSubresourceInfo VkDeviceImageSubresourceInfoKHR; + +typedef VkImageSubresource2 VkImageSubresource2KHR; + +typedef VkSubresourceLayout2 VkSubresourceLayout2KHR; + +typedef VkPipelineCreateFlags2CreateInfo VkPipelineCreateFlags2CreateInfoKHR; + +typedef VkBufferUsageFlags2CreateInfo VkBufferUsageFlags2CreateInfoKHR; + +typedef void (VKAPI_PTR *PFN_vkCmdBindIndexBuffer2KHR)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkDeviceSize size, VkIndexType indexType); +typedef void (VKAPI_PTR *PFN_vkGetRenderingAreaGranularityKHR)(VkDevice device, const VkRenderingAreaInfo* pRenderingAreaInfo, VkExtent2D* pGranularity); +typedef void (VKAPI_PTR *PFN_vkGetDeviceImageSubresourceLayoutKHR)(VkDevice device, const VkDeviceImageSubresourceInfo* pInfo, VkSubresourceLayout2* pLayout); +typedef void (VKAPI_PTR *PFN_vkGetImageSubresourceLayout2KHR)(VkDevice device, VkImage image, const VkImageSubresource2* pSubresource, VkSubresourceLayout2* pLayout); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer2KHR( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkDeviceSize size, + VkIndexType indexType); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetRenderingAreaGranularityKHR( + VkDevice device, + const VkRenderingAreaInfo* pRenderingAreaInfo, + VkExtent2D* pGranularity); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetDeviceImageSubresourceLayoutKHR( + VkDevice device, + const VkDeviceImageSubresourceInfo* pInfo, + VkSubresourceLayout2* pLayout); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout2KHR( + VkDevice device, + VkImage image, + const VkImageSubresource2* pSubresource, + VkSubresourceLayout2* pLayout); +#endif +#endif + + +// VK_KHR_present_id2 is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_present_id2 1 +#define VK_KHR_PRESENT_ID_2_SPEC_VERSION 1 +#define VK_KHR_PRESENT_ID_2_EXTENSION_NAME "VK_KHR_present_id2" +typedef struct VkSurfaceCapabilitiesPresentId2KHR { + VkStructureType sType; + void* pNext; + VkBool32 presentId2Supported; +} VkSurfaceCapabilitiesPresentId2KHR; + +typedef struct VkPresentId2KHR { + VkStructureType sType; + const void* pNext; + uint32_t swapchainCount; + const uint64_t* pPresentIds; +} VkPresentId2KHR; + +typedef struct VkPhysicalDevicePresentId2FeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 presentId2; +} VkPhysicalDevicePresentId2FeaturesKHR; + + + +// VK_KHR_present_wait2 is a preprocessor guard. Do not pass it to API calls. 
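Among the maintenance5 additions above, vkCmdBindIndexBuffer2KHR adds an explicit size so a sub-range of a larger buffer can be bound (VK_WHOLE_SIZE remains valid). A one-line sketch, assuming a recording command buffer:

#include <vulkan/vulkan.h>

/* Sketch: bind a bounded 16-bit index range starting at `offset`. */
static void bind_indices(VkCommandBuffer cmd, VkBuffer buf,
                         VkDeviceSize offset, VkDeviceSize size)
{
    vkCmdBindIndexBuffer2KHR(cmd, buf, offset, size, VK_INDEX_TYPE_UINT16);
}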
+#define VK_KHR_present_wait2 1 +#define VK_KHR_PRESENT_WAIT_2_SPEC_VERSION 1 +#define VK_KHR_PRESENT_WAIT_2_EXTENSION_NAME "VK_KHR_present_wait2" +typedef struct VkSurfaceCapabilitiesPresentWait2KHR { + VkStructureType sType; + void* pNext; + VkBool32 presentWait2Supported; +} VkSurfaceCapabilitiesPresentWait2KHR; + +typedef struct VkPhysicalDevicePresentWait2FeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 presentWait2; +} VkPhysicalDevicePresentWait2FeaturesKHR; + +typedef struct VkPresentWait2InfoKHR { + VkStructureType sType; + const void* pNext; + uint64_t presentId; + uint64_t timeout; +} VkPresentWait2InfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkWaitForPresent2KHR)(VkDevice device, VkSwapchainKHR swapchain, const VkPresentWait2InfoKHR* pPresentWait2Info); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkWaitForPresent2KHR( + VkDevice device, + VkSwapchainKHR swapchain, + const VkPresentWait2InfoKHR* pPresentWait2Info); +#endif +#endif + + +// VK_KHR_ray_tracing_position_fetch is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_ray_tracing_position_fetch 1 +#define VK_KHR_RAY_TRACING_POSITION_FETCH_SPEC_VERSION 1 +#define VK_KHR_RAY_TRACING_POSITION_FETCH_EXTENSION_NAME "VK_KHR_ray_tracing_position_fetch" +typedef struct VkPhysicalDeviceRayTracingPositionFetchFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 rayTracingPositionFetch; +} VkPhysicalDeviceRayTracingPositionFetchFeaturesKHR; + + + +// VK_KHR_pipeline_binary is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_pipeline_binary 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineBinaryKHR) +#define VK_MAX_PIPELINE_BINARY_KEY_SIZE_KHR 32U +#define VK_KHR_PIPELINE_BINARY_SPEC_VERSION 1 +#define VK_KHR_PIPELINE_BINARY_EXTENSION_NAME "VK_KHR_pipeline_binary" +typedef struct VkPhysicalDevicePipelineBinaryFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 pipelineBinaries; +} VkPhysicalDevicePipelineBinaryFeaturesKHR; + +typedef struct VkPhysicalDevicePipelineBinaryPropertiesKHR { + VkStructureType sType; + void* pNext; + VkBool32 pipelineBinaryInternalCache; + VkBool32 pipelineBinaryInternalCacheControl; + VkBool32 pipelineBinaryPrefersInternalCache; + VkBool32 pipelineBinaryPrecompiledInternalCache; + VkBool32 pipelineBinaryCompressedData; +} VkPhysicalDevicePipelineBinaryPropertiesKHR; + +typedef struct VkDevicePipelineBinaryInternalCacheControlKHR { + VkStructureType sType; + const void* pNext; + VkBool32 disableInternalCache; +} VkDevicePipelineBinaryInternalCacheControlKHR; + +typedef struct VkPipelineBinaryKeyKHR { + VkStructureType sType; + void* pNext; + uint32_t keySize; + uint8_t key[VK_MAX_PIPELINE_BINARY_KEY_SIZE_KHR]; +} VkPipelineBinaryKeyKHR; + +typedef struct VkPipelineBinaryDataKHR { + size_t dataSize; + void* pData; +} VkPipelineBinaryDataKHR; + +typedef struct VkPipelineBinaryKeysAndDataKHR { + uint32_t binaryCount; + const VkPipelineBinaryKeyKHR* pPipelineBinaryKeys; + const VkPipelineBinaryDataKHR* pPipelineBinaryData; +} VkPipelineBinaryKeysAndDataKHR; + +typedef struct VkPipelineCreateInfoKHR { + VkStructureType sType; + void* pNext; +} VkPipelineCreateInfoKHR; + +typedef struct VkPipelineBinaryCreateInfoKHR { + VkStructureType sType; + const void* pNext; + const VkPipelineBinaryKeysAndDataKHR* pKeysAndDataInfo; + VkPipeline pipeline; + const VkPipelineCreateInfoKHR* pPipelineCreateInfo; +} VkPipelineBinaryCreateInfoKHR; + +typedef struct VkPipelineBinaryInfoKHR { + VkStructureType 
sType; + const void* pNext; + uint32_t binaryCount; + const VkPipelineBinaryKHR* pPipelineBinaries; +} VkPipelineBinaryInfoKHR; + +typedef struct VkReleaseCapturedPipelineDataInfoKHR { + VkStructureType sType; + void* pNext; + VkPipeline pipeline; +} VkReleaseCapturedPipelineDataInfoKHR; + +typedef struct VkPipelineBinaryDataInfoKHR { + VkStructureType sType; + void* pNext; + VkPipelineBinaryKHR pipelineBinary; +} VkPipelineBinaryDataInfoKHR; + +typedef struct VkPipelineBinaryHandlesInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t pipelineBinaryCount; + VkPipelineBinaryKHR* pPipelineBinaries; +} VkPipelineBinaryHandlesInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineBinariesKHR)(VkDevice device, const VkPipelineBinaryCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineBinaryHandlesInfoKHR* pBinaries); +typedef void (VKAPI_PTR *PFN_vkDestroyPipelineBinaryKHR)(VkDevice device, VkPipelineBinaryKHR pipelineBinary, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineKeyKHR)(VkDevice device, const VkPipelineCreateInfoKHR* pPipelineCreateInfo, VkPipelineBinaryKeyKHR* pPipelineKey); +typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineBinaryDataKHR)(VkDevice device, const VkPipelineBinaryDataInfoKHR* pInfo, VkPipelineBinaryKeyKHR* pPipelineBinaryKey, size_t* pPipelineBinaryDataSize, void* pPipelineBinaryData); +typedef VkResult (VKAPI_PTR *PFN_vkReleaseCapturedPipelineDataKHR)(VkDevice device, const VkReleaseCapturedPipelineDataInfoKHR* pInfo, const VkAllocationCallbacks* pAllocator); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineBinariesKHR( + VkDevice device, + const VkPipelineBinaryCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkPipelineBinaryHandlesInfoKHR* pBinaries); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineBinaryKHR( + VkDevice device, + VkPipelineBinaryKHR pipelineBinary, + const VkAllocationCallbacks* pAllocator); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineKeyKHR( + VkDevice device, + const VkPipelineCreateInfoKHR* pPipelineCreateInfo, + VkPipelineBinaryKeyKHR* pPipelineKey); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineBinaryDataKHR( + VkDevice device, + const VkPipelineBinaryDataInfoKHR* pInfo, + VkPipelineBinaryKeyKHR* pPipelineBinaryKey, + size_t* pPipelineBinaryDataSize, + void* pPipelineBinaryData); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkReleaseCapturedPipelineDataKHR( + VkDevice device, + const VkReleaseCapturedPipelineDataInfoKHR* pInfo, + const VkAllocationCallbacks* pAllocator); +#endif +#endif + + +// VK_KHR_surface_maintenance1 is a preprocessor guard. Do not pass it to API calls. 
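vkCreatePipelineBinariesKHR above follows the usual count-then-fill idiom, but through VkPipelineBinaryHandlesInfoKHR rather than a bare count pointer. A sketch of capturing an existing pipeline's binaries, assuming the pipeline was created with VK_PIPELINE_CREATE_2_CAPTURE_DATA_BIT_KHR and the extension is enabled; error handling omitted:

#include <stdlib.h>
#include <vulkan/vulkan.h>

/* Sketch: retrieve the binary handles backing `pipeline`. Caller frees. */
static VkPipelineBinaryKHR* capture_binaries(VkDevice device, VkPipeline pipeline,
                                             uint32_t* count)
{
    const VkPipelineBinaryCreateInfoKHR ci = {
        .sType = VK_STRUCTURE_TYPE_PIPELINE_BINARY_CREATE_INFO_KHR,
        .pipeline = pipeline,
    };
    VkPipelineBinaryHandlesInfoKHR handles = {
        .sType = VK_STRUCTURE_TYPE_PIPELINE_BINARY_HANDLES_INFO_KHR,
    };
    vkCreatePipelineBinariesKHR(device, &ci, NULL, &handles);   /* query count */
    VkPipelineBinaryKHR* bins = malloc(handles.pipelineBinaryCount * sizeof *bins);
    handles.pPipelineBinaries = bins;
    vkCreatePipelineBinariesKHR(device, &ci, NULL, &handles);   /* fill handles */
    *count = handles.pipelineBinaryCount;
    return bins;
}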
+#define VK_KHR_surface_maintenance1 1 +#define VK_KHR_SURFACE_MAINTENANCE_1_SPEC_VERSION 1 +#define VK_KHR_SURFACE_MAINTENANCE_1_EXTENSION_NAME "VK_KHR_surface_maintenance1" + +typedef enum VkPresentScalingFlagBitsKHR { + VK_PRESENT_SCALING_ONE_TO_ONE_BIT_KHR = 0x00000001, + VK_PRESENT_SCALING_ASPECT_RATIO_STRETCH_BIT_KHR = 0x00000002, + VK_PRESENT_SCALING_STRETCH_BIT_KHR = 0x00000004, + VK_PRESENT_SCALING_ONE_TO_ONE_BIT_EXT = VK_PRESENT_SCALING_ONE_TO_ONE_BIT_KHR, + VK_PRESENT_SCALING_ASPECT_RATIO_STRETCH_BIT_EXT = VK_PRESENT_SCALING_ASPECT_RATIO_STRETCH_BIT_KHR, + VK_PRESENT_SCALING_STRETCH_BIT_EXT = VK_PRESENT_SCALING_STRETCH_BIT_KHR, + VK_PRESENT_SCALING_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPresentScalingFlagBitsKHR; +typedef VkFlags VkPresentScalingFlagsKHR; + +typedef enum VkPresentGravityFlagBitsKHR { + VK_PRESENT_GRAVITY_MIN_BIT_KHR = 0x00000001, + VK_PRESENT_GRAVITY_MAX_BIT_KHR = 0x00000002, + VK_PRESENT_GRAVITY_CENTERED_BIT_KHR = 0x00000004, + VK_PRESENT_GRAVITY_MIN_BIT_EXT = VK_PRESENT_GRAVITY_MIN_BIT_KHR, + VK_PRESENT_GRAVITY_MAX_BIT_EXT = VK_PRESENT_GRAVITY_MAX_BIT_KHR, + VK_PRESENT_GRAVITY_CENTERED_BIT_EXT = VK_PRESENT_GRAVITY_CENTERED_BIT_KHR, + VK_PRESENT_GRAVITY_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPresentGravityFlagBitsKHR; +typedef VkFlags VkPresentGravityFlagsKHR; +typedef struct VkSurfacePresentModeKHR { + VkStructureType sType; + void* pNext; + VkPresentModeKHR presentMode; +} VkSurfacePresentModeKHR; + +typedef struct VkSurfacePresentScalingCapabilitiesKHR { + VkStructureType sType; + void* pNext; + VkPresentScalingFlagsKHR supportedPresentScaling; + VkPresentGravityFlagsKHR supportedPresentGravityX; + VkPresentGravityFlagsKHR supportedPresentGravityY; + VkExtent2D minScaledImageExtent; + VkExtent2D maxScaledImageExtent; +} VkSurfacePresentScalingCapabilitiesKHR; + +typedef struct VkSurfacePresentModeCompatibilityKHR { + VkStructureType sType; + void* pNext; + uint32_t presentModeCount; + VkPresentModeKHR* pPresentModes; +} VkSurfacePresentModeCompatibilityKHR; + + + +// VK_KHR_swapchain_maintenance1 is a preprocessor guard. Do not pass it to API calls. 
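The surface_maintenance1 structures above are consumed through VK_KHR_get_surface_capabilities2: chain VkSurfacePresentModeKHR into the query and VkSurfacePresentScalingCapabilitiesKHR into the result. A sketch for FIFO presentation, assuming both extensions are enabled:

#include <vulkan/vulkan.h>

/* Sketch: how would the surface scale images under FIFO? */
static VkPresentScalingFlagsKHR fifo_scaling(VkPhysicalDevice phys, VkSurfaceKHR surface)
{
    VkSurfacePresentModeKHR mode = {
        .sType = VK_STRUCTURE_TYPE_SURFACE_PRESENT_MODE_KHR,
        .presentMode = VK_PRESENT_MODE_FIFO_KHR,
    };
    const VkPhysicalDeviceSurfaceInfo2KHR info = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR,
        .pNext = &mode,
        .surface = surface,
    };
    VkSurfacePresentScalingCapabilitiesKHR scaling = {
        .sType = VK_STRUCTURE_TYPE_SURFACE_PRESENT_SCALING_CAPABILITIES_KHR,
    };
    VkSurfaceCapabilities2KHR caps = {
        .sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR,
        .pNext = &scaling,
    };
    vkGetPhysicalDeviceSurfaceCapabilities2KHR(phys, &info, &caps);
    return scaling.supportedPresentScaling;
}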
+#define VK_KHR_swapchain_maintenance1 1 +#define VK_KHR_SWAPCHAIN_MAINTENANCE_1_SPEC_VERSION 1 +#define VK_KHR_SWAPCHAIN_MAINTENANCE_1_EXTENSION_NAME "VK_KHR_swapchain_maintenance1" +typedef struct VkPhysicalDeviceSwapchainMaintenance1FeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 swapchainMaintenance1; +} VkPhysicalDeviceSwapchainMaintenance1FeaturesKHR; + +typedef struct VkSwapchainPresentFenceInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t swapchainCount; + const VkFence* pFences; +} VkSwapchainPresentFenceInfoKHR; + +typedef struct VkSwapchainPresentModesCreateInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t presentModeCount; + const VkPresentModeKHR* pPresentModes; +} VkSwapchainPresentModesCreateInfoKHR; + +typedef struct VkSwapchainPresentModeInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t swapchainCount; + const VkPresentModeKHR* pPresentModes; +} VkSwapchainPresentModeInfoKHR; + +typedef struct VkSwapchainPresentScalingCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkPresentScalingFlagsKHR scalingBehavior; + VkPresentGravityFlagsKHR presentGravityX; + VkPresentGravityFlagsKHR presentGravityY; +} VkSwapchainPresentScalingCreateInfoKHR; + +typedef struct VkReleaseSwapchainImagesInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainKHR swapchain; + uint32_t imageIndexCount; + const uint32_t* pImageIndices; +} VkReleaseSwapchainImagesInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkReleaseSwapchainImagesKHR)(VkDevice device, const VkReleaseSwapchainImagesInfoKHR* pReleaseInfo); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkReleaseSwapchainImagesKHR( + VkDevice device, + const VkReleaseSwapchainImagesInfoKHR* pReleaseInfo); +#endif +#endif + + +// VK_KHR_internally_synchronized_queues is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_internally_synchronized_queues 1 +#define VK_KHR_INTERNALLY_SYNCHRONIZED_QUEUES_SPEC_VERSION 1 +#define VK_KHR_INTERNALLY_SYNCHRONIZED_QUEUES_EXTENSION_NAME "VK_KHR_internally_synchronized_queues" +typedef struct VkPhysicalDeviceInternallySynchronizedQueuesFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 internallySynchronizedQueues; +} VkPhysicalDeviceInternallySynchronizedQueuesFeaturesKHR; + + + +// VK_KHR_cooperative_matrix is a preprocessor guard. Do not pass it to API calls. 
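VkSwapchainPresentFenceInfoKHR above attaches one fence per swapchain to a present, giving a well-defined point after which per-present resources (and ultimately the swapchain itself) may be torn down. A sketch, assuming `fence` is unsignaled and the feature is enabled:

#include <vulkan/vulkan.h>

/* Sketch: present with a fence; wait on it before destroying the swapchain. */
static VkResult present_with_fence(VkQueue queue, VkSwapchainKHR swapchain,
                                   uint32_t imageIndex, VkFence fence)
{
    const VkSwapchainPresentFenceInfoKHR fenceInfo = {
        .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_PRESENT_FENCE_INFO_KHR,
        .swapchainCount = 1,
        .pFences = &fence,
    };
    const VkPresentInfoKHR present = {
        .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
        .pNext = &fenceInfo,
        .swapchainCount = 1,
        .pSwapchains = &swapchain,
        .pImageIndices = &imageIndex,
    };
    return vkQueuePresentKHR(queue, &present);
}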
+#define VK_KHR_cooperative_matrix 1 +#define VK_KHR_COOPERATIVE_MATRIX_SPEC_VERSION 2 +#define VK_KHR_COOPERATIVE_MATRIX_EXTENSION_NAME "VK_KHR_cooperative_matrix" + +typedef enum VkComponentTypeKHR { + VK_COMPONENT_TYPE_FLOAT16_KHR = 0, + VK_COMPONENT_TYPE_FLOAT32_KHR = 1, + VK_COMPONENT_TYPE_FLOAT64_KHR = 2, + VK_COMPONENT_TYPE_SINT8_KHR = 3, + VK_COMPONENT_TYPE_SINT16_KHR = 4, + VK_COMPONENT_TYPE_SINT32_KHR = 5, + VK_COMPONENT_TYPE_SINT64_KHR = 6, + VK_COMPONENT_TYPE_UINT8_KHR = 7, + VK_COMPONENT_TYPE_UINT16_KHR = 8, + VK_COMPONENT_TYPE_UINT32_KHR = 9, + VK_COMPONENT_TYPE_UINT64_KHR = 10, + VK_COMPONENT_TYPE_BFLOAT16_KHR = 1000141000, + VK_COMPONENT_TYPE_SINT8_PACKED_NV = 1000491000, + VK_COMPONENT_TYPE_UINT8_PACKED_NV = 1000491001, + VK_COMPONENT_TYPE_FLOAT8_E4M3_EXT = 1000491002, + VK_COMPONENT_TYPE_FLOAT8_E5M2_EXT = 1000491003, + VK_COMPONENT_TYPE_FLOAT16_NV = VK_COMPONENT_TYPE_FLOAT16_KHR, + VK_COMPONENT_TYPE_FLOAT32_NV = VK_COMPONENT_TYPE_FLOAT32_KHR, + VK_COMPONENT_TYPE_FLOAT64_NV = VK_COMPONENT_TYPE_FLOAT64_KHR, + VK_COMPONENT_TYPE_SINT8_NV = VK_COMPONENT_TYPE_SINT8_KHR, + VK_COMPONENT_TYPE_SINT16_NV = VK_COMPONENT_TYPE_SINT16_KHR, + VK_COMPONENT_TYPE_SINT32_NV = VK_COMPONENT_TYPE_SINT32_KHR, + VK_COMPONENT_TYPE_SINT64_NV = VK_COMPONENT_TYPE_SINT64_KHR, + VK_COMPONENT_TYPE_UINT8_NV = VK_COMPONENT_TYPE_UINT8_KHR, + VK_COMPONENT_TYPE_UINT16_NV = VK_COMPONENT_TYPE_UINT16_KHR, + VK_COMPONENT_TYPE_UINT32_NV = VK_COMPONENT_TYPE_UINT32_KHR, + VK_COMPONENT_TYPE_UINT64_NV = VK_COMPONENT_TYPE_UINT64_KHR, + VK_COMPONENT_TYPE_FLOAT_E4M3_NV = VK_COMPONENT_TYPE_FLOAT8_E4M3_EXT, + VK_COMPONENT_TYPE_FLOAT_E5M2_NV = VK_COMPONENT_TYPE_FLOAT8_E5M2_EXT, + VK_COMPONENT_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkComponentTypeKHR; + +typedef enum VkScopeKHR { + VK_SCOPE_DEVICE_KHR = 1, + VK_SCOPE_WORKGROUP_KHR = 2, + VK_SCOPE_SUBGROUP_KHR = 3, + VK_SCOPE_QUEUE_FAMILY_KHR = 5, + VK_SCOPE_DEVICE_NV = VK_SCOPE_DEVICE_KHR, + VK_SCOPE_WORKGROUP_NV = VK_SCOPE_WORKGROUP_KHR, + VK_SCOPE_SUBGROUP_NV = VK_SCOPE_SUBGROUP_KHR, + VK_SCOPE_QUEUE_FAMILY_NV = VK_SCOPE_QUEUE_FAMILY_KHR, + VK_SCOPE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkScopeKHR; +typedef struct VkCooperativeMatrixPropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t MSize; + uint32_t NSize; + uint32_t KSize; + VkComponentTypeKHR AType; + VkComponentTypeKHR BType; + VkComponentTypeKHR CType; + VkComponentTypeKHR ResultType; + VkBool32 saturatingAccumulation; + VkScopeKHR scope; +} VkCooperativeMatrixPropertiesKHR; + +typedef struct VkPhysicalDeviceCooperativeMatrixFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 cooperativeMatrix; + VkBool32 cooperativeMatrixRobustBufferAccess; +} VkPhysicalDeviceCooperativeMatrixFeaturesKHR; + +typedef struct VkPhysicalDeviceCooperativeMatrixPropertiesKHR { + VkStructureType sType; + void* pNext; + VkShaderStageFlags cooperativeMatrixSupportedStages; +} VkPhysicalDeviceCooperativeMatrixPropertiesKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkCooperativeMatrixPropertiesKHR* pProperties); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkCooperativeMatrixPropertiesKHR* pProperties); +#endif +#endif + + +// VK_KHR_compute_shader_derivatives is a preprocessor guard. Do not pass it to API calls. 
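vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR above uses the standard two-call enumeration; note that each output entry's sType must be set before the second call. A sketch; the caller frees the returned array:

#include <stdlib.h>
#include <vulkan/vulkan.h>

/* Sketch: enumerate the supported cooperative-matrix M/N/K shapes. */
static VkCooperativeMatrixPropertiesKHR* coop_shapes(VkPhysicalDevice phys, uint32_t* n)
{
    vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR(phys, n, NULL);
    VkCooperativeMatrixPropertiesKHR* props = calloc(*n, sizeof *props);
    for (uint32_t i = 0; i < *n; ++i)
        props[i].sType = VK_STRUCTURE_TYPE_COOPERATIVE_MATRIX_PROPERTIES_KHR;
    vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR(phys, n, props);
    return props;
}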
+#define VK_KHR_compute_shader_derivatives 1 +#define VK_KHR_COMPUTE_SHADER_DERIVATIVES_SPEC_VERSION 1 +#define VK_KHR_COMPUTE_SHADER_DERIVATIVES_EXTENSION_NAME "VK_KHR_compute_shader_derivatives" +typedef struct VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 computeDerivativeGroupQuads; + VkBool32 computeDerivativeGroupLinear; +} VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR; + +typedef struct VkPhysicalDeviceComputeShaderDerivativesPropertiesKHR { + VkStructureType sType; + void* pNext; + VkBool32 meshAndTaskShaderDerivatives; +} VkPhysicalDeviceComputeShaderDerivativesPropertiesKHR; + + + +// VK_KHR_video_decode_av1 is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_video_decode_av1 1 +#include "vk_video/vulkan_video_codec_av1std.h" +#include "vk_video/vulkan_video_codec_av1std_decode.h" +#define VK_MAX_VIDEO_AV1_REFERENCES_PER_FRAME_KHR 7U +#define VK_KHR_VIDEO_DECODE_AV1_SPEC_VERSION 1 +#define VK_KHR_VIDEO_DECODE_AV1_EXTENSION_NAME "VK_KHR_video_decode_av1" +typedef struct VkVideoDecodeAV1ProfileInfoKHR { + VkStructureType sType; + const void* pNext; + StdVideoAV1Profile stdProfile; + VkBool32 filmGrainSupport; +} VkVideoDecodeAV1ProfileInfoKHR; + +typedef struct VkVideoDecodeAV1CapabilitiesKHR { + VkStructureType sType; + void* pNext; + StdVideoAV1Level maxLevel; +} VkVideoDecodeAV1CapabilitiesKHR; + +typedef struct VkVideoDecodeAV1SessionParametersCreateInfoKHR { + VkStructureType sType; + const void* pNext; + const StdVideoAV1SequenceHeader* pStdSequenceHeader; +} VkVideoDecodeAV1SessionParametersCreateInfoKHR; + +typedef struct VkVideoDecodeAV1PictureInfoKHR { + VkStructureType sType; + const void* pNext; + const StdVideoDecodeAV1PictureInfo* pStdPictureInfo; + int32_t referenceNameSlotIndices[VK_MAX_VIDEO_AV1_REFERENCES_PER_FRAME_KHR]; + uint32_t frameHeaderOffset; + uint32_t tileCount; + const uint32_t* pTileOffsets; + const uint32_t* pTileSizes; +} VkVideoDecodeAV1PictureInfoKHR; + +typedef struct VkVideoDecodeAV1DpbSlotInfoKHR { + VkStructureType sType; + const void* pNext; + const StdVideoDecodeAV1ReferenceInfo* pStdReferenceInfo; +} VkVideoDecodeAV1DpbSlotInfoKHR; + + + +// VK_KHR_video_encode_av1 is a preprocessor guard. Do not pass it to API calls. 
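Like the other feature structs above, VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR is queried through VkPhysicalDeviceFeatures2 and then chained unchanged into VkDeviceCreateInfo to enable it. A query sketch:

#include <vulkan/vulkan.h>

/* Sketch: does this device support quad-group derivatives in compute? */
static VkBool32 has_quad_derivatives(VkPhysicalDevice phys)
{
    VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR deriv = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_KHR,
    };
    VkPhysicalDeviceFeatures2 feats = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
        .pNext = &deriv,
    };
    vkGetPhysicalDeviceFeatures2(phys, &feats);
    return deriv.computeDerivativeGroupQuads;
}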
+#define VK_KHR_video_encode_av1 1 +#include "vk_video/vulkan_video_codec_av1std_encode.h" +#define VK_KHR_VIDEO_ENCODE_AV1_SPEC_VERSION 1 +#define VK_KHR_VIDEO_ENCODE_AV1_EXTENSION_NAME "VK_KHR_video_encode_av1" + +typedef enum VkVideoEncodeAV1PredictionModeKHR { + VK_VIDEO_ENCODE_AV1_PREDICTION_MODE_INTRA_ONLY_KHR = 0, + VK_VIDEO_ENCODE_AV1_PREDICTION_MODE_SINGLE_REFERENCE_KHR = 1, + VK_VIDEO_ENCODE_AV1_PREDICTION_MODE_UNIDIRECTIONAL_COMPOUND_KHR = 2, + VK_VIDEO_ENCODE_AV1_PREDICTION_MODE_BIDIRECTIONAL_COMPOUND_KHR = 3, + VK_VIDEO_ENCODE_AV1_PREDICTION_MODE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoEncodeAV1PredictionModeKHR; + +typedef enum VkVideoEncodeAV1RateControlGroupKHR { + VK_VIDEO_ENCODE_AV1_RATE_CONTROL_GROUP_INTRA_KHR = 0, + VK_VIDEO_ENCODE_AV1_RATE_CONTROL_GROUP_PREDICTIVE_KHR = 1, + VK_VIDEO_ENCODE_AV1_RATE_CONTROL_GROUP_BIPREDICTIVE_KHR = 2, + VK_VIDEO_ENCODE_AV1_RATE_CONTROL_GROUP_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoEncodeAV1RateControlGroupKHR; + +typedef enum VkVideoEncodeAV1CapabilityFlagBitsKHR { + VK_VIDEO_ENCODE_AV1_CAPABILITY_PER_RATE_CONTROL_GROUP_MIN_MAX_Q_INDEX_BIT_KHR = 0x00000001, + VK_VIDEO_ENCODE_AV1_CAPABILITY_GENERATE_OBU_EXTENSION_HEADER_BIT_KHR = 0x00000002, + VK_VIDEO_ENCODE_AV1_CAPABILITY_PRIMARY_REFERENCE_CDF_ONLY_BIT_KHR = 0x00000004, + VK_VIDEO_ENCODE_AV1_CAPABILITY_FRAME_SIZE_OVERRIDE_BIT_KHR = 0x00000008, + VK_VIDEO_ENCODE_AV1_CAPABILITY_MOTION_VECTOR_SCALING_BIT_KHR = 0x00000010, + VK_VIDEO_ENCODE_AV1_CAPABILITY_COMPOUND_PREDICTION_INTRA_REFRESH_BIT_KHR = 0x00000020, + VK_VIDEO_ENCODE_AV1_CAPABILITY_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoEncodeAV1CapabilityFlagBitsKHR; +typedef VkFlags VkVideoEncodeAV1CapabilityFlagsKHR; + +typedef enum VkVideoEncodeAV1StdFlagBitsKHR { + VK_VIDEO_ENCODE_AV1_STD_UNIFORM_TILE_SPACING_FLAG_SET_BIT_KHR = 0x00000001, + VK_VIDEO_ENCODE_AV1_STD_SKIP_MODE_PRESENT_UNSET_BIT_KHR = 0x00000002, + VK_VIDEO_ENCODE_AV1_STD_PRIMARY_REF_FRAME_BIT_KHR = 0x00000004, + VK_VIDEO_ENCODE_AV1_STD_DELTA_Q_BIT_KHR = 0x00000008, + VK_VIDEO_ENCODE_AV1_STD_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoEncodeAV1StdFlagBitsKHR; +typedef VkFlags VkVideoEncodeAV1StdFlagsKHR; + +typedef enum VkVideoEncodeAV1SuperblockSizeFlagBitsKHR { + VK_VIDEO_ENCODE_AV1_SUPERBLOCK_SIZE_64_BIT_KHR = 0x00000001, + VK_VIDEO_ENCODE_AV1_SUPERBLOCK_SIZE_128_BIT_KHR = 0x00000002, + VK_VIDEO_ENCODE_AV1_SUPERBLOCK_SIZE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoEncodeAV1SuperblockSizeFlagBitsKHR; +typedef VkFlags VkVideoEncodeAV1SuperblockSizeFlagsKHR; + +typedef enum VkVideoEncodeAV1RateControlFlagBitsKHR { + VK_VIDEO_ENCODE_AV1_RATE_CONTROL_REGULAR_GOP_BIT_KHR = 0x00000001, + VK_VIDEO_ENCODE_AV1_RATE_CONTROL_TEMPORAL_LAYER_PATTERN_DYADIC_BIT_KHR = 0x00000002, + VK_VIDEO_ENCODE_AV1_RATE_CONTROL_REFERENCE_PATTERN_FLAT_BIT_KHR = 0x00000004, + VK_VIDEO_ENCODE_AV1_RATE_CONTROL_REFERENCE_PATTERN_DYADIC_BIT_KHR = 0x00000008, + VK_VIDEO_ENCODE_AV1_RATE_CONTROL_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoEncodeAV1RateControlFlagBitsKHR; +typedef VkFlags VkVideoEncodeAV1RateControlFlagsKHR; +typedef struct VkPhysicalDeviceVideoEncodeAV1FeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 videoEncodeAV1; +} VkPhysicalDeviceVideoEncodeAV1FeaturesKHR; + +typedef struct VkVideoEncodeAV1CapabilitiesKHR { + VkStructureType sType; + void* pNext; + VkVideoEncodeAV1CapabilityFlagsKHR flags; + StdVideoAV1Level maxLevel; + VkExtent2D codedPictureAlignment; + VkExtent2D maxTiles; + VkExtent2D minTileSize; + VkExtent2D maxTileSize; + VkVideoEncodeAV1SuperblockSizeFlagsKHR 
superblockSizes; + uint32_t maxSingleReferenceCount; + uint32_t singleReferenceNameMask; + uint32_t maxUnidirectionalCompoundReferenceCount; + uint32_t maxUnidirectionalCompoundGroup1ReferenceCount; + uint32_t unidirectionalCompoundReferenceNameMask; + uint32_t maxBidirectionalCompoundReferenceCount; + uint32_t maxBidirectionalCompoundGroup1ReferenceCount; + uint32_t maxBidirectionalCompoundGroup2ReferenceCount; + uint32_t bidirectionalCompoundReferenceNameMask; + uint32_t maxTemporalLayerCount; + uint32_t maxSpatialLayerCount; + uint32_t maxOperatingPoints; + uint32_t minQIndex; + uint32_t maxQIndex; + VkBool32 prefersGopRemainingFrames; + VkBool32 requiresGopRemainingFrames; + VkVideoEncodeAV1StdFlagsKHR stdSyntaxFlags; +} VkVideoEncodeAV1CapabilitiesKHR; + +typedef struct VkVideoEncodeAV1QIndexKHR { + uint32_t intraQIndex; + uint32_t predictiveQIndex; + uint32_t bipredictiveQIndex; +} VkVideoEncodeAV1QIndexKHR; + +typedef struct VkVideoEncodeAV1QualityLevelPropertiesKHR { + VkStructureType sType; + void* pNext; + VkVideoEncodeAV1RateControlFlagsKHR preferredRateControlFlags; + uint32_t preferredGopFrameCount; + uint32_t preferredKeyFramePeriod; + uint32_t preferredConsecutiveBipredictiveFrameCount; + uint32_t preferredTemporalLayerCount; + VkVideoEncodeAV1QIndexKHR preferredConstantQIndex; + uint32_t preferredMaxSingleReferenceCount; + uint32_t preferredSingleReferenceNameMask; + uint32_t preferredMaxUnidirectionalCompoundReferenceCount; + uint32_t preferredMaxUnidirectionalCompoundGroup1ReferenceCount; + uint32_t preferredUnidirectionalCompoundReferenceNameMask; + uint32_t preferredMaxBidirectionalCompoundReferenceCount; + uint32_t preferredMaxBidirectionalCompoundGroup1ReferenceCount; + uint32_t preferredMaxBidirectionalCompoundGroup2ReferenceCount; + uint32_t preferredBidirectionalCompoundReferenceNameMask; +} VkVideoEncodeAV1QualityLevelPropertiesKHR; + +typedef struct VkVideoEncodeAV1SessionCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkBool32 useMaxLevel; + StdVideoAV1Level maxLevel; +} VkVideoEncodeAV1SessionCreateInfoKHR; + +typedef struct VkVideoEncodeAV1SessionParametersCreateInfoKHR { + VkStructureType sType; + const void* pNext; + const StdVideoAV1SequenceHeader* pStdSequenceHeader; + const StdVideoEncodeAV1DecoderModelInfo* pStdDecoderModelInfo; + uint32_t stdOperatingPointCount; + const StdVideoEncodeAV1OperatingPointInfo* pStdOperatingPoints; +} VkVideoEncodeAV1SessionParametersCreateInfoKHR; + +typedef struct VkVideoEncodeAV1PictureInfoKHR { + VkStructureType sType; + const void* pNext; + VkVideoEncodeAV1PredictionModeKHR predictionMode; + VkVideoEncodeAV1RateControlGroupKHR rateControlGroup; + uint32_t constantQIndex; + const StdVideoEncodeAV1PictureInfo* pStdPictureInfo; + int32_t referenceNameSlotIndices[VK_MAX_VIDEO_AV1_REFERENCES_PER_FRAME_KHR]; + VkBool32 primaryReferenceCdfOnly; + VkBool32 generateObuExtensionHeader; +} VkVideoEncodeAV1PictureInfoKHR; + +typedef struct VkVideoEncodeAV1DpbSlotInfoKHR { + VkStructureType sType; + const void* pNext; + const StdVideoEncodeAV1ReferenceInfo* pStdReferenceInfo; +} VkVideoEncodeAV1DpbSlotInfoKHR; + +typedef struct VkVideoEncodeAV1ProfileInfoKHR { + VkStructureType sType; + const void* pNext; + StdVideoAV1Profile stdProfile; +} VkVideoEncodeAV1ProfileInfoKHR; + +typedef struct VkVideoEncodeAV1FrameSizeKHR { + uint32_t intraFrameSize; + uint32_t predictiveFrameSize; + uint32_t bipredictiveFrameSize; +} VkVideoEncodeAV1FrameSizeKHR; + +typedef struct VkVideoEncodeAV1GopRemainingFrameInfoKHR { + 
VkStructureType sType; + const void* pNext; + VkBool32 useGopRemainingFrames; + uint32_t gopRemainingIntra; + uint32_t gopRemainingPredictive; + uint32_t gopRemainingBipredictive; +} VkVideoEncodeAV1GopRemainingFrameInfoKHR; + +typedef struct VkVideoEncodeAV1RateControlInfoKHR { + VkStructureType sType; + const void* pNext; + VkVideoEncodeAV1RateControlFlagsKHR flags; + uint32_t gopFrameCount; + uint32_t keyFramePeriod; + uint32_t consecutiveBipredictiveFrameCount; + uint32_t temporalLayerCount; +} VkVideoEncodeAV1RateControlInfoKHR; + +typedef struct VkVideoEncodeAV1RateControlLayerInfoKHR { + VkStructureType sType; + const void* pNext; + VkBool32 useMinQIndex; + VkVideoEncodeAV1QIndexKHR minQIndex; + VkBool32 useMaxQIndex; + VkVideoEncodeAV1QIndexKHR maxQIndex; + VkBool32 useMaxFrameSize; + VkVideoEncodeAV1FrameSizeKHR maxFrameSize; +} VkVideoEncodeAV1RateControlLayerInfoKHR; + + + +// VK_KHR_video_decode_vp9 is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_video_decode_vp9 1 +#include "vk_video/vulkan_video_codec_vp9std.h" +#include "vk_video/vulkan_video_codec_vp9std_decode.h" +#define VK_MAX_VIDEO_VP9_REFERENCES_PER_FRAME_KHR 3U +#define VK_KHR_VIDEO_DECODE_VP9_SPEC_VERSION 1 +#define VK_KHR_VIDEO_DECODE_VP9_EXTENSION_NAME "VK_KHR_video_decode_vp9" +typedef struct VkPhysicalDeviceVideoDecodeVP9FeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 videoDecodeVP9; +} VkPhysicalDeviceVideoDecodeVP9FeaturesKHR; + +typedef struct VkVideoDecodeVP9ProfileInfoKHR { + VkStructureType sType; + const void* pNext; + StdVideoVP9Profile stdProfile; +} VkVideoDecodeVP9ProfileInfoKHR; + +typedef struct VkVideoDecodeVP9CapabilitiesKHR { + VkStructureType sType; + void* pNext; + StdVideoVP9Level maxLevel; +} VkVideoDecodeVP9CapabilitiesKHR; + +typedef struct VkVideoDecodeVP9PictureInfoKHR { + VkStructureType sType; + const void* pNext; + const StdVideoDecodeVP9PictureInfo* pStdPictureInfo; + int32_t referenceNameSlotIndices[VK_MAX_VIDEO_VP9_REFERENCES_PER_FRAME_KHR]; + uint32_t uncompressedHeaderOffset; + uint32_t compressedHeaderOffset; + uint32_t tilesOffset; +} VkVideoDecodeVP9PictureInfoKHR; + + + +// VK_KHR_video_maintenance1 is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_video_maintenance1 1 +#define VK_KHR_VIDEO_MAINTENANCE_1_SPEC_VERSION 1 +#define VK_KHR_VIDEO_MAINTENANCE_1_EXTENSION_NAME "VK_KHR_video_maintenance1" +typedef struct VkPhysicalDeviceVideoMaintenance1FeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 videoMaintenance1; +} VkPhysicalDeviceVideoMaintenance1FeaturesKHR; + +typedef struct VkVideoInlineQueryInfoKHR { + VkStructureType sType; + const void* pNext; + VkQueryPool queryPool; + uint32_t firstQuery; + uint32_t queryCount; +} VkVideoInlineQueryInfoKHR; + + + +// VK_KHR_vertex_attribute_divisor is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_vertex_attribute_divisor 1 +#define VK_KHR_VERTEX_ATTRIBUTE_DIVISOR_SPEC_VERSION 1 +#define VK_KHR_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME "VK_KHR_vertex_attribute_divisor" +typedef VkPhysicalDeviceVertexAttributeDivisorProperties VkPhysicalDeviceVertexAttributeDivisorPropertiesKHR; + +typedef VkVertexInputBindingDivisorDescription VkVertexInputBindingDivisorDescriptionKHR; + +typedef VkPipelineVertexInputDivisorStateCreateInfo VkPipelineVertexInputDivisorStateCreateInfoKHR; + +typedef VkPhysicalDeviceVertexAttributeDivisorFeatures VkPhysicalDeviceVertexAttributeDivisorFeaturesKHR; + + + +// VK_KHR_load_store_op_none is a preprocessor guard. 
Do not pass it to API calls. +#define VK_KHR_load_store_op_none 1 +#define VK_KHR_LOAD_STORE_OP_NONE_SPEC_VERSION 1 +#define VK_KHR_LOAD_STORE_OP_NONE_EXTENSION_NAME "VK_KHR_load_store_op_none" + + +// VK_KHR_unified_image_layouts is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_unified_image_layouts 1 +#define VK_KHR_UNIFIED_IMAGE_LAYOUTS_SPEC_VERSION 1 +#define VK_KHR_UNIFIED_IMAGE_LAYOUTS_EXTENSION_NAME "VK_KHR_unified_image_layouts" +typedef struct VkPhysicalDeviceUnifiedImageLayoutsFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 unifiedImageLayouts; + VkBool32 unifiedImageLayoutsVideo; +} VkPhysicalDeviceUnifiedImageLayoutsFeaturesKHR; + +typedef struct VkAttachmentFeedbackLoopInfoEXT { + VkStructureType sType; + const void* pNext; + VkBool32 feedbackLoopEnable; +} VkAttachmentFeedbackLoopInfoEXT; + + + +// VK_KHR_shader_float_controls2 is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_shader_float_controls2 1 +#define VK_KHR_SHADER_FLOAT_CONTROLS_2_SPEC_VERSION 1 +#define VK_KHR_SHADER_FLOAT_CONTROLS_2_EXTENSION_NAME "VK_KHR_shader_float_controls2" +typedef VkPhysicalDeviceShaderFloatControls2Features VkPhysicalDeviceShaderFloatControls2FeaturesKHR; + + + +// VK_KHR_index_type_uint8 is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_index_type_uint8 1 +#define VK_KHR_INDEX_TYPE_UINT8_SPEC_VERSION 1 +#define VK_KHR_INDEX_TYPE_UINT8_EXTENSION_NAME "VK_KHR_index_type_uint8" +typedef VkPhysicalDeviceIndexTypeUint8Features VkPhysicalDeviceIndexTypeUint8FeaturesKHR; + + + +// VK_KHR_line_rasterization is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_line_rasterization 1 +#define VK_KHR_LINE_RASTERIZATION_SPEC_VERSION 1 +#define VK_KHR_LINE_RASTERIZATION_EXTENSION_NAME "VK_KHR_line_rasterization" +typedef VkLineRasterizationMode VkLineRasterizationModeKHR; + +typedef VkPhysicalDeviceLineRasterizationFeatures VkPhysicalDeviceLineRasterizationFeaturesKHR; + +typedef VkPhysicalDeviceLineRasterizationProperties VkPhysicalDeviceLineRasterizationPropertiesKHR; + +typedef VkPipelineRasterizationLineStateCreateInfo VkPipelineRasterizationLineStateCreateInfoKHR; + +typedef void (VKAPI_PTR *PFN_vkCmdSetLineStippleKHR)(VkCommandBuffer commandBuffer, uint32_t lineStippleFactor, uint16_t lineStipplePattern); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetLineStippleKHR( + VkCommandBuffer commandBuffer, + uint32_t lineStippleFactor, + uint16_t lineStipplePattern); +#endif +#endif + + +// VK_KHR_calibrated_timestamps is a preprocessor guard. Do not pass it to API calls. 
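vkCmdSetLineStippleKHR above sets dynamic stipple state; it only takes effect on a pipeline that enabled stippled lines via VkPipelineRasterizationLineStateCreateInfo. A sketch producing a dashed pattern:

#include <vulkan/vulkan.h>

/* Sketch: 8 pixels on, 8 off, with each pattern bit stretched 2x. */
static void set_dashed_lines(VkCommandBuffer cmd)
{
    vkCmdSetLineStippleKHR(cmd, 2, 0x00FF); /* factor, 16-bit pattern */
}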
+#define VK_KHR_calibrated_timestamps 1 +#define VK_KHR_CALIBRATED_TIMESTAMPS_SPEC_VERSION 1 +#define VK_KHR_CALIBRATED_TIMESTAMPS_EXTENSION_NAME "VK_KHR_calibrated_timestamps" + +typedef enum VkTimeDomainKHR { + VK_TIME_DOMAIN_DEVICE_KHR = 0, + VK_TIME_DOMAIN_CLOCK_MONOTONIC_KHR = 1, + VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_KHR = 2, + VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_KHR = 3, + VK_TIME_DOMAIN_PRESENT_STAGE_LOCAL_EXT = 1000208000, + VK_TIME_DOMAIN_SWAPCHAIN_LOCAL_EXT = 1000208001, + VK_TIME_DOMAIN_DEVICE_EXT = VK_TIME_DOMAIN_DEVICE_KHR, + VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT = VK_TIME_DOMAIN_CLOCK_MONOTONIC_KHR, + VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT = VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_KHR, + VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT = VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_KHR, + VK_TIME_DOMAIN_MAX_ENUM_KHR = 0x7FFFFFFF +} VkTimeDomainKHR; +typedef struct VkCalibratedTimestampInfoKHR { + VkStructureType sType; + const void* pNext; + VkTimeDomainKHR timeDomain; +} VkCalibratedTimestampInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsKHR)(VkPhysicalDevice physicalDevice, uint32_t* pTimeDomainCount, VkTimeDomainKHR* pTimeDomains); +typedef VkResult (VKAPI_PTR *PFN_vkGetCalibratedTimestampsKHR)(VkDevice device, uint32_t timestampCount, const VkCalibratedTimestampInfoKHR* pTimestampInfos, uint64_t* pTimestamps, uint64_t* pMaxDeviation); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceCalibrateableTimeDomainsKHR( + VkPhysicalDevice physicalDevice, + uint32_t* pTimeDomainCount, + VkTimeDomainKHR* pTimeDomains); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetCalibratedTimestampsKHR( + VkDevice device, + uint32_t timestampCount, + const VkCalibratedTimestampInfoKHR* pTimestampInfos, + uint64_t* pTimestamps, + uint64_t* pMaxDeviation); +#endif +#endif + + +// VK_KHR_shader_expect_assume is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_shader_expect_assume 1 +#define VK_KHR_SHADER_EXPECT_ASSUME_SPEC_VERSION 1 +#define VK_KHR_SHADER_EXPECT_ASSUME_EXTENSION_NAME "VK_KHR_shader_expect_assume" +typedef VkPhysicalDeviceShaderExpectAssumeFeatures VkPhysicalDeviceShaderExpectAssumeFeaturesKHR; + + + +// VK_KHR_maintenance6 is a preprocessor guard. Do not pass it to API calls. 
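vkGetCalibratedTimestampsKHR above samples several time domains as close together as the implementation can manage, reporting the worst-case skew through pMaxDeviation in nanoseconds. A sketch correlating the GPU clock with CLOCK_MONOTONIC, assuming both domains were reported as calibrateable:

#include <vulkan/vulkan.h>

/* Sketch: ts[0] = device clock, ts[1] = CLOCK_MONOTONIC. */
static VkResult sample_clocks(VkDevice device, uint64_t ts[2], uint64_t* maxDeviation)
{
    const VkCalibratedTimestampInfoKHR infos[2] = {
        { VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_KHR, NULL,
          VK_TIME_DOMAIN_DEVICE_KHR },
        { VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_KHR, NULL,
          VK_TIME_DOMAIN_CLOCK_MONOTONIC_KHR },
    };
    return vkGetCalibratedTimestampsKHR(device, 2, infos, ts, maxDeviation);
}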
+#define VK_KHR_maintenance6 1 +#define VK_KHR_MAINTENANCE_6_SPEC_VERSION 1 +#define VK_KHR_MAINTENANCE_6_EXTENSION_NAME "VK_KHR_maintenance6" +typedef VkPhysicalDeviceMaintenance6Features VkPhysicalDeviceMaintenance6FeaturesKHR; + +typedef VkPhysicalDeviceMaintenance6Properties VkPhysicalDeviceMaintenance6PropertiesKHR; + +typedef VkBindMemoryStatus VkBindMemoryStatusKHR; + +typedef VkBindDescriptorSetsInfo VkBindDescriptorSetsInfoKHR; + +typedef VkPushConstantsInfo VkPushConstantsInfoKHR; + +typedef VkPushDescriptorSetInfo VkPushDescriptorSetInfoKHR; + +typedef VkPushDescriptorSetWithTemplateInfo VkPushDescriptorSetWithTemplateInfoKHR; + +typedef struct VkSetDescriptorBufferOffsetsInfoEXT { + VkStructureType sType; + const void* pNext; + VkShaderStageFlags stageFlags; + VkPipelineLayout layout; + uint32_t firstSet; + uint32_t setCount; + const uint32_t* pBufferIndices; + const VkDeviceSize* pOffsets; +} VkSetDescriptorBufferOffsetsInfoEXT; + +typedef struct VkBindDescriptorBufferEmbeddedSamplersInfoEXT { + VkStructureType sType; + const void* pNext; + VkShaderStageFlags stageFlags; + VkPipelineLayout layout; + uint32_t set; +} VkBindDescriptorBufferEmbeddedSamplersInfoEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdBindDescriptorSets2KHR)(VkCommandBuffer commandBuffer, const VkBindDescriptorSetsInfo* pBindDescriptorSetsInfo); +typedef void (VKAPI_PTR *PFN_vkCmdPushConstants2KHR)(VkCommandBuffer commandBuffer, const VkPushConstantsInfo* pPushConstantsInfo); +typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSet2KHR)(VkCommandBuffer commandBuffer, const VkPushDescriptorSetInfo* pPushDescriptorSetInfo); +typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetWithTemplate2KHR)(VkCommandBuffer commandBuffer, const VkPushDescriptorSetWithTemplateInfo* pPushDescriptorSetWithTemplateInfo); +typedef void (VKAPI_PTR *PFN_vkCmdSetDescriptorBufferOffsets2EXT)(VkCommandBuffer commandBuffer, const VkSetDescriptorBufferOffsetsInfoEXT* pSetDescriptorBufferOffsetsInfo); +typedef void (VKAPI_PTR *PFN_vkCmdBindDescriptorBufferEmbeddedSamplers2EXT)(VkCommandBuffer commandBuffer, const VkBindDescriptorBufferEmbeddedSamplersInfoEXT* pBindDescriptorBufferEmbeddedSamplersInfo); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets2KHR( + VkCommandBuffer commandBuffer, + const VkBindDescriptorSetsInfo* pBindDescriptorSetsInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants2KHR( + VkCommandBuffer commandBuffer, + const VkPushConstantsInfo* pPushConstantsInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSet2KHR( + VkCommandBuffer commandBuffer, + const VkPushDescriptorSetInfo* pPushDescriptorSetInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetWithTemplate2KHR( + VkCommandBuffer commandBuffer, + const VkPushDescriptorSetWithTemplateInfo* pPushDescriptorSetWithTemplateInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetDescriptorBufferOffsets2EXT( + VkCommandBuffer commandBuffer, + const VkSetDescriptorBufferOffsetsInfoEXT* pSetDescriptorBufferOffsetsInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorBufferEmbeddedSamplers2EXT( + VkCommandBuffer commandBuffer, + const VkBindDescriptorBufferEmbeddedSamplersInfoEXT* pBindDescriptorBufferEmbeddedSamplersInfo); +#endif +#endif + + +// VK_KHR_copy_memory_indirect is a preprocessor 
guard. Do not pass it to API calls. +#define VK_KHR_copy_memory_indirect 1 +#define VK_KHR_COPY_MEMORY_INDIRECT_SPEC_VERSION 1 +#define VK_KHR_COPY_MEMORY_INDIRECT_EXTENSION_NAME "VK_KHR_copy_memory_indirect" + +typedef enum VkAddressCopyFlagBitsKHR { + VK_ADDRESS_COPY_DEVICE_LOCAL_BIT_KHR = 0x00000001, + VK_ADDRESS_COPY_SPARSE_BIT_KHR = 0x00000002, + VK_ADDRESS_COPY_PROTECTED_BIT_KHR = 0x00000004, + VK_ADDRESS_COPY_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkAddressCopyFlagBitsKHR; +typedef VkFlags VkAddressCopyFlagsKHR; +typedef struct VkStridedDeviceAddressRangeKHR { + VkDeviceAddress address; + VkDeviceSize size; + VkDeviceSize stride; +} VkStridedDeviceAddressRangeKHR; + +typedef struct VkCopyMemoryIndirectCommandKHR { + VkDeviceAddress srcAddress; + VkDeviceAddress dstAddress; + VkDeviceSize size; +} VkCopyMemoryIndirectCommandKHR; + +typedef struct VkCopyMemoryIndirectInfoKHR { + VkStructureType sType; + const void* pNext; + VkAddressCopyFlagsKHR srcCopyFlags; + VkAddressCopyFlagsKHR dstCopyFlags; + uint32_t copyCount; + VkStridedDeviceAddressRangeKHR copyAddressRange; +} VkCopyMemoryIndirectInfoKHR; + +typedef struct VkCopyMemoryToImageIndirectCommandKHR { + VkDeviceAddress srcAddress; + uint32_t bufferRowLength; + uint32_t bufferImageHeight; + VkImageSubresourceLayers imageSubresource; + VkOffset3D imageOffset; + VkExtent3D imageExtent; +} VkCopyMemoryToImageIndirectCommandKHR; + +typedef struct VkCopyMemoryToImageIndirectInfoKHR { + VkStructureType sType; + const void* pNext; + VkAddressCopyFlagsKHR srcCopyFlags; + uint32_t copyCount; + VkStridedDeviceAddressRangeKHR copyAddressRange; + VkImage dstImage; + VkImageLayout dstImageLayout; + const VkImageSubresourceLayers* pImageSubresources; +} VkCopyMemoryToImageIndirectInfoKHR; + +typedef struct VkPhysicalDeviceCopyMemoryIndirectFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 indirectMemoryCopy; + VkBool32 indirectMemoryToImageCopy; +} VkPhysicalDeviceCopyMemoryIndirectFeaturesKHR; + +typedef struct VkPhysicalDeviceCopyMemoryIndirectPropertiesKHR { + VkStructureType sType; + void* pNext; + VkQueueFlags supportedQueues; +} VkPhysicalDeviceCopyMemoryIndirectPropertiesKHR; + +typedef void (VKAPI_PTR *PFN_vkCmdCopyMemoryIndirectKHR)(VkCommandBuffer commandBuffer, const VkCopyMemoryIndirectInfoKHR* pCopyMemoryIndirectInfo); +typedef void (VKAPI_PTR *PFN_vkCmdCopyMemoryToImageIndirectKHR)(VkCommandBuffer commandBuffer, const VkCopyMemoryToImageIndirectInfoKHR* pCopyMemoryToImageIndirectInfo); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdCopyMemoryIndirectKHR( + VkCommandBuffer commandBuffer, + const VkCopyMemoryIndirectInfoKHR* pCopyMemoryIndirectInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdCopyMemoryToImageIndirectKHR( + VkCommandBuffer commandBuffer, + const VkCopyMemoryToImageIndirectInfoKHR* pCopyMemoryToImageIndirectInfo); +#endif +#endif + + +// VK_KHR_video_encode_intra_refresh is a preprocessor guard. Do not pass it to API calls. 
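vkCmdCopyMemoryIndirectKHR above sources its copy parameters from GPU memory: copyAddressRange points at an array of VkCopyMemoryIndirectCommandKHR records. A hedged sketch for a single device-local copy; the exact VK_STRUCTURE_TYPE_* token is an assumption here:

#include <vulkan/vulkan.h>

/* Hedged sketch: one GPU-generated copy, command record at address `cmds`. */
static void run_indirect_copy(VkCommandBuffer cmd, VkDeviceAddress cmds)
{
    const VkCopyMemoryIndirectInfoKHR info = {
        .sType = VK_STRUCTURE_TYPE_COPY_MEMORY_INDIRECT_INFO_KHR, /* assumed token */
        .srcCopyFlags = VK_ADDRESS_COPY_DEVICE_LOCAL_BIT_KHR,
        .dstCopyFlags = VK_ADDRESS_COPY_DEVICE_LOCAL_BIT_KHR,
        .copyCount = 1,
        .copyAddressRange = {
            .address = cmds,
            .size = sizeof(VkCopyMemoryIndirectCommandKHR),
            .stride = sizeof(VkCopyMemoryIndirectCommandKHR),
        },
    };
    vkCmdCopyMemoryIndirectKHR(cmd, &info);
}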
+#define VK_KHR_video_encode_intra_refresh 1 +#define VK_KHR_VIDEO_ENCODE_INTRA_REFRESH_SPEC_VERSION 1 +#define VK_KHR_VIDEO_ENCODE_INTRA_REFRESH_EXTENSION_NAME "VK_KHR_video_encode_intra_refresh" + +typedef enum VkVideoEncodeIntraRefreshModeFlagBitsKHR { + VK_VIDEO_ENCODE_INTRA_REFRESH_MODE_NONE_KHR = 0, + VK_VIDEO_ENCODE_INTRA_REFRESH_MODE_PER_PICTURE_PARTITION_BIT_KHR = 0x00000001, + VK_VIDEO_ENCODE_INTRA_REFRESH_MODE_BLOCK_BASED_BIT_KHR = 0x00000002, + VK_VIDEO_ENCODE_INTRA_REFRESH_MODE_BLOCK_ROW_BASED_BIT_KHR = 0x00000004, + VK_VIDEO_ENCODE_INTRA_REFRESH_MODE_BLOCK_COLUMN_BASED_BIT_KHR = 0x00000008, + VK_VIDEO_ENCODE_INTRA_REFRESH_MODE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkVideoEncodeIntraRefreshModeFlagBitsKHR; +typedef VkFlags VkVideoEncodeIntraRefreshModeFlagsKHR; +typedef struct VkVideoEncodeIntraRefreshCapabilitiesKHR { + VkStructureType sType; + void* pNext; + VkVideoEncodeIntraRefreshModeFlagsKHR intraRefreshModes; + uint32_t maxIntraRefreshCycleDuration; + uint32_t maxIntraRefreshActiveReferencePictures; + VkBool32 partitionIndependentIntraRefreshRegions; + VkBool32 nonRectangularIntraRefreshRegions; +} VkVideoEncodeIntraRefreshCapabilitiesKHR; + +typedef struct VkVideoEncodeSessionIntraRefreshCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkVideoEncodeIntraRefreshModeFlagBitsKHR intraRefreshMode; +} VkVideoEncodeSessionIntraRefreshCreateInfoKHR; + +typedef struct VkVideoEncodeIntraRefreshInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t intraRefreshCycleDuration; + uint32_t intraRefreshIndex; +} VkVideoEncodeIntraRefreshInfoKHR; + +typedef struct VkVideoReferenceIntraRefreshInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t dirtyIntraRefreshRegions; +} VkVideoReferenceIntraRefreshInfoKHR; + +typedef struct VkPhysicalDeviceVideoEncodeIntraRefreshFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 videoEncodeIntraRefresh; +} VkPhysicalDeviceVideoEncodeIntraRefreshFeaturesKHR; + + + +// VK_KHR_video_encode_quantization_map is a preprocessor guard. Do not pass it to API calls. 
+#define VK_KHR_video_encode_quantization_map 1 +#define VK_KHR_VIDEO_ENCODE_QUANTIZATION_MAP_SPEC_VERSION 2 +#define VK_KHR_VIDEO_ENCODE_QUANTIZATION_MAP_EXTENSION_NAME "VK_KHR_video_encode_quantization_map" +typedef struct VkVideoEncodeQuantizationMapCapabilitiesKHR { + VkStructureType sType; + void* pNext; + VkExtent2D maxQuantizationMapExtent; +} VkVideoEncodeQuantizationMapCapabilitiesKHR; + +typedef struct VkVideoFormatQuantizationMapPropertiesKHR { + VkStructureType sType; + void* pNext; + VkExtent2D quantizationMapTexelSize; +} VkVideoFormatQuantizationMapPropertiesKHR; + +typedef struct VkVideoEncodeQuantizationMapInfoKHR { + VkStructureType sType; + const void* pNext; + VkImageView quantizationMap; + VkExtent2D quantizationMapExtent; +} VkVideoEncodeQuantizationMapInfoKHR; + +typedef struct VkVideoEncodeQuantizationMapSessionParametersCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkExtent2D quantizationMapTexelSize; +} VkVideoEncodeQuantizationMapSessionParametersCreateInfoKHR; + +typedef struct VkPhysicalDeviceVideoEncodeQuantizationMapFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 videoEncodeQuantizationMap; +} VkPhysicalDeviceVideoEncodeQuantizationMapFeaturesKHR; + +typedef struct VkVideoEncodeH264QuantizationMapCapabilitiesKHR { + VkStructureType sType; + void* pNext; + int32_t minQpDelta; + int32_t maxQpDelta; +} VkVideoEncodeH264QuantizationMapCapabilitiesKHR; + +typedef struct VkVideoEncodeH265QuantizationMapCapabilitiesKHR { + VkStructureType sType; + void* pNext; + int32_t minQpDelta; + int32_t maxQpDelta; +} VkVideoEncodeH265QuantizationMapCapabilitiesKHR; + +typedef struct VkVideoFormatH265QuantizationMapPropertiesKHR { + VkStructureType sType; + void* pNext; + VkVideoEncodeH265CtbSizeFlagsKHR compatibleCtbSizes; +} VkVideoFormatH265QuantizationMapPropertiesKHR; + +typedef struct VkVideoEncodeAV1QuantizationMapCapabilitiesKHR { + VkStructureType sType; + void* pNext; + int32_t minQIndexDelta; + int32_t maxQIndexDelta; +} VkVideoEncodeAV1QuantizationMapCapabilitiesKHR; + +typedef struct VkVideoFormatAV1QuantizationMapPropertiesKHR { + VkStructureType sType; + void* pNext; + VkVideoEncodeAV1SuperblockSizeFlagsKHR compatibleSuperblockSizes; +} VkVideoFormatAV1QuantizationMapPropertiesKHR; + + + +// VK_KHR_shader_relaxed_extended_instruction is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_shader_relaxed_extended_instruction 1 +#define VK_KHR_SHADER_RELAXED_EXTENDED_INSTRUCTION_SPEC_VERSION 1 +#define VK_KHR_SHADER_RELAXED_EXTENDED_INSTRUCTION_EXTENSION_NAME "VK_KHR_shader_relaxed_extended_instruction" +typedef struct VkPhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 shaderRelaxedExtendedInstruction; +} VkPhysicalDeviceShaderRelaxedExtendedInstructionFeaturesKHR; + + + +// VK_KHR_maintenance7 is a preprocessor guard. Do not pass it to API calls. 
+#define VK_KHR_maintenance7 1 +#define VK_KHR_MAINTENANCE_7_SPEC_VERSION 1 +#define VK_KHR_MAINTENANCE_7_EXTENSION_NAME "VK_KHR_maintenance7" + +typedef enum VkPhysicalDeviceLayeredApiKHR { + VK_PHYSICAL_DEVICE_LAYERED_API_VULKAN_KHR = 0, + VK_PHYSICAL_DEVICE_LAYERED_API_D3D12_KHR = 1, + VK_PHYSICAL_DEVICE_LAYERED_API_METAL_KHR = 2, + VK_PHYSICAL_DEVICE_LAYERED_API_OPENGL_KHR = 3, + VK_PHYSICAL_DEVICE_LAYERED_API_OPENGLES_KHR = 4, + VK_PHYSICAL_DEVICE_LAYERED_API_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPhysicalDeviceLayeredApiKHR; +typedef struct VkPhysicalDeviceMaintenance7FeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 maintenance7; +} VkPhysicalDeviceMaintenance7FeaturesKHR; + +typedef struct VkPhysicalDeviceMaintenance7PropertiesKHR { + VkStructureType sType; + void* pNext; + VkBool32 robustFragmentShadingRateAttachmentAccess; + VkBool32 separateDepthStencilAttachmentAccess; + uint32_t maxDescriptorSetTotalUniformBuffersDynamic; + uint32_t maxDescriptorSetTotalStorageBuffersDynamic; + uint32_t maxDescriptorSetTotalBuffersDynamic; + uint32_t maxDescriptorSetUpdateAfterBindTotalUniformBuffersDynamic; + uint32_t maxDescriptorSetUpdateAfterBindTotalStorageBuffersDynamic; + uint32_t maxDescriptorSetUpdateAfterBindTotalBuffersDynamic; +} VkPhysicalDeviceMaintenance7PropertiesKHR; + +typedef struct VkPhysicalDeviceLayeredApiPropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t vendorID; + uint32_t deviceID; + VkPhysicalDeviceLayeredApiKHR layeredAPI; + char deviceName[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE]; +} VkPhysicalDeviceLayeredApiPropertiesKHR; + +typedef struct VkPhysicalDeviceLayeredApiPropertiesListKHR { + VkStructureType sType; + void* pNext; + uint32_t layeredApiCount; + VkPhysicalDeviceLayeredApiPropertiesKHR* pLayeredApis; +} VkPhysicalDeviceLayeredApiPropertiesListKHR; + +typedef struct VkPhysicalDeviceLayeredApiVulkanPropertiesKHR { + VkStructureType sType; + void* pNext; + VkPhysicalDeviceProperties2 properties; +} VkPhysicalDeviceLayeredApiVulkanPropertiesKHR; + + + +// VK_KHR_maintenance8 is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_maintenance8 1 +#define VK_KHR_MAINTENANCE_8_SPEC_VERSION 1 +#define VK_KHR_MAINTENANCE_8_EXTENSION_NAME "VK_KHR_maintenance8" +typedef VkFlags64 VkAccessFlags3KHR; + +// Flag bits for VkAccessFlagBits3KHR +typedef VkFlags64 VkAccessFlagBits3KHR; +static const VkAccessFlagBits3KHR VK_ACCESS_3_NONE_KHR = 0ULL; + +typedef struct VkMemoryBarrierAccessFlags3KHR { + VkStructureType sType; + const void* pNext; + VkAccessFlags3KHR srcAccessMask3; + VkAccessFlags3KHR dstAccessMask3; +} VkMemoryBarrierAccessFlags3KHR; + +typedef struct VkPhysicalDeviceMaintenance8FeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 maintenance8; +} VkPhysicalDeviceMaintenance8FeaturesKHR; + + + +// VK_KHR_shader_fma is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_shader_fma 1 +#define VK_KHR_SHADER_FMA_SPEC_VERSION 1 +#define VK_KHR_SHADER_FMA_EXTENSION_NAME "VK_KHR_shader_fma" +typedef struct VkPhysicalDeviceShaderFmaFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 shaderFmaFloat16; + VkBool32 shaderFmaFloat32; + VkBool32 shaderFmaFloat64; +} VkPhysicalDeviceShaderFmaFeaturesKHR; + + + +// VK_KHR_maintenance9 is a preprocessor guard. Do not pass it to API calls. 
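VkPhysicalDeviceLayeredApiPropertiesListKHR above is a pNext-chained list query: the first vkGetPhysicalDeviceProperties2 call fills layeredApiCount, and each entry's sType must be set before the second call fills the array. A sketch:

#include <stdlib.h>
#include <vulkan/vulkan.h>

/* Sketch: is this driver layered on D3D12, Metal, etc.? (maintenance7) */
static uint32_t count_layered_apis(VkPhysicalDevice phys)
{
    VkPhysicalDeviceLayeredApiPropertiesListKHR list = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LAYERED_API_PROPERTIES_LIST_KHR,
    };
    VkPhysicalDeviceProperties2 props = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
        .pNext = &list,
    };
    vkGetPhysicalDeviceProperties2(phys, &props);   /* fills layeredApiCount */
    list.pLayeredApis = calloc(list.layeredApiCount, sizeof *list.pLayeredApis);
    for (uint32_t i = 0; i < list.layeredApiCount; ++i)
        list.pLayeredApis[i].sType =
            VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LAYERED_API_PROPERTIES_KHR;
    vkGetPhysicalDeviceProperties2(phys, &props);   /* fills the entries */
    uint32_t n = list.layeredApiCount;
    free(list.pLayeredApis);
    return n;
}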
+#define VK_KHR_maintenance9 1 +#define VK_KHR_MAINTENANCE_9_SPEC_VERSION 1 +#define VK_KHR_MAINTENANCE_9_EXTENSION_NAME "VK_KHR_maintenance9" + +typedef enum VkDefaultVertexAttributeValueKHR { + VK_DEFAULT_VERTEX_ATTRIBUTE_VALUE_ZERO_ZERO_ZERO_ZERO_KHR = 0, + VK_DEFAULT_VERTEX_ATTRIBUTE_VALUE_ZERO_ZERO_ZERO_ONE_KHR = 1, + VK_DEFAULT_VERTEX_ATTRIBUTE_VALUE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkDefaultVertexAttributeValueKHR; +typedef struct VkPhysicalDeviceMaintenance9FeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 maintenance9; +} VkPhysicalDeviceMaintenance9FeaturesKHR; + +typedef struct VkPhysicalDeviceMaintenance9PropertiesKHR { + VkStructureType sType; + void* pNext; + VkBool32 image2DViewOf3DSparse; + VkDefaultVertexAttributeValueKHR defaultVertexAttributeValue; +} VkPhysicalDeviceMaintenance9PropertiesKHR; + +typedef struct VkQueueFamilyOwnershipTransferPropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t optimalImageTransferToQueueFamilies; +} VkQueueFamilyOwnershipTransferPropertiesKHR; + + + +// VK_KHR_video_maintenance2 is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_video_maintenance2 1 +#define VK_KHR_VIDEO_MAINTENANCE_2_SPEC_VERSION 1 +#define VK_KHR_VIDEO_MAINTENANCE_2_EXTENSION_NAME "VK_KHR_video_maintenance2" +typedef struct VkPhysicalDeviceVideoMaintenance2FeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 videoMaintenance2; +} VkPhysicalDeviceVideoMaintenance2FeaturesKHR; + +typedef struct VkVideoDecodeH264InlineSessionParametersInfoKHR { + VkStructureType sType; + const void* pNext; + const StdVideoH264SequenceParameterSet* pStdSPS; + const StdVideoH264PictureParameterSet* pStdPPS; +} VkVideoDecodeH264InlineSessionParametersInfoKHR; + +typedef struct VkVideoDecodeH265InlineSessionParametersInfoKHR { + VkStructureType sType; + const void* pNext; + const StdVideoH265VideoParameterSet* pStdVPS; + const StdVideoH265SequenceParameterSet* pStdSPS; + const StdVideoH265PictureParameterSet* pStdPPS; +} VkVideoDecodeH265InlineSessionParametersInfoKHR; + +typedef struct VkVideoDecodeAV1InlineSessionParametersInfoKHR { + VkStructureType sType; + const void* pNext; + const StdVideoAV1SequenceHeader* pStdSequenceHeader; +} VkVideoDecodeAV1InlineSessionParametersInfoKHR; + + + +// VK_KHR_depth_clamp_zero_one is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_depth_clamp_zero_one 1 +#define VK_KHR_DEPTH_CLAMP_ZERO_ONE_SPEC_VERSION 1 +#define VK_KHR_DEPTH_CLAMP_ZERO_ONE_EXTENSION_NAME "VK_KHR_depth_clamp_zero_one" +typedef struct VkPhysicalDeviceDepthClampZeroOneFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 depthClampZeroOne; +} VkPhysicalDeviceDepthClampZeroOneFeaturesKHR; + + + +// VK_KHR_robustness2 is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_robustness2 1 +#define VK_KHR_ROBUSTNESS_2_SPEC_VERSION 1 +#define VK_KHR_ROBUSTNESS_2_EXTENSION_NAME "VK_KHR_robustness2" +typedef struct VkPhysicalDeviceRobustness2FeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 robustBufferAccess2; + VkBool32 robustImageAccess2; + VkBool32 nullDescriptor; +} VkPhysicalDeviceRobustness2FeaturesKHR; + +typedef struct VkPhysicalDeviceRobustness2PropertiesKHR { + VkStructureType sType; + void* pNext; + VkDeviceSize robustStorageBufferAccessSizeAlignment; + VkDeviceSize robustUniformBufferAccessSizeAlignment; +} VkPhysicalDeviceRobustness2PropertiesKHR; + + + +// VK_KHR_present_mode_fifo_latest_ready is a preprocessor guard. Do not pass it to API calls. 
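A query sketch for the promoted robustness2 features above; with nullDescriptor enabled, VK_NULL_HANDLE may be bound in a descriptor set and reads through it return zeros:

#include <vulkan/vulkan.h>

/* Sketch: probe robustness2 before enabling it at device creation. */
static VkBool32 supports_null_descriptor(VkPhysicalDevice phys)
{
    VkPhysicalDeviceRobustness2FeaturesKHR rob2 = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_KHR,
    };
    VkPhysicalDeviceFeatures2 feats = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
        .pNext = &rob2,
    };
    vkGetPhysicalDeviceFeatures2(phys, &feats);
    return rob2.nullDescriptor;
}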
+#define VK_KHR_present_mode_fifo_latest_ready 1 +#define VK_KHR_PRESENT_MODE_FIFO_LATEST_READY_SPEC_VERSION 1 +#define VK_KHR_PRESENT_MODE_FIFO_LATEST_READY_EXTENSION_NAME "VK_KHR_present_mode_fifo_latest_ready" +typedef struct VkPhysicalDevicePresentModeFifoLatestReadyFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 presentModeFifoLatestReady; +} VkPhysicalDevicePresentModeFifoLatestReadyFeaturesKHR; + + + +// VK_KHR_maintenance10 is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_maintenance10 1 +#define VK_KHR_MAINTENANCE_10_SPEC_VERSION 1 +#define VK_KHR_MAINTENANCE_10_EXTENSION_NAME "VK_KHR_maintenance10" + +typedef enum VkRenderingAttachmentFlagBitsKHR { + VK_RENDERING_ATTACHMENT_INPUT_ATTACHMENT_FEEDBACK_BIT_KHR = 0x00000001, + VK_RENDERING_ATTACHMENT_RESOLVE_SKIP_TRANSFER_FUNCTION_BIT_KHR = 0x00000002, + VK_RENDERING_ATTACHMENT_RESOLVE_ENABLE_TRANSFER_FUNCTION_BIT_KHR = 0x00000004, + VK_RENDERING_ATTACHMENT_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkRenderingAttachmentFlagBitsKHR; +typedef VkFlags VkRenderingAttachmentFlagsKHR; + +typedef enum VkResolveImageFlagBitsKHR { + VK_RESOLVE_IMAGE_SKIP_TRANSFER_FUNCTION_BIT_KHR = 0x00000001, + VK_RESOLVE_IMAGE_ENABLE_TRANSFER_FUNCTION_BIT_KHR = 0x00000002, + VK_RESOLVE_IMAGE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkResolveImageFlagBitsKHR; +typedef VkFlags VkResolveImageFlagsKHR; +typedef struct VkPhysicalDeviceMaintenance10FeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 maintenance10; +} VkPhysicalDeviceMaintenance10FeaturesKHR; + +typedef struct VkPhysicalDeviceMaintenance10PropertiesKHR { + VkStructureType sType; + void* pNext; + VkBool32 rgba4OpaqueBlackSwizzled; + VkBool32 resolveSrgbFormatAppliesTransferFunction; + VkBool32 resolveSrgbFormatSupportsTransferFunctionControl; +} VkPhysicalDeviceMaintenance10PropertiesKHR; + +typedef struct VkRenderingEndInfoKHR { + VkStructureType sType; + const void* pNext; +} VkRenderingEndInfoKHR; + +typedef struct VkRenderingAttachmentFlagsInfoKHR { + VkStructureType sType; + const void* pNext; + VkRenderingAttachmentFlagsKHR flags; +} VkRenderingAttachmentFlagsInfoKHR; + +typedef struct VkResolveImageModeInfoKHR { + VkStructureType sType; + const void* pNext; + VkResolveImageFlagsKHR flags; + VkResolveModeFlagBits resolveMode; + VkResolveModeFlagBits stencilResolveMode; +} VkResolveImageModeInfoKHR; + +typedef void (VKAPI_PTR *PFN_vkCmdEndRendering2KHR)(VkCommandBuffer commandBuffer, const VkRenderingEndInfoKHR* pRenderingEndInfo); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdEndRendering2KHR( + VkCommandBuffer commandBuffer, + const VkRenderingEndInfoKHR* pRenderingEndInfo); +#endif +#endif + + +// VK_EXT_debug_report is a preprocessor guard. Do not pass it to API calls. 
+#define VK_EXT_debug_report 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDebugReportCallbackEXT) +#define VK_EXT_DEBUG_REPORT_SPEC_VERSION 10 +#define VK_EXT_DEBUG_REPORT_EXTENSION_NAME "VK_EXT_debug_report" + +typedef enum VkDebugReportObjectTypeEXT { + VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT = 0, + VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT = 1, + VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT = 2, + VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT = 3, + VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT = 4, + VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT = 5, + VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT = 6, + VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT = 7, + VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT = 8, + VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT = 9, + VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT = 10, + VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT = 11, + VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT = 12, + VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT = 13, + VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT = 14, + VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT = 15, + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT = 16, + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT = 17, + VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT = 18, + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT = 19, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT = 20, + VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT = 21, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT = 22, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT = 23, + VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT = 24, + VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT = 25, + VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT = 26, + VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT = 27, + VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT = 28, + VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT = 29, + VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT = 30, + VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT = 33, + VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT = 1000156000, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT = 1000085000, + VK_DEBUG_REPORT_OBJECT_TYPE_CU_MODULE_NVX_EXT = 1000029000, + VK_DEBUG_REPORT_OBJECT_TYPE_CU_FUNCTION_NVX_EXT = 1000029001, + VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT = 1000150000, + VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT = 1000165000, + VK_DEBUG_REPORT_OBJECT_TYPE_CUDA_MODULE_NV_EXT = 1000307000, + VK_DEBUG_REPORT_OBJECT_TYPE_CUDA_FUNCTION_NV_EXT = 1000307001, + VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_COLLECTION_FUCHSIA_EXT = 1000366000, + // VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT is a legacy alias + VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT, + // VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT is a legacy alias + VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDebugReportObjectTypeEXT; + +typedef enum VkDebugReportFlagBitsEXT { + VK_DEBUG_REPORT_INFORMATION_BIT_EXT = 0x00000001, + VK_DEBUG_REPORT_WARNING_BIT_EXT = 0x00000002, + VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT = 0x00000004, + VK_DEBUG_REPORT_ERROR_BIT_EXT = 0x00000008, + VK_DEBUG_REPORT_DEBUG_BIT_EXT = 0x00000010, + 
VK_DEBUG_REPORT_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDebugReportFlagBitsEXT; +typedef VkFlags VkDebugReportFlagsEXT; +typedef VkBool32 (VKAPI_PTR *PFN_vkDebugReportCallbackEXT)( + VkDebugReportFlagsEXT flags, + VkDebugReportObjectTypeEXT objectType, + uint64_t object, + size_t location, + int32_t messageCode, + const char* pLayerPrefix, + const char* pMessage, + void* pUserData); + +typedef struct VkDebugReportCallbackCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkDebugReportFlagsEXT flags; + PFN_vkDebugReportCallbackEXT pfnCallback; + void* pUserData; +} VkDebugReportCallbackCreateInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateDebugReportCallbackEXT)(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugReportCallbackEXT* pCallback); +typedef void (VKAPI_PTR *PFN_vkDestroyDebugReportCallbackEXT)(VkInstance instance, VkDebugReportCallbackEXT callback, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkDebugReportMessageEXT)(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const char* pLayerPrefix, const char* pMessage); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT( + VkInstance instance, + const VkDebugReportCallbackCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDebugReportCallbackEXT* pCallback); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT( + VkInstance instance, + VkDebugReportCallbackEXT callback, + const VkAllocationCallbacks* pAllocator); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkDebugReportMessageEXT( + VkInstance instance, + VkDebugReportFlagsEXT flags, + VkDebugReportObjectTypeEXT objectType, + uint64_t object, + size_t location, + int32_t messageCode, + const char* pLayerPrefix, + const char* pMessage); +#endif +#endif + + +// VK_NV_glsl_shader is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_glsl_shader 1 +#define VK_NV_GLSL_SHADER_SPEC_VERSION 1 +#define VK_NV_GLSL_SHADER_EXTENSION_NAME "VK_NV_glsl_shader" + + +// VK_EXT_depth_range_unrestricted is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_depth_range_unrestricted 1 +#define VK_EXT_DEPTH_RANGE_UNRESTRICTED_SPEC_VERSION 1 +#define VK_EXT_DEPTH_RANGE_UNRESTRICTED_EXTENSION_NAME "VK_EXT_depth_range_unrestricted" + + +// VK_IMG_filter_cubic is a preprocessor guard. Do not pass it to API calls. +#define VK_IMG_filter_cubic 1 +#define VK_IMG_FILTER_CUBIC_SPEC_VERSION 1 +#define VK_IMG_FILTER_CUBIC_EXTENSION_NAME "VK_IMG_filter_cubic" + + +// VK_AMD_rasterization_order is a preprocessor guard. Do not pass it to API calls. +#define VK_AMD_rasterization_order 1 +#define VK_AMD_RASTERIZATION_ORDER_SPEC_VERSION 1 +#define VK_AMD_RASTERIZATION_ORDER_EXTENSION_NAME "VK_AMD_rasterization_order" + +typedef enum VkRasterizationOrderAMD { + VK_RASTERIZATION_ORDER_STRICT_AMD = 0, + VK_RASTERIZATION_ORDER_RELAXED_AMD = 1, + VK_RASTERIZATION_ORDER_MAX_ENUM_AMD = 0x7FFFFFFF +} VkRasterizationOrderAMD; +typedef struct VkPipelineRasterizationStateRasterizationOrderAMD { + VkStructureType sType; + const void* pNext; + VkRasterizationOrderAMD rasterizationOrder; +} VkPipelineRasterizationStateRasterizationOrderAMD; + + + +// VK_AMD_shader_trinary_minmax is a preprocessor guard. 
Do not pass it to API calls. +#define VK_AMD_shader_trinary_minmax 1 +#define VK_AMD_SHADER_TRINARY_MINMAX_SPEC_VERSION 1 +#define VK_AMD_SHADER_TRINARY_MINMAX_EXTENSION_NAME "VK_AMD_shader_trinary_minmax" + + +// VK_AMD_shader_explicit_vertex_parameter is a preprocessor guard. Do not pass it to API calls. +#define VK_AMD_shader_explicit_vertex_parameter 1 +#define VK_AMD_SHADER_EXPLICIT_VERTEX_PARAMETER_SPEC_VERSION 1 +#define VK_AMD_SHADER_EXPLICIT_VERTEX_PARAMETER_EXTENSION_NAME "VK_AMD_shader_explicit_vertex_parameter" + + +// VK_EXT_debug_marker is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_debug_marker 1 +#define VK_EXT_DEBUG_MARKER_SPEC_VERSION 4 +#define VK_EXT_DEBUG_MARKER_EXTENSION_NAME "VK_EXT_debug_marker" +typedef struct VkDebugMarkerObjectNameInfoEXT { + VkStructureType sType; + const void* pNext; + VkDebugReportObjectTypeEXT objectType; + uint64_t object; + const char* pObjectName; +} VkDebugMarkerObjectNameInfoEXT; + +typedef struct VkDebugMarkerObjectTagInfoEXT { + VkStructureType sType; + const void* pNext; + VkDebugReportObjectTypeEXT objectType; + uint64_t object; + uint64_t tagName; + size_t tagSize; + const void* pTag; +} VkDebugMarkerObjectTagInfoEXT; + +typedef struct VkDebugMarkerMarkerInfoEXT { + VkStructureType sType; + const void* pNext; + const char* pMarkerName; + float color[4]; +} VkDebugMarkerMarkerInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkDebugMarkerSetObjectTagEXT)(VkDevice device, const VkDebugMarkerObjectTagInfoEXT* pTagInfo); +typedef VkResult (VKAPI_PTR *PFN_vkDebugMarkerSetObjectNameEXT)(VkDevice device, const VkDebugMarkerObjectNameInfoEXT* pNameInfo); +typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerBeginEXT)(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo); +typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerEndEXT)(VkCommandBuffer commandBuffer); +typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerInsertEXT)(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkDebugMarkerSetObjectTagEXT( + VkDevice device, + const VkDebugMarkerObjectTagInfoEXT* pTagInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkDebugMarkerSetObjectNameEXT( + VkDevice device, + const VkDebugMarkerObjectNameInfoEXT* pNameInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerBeginEXT( + VkCommandBuffer commandBuffer, + const VkDebugMarkerMarkerInfoEXT* pMarkerInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerEndEXT( + VkCommandBuffer commandBuffer); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerInsertEXT( + VkCommandBuffer commandBuffer, + const VkDebugMarkerMarkerInfoEXT* pMarkerInfo); +#endif +#endif + + +// VK_AMD_gcn_shader is a preprocessor guard. Do not pass it to API calls. +#define VK_AMD_gcn_shader 1 +#define VK_AMD_GCN_SHADER_SPEC_VERSION 1 +#define VK_AMD_GCN_SHADER_EXTENSION_NAME "VK_AMD_gcn_shader" + + +// VK_NV_dedicated_allocation is a preprocessor guard. Do not pass it to API calls. 
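+/*
+   Usage sketch (editorial addition): requesting an NV dedicated allocation
+   with the structures declared below. The image must itself have been created
+   with VkDedicatedAllocationImageCreateInfoNV::dedicatedAllocation = VK_TRUE;
+   `device`, `image`, `memReq` and `memoryTypeIndex` are assumed to exist.
+
+       VkDedicatedAllocationMemoryAllocateInfoNV dedicated =
+           { VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV };
+       dedicated.image = image;
+
+       VkMemoryAllocateInfo alloc = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+       alloc.pNext = &dedicated;
+       alloc.allocationSize = memReq.size;
+       alloc.memoryTypeIndex = memoryTypeIndex;
+
+       VkDeviceMemory memory;
+       vkAllocateMemory(device, &alloc, NULL, &memory);
+*/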
+#define VK_NV_dedicated_allocation 1 +#define VK_NV_DEDICATED_ALLOCATION_SPEC_VERSION 1 +#define VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME "VK_NV_dedicated_allocation" +typedef struct VkDedicatedAllocationImageCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 dedicatedAllocation; +} VkDedicatedAllocationImageCreateInfoNV; + +typedef struct VkDedicatedAllocationBufferCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 dedicatedAllocation; +} VkDedicatedAllocationBufferCreateInfoNV; + +typedef struct VkDedicatedAllocationMemoryAllocateInfoNV { + VkStructureType sType; + const void* pNext; + VkImage image; + VkBuffer buffer; +} VkDedicatedAllocationMemoryAllocateInfoNV; + + + +// VK_EXT_transform_feedback is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_transform_feedback 1 +#define VK_EXT_TRANSFORM_FEEDBACK_SPEC_VERSION 1 +#define VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME "VK_EXT_transform_feedback" +typedef VkFlags VkPipelineRasterizationStateStreamCreateFlagsEXT; +typedef struct VkPhysicalDeviceTransformFeedbackFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 transformFeedback; + VkBool32 geometryStreams; +} VkPhysicalDeviceTransformFeedbackFeaturesEXT; + +typedef struct VkPhysicalDeviceTransformFeedbackPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxTransformFeedbackStreams; + uint32_t maxTransformFeedbackBuffers; + VkDeviceSize maxTransformFeedbackBufferSize; + uint32_t maxTransformFeedbackStreamDataSize; + uint32_t maxTransformFeedbackBufferDataSize; + uint32_t maxTransformFeedbackBufferDataStride; + VkBool32 transformFeedbackQueries; + VkBool32 transformFeedbackStreamsLinesTriangles; + VkBool32 transformFeedbackRasterizationStreamSelect; + VkBool32 transformFeedbackDraw; +} VkPhysicalDeviceTransformFeedbackPropertiesEXT; + +typedef struct VkPipelineRasterizationStateStreamCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPipelineRasterizationStateStreamCreateFlagsEXT flags; + uint32_t rasterizationStream; +} VkPipelineRasterizationStateStreamCreateInfoEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdBindTransformFeedbackBuffersEXT)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets, const VkDeviceSize* pSizes); +typedef void (VKAPI_PTR *PFN_vkCmdBeginTransformFeedbackEXT)(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VkBuffer* pCounterBuffers, const VkDeviceSize* pCounterBufferOffsets); +typedef void (VKAPI_PTR *PFN_vkCmdEndTransformFeedbackEXT)(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VkBuffer* pCounterBuffers, const VkDeviceSize* pCounterBufferOffsets); +typedef void (VKAPI_PTR *PFN_vkCmdBeginQueryIndexedEXT)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags, uint32_t index); +typedef void (VKAPI_PTR *PFN_vkCmdEndQueryIndexedEXT)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, uint32_t index); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectByteCountEXT)(VkCommandBuffer commandBuffer, uint32_t instanceCount, uint32_t firstInstance, VkBuffer counterBuffer, VkDeviceSize counterBufferOffset, uint32_t counterOffset, uint32_t vertexStride); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBindTransformFeedbackBuffersEXT( + VkCommandBuffer commandBuffer, + uint32_t firstBinding, + 
uint32_t bindingCount, + const VkBuffer* pBuffers, + const VkDeviceSize* pOffsets, + const VkDeviceSize* pSizes); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBeginTransformFeedbackEXT( + VkCommandBuffer commandBuffer, + uint32_t firstCounterBuffer, + uint32_t counterBufferCount, + const VkBuffer* pCounterBuffers, + const VkDeviceSize* pCounterBufferOffsets); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdEndTransformFeedbackEXT( + VkCommandBuffer commandBuffer, + uint32_t firstCounterBuffer, + uint32_t counterBufferCount, + const VkBuffer* pCounterBuffers, + const VkDeviceSize* pCounterBufferOffsets); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBeginQueryIndexedEXT( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t query, + VkQueryControlFlags flags, + uint32_t index); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdEndQueryIndexedEXT( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t query, + uint32_t index); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectByteCountEXT( + VkCommandBuffer commandBuffer, + uint32_t instanceCount, + uint32_t firstInstance, + VkBuffer counterBuffer, + VkDeviceSize counterBufferOffset, + uint32_t counterOffset, + uint32_t vertexStride); +#endif +#endif + + +// VK_NVX_binary_import is a preprocessor guard. Do not pass it to API calls. +#define VK_NVX_binary_import 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkCuModuleNVX) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkCuFunctionNVX) +#define VK_NVX_BINARY_IMPORT_SPEC_VERSION 2 +#define VK_NVX_BINARY_IMPORT_EXTENSION_NAME "VK_NVX_binary_import" +typedef struct VkCuModuleCreateInfoNVX { + VkStructureType sType; + const void* pNext; + size_t dataSize; + const void* pData; +} VkCuModuleCreateInfoNVX; + +typedef struct VkCuModuleTexturingModeCreateInfoNVX { + VkStructureType sType; + const void* pNext; + VkBool32 use64bitTexturing; +} VkCuModuleTexturingModeCreateInfoNVX; + +typedef struct VkCuFunctionCreateInfoNVX { + VkStructureType sType; + const void* pNext; + VkCuModuleNVX module; + const char* pName; +} VkCuFunctionCreateInfoNVX; + +typedef struct VkCuLaunchInfoNVX { + VkStructureType sType; + const void* pNext; + VkCuFunctionNVX function; + uint32_t gridDimX; + uint32_t gridDimY; + uint32_t gridDimZ; + uint32_t blockDimX; + uint32_t blockDimY; + uint32_t blockDimZ; + uint32_t sharedMemBytes; + size_t paramCount; + const void* const * pParams; + size_t extraCount; + const void* const * pExtras; +} VkCuLaunchInfoNVX; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateCuModuleNVX)(VkDevice device, const VkCuModuleCreateInfoNVX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCuModuleNVX* pModule); +typedef VkResult (VKAPI_PTR *PFN_vkCreateCuFunctionNVX)(VkDevice device, const VkCuFunctionCreateInfoNVX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCuFunctionNVX* pFunction); +typedef void (VKAPI_PTR *PFN_vkDestroyCuModuleNVX)(VkDevice device, VkCuModuleNVX module, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkDestroyCuFunctionNVX)(VkDevice device, VkCuFunctionNVX function, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkCmdCuLaunchKernelNVX)(VkCommandBuffer commandBuffer, const VkCuLaunchInfoNVX* pLaunchInfo); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateCuModuleNVX( + VkDevice device, + 
const VkCuModuleCreateInfoNVX* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkCuModuleNVX* pModule); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateCuFunctionNVX( + VkDevice device, + const VkCuFunctionCreateInfoNVX* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkCuFunctionNVX* pFunction); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkDestroyCuModuleNVX( + VkDevice device, + VkCuModuleNVX module, + const VkAllocationCallbacks* pAllocator); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkDestroyCuFunctionNVX( + VkDevice device, + VkCuFunctionNVX function, + const VkAllocationCallbacks* pAllocator); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdCuLaunchKernelNVX( + VkCommandBuffer commandBuffer, + const VkCuLaunchInfoNVX* pLaunchInfo); +#endif +#endif + + +// VK_NVX_image_view_handle is a preprocessor guard. Do not pass it to API calls. +#define VK_NVX_image_view_handle 1 +#define VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION 4 +#define VK_NVX_IMAGE_VIEW_HANDLE_EXTENSION_NAME "VK_NVX_image_view_handle" +typedef struct VkImageViewHandleInfoNVX { + VkStructureType sType; + const void* pNext; + VkImageView imageView; + VkDescriptorType descriptorType; + VkSampler sampler; +} VkImageViewHandleInfoNVX; + +typedef struct VkImageViewAddressPropertiesNVX { + VkStructureType sType; + void* pNext; + VkDeviceAddress deviceAddress; + VkDeviceSize size; +} VkImageViewAddressPropertiesNVX; + +typedef uint32_t (VKAPI_PTR *PFN_vkGetImageViewHandleNVX)(VkDevice device, const VkImageViewHandleInfoNVX* pInfo); +typedef uint64_t (VKAPI_PTR *PFN_vkGetImageViewHandle64NVX)(VkDevice device, const VkImageViewHandleInfoNVX* pInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetImageViewAddressNVX)(VkDevice device, VkImageView imageView, VkImageViewAddressPropertiesNVX* pProperties); +typedef uint64_t (VKAPI_PTR *PFN_vkGetDeviceCombinedImageSamplerIndexNVX)(VkDevice device, uint64_t imageViewIndex, uint64_t samplerIndex); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR uint32_t VKAPI_CALL vkGetImageViewHandleNVX( + VkDevice device, + const VkImageViewHandleInfoNVX* pInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR uint64_t VKAPI_CALL vkGetImageViewHandle64NVX( + VkDevice device, + const VkImageViewHandleInfoNVX* pInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetImageViewAddressNVX( + VkDevice device, + VkImageView imageView, + VkImageViewAddressPropertiesNVX* pProperties); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR uint64_t VKAPI_CALL vkGetDeviceCombinedImageSamplerIndexNVX( + VkDevice device, + uint64_t imageViewIndex, + uint64_t samplerIndex); +#endif +#endif + + +// VK_AMD_draw_indirect_count is a preprocessor guard. Do not pass it to API calls. 
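+/*
+   Usage sketch (editorial addition): a GPU-driven draw count with the AMD
+   entry point declared below. The function is not exported by the loader, so
+   it is fetched with vkGetDeviceProcAddr; the same functionality was later
+   promoted to core as vkCmdDrawIndexedIndirectCount in Vulkan 1.2.
+   `device`, `cmd`, `argsBuf`, `countBuf` and `maxDraws` are assumed.
+
+       PFN_vkCmdDrawIndexedIndirectCountAMD drawIndirectCount =
+           (PFN_vkCmdDrawIndexedIndirectCountAMD)
+               vkGetDeviceProcAddr(device, "vkCmdDrawIndexedIndirectCountAMD");
+       drawIndirectCount(cmd, argsBuf, 0, countBuf, 0, maxDraws,
+                         sizeof(VkDrawIndexedIndirectCommand));
+*/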
+#define VK_AMD_draw_indirect_count 1 +#define VK_AMD_DRAW_INDIRECT_COUNT_SPEC_VERSION 2 +#define VK_AMD_DRAW_INDIRECT_COUNT_EXTENSION_NAME "VK_AMD_draw_indirect_count" +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCountAMD)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCountAMD)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCountAMD( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCountAMD( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); +#endif +#endif + + +// VK_AMD_negative_viewport_height is a preprocessor guard. Do not pass it to API calls. +#define VK_AMD_negative_viewport_height 1 +#define VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_SPEC_VERSION 1 +#define VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_EXTENSION_NAME "VK_AMD_negative_viewport_height" + + +// VK_AMD_gpu_shader_half_float is a preprocessor guard. Do not pass it to API calls. +#define VK_AMD_gpu_shader_half_float 1 +#define VK_AMD_GPU_SHADER_HALF_FLOAT_SPEC_VERSION 2 +#define VK_AMD_GPU_SHADER_HALF_FLOAT_EXTENSION_NAME "VK_AMD_gpu_shader_half_float" + + +// VK_AMD_shader_ballot is a preprocessor guard. Do not pass it to API calls. +#define VK_AMD_shader_ballot 1 +#define VK_AMD_SHADER_BALLOT_SPEC_VERSION 1 +#define VK_AMD_SHADER_BALLOT_EXTENSION_NAME "VK_AMD_shader_ballot" + + +// VK_AMD_texture_gather_bias_lod is a preprocessor guard. Do not pass it to API calls. +#define VK_AMD_texture_gather_bias_lod 1 +#define VK_AMD_TEXTURE_GATHER_BIAS_LOD_SPEC_VERSION 1 +#define VK_AMD_TEXTURE_GATHER_BIAS_LOD_EXTENSION_NAME "VK_AMD_texture_gather_bias_lod" +typedef struct VkTextureLODGatherFormatPropertiesAMD { + VkStructureType sType; + void* pNext; + VkBool32 supportsTextureGatherLODBiasAMD; +} VkTextureLODGatherFormatPropertiesAMD; + + + +// VK_AMD_shader_info is a preprocessor guard. Do not pass it to API calls. 
+#define VK_AMD_shader_info 1 +#define VK_AMD_SHADER_INFO_SPEC_VERSION 1 +#define VK_AMD_SHADER_INFO_EXTENSION_NAME "VK_AMD_shader_info" + +typedef enum VkShaderInfoTypeAMD { + VK_SHADER_INFO_TYPE_STATISTICS_AMD = 0, + VK_SHADER_INFO_TYPE_BINARY_AMD = 1, + VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD = 2, + VK_SHADER_INFO_TYPE_MAX_ENUM_AMD = 0x7FFFFFFF +} VkShaderInfoTypeAMD; +typedef struct VkShaderResourceUsageAMD { + uint32_t numUsedVgprs; + uint32_t numUsedSgprs; + uint32_t ldsSizePerLocalWorkGroup; + size_t ldsUsageSizeInBytes; + size_t scratchMemUsageInBytes; +} VkShaderResourceUsageAMD; + +typedef struct VkShaderStatisticsInfoAMD { + VkShaderStageFlags shaderStageMask; + VkShaderResourceUsageAMD resourceUsage; + uint32_t numPhysicalVgprs; + uint32_t numPhysicalSgprs; + uint32_t numAvailableVgprs; + uint32_t numAvailableSgprs; + uint32_t computeWorkGroupSize[3]; +} VkShaderStatisticsInfoAMD; + +typedef VkResult (VKAPI_PTR *PFN_vkGetShaderInfoAMD)(VkDevice device, VkPipeline pipeline, VkShaderStageFlagBits shaderStage, VkShaderInfoTypeAMD infoType, size_t* pInfoSize, void* pInfo); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetShaderInfoAMD( + VkDevice device, + VkPipeline pipeline, + VkShaderStageFlagBits shaderStage, + VkShaderInfoTypeAMD infoType, + size_t* pInfoSize, + void* pInfo); +#endif +#endif + + +// VK_AMD_shader_image_load_store_lod is a preprocessor guard. Do not pass it to API calls. +#define VK_AMD_shader_image_load_store_lod 1 +#define VK_AMD_SHADER_IMAGE_LOAD_STORE_LOD_SPEC_VERSION 1 +#define VK_AMD_SHADER_IMAGE_LOAD_STORE_LOD_EXTENSION_NAME "VK_AMD_shader_image_load_store_lod" + + +// VK_NV_corner_sampled_image is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_corner_sampled_image 1 +#define VK_NV_CORNER_SAMPLED_IMAGE_SPEC_VERSION 2 +#define VK_NV_CORNER_SAMPLED_IMAGE_EXTENSION_NAME "VK_NV_corner_sampled_image" +typedef struct VkPhysicalDeviceCornerSampledImageFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 cornerSampledImage; +} VkPhysicalDeviceCornerSampledImageFeaturesNV; + + + +// VK_IMG_format_pvrtc is a preprocessor guard. Do not pass it to API calls. +#define VK_IMG_format_pvrtc 1 +#define VK_IMG_FORMAT_PVRTC_SPEC_VERSION 1 +#define VK_IMG_FORMAT_PVRTC_EXTENSION_NAME "VK_IMG_format_pvrtc" + + +// VK_NV_external_memory_capabilities is a preprocessor guard. Do not pass it to API calls. 
+#define VK_NV_external_memory_capabilities 1 +#define VK_NV_EXTERNAL_MEMORY_CAPABILITIES_SPEC_VERSION 1 +#define VK_NV_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME "VK_NV_external_memory_capabilities" + +typedef enum VkExternalMemoryHandleTypeFlagBitsNV { + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_NV = 0x00000001, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_NV = 0x00000002, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_BIT_NV = 0x00000004, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_KMT_BIT_NV = 0x00000008, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkExternalMemoryHandleTypeFlagBitsNV; +typedef VkFlags VkExternalMemoryHandleTypeFlagsNV; + +typedef enum VkExternalMemoryFeatureFlagBitsNV { + VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_NV = 0x00000001, + VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_NV = 0x00000002, + VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_NV = 0x00000004, + VK_EXTERNAL_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkExternalMemoryFeatureFlagBitsNV; +typedef VkFlags VkExternalMemoryFeatureFlagsNV; +typedef struct VkExternalImageFormatPropertiesNV { + VkImageFormatProperties imageFormatProperties; + VkExternalMemoryFeatureFlagsNV externalMemoryFeatures; + VkExternalMemoryHandleTypeFlagsNV exportFromImportedHandleTypes; + VkExternalMemoryHandleTypeFlagsNV compatibleHandleTypes; +} VkExternalImageFormatPropertiesNV; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkExternalMemoryHandleTypeFlagsNV externalHandleType, VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceExternalImageFormatPropertiesNV( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkImageType type, + VkImageTiling tiling, + VkImageUsageFlags usage, + VkImageCreateFlags flags, + VkExternalMemoryHandleTypeFlagsNV externalHandleType, + VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties); +#endif +#endif + + +// VK_NV_external_memory is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_external_memory 1 +#define VK_NV_EXTERNAL_MEMORY_SPEC_VERSION 1 +#define VK_NV_EXTERNAL_MEMORY_EXTENSION_NAME "VK_NV_external_memory" +typedef struct VkExternalMemoryImageCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagsNV handleTypes; +} VkExternalMemoryImageCreateInfoNV; + +typedef struct VkExportMemoryAllocateInfoNV { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagsNV handleTypes; +} VkExportMemoryAllocateInfoNV; + + + +// VK_EXT_validation_flags is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_validation_flags 1 +#define VK_EXT_VALIDATION_FLAGS_SPEC_VERSION 3 +#define VK_EXT_VALIDATION_FLAGS_EXTENSION_NAME "VK_EXT_validation_flags" + +typedef enum VkValidationCheckEXT { + VK_VALIDATION_CHECK_ALL_EXT = 0, + VK_VALIDATION_CHECK_SHADERS_EXT = 1, + VK_VALIDATION_CHECK_MAX_ENUM_EXT = 0x7FFFFFFF +} VkValidationCheckEXT; +typedef struct VkValidationFlagsEXT { + VkStructureType sType; + const void* pNext; + uint32_t disabledValidationCheckCount; + const VkValidationCheckEXT* pDisabledValidationChecks; +} VkValidationFlagsEXT; + + + +// VK_EXT_shader_subgroup_ballot is a preprocessor guard. Do not pass it to API calls. 
+#define VK_EXT_shader_subgroup_ballot 1 +#define VK_EXT_SHADER_SUBGROUP_BALLOT_SPEC_VERSION 1 +#define VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME "VK_EXT_shader_subgroup_ballot" + + +// VK_EXT_shader_subgroup_vote is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_shader_subgroup_vote 1 +#define VK_EXT_SHADER_SUBGROUP_VOTE_SPEC_VERSION 1 +#define VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME "VK_EXT_shader_subgroup_vote" + + +// VK_EXT_texture_compression_astc_hdr is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_texture_compression_astc_hdr 1 +#define VK_EXT_TEXTURE_COMPRESSION_ASTC_HDR_SPEC_VERSION 1 +#define VK_EXT_TEXTURE_COMPRESSION_ASTC_HDR_EXTENSION_NAME "VK_EXT_texture_compression_astc_hdr" +typedef VkPhysicalDeviceTextureCompressionASTCHDRFeatures VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT; + + + +// VK_EXT_astc_decode_mode is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_astc_decode_mode 1 +#define VK_EXT_ASTC_DECODE_MODE_SPEC_VERSION 1 +#define VK_EXT_ASTC_DECODE_MODE_EXTENSION_NAME "VK_EXT_astc_decode_mode" +typedef struct VkImageViewASTCDecodeModeEXT { + VkStructureType sType; + const void* pNext; + VkFormat decodeMode; +} VkImageViewASTCDecodeModeEXT; + +typedef struct VkPhysicalDeviceASTCDecodeFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 decodeModeSharedExponent; +} VkPhysicalDeviceASTCDecodeFeaturesEXT; + + + +// VK_EXT_pipeline_robustness is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_pipeline_robustness 1 +#define VK_EXT_PIPELINE_ROBUSTNESS_SPEC_VERSION 1 +#define VK_EXT_PIPELINE_ROBUSTNESS_EXTENSION_NAME "VK_EXT_pipeline_robustness" +typedef VkPipelineRobustnessBufferBehavior VkPipelineRobustnessBufferBehaviorEXT; + +typedef VkPipelineRobustnessImageBehavior VkPipelineRobustnessImageBehaviorEXT; + +typedef VkPhysicalDevicePipelineRobustnessFeatures VkPhysicalDevicePipelineRobustnessFeaturesEXT; + +typedef VkPhysicalDevicePipelineRobustnessProperties VkPhysicalDevicePipelineRobustnessPropertiesEXT; + +typedef VkPipelineRobustnessCreateInfo VkPipelineRobustnessCreateInfoEXT; + + + +// VK_EXT_conditional_rendering is a preprocessor guard. Do not pass it to API calls. 
+#define VK_EXT_conditional_rendering 1 +#define VK_EXT_CONDITIONAL_RENDERING_SPEC_VERSION 2 +#define VK_EXT_CONDITIONAL_RENDERING_EXTENSION_NAME "VK_EXT_conditional_rendering" + +typedef enum VkConditionalRenderingFlagBitsEXT { + VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT = 0x00000001, + VK_CONDITIONAL_RENDERING_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkConditionalRenderingFlagBitsEXT; +typedef VkFlags VkConditionalRenderingFlagsEXT; +typedef struct VkConditionalRenderingBeginInfoEXT { + VkStructureType sType; + const void* pNext; + VkBuffer buffer; + VkDeviceSize offset; + VkConditionalRenderingFlagsEXT flags; +} VkConditionalRenderingBeginInfoEXT; + +typedef struct VkPhysicalDeviceConditionalRenderingFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 conditionalRendering; + VkBool32 inheritedConditionalRendering; +} VkPhysicalDeviceConditionalRenderingFeaturesEXT; + +typedef struct VkCommandBufferInheritanceConditionalRenderingInfoEXT { + VkStructureType sType; + const void* pNext; + VkBool32 conditionalRenderingEnable; +} VkCommandBufferInheritanceConditionalRenderingInfoEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdBeginConditionalRenderingEXT)(VkCommandBuffer commandBuffer, const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin); +typedef void (VKAPI_PTR *PFN_vkCmdEndConditionalRenderingEXT)(VkCommandBuffer commandBuffer); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBeginConditionalRenderingEXT( + VkCommandBuffer commandBuffer, + const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdEndConditionalRenderingEXT( + VkCommandBuffer commandBuffer); +#endif +#endif + + +// VK_NV_clip_space_w_scaling is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_clip_space_w_scaling 1 +#define VK_NV_CLIP_SPACE_W_SCALING_SPEC_VERSION 1 +#define VK_NV_CLIP_SPACE_W_SCALING_EXTENSION_NAME "VK_NV_clip_space_w_scaling" +typedef struct VkViewportWScalingNV { + float xcoeff; + float ycoeff; +} VkViewportWScalingNV; + +typedef struct VkPipelineViewportWScalingStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 viewportWScalingEnable; + uint32_t viewportCount; + const VkViewportWScalingNV* pViewportWScalings; +} VkPipelineViewportWScalingStateCreateInfoNV; + +typedef void (VKAPI_PTR *PFN_vkCmdSetViewportWScalingNV)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewportWScalingNV* pViewportWScalings); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetViewportWScalingNV( + VkCommandBuffer commandBuffer, + uint32_t firstViewport, + uint32_t viewportCount, + const VkViewportWScalingNV* pViewportWScalings); +#endif +#endif + + +// VK_EXT_direct_mode_display is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_direct_mode_display 1 +#define VK_EXT_DIRECT_MODE_DISPLAY_SPEC_VERSION 1 +#define VK_EXT_DIRECT_MODE_DISPLAY_EXTENSION_NAME "VK_EXT_direct_mode_display" +typedef VkResult (VKAPI_PTR *PFN_vkReleaseDisplayEXT)(VkPhysicalDevice physicalDevice, VkDisplayKHR display); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkReleaseDisplayEXT( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display); +#endif +#endif + + +// VK_EXT_display_surface_counter is a preprocessor guard. Do not pass it to API calls. 
+#define VK_EXT_display_surface_counter 1 +#define VK_EXT_DISPLAY_SURFACE_COUNTER_SPEC_VERSION 1 +#define VK_EXT_DISPLAY_SURFACE_COUNTER_EXTENSION_NAME "VK_EXT_display_surface_counter" + +typedef enum VkSurfaceCounterFlagBitsEXT { + VK_SURFACE_COUNTER_VBLANK_BIT_EXT = 0x00000001, + // VK_SURFACE_COUNTER_VBLANK_EXT is a legacy alias + VK_SURFACE_COUNTER_VBLANK_EXT = VK_SURFACE_COUNTER_VBLANK_BIT_EXT, + VK_SURFACE_COUNTER_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkSurfaceCounterFlagBitsEXT; +typedef VkFlags VkSurfaceCounterFlagsEXT; +typedef struct VkSurfaceCapabilities2EXT { + VkStructureType sType; + void* pNext; + uint32_t minImageCount; + uint32_t maxImageCount; + VkExtent2D currentExtent; + VkExtent2D minImageExtent; + VkExtent2D maxImageExtent; + uint32_t maxImageArrayLayers; + VkSurfaceTransformFlagsKHR supportedTransforms; + VkSurfaceTransformFlagBitsKHR currentTransform; + VkCompositeAlphaFlagsKHR supportedCompositeAlpha; + VkImageUsageFlags supportedUsageFlags; + VkSurfaceCounterFlagsEXT supportedSurfaceCounters; +} VkSurfaceCapabilities2EXT; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilities2EXT* pSurfaceCapabilities); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilities2EXT( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + VkSurfaceCapabilities2EXT* pSurfaceCapabilities); +#endif +#endif + + +// VK_EXT_display_control is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_display_control 1 +#define VK_EXT_DISPLAY_CONTROL_SPEC_VERSION 1 +#define VK_EXT_DISPLAY_CONTROL_EXTENSION_NAME "VK_EXT_display_control" + +typedef enum VkDisplayPowerStateEXT { + VK_DISPLAY_POWER_STATE_OFF_EXT = 0, + VK_DISPLAY_POWER_STATE_SUSPEND_EXT = 1, + VK_DISPLAY_POWER_STATE_ON_EXT = 2, + VK_DISPLAY_POWER_STATE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDisplayPowerStateEXT; + +typedef enum VkDeviceEventTypeEXT { + VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT = 0, + VK_DEVICE_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDeviceEventTypeEXT; + +typedef enum VkDisplayEventTypeEXT { + VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT = 0, + VK_DISPLAY_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDisplayEventTypeEXT; +typedef struct VkDisplayPowerInfoEXT { + VkStructureType sType; + const void* pNext; + VkDisplayPowerStateEXT powerState; +} VkDisplayPowerInfoEXT; + +typedef struct VkDeviceEventInfoEXT { + VkStructureType sType; + const void* pNext; + VkDeviceEventTypeEXT deviceEvent; +} VkDeviceEventInfoEXT; + +typedef struct VkDisplayEventInfoEXT { + VkStructureType sType; + const void* pNext; + VkDisplayEventTypeEXT displayEvent; +} VkDisplayEventInfoEXT; + +typedef struct VkSwapchainCounterCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkSurfaceCounterFlagsEXT surfaceCounters; +} VkSwapchainCounterCreateInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkDisplayPowerControlEXT)(VkDevice device, VkDisplayKHR display, const VkDisplayPowerInfoEXT* pDisplayPowerInfo); +typedef VkResult (VKAPI_PTR *PFN_vkRegisterDeviceEventEXT)(VkDevice device, const VkDeviceEventInfoEXT* pDeviceEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence); +typedef VkResult (VKAPI_PTR *PFN_vkRegisterDisplayEventEXT)(VkDevice device, VkDisplayKHR display, const VkDisplayEventInfoEXT* pDisplayEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence); +typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainCounterEXT)(VkDevice 
device, VkSwapchainKHR swapchain, VkSurfaceCounterFlagBitsEXT counter, uint64_t* pCounterValue); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkDisplayPowerControlEXT( + VkDevice device, + VkDisplayKHR display, + const VkDisplayPowerInfoEXT* pDisplayPowerInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkRegisterDeviceEventEXT( + VkDevice device, + const VkDeviceEventInfoEXT* pDeviceEventInfo, + const VkAllocationCallbacks* pAllocator, + VkFence* pFence); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkRegisterDisplayEventEXT( + VkDevice device, + VkDisplayKHR display, + const VkDisplayEventInfoEXT* pDisplayEventInfo, + const VkAllocationCallbacks* pAllocator, + VkFence* pFence); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainCounterEXT( + VkDevice device, + VkSwapchainKHR swapchain, + VkSurfaceCounterFlagBitsEXT counter, + uint64_t* pCounterValue); +#endif +#endif + + +// VK_GOOGLE_display_timing is a preprocessor guard. Do not pass it to API calls. +#define VK_GOOGLE_display_timing 1 +#define VK_GOOGLE_DISPLAY_TIMING_SPEC_VERSION 1 +#define VK_GOOGLE_DISPLAY_TIMING_EXTENSION_NAME "VK_GOOGLE_display_timing" +typedef struct VkRefreshCycleDurationGOOGLE { + uint64_t refreshDuration; +} VkRefreshCycleDurationGOOGLE; + +typedef struct VkPastPresentationTimingGOOGLE { + uint32_t presentID; + uint64_t desiredPresentTime; + uint64_t actualPresentTime; + uint64_t earliestPresentTime; + uint64_t presentMargin; +} VkPastPresentationTimingGOOGLE; + +typedef struct VkPresentTimeGOOGLE { + uint32_t presentID; + uint64_t desiredPresentTime; +} VkPresentTimeGOOGLE; + +typedef struct VkPresentTimesInfoGOOGLE { + VkStructureType sType; + const void* pNext; + uint32_t swapchainCount; + const VkPresentTimeGOOGLE* pTimes; +} VkPresentTimesInfoGOOGLE; + +typedef VkResult (VKAPI_PTR *PFN_vkGetRefreshCycleDurationGOOGLE)(VkDevice device, VkSwapchainKHR swapchain, VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPastPresentationTimingGOOGLE)(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pPresentationTimingCount, VkPastPresentationTimingGOOGLE* pPresentationTimings); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetRefreshCycleDurationGOOGLE( + VkDevice device, + VkSwapchainKHR swapchain, + VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPastPresentationTimingGOOGLE( + VkDevice device, + VkSwapchainKHR swapchain, + uint32_t* pPresentationTimingCount, + VkPastPresentationTimingGOOGLE* pPresentationTimings); +#endif +#endif + + +// VK_NV_sample_mask_override_coverage is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_sample_mask_override_coverage 1 +#define VK_NV_SAMPLE_MASK_OVERRIDE_COVERAGE_SPEC_VERSION 1 +#define VK_NV_SAMPLE_MASK_OVERRIDE_COVERAGE_EXTENSION_NAME "VK_NV_sample_mask_override_coverage" + + +// VK_NV_geometry_shader_passthrough is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_geometry_shader_passthrough 1 +#define VK_NV_GEOMETRY_SHADER_PASSTHROUGH_SPEC_VERSION 1 +#define VK_NV_GEOMETRY_SHADER_PASSTHROUGH_EXTENSION_NAME "VK_NV_geometry_shader_passthrough" + + +// VK_NV_viewport_array2 is a preprocessor guard. Do not pass it to API calls. 
+#define VK_NV_viewport_array2 1 +#define VK_NV_VIEWPORT_ARRAY_2_SPEC_VERSION 1 +#define VK_NV_VIEWPORT_ARRAY_2_EXTENSION_NAME "VK_NV_viewport_array2" +// VK_NV_VIEWPORT_ARRAY2_SPEC_VERSION is a legacy alias +#define VK_NV_VIEWPORT_ARRAY2_SPEC_VERSION VK_NV_VIEWPORT_ARRAY_2_SPEC_VERSION +// VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME is a legacy alias +#define VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME VK_NV_VIEWPORT_ARRAY_2_EXTENSION_NAME + + +// VK_NVX_multiview_per_view_attributes is a preprocessor guard. Do not pass it to API calls. +#define VK_NVX_multiview_per_view_attributes 1 +#define VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_SPEC_VERSION 1 +#define VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_EXTENSION_NAME "VK_NVX_multiview_per_view_attributes" +typedef struct VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX { + VkStructureType sType; + void* pNext; + VkBool32 perViewPositionAllComponents; +} VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX; + +typedef struct VkMultiviewPerViewAttributesInfoNVX { + VkStructureType sType; + const void* pNext; + VkBool32 perViewAttributes; + VkBool32 perViewAttributesPositionXOnly; +} VkMultiviewPerViewAttributesInfoNVX; + + + +// VK_NV_viewport_swizzle is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_viewport_swizzle 1 +#define VK_NV_VIEWPORT_SWIZZLE_SPEC_VERSION 1 +#define VK_NV_VIEWPORT_SWIZZLE_EXTENSION_NAME "VK_NV_viewport_swizzle" + +typedef enum VkViewportCoordinateSwizzleNV { + VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV = 0, + VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_X_NV = 1, + VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Y_NV = 2, + VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Y_NV = 3, + VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Z_NV = 4, + VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Z_NV = 5, + VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_W_NV = 6, + VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV = 7, + VK_VIEWPORT_COORDINATE_SWIZZLE_MAX_ENUM_NV = 0x7FFFFFFF +} VkViewportCoordinateSwizzleNV; +typedef VkFlags VkPipelineViewportSwizzleStateCreateFlagsNV; +typedef struct VkViewportSwizzleNV { + VkViewportCoordinateSwizzleNV x; + VkViewportCoordinateSwizzleNV y; + VkViewportCoordinateSwizzleNV z; + VkViewportCoordinateSwizzleNV w; +} VkViewportSwizzleNV; + +typedef struct VkPipelineViewportSwizzleStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineViewportSwizzleStateCreateFlagsNV flags; + uint32_t viewportCount; + const VkViewportSwizzleNV* pViewportSwizzles; +} VkPipelineViewportSwizzleStateCreateInfoNV; + + + +// VK_EXT_discard_rectangles is a preprocessor guard. Do not pass it to API calls. 
+#define VK_EXT_discard_rectangles 1 +#define VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION 2 +#define VK_EXT_DISCARD_RECTANGLES_EXTENSION_NAME "VK_EXT_discard_rectangles" + +typedef enum VkDiscardRectangleModeEXT { + VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT = 0, + VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT = 1, + VK_DISCARD_RECTANGLE_MODE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDiscardRectangleModeEXT; +typedef VkFlags VkPipelineDiscardRectangleStateCreateFlagsEXT; +typedef struct VkPhysicalDeviceDiscardRectanglePropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxDiscardRectangles; +} VkPhysicalDeviceDiscardRectanglePropertiesEXT; + +typedef struct VkPipelineDiscardRectangleStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPipelineDiscardRectangleStateCreateFlagsEXT flags; + VkDiscardRectangleModeEXT discardRectangleMode; + uint32_t discardRectangleCount; + const VkRect2D* pDiscardRectangles; +} VkPipelineDiscardRectangleStateCreateInfoEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdSetDiscardRectangleEXT)(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const VkRect2D* pDiscardRectangles); +typedef void (VKAPI_PTR *PFN_vkCmdSetDiscardRectangleEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 discardRectangleEnable); +typedef void (VKAPI_PTR *PFN_vkCmdSetDiscardRectangleModeEXT)(VkCommandBuffer commandBuffer, VkDiscardRectangleModeEXT discardRectangleMode); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetDiscardRectangleEXT( + VkCommandBuffer commandBuffer, + uint32_t firstDiscardRectangle, + uint32_t discardRectangleCount, + const VkRect2D* pDiscardRectangles); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetDiscardRectangleEnableEXT( + VkCommandBuffer commandBuffer, + VkBool32 discardRectangleEnable); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetDiscardRectangleModeEXT( + VkCommandBuffer commandBuffer, + VkDiscardRectangleModeEXT discardRectangleMode); +#endif +#endif + + +// VK_EXT_conservative_rasterization is a preprocessor guard. Do not pass it to API calls. 
+#define VK_EXT_conservative_rasterization 1 +#define VK_EXT_CONSERVATIVE_RASTERIZATION_SPEC_VERSION 1 +#define VK_EXT_CONSERVATIVE_RASTERIZATION_EXTENSION_NAME "VK_EXT_conservative_rasterization" + +typedef enum VkConservativeRasterizationModeEXT { + VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT = 0, + VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT = 1, + VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT = 2, + VK_CONSERVATIVE_RASTERIZATION_MODE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkConservativeRasterizationModeEXT; +typedef VkFlags VkPipelineRasterizationConservativeStateCreateFlagsEXT; +typedef struct VkPhysicalDeviceConservativeRasterizationPropertiesEXT { + VkStructureType sType; + void* pNext; + float primitiveOverestimationSize; + float maxExtraPrimitiveOverestimationSize; + float extraPrimitiveOverestimationSizeGranularity; + VkBool32 primitiveUnderestimation; + VkBool32 conservativePointAndLineRasterization; + VkBool32 degenerateTrianglesRasterized; + VkBool32 degenerateLinesRasterized; + VkBool32 fullyCoveredFragmentShaderInputVariable; + VkBool32 conservativeRasterizationPostDepthCoverage; +} VkPhysicalDeviceConservativeRasterizationPropertiesEXT; + +typedef struct VkPipelineRasterizationConservativeStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPipelineRasterizationConservativeStateCreateFlagsEXT flags; + VkConservativeRasterizationModeEXT conservativeRasterizationMode; + float extraPrimitiveOverestimationSize; +} VkPipelineRasterizationConservativeStateCreateInfoEXT; + + + +// VK_EXT_depth_clip_enable is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_depth_clip_enable 1 +#define VK_EXT_DEPTH_CLIP_ENABLE_SPEC_VERSION 1 +#define VK_EXT_DEPTH_CLIP_ENABLE_EXTENSION_NAME "VK_EXT_depth_clip_enable" +typedef VkFlags VkPipelineRasterizationDepthClipStateCreateFlagsEXT; +typedef struct VkPhysicalDeviceDepthClipEnableFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 depthClipEnable; +} VkPhysicalDeviceDepthClipEnableFeaturesEXT; + +typedef struct VkPipelineRasterizationDepthClipStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPipelineRasterizationDepthClipStateCreateFlagsEXT flags; + VkBool32 depthClipEnable; +} VkPipelineRasterizationDepthClipStateCreateInfoEXT; + + + +// VK_EXT_swapchain_colorspace is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_swapchain_colorspace 1 +#define VK_EXT_SWAPCHAIN_COLOR_SPACE_SPEC_VERSION 5 +#define VK_EXT_SWAPCHAIN_COLOR_SPACE_EXTENSION_NAME "VK_EXT_swapchain_colorspace" + + +// VK_EXT_hdr_metadata is a preprocessor guard. Do not pass it to API calls. 
+#define VK_EXT_hdr_metadata 1 +#define VK_EXT_HDR_METADATA_SPEC_VERSION 3 +#define VK_EXT_HDR_METADATA_EXTENSION_NAME "VK_EXT_hdr_metadata" +typedef struct VkXYColorEXT { + float x; + float y; +} VkXYColorEXT; + +typedef struct VkHdrMetadataEXT { + VkStructureType sType; + const void* pNext; + VkXYColorEXT displayPrimaryRed; + VkXYColorEXT displayPrimaryGreen; + VkXYColorEXT displayPrimaryBlue; + VkXYColorEXT whitePoint; + float maxLuminance; + float minLuminance; + float maxContentLightLevel; + float maxFrameAverageLightLevel; +} VkHdrMetadataEXT; + +typedef void (VKAPI_PTR *PFN_vkSetHdrMetadataEXT)(VkDevice device, uint32_t swapchainCount, const VkSwapchainKHR* pSwapchains, const VkHdrMetadataEXT* pMetadata); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkSetHdrMetadataEXT( + VkDevice device, + uint32_t swapchainCount, + const VkSwapchainKHR* pSwapchains, + const VkHdrMetadataEXT* pMetadata); +#endif +#endif + + +// VK_IMG_relaxed_line_rasterization is a preprocessor guard. Do not pass it to API calls. +#define VK_IMG_relaxed_line_rasterization 1 +#define VK_IMG_RELAXED_LINE_RASTERIZATION_SPEC_VERSION 1 +#define VK_IMG_RELAXED_LINE_RASTERIZATION_EXTENSION_NAME "VK_IMG_relaxed_line_rasterization" +typedef struct VkPhysicalDeviceRelaxedLineRasterizationFeaturesIMG { + VkStructureType sType; + void* pNext; + VkBool32 relaxedLineRasterization; +} VkPhysicalDeviceRelaxedLineRasterizationFeaturesIMG; + + + +// VK_EXT_external_memory_dma_buf is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_external_memory_dma_buf 1 +#define VK_EXT_EXTERNAL_MEMORY_DMA_BUF_SPEC_VERSION 1 +#define VK_EXT_EXTERNAL_MEMORY_DMA_BUF_EXTENSION_NAME "VK_EXT_external_memory_dma_buf" + + +// VK_EXT_queue_family_foreign is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_queue_family_foreign 1 +#define VK_EXT_QUEUE_FAMILY_FOREIGN_SPEC_VERSION 1 +#define VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME "VK_EXT_queue_family_foreign" +#define VK_QUEUE_FAMILY_FOREIGN_EXT (~2U) + + +// VK_EXT_debug_utils is a preprocessor guard. Do not pass it to API calls. 
+#define VK_EXT_debug_utils 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDebugUtilsMessengerEXT) +#define VK_EXT_DEBUG_UTILS_SPEC_VERSION 2 +#define VK_EXT_DEBUG_UTILS_EXTENSION_NAME "VK_EXT_debug_utils" +typedef VkFlags VkDebugUtilsMessengerCallbackDataFlagsEXT; + +typedef enum VkDebugUtilsMessageSeverityFlagBitsEXT { + VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT = 0x00000001, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT = 0x00000010, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT = 0x00000100, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT = 0x00001000, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDebugUtilsMessageSeverityFlagBitsEXT; + +typedef enum VkDebugUtilsMessageTypeFlagBitsEXT { + VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT = 0x00000001, + VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT = 0x00000002, + VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT = 0x00000004, + VK_DEBUG_UTILS_MESSAGE_TYPE_DEVICE_ADDRESS_BINDING_BIT_EXT = 0x00000008, + VK_DEBUG_UTILS_MESSAGE_TYPE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDebugUtilsMessageTypeFlagBitsEXT; +typedef VkFlags VkDebugUtilsMessageTypeFlagsEXT; +typedef VkFlags VkDebugUtilsMessageSeverityFlagsEXT; +typedef VkFlags VkDebugUtilsMessengerCreateFlagsEXT; +typedef struct VkDebugUtilsLabelEXT { + VkStructureType sType; + const void* pNext; + const char* pLabelName; + float color[4]; +} VkDebugUtilsLabelEXT; + +typedef struct VkDebugUtilsObjectNameInfoEXT { + VkStructureType sType; + const void* pNext; + VkObjectType objectType; + uint64_t objectHandle; + const char* pObjectName; +} VkDebugUtilsObjectNameInfoEXT; + +typedef struct VkDebugUtilsMessengerCallbackDataEXT { + VkStructureType sType; + const void* pNext; + VkDebugUtilsMessengerCallbackDataFlagsEXT flags; + const char* pMessageIdName; + int32_t messageIdNumber; + const char* pMessage; + uint32_t queueLabelCount; + const VkDebugUtilsLabelEXT* pQueueLabels; + uint32_t cmdBufLabelCount; + const VkDebugUtilsLabelEXT* pCmdBufLabels; + uint32_t objectCount; + const VkDebugUtilsObjectNameInfoEXT* pObjects; +} VkDebugUtilsMessengerCallbackDataEXT; + +typedef VkBool32 (VKAPI_PTR *PFN_vkDebugUtilsMessengerCallbackEXT)( + VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VkDebugUtilsMessageTypeFlagsEXT messageTypes, + const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData, + void* pUserData); + +typedef struct VkDebugUtilsMessengerCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkDebugUtilsMessengerCreateFlagsEXT flags; + VkDebugUtilsMessageSeverityFlagsEXT messageSeverity; + VkDebugUtilsMessageTypeFlagsEXT messageType; + PFN_vkDebugUtilsMessengerCallbackEXT pfnUserCallback; + void* pUserData; +} VkDebugUtilsMessengerCreateInfoEXT; + +typedef struct VkDebugUtilsObjectTagInfoEXT { + VkStructureType sType; + const void* pNext; + VkObjectType objectType; + uint64_t objectHandle; + uint64_t tagName; + size_t tagSize; + const void* pTag; +} VkDebugUtilsObjectTagInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkSetDebugUtilsObjectNameEXT)(VkDevice device, const VkDebugUtilsObjectNameInfoEXT* pNameInfo); +typedef VkResult (VKAPI_PTR *PFN_vkSetDebugUtilsObjectTagEXT)(VkDevice device, const VkDebugUtilsObjectTagInfoEXT* pTagInfo); +typedef void (VKAPI_PTR *PFN_vkQueueBeginDebugUtilsLabelEXT)(VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo); +typedef void (VKAPI_PTR *PFN_vkQueueEndDebugUtilsLabelEXT)(VkQueue queue); +typedef void (VKAPI_PTR *PFN_vkQueueInsertDebugUtilsLabelEXT)(VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo); +typedef 
void (VKAPI_PTR *PFN_vkCmdBeginDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo); +typedef void (VKAPI_PTR *PFN_vkCmdEndDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer); +typedef void (VKAPI_PTR *PFN_vkCmdInsertDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDebugUtilsMessengerEXT)(VkInstance instance, const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugUtilsMessengerEXT* pMessenger); +typedef void (VKAPI_PTR *PFN_vkDestroyDebugUtilsMessengerEXT)(VkInstance instance, VkDebugUtilsMessengerEXT messenger, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkSubmitDebugUtilsMessageEXT)(VkInstance instance, VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageTypes, const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkSetDebugUtilsObjectNameEXT( + VkDevice device, + const VkDebugUtilsObjectNameInfoEXT* pNameInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkSetDebugUtilsObjectTagEXT( + VkDevice device, + const VkDebugUtilsObjectTagInfoEXT* pTagInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkQueueBeginDebugUtilsLabelEXT( + VkQueue queue, + const VkDebugUtilsLabelEXT* pLabelInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkQueueEndDebugUtilsLabelEXT( + VkQueue queue); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkQueueInsertDebugUtilsLabelEXT( + VkQueue queue, + const VkDebugUtilsLabelEXT* pLabelInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBeginDebugUtilsLabelEXT( + VkCommandBuffer commandBuffer, + const VkDebugUtilsLabelEXT* pLabelInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdEndDebugUtilsLabelEXT( + VkCommandBuffer commandBuffer); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdInsertDebugUtilsLabelEXT( + VkCommandBuffer commandBuffer, + const VkDebugUtilsLabelEXT* pLabelInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugUtilsMessengerEXT( + VkInstance instance, + const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDebugUtilsMessengerEXT* pMessenger); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkDestroyDebugUtilsMessengerEXT( + VkInstance instance, + VkDebugUtilsMessengerEXT messenger, + const VkAllocationCallbacks* pAllocator); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkSubmitDebugUtilsMessageEXT( + VkInstance instance, + VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VkDebugUtilsMessageTypeFlagsEXT messageTypes, + const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData); +#endif +#endif + + +// VK_EXT_sampler_filter_minmax is a preprocessor guard. Do not pass it to API calls. 
+#define VK_EXT_sampler_filter_minmax 1 +#define VK_EXT_SAMPLER_FILTER_MINMAX_SPEC_VERSION 2 +#define VK_EXT_SAMPLER_FILTER_MINMAX_EXTENSION_NAME "VK_EXT_sampler_filter_minmax" +typedef VkSamplerReductionMode VkSamplerReductionModeEXT; + +typedef VkSamplerReductionModeCreateInfo VkSamplerReductionModeCreateInfoEXT; + +typedef VkPhysicalDeviceSamplerFilterMinmaxProperties VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT; + + + +// VK_AMD_gpu_shader_int16 is a preprocessor guard. Do not pass it to API calls. +#define VK_AMD_gpu_shader_int16 1 +#define VK_AMD_GPU_SHADER_INT16_SPEC_VERSION 2 +#define VK_AMD_GPU_SHADER_INT16_EXTENSION_NAME "VK_AMD_gpu_shader_int16" + + +// VK_EXT_descriptor_heap is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_descriptor_heap 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkTensorARM) +#define VK_EXT_DESCRIPTOR_HEAP_SPEC_VERSION 1 +#define VK_EXT_DESCRIPTOR_HEAP_EXTENSION_NAME "VK_EXT_descriptor_heap" + +typedef enum VkDescriptorMappingSourceEXT { + VK_DESCRIPTOR_MAPPING_SOURCE_HEAP_WITH_CONSTANT_OFFSET_EXT = 0, + VK_DESCRIPTOR_MAPPING_SOURCE_HEAP_WITH_PUSH_INDEX_EXT = 1, + VK_DESCRIPTOR_MAPPING_SOURCE_HEAP_WITH_INDIRECT_INDEX_EXT = 2, + VK_DESCRIPTOR_MAPPING_SOURCE_HEAP_WITH_INDIRECT_INDEX_ARRAY_EXT = 3, + VK_DESCRIPTOR_MAPPING_SOURCE_RESOURCE_HEAP_DATA_EXT = 4, + VK_DESCRIPTOR_MAPPING_SOURCE_PUSH_DATA_EXT = 5, + VK_DESCRIPTOR_MAPPING_SOURCE_PUSH_ADDRESS_EXT = 6, + VK_DESCRIPTOR_MAPPING_SOURCE_INDIRECT_ADDRESS_EXT = 7, + VK_DESCRIPTOR_MAPPING_SOURCE_HEAP_WITH_SHADER_RECORD_INDEX_EXT = 8, + VK_DESCRIPTOR_MAPPING_SOURCE_SHADER_RECORD_DATA_EXT = 9, + VK_DESCRIPTOR_MAPPING_SOURCE_SHADER_RECORD_ADDRESS_EXT = 10, + VK_DESCRIPTOR_MAPPING_SOURCE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDescriptorMappingSourceEXT; +typedef VkFlags64 VkTensorViewCreateFlagsARM; + +// Flag bits for VkTensorViewCreateFlagBitsARM +typedef VkFlags64 VkTensorViewCreateFlagBitsARM; +static const VkTensorViewCreateFlagBitsARM VK_TENSOR_VIEW_CREATE_DESCRIPTOR_BUFFER_CAPTURE_REPLAY_BIT_ARM = 0x00000001ULL; + + +typedef enum VkSpirvResourceTypeFlagBitsEXT { + VK_SPIRV_RESOURCE_TYPE_ALL_EXT = 0x7FFFFFFF, + VK_SPIRV_RESOURCE_TYPE_SAMPLER_BIT_EXT = 0x00000001, + VK_SPIRV_RESOURCE_TYPE_SAMPLED_IMAGE_BIT_EXT = 0x00000002, + VK_SPIRV_RESOURCE_TYPE_READ_ONLY_IMAGE_BIT_EXT = 0x00000004, + VK_SPIRV_RESOURCE_TYPE_READ_WRITE_IMAGE_BIT_EXT = 0x00000008, + VK_SPIRV_RESOURCE_TYPE_COMBINED_SAMPLED_IMAGE_BIT_EXT = 0x00000010, + VK_SPIRV_RESOURCE_TYPE_UNIFORM_BUFFER_BIT_EXT = 0x00000020, + VK_SPIRV_RESOURCE_TYPE_READ_ONLY_STORAGE_BUFFER_BIT_EXT = 0x00000040, + VK_SPIRV_RESOURCE_TYPE_READ_WRITE_STORAGE_BUFFER_BIT_EXT = 0x00000080, + VK_SPIRV_RESOURCE_TYPE_ACCELERATION_STRUCTURE_BIT_EXT = 0x00000100, + VK_SPIRV_RESOURCE_TYPE_TENSOR_BIT_ARM = 0x00000200, + VK_SPIRV_RESOURCE_TYPE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkSpirvResourceTypeFlagBitsEXT; +typedef VkFlags VkSpirvResourceTypeFlagsEXT; +typedef struct VkHostAddressRangeEXT { + void* address; + size_t size; +} VkHostAddressRangeEXT; + +typedef struct VkHostAddressRangeConstEXT { + const void* address; + size_t size; +} VkHostAddressRangeConstEXT; + +typedef struct VkDeviceAddressRangeEXT { + VkDeviceAddress address; + VkDeviceSize size; +} VkDeviceAddressRangeEXT; + +typedef struct VkTexelBufferDescriptorInfoEXT { + VkStructureType sType; + const void* pNext; + VkFormat format; + VkDeviceAddressRangeEXT addressRange; +} VkTexelBufferDescriptorInfoEXT; + +typedef struct VkImageDescriptorInfoEXT { + VkStructureType sType; + const void* pNext; + const 
VkImageViewCreateInfo* pView; + VkImageLayout layout; +} VkImageDescriptorInfoEXT; + +typedef struct VkTensorViewCreateInfoARM { + VkStructureType sType; + const void* pNext; + VkTensorViewCreateFlagsARM flags; + VkTensorARM tensor; + VkFormat format; +} VkTensorViewCreateInfoARM; + +typedef union VkResourceDescriptorDataEXT { + const VkImageDescriptorInfoEXT* pImage; + const VkTexelBufferDescriptorInfoEXT* pTexelBuffer; + const VkDeviceAddressRangeEXT* pAddressRange; + const VkTensorViewCreateInfoARM* pTensorARM; +} VkResourceDescriptorDataEXT; + +typedef struct VkResourceDescriptorInfoEXT { + VkStructureType sType; + const void* pNext; + VkDescriptorType type; + VkResourceDescriptorDataEXT data; +} VkResourceDescriptorInfoEXT; + +typedef struct VkBindHeapInfoEXT { + VkStructureType sType; + const void* pNext; + VkDeviceAddressRangeEXT heapRange; + VkDeviceSize reservedRangeOffset; + VkDeviceSize reservedRangeSize; +} VkBindHeapInfoEXT; + +typedef struct VkPushDataInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t offset; + VkHostAddressRangeConstEXT data; +} VkPushDataInfoEXT; + +typedef struct VkDescriptorMappingSourceConstantOffsetEXT { + uint32_t heapOffset; + uint32_t heapArrayStride; + const VkSamplerCreateInfo* pEmbeddedSampler; + uint32_t samplerHeapOffset; + uint32_t samplerHeapArrayStride; +} VkDescriptorMappingSourceConstantOffsetEXT; + +typedef struct VkDescriptorMappingSourcePushIndexEXT { + uint32_t heapOffset; + uint32_t pushOffset; + uint32_t heapIndexStride; + uint32_t heapArrayStride; + const VkSamplerCreateInfo* pEmbeddedSampler; + VkBool32 useCombinedImageSamplerIndex; + uint32_t samplerHeapOffset; + uint32_t samplerPushOffset; + uint32_t samplerHeapIndexStride; + uint32_t samplerHeapArrayStride; +} VkDescriptorMappingSourcePushIndexEXT; + +typedef struct VkDescriptorMappingSourceIndirectIndexEXT { + uint32_t heapOffset; + uint32_t pushOffset; + uint32_t addressOffset; + uint32_t heapIndexStride; + uint32_t heapArrayStride; + const VkSamplerCreateInfo* pEmbeddedSampler; + VkBool32 useCombinedImageSamplerIndex; + uint32_t samplerHeapOffset; + uint32_t samplerPushOffset; + uint32_t samplerAddressOffset; + uint32_t samplerHeapIndexStride; + uint32_t samplerHeapArrayStride; +} VkDescriptorMappingSourceIndirectIndexEXT; + +typedef struct VkDescriptorMappingSourceHeapDataEXT { + uint32_t heapOffset; + uint32_t pushOffset; +} VkDescriptorMappingSourceHeapDataEXT; + +typedef struct VkDescriptorMappingSourceIndirectAddressEXT { + uint32_t pushOffset; + uint32_t addressOffset; +} VkDescriptorMappingSourceIndirectAddressEXT; + +typedef struct VkDescriptorMappingSourceShaderRecordIndexEXT { + uint32_t heapOffset; + uint32_t shaderRecordOffset; + uint32_t heapIndexStride; + uint32_t heapArrayStride; + const VkSamplerCreateInfo* pEmbeddedSampler; + VkBool32 useCombinedImageSamplerIndex; + uint32_t samplerHeapOffset; + uint32_t samplerShaderRecordOffset; + uint32_t samplerHeapIndexStride; + uint32_t samplerHeapArrayStride; +} VkDescriptorMappingSourceShaderRecordIndexEXT; + +typedef struct VkDescriptorMappingSourceIndirectIndexArrayEXT { + uint32_t heapOffset; + uint32_t pushOffset; + uint32_t addressOffset; + uint32_t heapIndexStride; + const VkSamplerCreateInfo* pEmbeddedSampler; + VkBool32 useCombinedImageSamplerIndex; + uint32_t samplerHeapOffset; + uint32_t samplerPushOffset; + uint32_t samplerAddressOffset; + uint32_t samplerHeapIndexStride; +} VkDescriptorMappingSourceIndirectIndexArrayEXT; + +typedef union VkDescriptorMappingSourceDataEXT { + 
VkDescriptorMappingSourceConstantOffsetEXT constantOffset; + VkDescriptorMappingSourcePushIndexEXT pushIndex; + VkDescriptorMappingSourceIndirectIndexEXT indirectIndex; + VkDescriptorMappingSourceIndirectIndexArrayEXT indirectIndexArray; + VkDescriptorMappingSourceHeapDataEXT heapData; + uint32_t pushDataOffset; + uint32_t pushAddressOffset; + VkDescriptorMappingSourceIndirectAddressEXT indirectAddress; + VkDescriptorMappingSourceShaderRecordIndexEXT shaderRecordIndex; + uint32_t shaderRecordDataOffset; + uint32_t shaderRecordAddressOffset; +} VkDescriptorMappingSourceDataEXT; + +typedef struct VkDescriptorSetAndBindingMappingEXT { + VkStructureType sType; + const void* pNext; + uint32_t descriptorSet; + uint32_t firstBinding; + uint32_t bindingCount; + VkSpirvResourceTypeFlagsEXT resourceMask; + VkDescriptorMappingSourceEXT source; + VkDescriptorMappingSourceDataEXT sourceData; +} VkDescriptorSetAndBindingMappingEXT; + +typedef struct VkShaderDescriptorSetAndBindingMappingInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t mappingCount; + const VkDescriptorSetAndBindingMappingEXT* pMappings; +} VkShaderDescriptorSetAndBindingMappingInfoEXT; + +typedef struct VkOpaqueCaptureDataCreateInfoEXT { + VkStructureType sType; + const void* pNext; + const VkHostAddressRangeConstEXT* pData; +} VkOpaqueCaptureDataCreateInfoEXT; + +typedef struct VkPhysicalDeviceDescriptorHeapFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 descriptorHeap; + VkBool32 descriptorHeapCaptureReplay; +} VkPhysicalDeviceDescriptorHeapFeaturesEXT; + +typedef struct VkPhysicalDeviceDescriptorHeapPropertiesEXT { + VkStructureType sType; + void* pNext; + VkDeviceSize samplerHeapAlignment; + VkDeviceSize resourceHeapAlignment; + VkDeviceSize maxSamplerHeapSize; + VkDeviceSize maxResourceHeapSize; + VkDeviceSize minSamplerHeapReservedRange; + VkDeviceSize minSamplerHeapReservedRangeWithEmbedded; + VkDeviceSize minResourceHeapReservedRange; + VkDeviceSize samplerDescriptorSize; + VkDeviceSize imageDescriptorSize; + VkDeviceSize bufferDescriptorSize; + VkDeviceSize samplerDescriptorAlignment; + VkDeviceSize imageDescriptorAlignment; + VkDeviceSize bufferDescriptorAlignment; + VkDeviceSize maxPushDataSize; + size_t imageCaptureReplayOpaqueDataSize; + uint32_t maxDescriptorHeapEmbeddedSamplers; + uint32_t samplerYcbcrConversionCount; + VkBool32 sparseDescriptorHeaps; + VkBool32 protectedDescriptorHeaps; +} VkPhysicalDeviceDescriptorHeapPropertiesEXT; + +typedef struct VkCommandBufferInheritanceDescriptorHeapInfoEXT { + VkStructureType sType; + const void* pNext; + const VkBindHeapInfoEXT* pSamplerHeapBindInfo; + const VkBindHeapInfoEXT* pResourceHeapBindInfo; +} VkCommandBufferInheritanceDescriptorHeapInfoEXT; + +typedef struct VkSamplerCustomBorderColorIndexCreateInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t index; +} VkSamplerCustomBorderColorIndexCreateInfoEXT; + +typedef struct VkSamplerCustomBorderColorCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkClearColorValue customBorderColor; + VkFormat format; +} VkSamplerCustomBorderColorCreateInfoEXT; + +typedef struct VkIndirectCommandsLayoutPushDataTokenNV { + VkStructureType sType; + const void* pNext; + uint32_t pushDataOffset; + uint32_t pushDataSize; +} VkIndirectCommandsLayoutPushDataTokenNV; + +typedef struct VkSubsampledImageFormatPropertiesEXT { + VkStructureType sType; + const void* pNext; + uint32_t subsampledImageDescriptorCount; +} VkSubsampledImageFormatPropertiesEXT; + +typedef struct 
VkPhysicalDeviceDescriptorHeapTensorPropertiesARM { + VkStructureType sType; + void* pNext; + VkDeviceSize tensorDescriptorSize; + VkDeviceSize tensorDescriptorAlignment; + size_t tensorCaptureReplayOpaqueDataSize; +} VkPhysicalDeviceDescriptorHeapTensorPropertiesARM; + +typedef VkResult (VKAPI_PTR *PFN_vkWriteSamplerDescriptorsEXT)(VkDevice device, uint32_t samplerCount, const VkSamplerCreateInfo* pSamplers, const VkHostAddressRangeEXT* pDescriptors); +typedef VkResult (VKAPI_PTR *PFN_vkWriteResourceDescriptorsEXT)(VkDevice device, uint32_t resourceCount, const VkResourceDescriptorInfoEXT* pResources, const VkHostAddressRangeEXT* pDescriptors); +typedef void (VKAPI_PTR *PFN_vkCmdBindSamplerHeapEXT)(VkCommandBuffer commandBuffer, const VkBindHeapInfoEXT* pBindInfo); +typedef void (VKAPI_PTR *PFN_vkCmdBindResourceHeapEXT)(VkCommandBuffer commandBuffer, const VkBindHeapInfoEXT* pBindInfo); +typedef void (VKAPI_PTR *PFN_vkCmdPushDataEXT)(VkCommandBuffer commandBuffer, const VkPushDataInfoEXT* pPushDataInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetImageOpaqueCaptureDataEXT)(VkDevice device, uint32_t imageCount, const VkImage* pImages, VkHostAddressRangeEXT* pDatas); +typedef VkDeviceSize (VKAPI_PTR *PFN_vkGetPhysicalDeviceDescriptorSizeEXT)(VkPhysicalDevice physicalDevice, VkDescriptorType descriptorType); +typedef VkResult (VKAPI_PTR *PFN_vkRegisterCustomBorderColorEXT)(VkDevice device, const VkSamplerCustomBorderColorCreateInfoEXT* pBorderColor, VkBool32 requestIndex, uint32_t* pIndex); +typedef void (VKAPI_PTR *PFN_vkUnregisterCustomBorderColorEXT)(VkDevice device, uint32_t index); +typedef VkResult (VKAPI_PTR *PFN_vkGetTensorOpaqueCaptureDataARM)(VkDevice device, uint32_t tensorCount, const VkTensorARM* pTensors, VkHostAddressRangeEXT* pDatas); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkWriteSamplerDescriptorsEXT( + VkDevice device, + uint32_t samplerCount, + const VkSamplerCreateInfo* pSamplers, + const VkHostAddressRangeEXT* pDescriptors); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkWriteResourceDescriptorsEXT( + VkDevice device, + uint32_t resourceCount, + const VkResourceDescriptorInfoEXT* pResources, + const VkHostAddressRangeEXT* pDescriptors); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBindSamplerHeapEXT( + VkCommandBuffer commandBuffer, + const VkBindHeapInfoEXT* pBindInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBindResourceHeapEXT( + VkCommandBuffer commandBuffer, + const VkBindHeapInfoEXT* pBindInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdPushDataEXT( + VkCommandBuffer commandBuffer, + const VkPushDataInfoEXT* pPushDataInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetImageOpaqueCaptureDataEXT( + VkDevice device, + uint32_t imageCount, + const VkImage* pImages, + VkHostAddressRangeEXT* pDatas); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkDeviceSize VKAPI_CALL vkGetPhysicalDeviceDescriptorSizeEXT( + VkPhysicalDevice physicalDevice, + VkDescriptorType descriptorType); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkRegisterCustomBorderColorEXT( + VkDevice device, + const VkSamplerCustomBorderColorCreateInfoEXT* pBorderColor, + VkBool32 requestIndex, + uint32_t* pIndex); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkUnregisterCustomBorderColorEXT( + 
VkDevice device, + uint32_t index); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetTensorOpaqueCaptureDataARM( + VkDevice device, + uint32_t tensorCount, + const VkTensorARM* pTensors, + VkHostAddressRangeEXT* pDatas); +#endif +#endif + + +// VK_AMD_mixed_attachment_samples is a preprocessor guard. Do not pass it to API calls. +#define VK_AMD_mixed_attachment_samples 1 +#define VK_AMD_MIXED_ATTACHMENT_SAMPLES_SPEC_VERSION 1 +#define VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME "VK_AMD_mixed_attachment_samples" +typedef struct VkAttachmentSampleCountInfoAMD { + VkStructureType sType; + const void* pNext; + uint32_t colorAttachmentCount; + const VkSampleCountFlagBits* pColorAttachmentSamples; + VkSampleCountFlagBits depthStencilAttachmentSamples; +} VkAttachmentSampleCountInfoAMD; + + + +// VK_AMD_shader_fragment_mask is a preprocessor guard. Do not pass it to API calls. +#define VK_AMD_shader_fragment_mask 1 +#define VK_AMD_SHADER_FRAGMENT_MASK_SPEC_VERSION 1 +#define VK_AMD_SHADER_FRAGMENT_MASK_EXTENSION_NAME "VK_AMD_shader_fragment_mask" + + +// VK_EXT_inline_uniform_block is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_inline_uniform_block 1 +#define VK_EXT_INLINE_UNIFORM_BLOCK_SPEC_VERSION 1 +#define VK_EXT_INLINE_UNIFORM_BLOCK_EXTENSION_NAME "VK_EXT_inline_uniform_block" +typedef VkPhysicalDeviceInlineUniformBlockFeatures VkPhysicalDeviceInlineUniformBlockFeaturesEXT; + +typedef VkPhysicalDeviceInlineUniformBlockProperties VkPhysicalDeviceInlineUniformBlockPropertiesEXT; + +typedef VkWriteDescriptorSetInlineUniformBlock VkWriteDescriptorSetInlineUniformBlockEXT; + +typedef VkDescriptorPoolInlineUniformBlockCreateInfo VkDescriptorPoolInlineUniformBlockCreateInfoEXT; + + + +// VK_EXT_shader_stencil_export is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_shader_stencil_export 1 +#define VK_EXT_SHADER_STENCIL_EXPORT_SPEC_VERSION 1 +#define VK_EXT_SHADER_STENCIL_EXPORT_EXTENSION_NAME "VK_EXT_shader_stencil_export" + + +// VK_EXT_sample_locations is a preprocessor guard. Do not pass it to API calls. 
+#define VK_EXT_sample_locations 1 +#define VK_EXT_SAMPLE_LOCATIONS_SPEC_VERSION 1 +#define VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME "VK_EXT_sample_locations" +typedef struct VkSampleLocationEXT { + float x; + float y; +} VkSampleLocationEXT; + +typedef struct VkSampleLocationsInfoEXT { + VkStructureType sType; + const void* pNext; + VkSampleCountFlagBits sampleLocationsPerPixel; + VkExtent2D sampleLocationGridSize; + uint32_t sampleLocationsCount; + const VkSampleLocationEXT* pSampleLocations; +} VkSampleLocationsInfoEXT; + +typedef struct VkAttachmentSampleLocationsEXT { + uint32_t attachmentIndex; + VkSampleLocationsInfoEXT sampleLocationsInfo; +} VkAttachmentSampleLocationsEXT; + +typedef struct VkSubpassSampleLocationsEXT { + uint32_t subpassIndex; + VkSampleLocationsInfoEXT sampleLocationsInfo; +} VkSubpassSampleLocationsEXT; + +typedef struct VkRenderPassSampleLocationsBeginInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t attachmentInitialSampleLocationsCount; + const VkAttachmentSampleLocationsEXT* pAttachmentInitialSampleLocations; + uint32_t postSubpassSampleLocationsCount; + const VkSubpassSampleLocationsEXT* pPostSubpassSampleLocations; +} VkRenderPassSampleLocationsBeginInfoEXT; + +typedef struct VkPipelineSampleLocationsStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkBool32 sampleLocationsEnable; + VkSampleLocationsInfoEXT sampleLocationsInfo; +} VkPipelineSampleLocationsStateCreateInfoEXT; + +typedef struct VkPhysicalDeviceSampleLocationsPropertiesEXT { + VkStructureType sType; + void* pNext; + VkSampleCountFlags sampleLocationSampleCounts; + VkExtent2D maxSampleLocationGridSize; + float sampleLocationCoordinateRange[2]; + uint32_t sampleLocationSubPixelBits; + VkBool32 variableSampleLocations; +} VkPhysicalDeviceSampleLocationsPropertiesEXT; + +typedef struct VkMultisamplePropertiesEXT { + VkStructureType sType; + void* pNext; + VkExtent2D maxSampleLocationGridSize; +} VkMultisamplePropertiesEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdSetSampleLocationsEXT)(VkCommandBuffer commandBuffer, const VkSampleLocationsInfoEXT* pSampleLocationsInfo); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT)(VkPhysicalDevice physicalDevice, VkSampleCountFlagBits samples, VkMultisamplePropertiesEXT* pMultisampleProperties); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetSampleLocationsEXT( + VkCommandBuffer commandBuffer, + const VkSampleLocationsInfoEXT* pSampleLocationsInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMultisamplePropertiesEXT( + VkPhysicalDevice physicalDevice, + VkSampleCountFlagBits samples, + VkMultisamplePropertiesEXT* pMultisampleProperties); +#endif +#endif + + +// VK_EXT_blend_operation_advanced is a preprocessor guard. Do not pass it to API calls. 
+#define VK_EXT_blend_operation_advanced 1 +#define VK_EXT_BLEND_OPERATION_ADVANCED_SPEC_VERSION 2 +#define VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME "VK_EXT_blend_operation_advanced" + +typedef enum VkBlendOverlapEXT { + VK_BLEND_OVERLAP_UNCORRELATED_EXT = 0, + VK_BLEND_OVERLAP_DISJOINT_EXT = 1, + VK_BLEND_OVERLAP_CONJOINT_EXT = 2, + VK_BLEND_OVERLAP_MAX_ENUM_EXT = 0x7FFFFFFF +} VkBlendOverlapEXT; +typedef struct VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 advancedBlendCoherentOperations; +} VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT; + +typedef struct VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t advancedBlendMaxColorAttachments; + VkBool32 advancedBlendIndependentBlend; + VkBool32 advancedBlendNonPremultipliedSrcColor; + VkBool32 advancedBlendNonPremultipliedDstColor; + VkBool32 advancedBlendCorrelatedOverlap; + VkBool32 advancedBlendAllOperations; +} VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT; + +typedef struct VkPipelineColorBlendAdvancedStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkBool32 srcPremultiplied; + VkBool32 dstPremultiplied; + VkBlendOverlapEXT blendOverlap; +} VkPipelineColorBlendAdvancedStateCreateInfoEXT; + + + +// VK_NV_fragment_coverage_to_color is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_fragment_coverage_to_color 1 +#define VK_NV_FRAGMENT_COVERAGE_TO_COLOR_SPEC_VERSION 1 +#define VK_NV_FRAGMENT_COVERAGE_TO_COLOR_EXTENSION_NAME "VK_NV_fragment_coverage_to_color" +typedef VkFlags VkPipelineCoverageToColorStateCreateFlagsNV; +typedef struct VkPipelineCoverageToColorStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineCoverageToColorStateCreateFlagsNV flags; + VkBool32 coverageToColorEnable; + uint32_t coverageToColorLocation; +} VkPipelineCoverageToColorStateCreateInfoNV; + + + +// VK_NV_framebuffer_mixed_samples is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_framebuffer_mixed_samples 1 +#define VK_NV_FRAMEBUFFER_MIXED_SAMPLES_SPEC_VERSION 1 +#define VK_NV_FRAMEBUFFER_MIXED_SAMPLES_EXTENSION_NAME "VK_NV_framebuffer_mixed_samples" + +typedef enum VkCoverageModulationModeNV { + VK_COVERAGE_MODULATION_MODE_NONE_NV = 0, + VK_COVERAGE_MODULATION_MODE_RGB_NV = 1, + VK_COVERAGE_MODULATION_MODE_ALPHA_NV = 2, + VK_COVERAGE_MODULATION_MODE_RGBA_NV = 3, + VK_COVERAGE_MODULATION_MODE_MAX_ENUM_NV = 0x7FFFFFFF +} VkCoverageModulationModeNV; +typedef VkFlags VkPipelineCoverageModulationStateCreateFlagsNV; +typedef struct VkPipelineCoverageModulationStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineCoverageModulationStateCreateFlagsNV flags; + VkCoverageModulationModeNV coverageModulationMode; + VkBool32 coverageModulationTableEnable; + uint32_t coverageModulationTableCount; + const float* pCoverageModulationTable; +} VkPipelineCoverageModulationStateCreateInfoNV; + +typedef VkAttachmentSampleCountInfoAMD VkAttachmentSampleCountInfoNV; + + + +// VK_NV_fill_rectangle is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_fill_rectangle 1 +#define VK_NV_FILL_RECTANGLE_SPEC_VERSION 1 +#define VK_NV_FILL_RECTANGLE_EXTENSION_NAME "VK_NV_fill_rectangle" + + +// VK_NV_shader_sm_builtins is a preprocessor guard. Do not pass it to API calls. 
+#define VK_NV_shader_sm_builtins 1 +#define VK_NV_SHADER_SM_BUILTINS_SPEC_VERSION 1 +#define VK_NV_SHADER_SM_BUILTINS_EXTENSION_NAME "VK_NV_shader_sm_builtins" +typedef struct VkPhysicalDeviceShaderSMBuiltinsPropertiesNV { + VkStructureType sType; + void* pNext; + uint32_t shaderSMCount; + uint32_t shaderWarpsPerSM; +} VkPhysicalDeviceShaderSMBuiltinsPropertiesNV; + +typedef struct VkPhysicalDeviceShaderSMBuiltinsFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 shaderSMBuiltins; +} VkPhysicalDeviceShaderSMBuiltinsFeaturesNV; + + + +// VK_EXT_post_depth_coverage is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_post_depth_coverage 1 +#define VK_EXT_POST_DEPTH_COVERAGE_SPEC_VERSION 1 +#define VK_EXT_POST_DEPTH_COVERAGE_EXTENSION_NAME "VK_EXT_post_depth_coverage" + + +// VK_EXT_image_drm_format_modifier is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_image_drm_format_modifier 1 +#define VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_SPEC_VERSION 2 +#define VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME "VK_EXT_image_drm_format_modifier" +typedef struct VkDrmFormatModifierPropertiesEXT { + uint64_t drmFormatModifier; + uint32_t drmFormatModifierPlaneCount; + VkFormatFeatureFlags drmFormatModifierTilingFeatures; +} VkDrmFormatModifierPropertiesEXT; + +typedef struct VkDrmFormatModifierPropertiesListEXT { + VkStructureType sType; + void* pNext; + uint32_t drmFormatModifierCount; + VkDrmFormatModifierPropertiesEXT* pDrmFormatModifierProperties; +} VkDrmFormatModifierPropertiesListEXT; + +typedef struct VkPhysicalDeviceImageDrmFormatModifierInfoEXT { + VkStructureType sType; + const void* pNext; + uint64_t drmFormatModifier; + VkSharingMode sharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; +} VkPhysicalDeviceImageDrmFormatModifierInfoEXT; + +typedef struct VkImageDrmFormatModifierListCreateInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t drmFormatModifierCount; + const uint64_t* pDrmFormatModifiers; +} VkImageDrmFormatModifierListCreateInfoEXT; + +typedef struct VkImageDrmFormatModifierExplicitCreateInfoEXT { + VkStructureType sType; + const void* pNext; + uint64_t drmFormatModifier; + uint32_t drmFormatModifierPlaneCount; + const VkSubresourceLayout* pPlaneLayouts; +} VkImageDrmFormatModifierExplicitCreateInfoEXT; + +typedef struct VkImageDrmFormatModifierPropertiesEXT { + VkStructureType sType; + void* pNext; + uint64_t drmFormatModifier; +} VkImageDrmFormatModifierPropertiesEXT; + +typedef struct VkDrmFormatModifierProperties2EXT { + uint64_t drmFormatModifier; + uint32_t drmFormatModifierPlaneCount; + VkFormatFeatureFlags2 drmFormatModifierTilingFeatures; +} VkDrmFormatModifierProperties2EXT; + +typedef struct VkDrmFormatModifierPropertiesList2EXT { + VkStructureType sType; + void* pNext; + uint32_t drmFormatModifierCount; + VkDrmFormatModifierProperties2EXT* pDrmFormatModifierProperties; +} VkDrmFormatModifierPropertiesList2EXT; + +typedef VkResult (VKAPI_PTR *PFN_vkGetImageDrmFormatModifierPropertiesEXT)(VkDevice device, VkImage image, VkImageDrmFormatModifierPropertiesEXT* pProperties); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetImageDrmFormatModifierPropertiesEXT( + VkDevice device, + VkImage image, + VkImageDrmFormatModifierPropertiesEXT* pProperties); +#endif +#endif + + +// VK_EXT_validation_cache is a preprocessor guard. Do not pass it to API calls. 
+#define VK_EXT_validation_cache 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkValidationCacheEXT) +#define VK_EXT_VALIDATION_CACHE_SPEC_VERSION 1 +#define VK_EXT_VALIDATION_CACHE_EXTENSION_NAME "VK_EXT_validation_cache" + +typedef enum VkValidationCacheHeaderVersionEXT { + VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT = 1, + VK_VALIDATION_CACHE_HEADER_VERSION_MAX_ENUM_EXT = 0x7FFFFFFF +} VkValidationCacheHeaderVersionEXT; +typedef VkFlags VkValidationCacheCreateFlagsEXT; +typedef struct VkValidationCacheCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkValidationCacheCreateFlagsEXT flags; + size_t initialDataSize; + const void* pInitialData; +} VkValidationCacheCreateInfoEXT; + +typedef struct VkShaderModuleValidationCacheCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkValidationCacheEXT validationCache; +} VkShaderModuleValidationCacheCreateInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateValidationCacheEXT)(VkDevice device, const VkValidationCacheCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkValidationCacheEXT* pValidationCache); +typedef void (VKAPI_PTR *PFN_vkDestroyValidationCacheEXT)(VkDevice device, VkValidationCacheEXT validationCache, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkMergeValidationCachesEXT)(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount, const VkValidationCacheEXT* pSrcCaches); +typedef VkResult (VKAPI_PTR *PFN_vkGetValidationCacheDataEXT)(VkDevice device, VkValidationCacheEXT validationCache, size_t* pDataSize, void* pData); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateValidationCacheEXT( + VkDevice device, + const VkValidationCacheCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkValidationCacheEXT* pValidationCache); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkDestroyValidationCacheEXT( + VkDevice device, + VkValidationCacheEXT validationCache, + const VkAllocationCallbacks* pAllocator); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkMergeValidationCachesEXT( + VkDevice device, + VkValidationCacheEXT dstCache, + uint32_t srcCacheCount, + const VkValidationCacheEXT* pSrcCaches); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetValidationCacheDataEXT( + VkDevice device, + VkValidationCacheEXT validationCache, + size_t* pDataSize, + void* pData); +#endif +#endif + + +// VK_EXT_descriptor_indexing is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_descriptor_indexing 1 +#define VK_EXT_DESCRIPTOR_INDEXING_SPEC_VERSION 2 +#define VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME "VK_EXT_descriptor_indexing" +typedef VkDescriptorBindingFlagBits VkDescriptorBindingFlagBitsEXT; + +typedef VkDescriptorBindingFlags VkDescriptorBindingFlagsEXT; + +typedef VkDescriptorSetLayoutBindingFlagsCreateInfo VkDescriptorSetLayoutBindingFlagsCreateInfoEXT; + +typedef VkPhysicalDeviceDescriptorIndexingFeatures VkPhysicalDeviceDescriptorIndexingFeaturesEXT; + +typedef VkPhysicalDeviceDescriptorIndexingProperties VkPhysicalDeviceDescriptorIndexingPropertiesEXT; + +typedef VkDescriptorSetVariableDescriptorCountAllocateInfo VkDescriptorSetVariableDescriptorCountAllocateInfoEXT; + +typedef VkDescriptorSetVariableDescriptorCountLayoutSupport VkDescriptorSetVariableDescriptorCountLayoutSupportEXT; + + + +// VK_EXT_shader_viewport_index_layer is a preprocessor guard. 
Do not pass it to API calls. +#define VK_EXT_shader_viewport_index_layer 1 +#define VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_SPEC_VERSION 1 +#define VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME "VK_EXT_shader_viewport_index_layer" + + +// VK_NV_shading_rate_image is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_shading_rate_image 1 +#define VK_NV_SHADING_RATE_IMAGE_SPEC_VERSION 3 +#define VK_NV_SHADING_RATE_IMAGE_EXTENSION_NAME "VK_NV_shading_rate_image" + +typedef enum VkShadingRatePaletteEntryNV { + VK_SHADING_RATE_PALETTE_ENTRY_NO_INVOCATIONS_NV = 0, + VK_SHADING_RATE_PALETTE_ENTRY_16_INVOCATIONS_PER_PIXEL_NV = 1, + VK_SHADING_RATE_PALETTE_ENTRY_8_INVOCATIONS_PER_PIXEL_NV = 2, + VK_SHADING_RATE_PALETTE_ENTRY_4_INVOCATIONS_PER_PIXEL_NV = 3, + VK_SHADING_RATE_PALETTE_ENTRY_2_INVOCATIONS_PER_PIXEL_NV = 4, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_PIXEL_NV = 5, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X1_PIXELS_NV = 6, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV = 7, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X2_PIXELS_NV = 8, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X2_PIXELS_NV = 9, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X4_PIXELS_NV = 10, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X4_PIXELS_NV = 11, + VK_SHADING_RATE_PALETTE_ENTRY_MAX_ENUM_NV = 0x7FFFFFFF +} VkShadingRatePaletteEntryNV; + +typedef enum VkCoarseSampleOrderTypeNV { + VK_COARSE_SAMPLE_ORDER_TYPE_DEFAULT_NV = 0, + VK_COARSE_SAMPLE_ORDER_TYPE_CUSTOM_NV = 1, + VK_COARSE_SAMPLE_ORDER_TYPE_PIXEL_MAJOR_NV = 2, + VK_COARSE_SAMPLE_ORDER_TYPE_SAMPLE_MAJOR_NV = 3, + VK_COARSE_SAMPLE_ORDER_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkCoarseSampleOrderTypeNV; +typedef struct VkShadingRatePaletteNV { + uint32_t shadingRatePaletteEntryCount; + const VkShadingRatePaletteEntryNV* pShadingRatePaletteEntries; +} VkShadingRatePaletteNV; + +typedef struct VkPipelineViewportShadingRateImageStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 shadingRateImageEnable; + uint32_t viewportCount; + const VkShadingRatePaletteNV* pShadingRatePalettes; +} VkPipelineViewportShadingRateImageStateCreateInfoNV; + +typedef struct VkPhysicalDeviceShadingRateImageFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 shadingRateImage; + VkBool32 shadingRateCoarseSampleOrder; +} VkPhysicalDeviceShadingRateImageFeaturesNV; + +typedef struct VkPhysicalDeviceShadingRateImagePropertiesNV { + VkStructureType sType; + void* pNext; + VkExtent2D shadingRateTexelSize; + uint32_t shadingRatePaletteSize; + uint32_t shadingRateMaxCoarseSamples; +} VkPhysicalDeviceShadingRateImagePropertiesNV; + +typedef struct VkCoarseSampleLocationNV { + uint32_t pixelX; + uint32_t pixelY; + uint32_t sample; +} VkCoarseSampleLocationNV; + +typedef struct VkCoarseSampleOrderCustomNV { + VkShadingRatePaletteEntryNV shadingRate; + uint32_t sampleCount; + uint32_t sampleLocationCount; + const VkCoarseSampleLocationNV* pSampleLocations; +} VkCoarseSampleOrderCustomNV; + +typedef struct VkPipelineViewportCoarseSampleOrderStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkCoarseSampleOrderTypeNV sampleOrderType; + uint32_t customSampleOrderCount; + const VkCoarseSampleOrderCustomNV* pCustomSampleOrders; +} VkPipelineViewportCoarseSampleOrderStateCreateInfoNV; + +typedef void (VKAPI_PTR *PFN_vkCmdBindShadingRateImageNV)(VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout); +typedef void (VKAPI_PTR 
*PFN_vkCmdSetViewportShadingRatePaletteNV)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkShadingRatePaletteNV* pShadingRatePalettes); +typedef void (VKAPI_PTR *PFN_vkCmdSetCoarseSampleOrderNV)(VkCommandBuffer commandBuffer, VkCoarseSampleOrderTypeNV sampleOrderType, uint32_t customSampleOrderCount, const VkCoarseSampleOrderCustomNV* pCustomSampleOrders); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBindShadingRateImageNV( + VkCommandBuffer commandBuffer, + VkImageView imageView, + VkImageLayout imageLayout); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetViewportShadingRatePaletteNV( + VkCommandBuffer commandBuffer, + uint32_t firstViewport, + uint32_t viewportCount, + const VkShadingRatePaletteNV* pShadingRatePalettes); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetCoarseSampleOrderNV( + VkCommandBuffer commandBuffer, + VkCoarseSampleOrderTypeNV sampleOrderType, + uint32_t customSampleOrderCount, + const VkCoarseSampleOrderCustomNV* pCustomSampleOrders); +#endif +#endif + + +// VK_NV_ray_tracing is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_ray_tracing 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkAccelerationStructureNV) +#define VK_NV_RAY_TRACING_SPEC_VERSION 3 +#define VK_NV_RAY_TRACING_EXTENSION_NAME "VK_NV_ray_tracing" +#define VK_SHADER_UNUSED_KHR (~0U) +#define VK_SHADER_UNUSED_NV VK_SHADER_UNUSED_KHR + +typedef enum VkRayTracingShaderGroupTypeKHR { + VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR = 0, + VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR = 1, + VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR = 2, + VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR, + VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR, + VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV = VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR, + VK_RAY_TRACING_SHADER_GROUP_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkRayTracingShaderGroupTypeKHR; +typedef VkRayTracingShaderGroupTypeKHR VkRayTracingShaderGroupTypeNV; + + +typedef enum VkGeometryTypeKHR { + VK_GEOMETRY_TYPE_TRIANGLES_KHR = 0, + VK_GEOMETRY_TYPE_AABBS_KHR = 1, + VK_GEOMETRY_TYPE_INSTANCES_KHR = 2, + VK_GEOMETRY_TYPE_SPHERES_NV = 1000429004, + VK_GEOMETRY_TYPE_LINEAR_SWEPT_SPHERES_NV = 1000429005, +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_GEOMETRY_TYPE_DENSE_GEOMETRY_FORMAT_TRIANGLES_AMDX = 1000478000, +#endif + VK_GEOMETRY_TYPE_TRIANGLES_NV = VK_GEOMETRY_TYPE_TRIANGLES_KHR, + VK_GEOMETRY_TYPE_AABBS_NV = VK_GEOMETRY_TYPE_AABBS_KHR, + VK_GEOMETRY_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkGeometryTypeKHR; +typedef VkGeometryTypeKHR VkGeometryTypeNV; + + +typedef enum VkAccelerationStructureTypeKHR { + VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR = 0, + VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR = 1, + VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR = 2, + VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, + VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR, + VK_ACCELERATION_STRUCTURE_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkAccelerationStructureTypeKHR; +typedef VkAccelerationStructureTypeKHR VkAccelerationStructureTypeNV; + + +typedef enum VkCopyAccelerationStructureModeKHR { + VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR = 0, + 
VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR = 1, + VK_COPY_ACCELERATION_STRUCTURE_MODE_SERIALIZE_KHR = 2, + VK_COPY_ACCELERATION_STRUCTURE_MODE_DESERIALIZE_KHR = 3, + VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV = VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR, + VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV = VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR, + VK_COPY_ACCELERATION_STRUCTURE_MODE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkCopyAccelerationStructureModeKHR; +typedef VkCopyAccelerationStructureModeKHR VkCopyAccelerationStructureModeNV; + + +typedef enum VkAccelerationStructureMemoryRequirementsTypeNV { + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV = 0, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV = 1, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV = 2, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkAccelerationStructureMemoryRequirementsTypeNV; + +typedef enum VkGeometryFlagBitsKHR { + VK_GEOMETRY_OPAQUE_BIT_KHR = 0x00000001, + VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_KHR = 0x00000002, + VK_GEOMETRY_OPAQUE_BIT_NV = VK_GEOMETRY_OPAQUE_BIT_KHR, + VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_NV = VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_KHR, + VK_GEOMETRY_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkGeometryFlagBitsKHR; +typedef VkFlags VkGeometryFlagsKHR; +typedef VkGeometryFlagsKHR VkGeometryFlagsNV; + +typedef VkGeometryFlagBitsKHR VkGeometryFlagBitsNV; + + +typedef enum VkGeometryInstanceFlagBitsKHR { + VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR = 0x00000001, + VK_GEOMETRY_INSTANCE_TRIANGLE_FLIP_FACING_BIT_KHR = 0x00000002, + VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_KHR = 0x00000004, + VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_KHR = 0x00000008, + VK_GEOMETRY_INSTANCE_FORCE_OPACITY_MICROMAP_2_STATE_BIT_EXT = 0x00000010, + VK_GEOMETRY_INSTANCE_DISABLE_OPACITY_MICROMAPS_BIT_EXT = 0x00000020, + VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_KHR = VK_GEOMETRY_INSTANCE_TRIANGLE_FLIP_FACING_BIT_KHR, + VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NV = VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR, + VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_NV = VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_KHR, + VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_NV = VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_KHR, + VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_NV = VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_KHR, + // VK_GEOMETRY_INSTANCE_FORCE_OPACITY_MICROMAP_2_STATE_EXT is a legacy alias + VK_GEOMETRY_INSTANCE_FORCE_OPACITY_MICROMAP_2_STATE_EXT = VK_GEOMETRY_INSTANCE_FORCE_OPACITY_MICROMAP_2_STATE_BIT_EXT, + // VK_GEOMETRY_INSTANCE_DISABLE_OPACITY_MICROMAPS_EXT is a legacy alias + VK_GEOMETRY_INSTANCE_DISABLE_OPACITY_MICROMAPS_EXT = VK_GEOMETRY_INSTANCE_DISABLE_OPACITY_MICROMAPS_BIT_EXT, + VK_GEOMETRY_INSTANCE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkGeometryInstanceFlagBitsKHR; +typedef VkFlags VkGeometryInstanceFlagsKHR; +typedef VkGeometryInstanceFlagsKHR VkGeometryInstanceFlagsNV; + +typedef VkGeometryInstanceFlagBitsKHR VkGeometryInstanceFlagBitsNV; + + +typedef enum VkBuildAccelerationStructureFlagBitsKHR { + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR = 0x00000001, + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR = 0x00000002, + VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR = 0x00000004, + VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_KHR = 0x00000008, + VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_KHR = 
0x00000010, + VK_BUILD_ACCELERATION_STRUCTURE_MOTION_BIT_NV = 0x00000020, + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_OPACITY_MICROMAP_UPDATE_BIT_EXT = 0x00000040, + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_DISABLE_OPACITY_MICROMAPS_BIT_EXT = 0x00000080, + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_OPACITY_MICROMAP_DATA_UPDATE_BIT_EXT = 0x00000100, +#ifdef VK_ENABLE_BETA_EXTENSIONS + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_DISPLACEMENT_MICROMAP_UPDATE_BIT_NV = 0x00000200, +#endif + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_DATA_ACCESS_BIT_KHR = 0x00000800, + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_CLUSTER_OPACITY_MICROMAPS_BIT_NV = 0x00001000, + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR, + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR, + VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR, + VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_KHR, + VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_KHR, + // VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_OPACITY_MICROMAP_UPDATE_EXT is a legacy alias + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_OPACITY_MICROMAP_UPDATE_EXT = VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_OPACITY_MICROMAP_UPDATE_BIT_EXT, + // VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_DISABLE_OPACITY_MICROMAPS_EXT is a legacy alias + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_DISABLE_OPACITY_MICROMAPS_EXT = VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_DISABLE_OPACITY_MICROMAPS_BIT_EXT, + // VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_OPACITY_MICROMAP_DATA_UPDATE_EXT is a legacy alias + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_OPACITY_MICROMAP_DATA_UPDATE_EXT = VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_OPACITY_MICROMAP_DATA_UPDATE_BIT_EXT, +#ifdef VK_ENABLE_BETA_EXTENSIONS + // VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_DISPLACEMENT_MICROMAP_UPDATE_NV is a legacy alias + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_DISPLACEMENT_MICROMAP_UPDATE_NV = VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_DISPLACEMENT_MICROMAP_UPDATE_BIT_NV, +#endif + // VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_DATA_ACCESS_KHR is a legacy alias + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_DATA_ACCESS_KHR = VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_DATA_ACCESS_BIT_KHR, + VK_BUILD_ACCELERATION_STRUCTURE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkBuildAccelerationStructureFlagBitsKHR; +typedef VkFlags VkBuildAccelerationStructureFlagsKHR; +typedef VkBuildAccelerationStructureFlagsKHR VkBuildAccelerationStructureFlagsNV; + +typedef VkBuildAccelerationStructureFlagBitsKHR VkBuildAccelerationStructureFlagBitsNV; + +typedef struct VkRayTracingShaderGroupCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkRayTracingShaderGroupTypeKHR type; + uint32_t generalShader; + uint32_t closestHitShader; + uint32_t anyHitShader; + uint32_t intersectionShader; +} VkRayTracingShaderGroupCreateInfoNV; + +typedef struct VkRayTracingPipelineCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineCreateFlags flags; + uint32_t stageCount; + const VkPipelineShaderStageCreateInfo* pStages; + uint32_t groupCount; + const VkRayTracingShaderGroupCreateInfoNV* pGroups; + uint32_t maxRecursionDepth; + VkPipelineLayout layout; + VkPipeline basePipelineHandle; + int32_t basePipelineIndex; +} VkRayTracingPipelineCreateInfoNV; + +typedef struct VkGeometryTrianglesNV { + VkStructureType sType; + const void* pNext; 
+ VkBuffer vertexData; + VkDeviceSize vertexOffset; + uint32_t vertexCount; + VkDeviceSize vertexStride; + VkFormat vertexFormat; + VkBuffer indexData; + VkDeviceSize indexOffset; + uint32_t indexCount; + VkIndexType indexType; + VkBuffer transformData; + VkDeviceSize transformOffset; +} VkGeometryTrianglesNV; + +typedef struct VkGeometryAABBNV { + VkStructureType sType; + const void* pNext; + VkBuffer aabbData; + uint32_t numAABBs; + uint32_t stride; + VkDeviceSize offset; +} VkGeometryAABBNV; + +typedef struct VkGeometryDataNV { + VkGeometryTrianglesNV triangles; + VkGeometryAABBNV aabbs; +} VkGeometryDataNV; + +typedef struct VkGeometryNV { + VkStructureType sType; + const void* pNext; + VkGeometryTypeKHR geometryType; + VkGeometryDataNV geometry; + VkGeometryFlagsKHR flags; +} VkGeometryNV; + +typedef struct VkAccelerationStructureInfoNV { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureTypeNV type; + VkBuildAccelerationStructureFlagsNV flags; + uint32_t instanceCount; + uint32_t geometryCount; + const VkGeometryNV* pGeometries; +} VkAccelerationStructureInfoNV; + +typedef struct VkAccelerationStructureCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkDeviceSize compactedSize; + VkAccelerationStructureInfoNV info; +} VkAccelerationStructureCreateInfoNV; + +typedef struct VkBindAccelerationStructureMemoryInfoNV { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureNV accelerationStructure; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; + uint32_t deviceIndexCount; + const uint32_t* pDeviceIndices; +} VkBindAccelerationStructureMemoryInfoNV; + +typedef struct VkWriteDescriptorSetAccelerationStructureNV { + VkStructureType sType; + const void* pNext; + uint32_t accelerationStructureCount; + const VkAccelerationStructureNV* pAccelerationStructures; +} VkWriteDescriptorSetAccelerationStructureNV; + +typedef struct VkAccelerationStructureMemoryRequirementsInfoNV { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureMemoryRequirementsTypeNV type; + VkAccelerationStructureNV accelerationStructure; +} VkAccelerationStructureMemoryRequirementsInfoNV; + +typedef struct VkPhysicalDeviceRayTracingPropertiesNV { + VkStructureType sType; + void* pNext; + uint32_t shaderGroupHandleSize; + uint32_t maxRecursionDepth; + uint32_t maxShaderGroupStride; + uint32_t shaderGroupBaseAlignment; + uint64_t maxGeometryCount; + uint64_t maxInstanceCount; + uint64_t maxTriangleCount; + uint32_t maxDescriptorSetAccelerationStructures; +} VkPhysicalDeviceRayTracingPropertiesNV; + +typedef struct VkTransformMatrixKHR { + float matrix[3][4]; +} VkTransformMatrixKHR; + +typedef VkTransformMatrixKHR VkTransformMatrixNV; + +typedef struct VkAabbPositionsKHR { + float minX; + float minY; + float minZ; + float maxX; + float maxY; + float maxZ; +} VkAabbPositionsKHR; + +typedef VkAabbPositionsKHR VkAabbPositionsNV; + +typedef struct VkAccelerationStructureInstanceKHR { + VkTransformMatrixKHR transform; + uint32_t instanceCustomIndex:24; + uint32_t mask:8; + uint32_t instanceShaderBindingTableRecordOffset:24; + VkGeometryInstanceFlagsKHR flags:8; + uint64_t accelerationStructureReference; +} VkAccelerationStructureInstanceKHR; + +typedef VkAccelerationStructureInstanceKHR VkAccelerationStructureInstanceNV; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateAccelerationStructureNV)(VkDevice device, const VkAccelerationStructureCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureNV* pAccelerationStructure); 
+typedef void (VKAPI_PTR *PFN_vkDestroyAccelerationStructureNV)(VkDevice device, VkAccelerationStructureNV accelerationStructure, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkGetAccelerationStructureMemoryRequirementsNV)(VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements); +typedef VkResult (VKAPI_PTR *PFN_vkBindAccelerationStructureMemoryNV)(VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoNV* pBindInfos); +typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructureNV)(VkCommandBuffer commandBuffer, const VkAccelerationStructureInfoNV* pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset, VkBool32 update, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkBuffer scratch, VkDeviceSize scratchOffset); +typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureNV)(VkCommandBuffer commandBuffer, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkCopyAccelerationStructureModeKHR mode); +typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysNV)(VkCommandBuffer commandBuffer, VkBuffer raygenShaderBindingTableBuffer, VkDeviceSize raygenShaderBindingOffset, VkBuffer missShaderBindingTableBuffer, VkDeviceSize missShaderBindingOffset, VkDeviceSize missShaderBindingStride, VkBuffer hitShaderBindingTableBuffer, VkDeviceSize hitShaderBindingOffset, VkDeviceSize hitShaderBindingStride, VkBuffer callableShaderBindingTableBuffer, VkDeviceSize callableShaderBindingOffset, VkDeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth); +typedef VkResult (VKAPI_PTR *PFN_vkCreateRayTracingPipelinesNV)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); +typedef VkResult (VKAPI_PTR *PFN_vkGetRayTracingShaderGroupHandlesKHR)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData); +typedef VkResult (VKAPI_PTR *PFN_vkGetRayTracingShaderGroupHandlesNV)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData); +typedef VkResult (VKAPI_PTR *PFN_vkGetAccelerationStructureHandleNV)(VkDevice device, VkAccelerationStructureNV accelerationStructure, size_t dataSize, void* pData); +typedef void (VKAPI_PTR *PFN_vkCmdWriteAccelerationStructuresPropertiesNV)(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureNV* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery); +typedef VkResult (VKAPI_PTR *PFN_vkCompileDeferredNV)(VkDevice device, VkPipeline pipeline, uint32_t shader); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateAccelerationStructureNV( + VkDevice device, + const VkAccelerationStructureCreateInfoNV* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkAccelerationStructureNV* pAccelerationStructure); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkDestroyAccelerationStructureNV( + VkDevice device, + VkAccelerationStructureNV accelerationStructure, + const VkAllocationCallbacks* pAllocator); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetAccelerationStructureMemoryRequirementsNV( + VkDevice device, + const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, + 
VkMemoryRequirements2KHR* pMemoryRequirements); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkBindAccelerationStructureMemoryNV( + VkDevice device, + uint32_t bindInfoCount, + const VkBindAccelerationStructureMemoryInfoNV* pBindInfos); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructureNV( + VkCommandBuffer commandBuffer, + const VkAccelerationStructureInfoNV* pInfo, + VkBuffer instanceData, + VkDeviceSize instanceOffset, + VkBool32 update, + VkAccelerationStructureNV dst, + VkAccelerationStructureNV src, + VkBuffer scratch, + VkDeviceSize scratchOffset); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureNV( + VkCommandBuffer commandBuffer, + VkAccelerationStructureNV dst, + VkAccelerationStructureNV src, + VkCopyAccelerationStructureModeKHR mode); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysNV( + VkCommandBuffer commandBuffer, + VkBuffer raygenShaderBindingTableBuffer, + VkDeviceSize raygenShaderBindingOffset, + VkBuffer missShaderBindingTableBuffer, + VkDeviceSize missShaderBindingOffset, + VkDeviceSize missShaderBindingStride, + VkBuffer hitShaderBindingTableBuffer, + VkDeviceSize hitShaderBindingOffset, + VkDeviceSize hitShaderBindingStride, + VkBuffer callableShaderBindingTableBuffer, + VkDeviceSize callableShaderBindingOffset, + VkDeviceSize callableShaderBindingStride, + uint32_t width, + uint32_t height, + uint32_t depth); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRayTracingPipelinesNV( + VkDevice device, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkRayTracingPipelineCreateInfoNV* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetRayTracingShaderGroupHandlesKHR( + VkDevice device, + VkPipeline pipeline, + uint32_t firstGroup, + uint32_t groupCount, + size_t dataSize, + void* pData); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetRayTracingShaderGroupHandlesNV( + VkDevice device, + VkPipeline pipeline, + uint32_t firstGroup, + uint32_t groupCount, + size_t dataSize, + void* pData); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetAccelerationStructureHandleNV( + VkDevice device, + VkAccelerationStructureNV accelerationStructure, + size_t dataSize, + void* pData); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdWriteAccelerationStructuresPropertiesNV( + VkCommandBuffer commandBuffer, + uint32_t accelerationStructureCount, + const VkAccelerationStructureNV* pAccelerationStructures, + VkQueryType queryType, + VkQueryPool queryPool, + uint32_t firstQuery); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCompileDeferredNV( + VkDevice device, + VkPipeline pipeline, + uint32_t shader); +#endif +#endif + + +// VK_NV_representative_fragment_test is a preprocessor guard. Do not pass it to API calls. 
+#define VK_NV_representative_fragment_test 1 +#define VK_NV_REPRESENTATIVE_FRAGMENT_TEST_SPEC_VERSION 2 +#define VK_NV_REPRESENTATIVE_FRAGMENT_TEST_EXTENSION_NAME "VK_NV_representative_fragment_test" +typedef struct VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 representativeFragmentTest; +} VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV; + +typedef struct VkPipelineRepresentativeFragmentTestStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 representativeFragmentTestEnable; +} VkPipelineRepresentativeFragmentTestStateCreateInfoNV; + + + +// VK_EXT_filter_cubic is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_filter_cubic 1 +#define VK_EXT_FILTER_CUBIC_SPEC_VERSION 3 +#define VK_EXT_FILTER_CUBIC_EXTENSION_NAME "VK_EXT_filter_cubic" +typedef struct VkPhysicalDeviceImageViewImageFormatInfoEXT { + VkStructureType sType; + void* pNext; + VkImageViewType imageViewType; +} VkPhysicalDeviceImageViewImageFormatInfoEXT; + +typedef struct VkFilterCubicImageViewImageFormatPropertiesEXT { + VkStructureType sType; + void* pNext; + VkBool32 filterCubic; + VkBool32 filterCubicMinmax; +} VkFilterCubicImageViewImageFormatPropertiesEXT; + + + +// VK_QCOM_render_pass_shader_resolve is a preprocessor guard. Do not pass it to API calls. +#define VK_QCOM_render_pass_shader_resolve 1 +#define VK_QCOM_RENDER_PASS_SHADER_RESOLVE_SPEC_VERSION 4 +#define VK_QCOM_RENDER_PASS_SHADER_RESOLVE_EXTENSION_NAME "VK_QCOM_render_pass_shader_resolve" + + +// VK_EXT_global_priority is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_global_priority 1 +#define VK_EXT_GLOBAL_PRIORITY_SPEC_VERSION 2 +#define VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME "VK_EXT_global_priority" +typedef VkQueueGlobalPriority VkQueueGlobalPriorityEXT; + +typedef VkDeviceQueueGlobalPriorityCreateInfo VkDeviceQueueGlobalPriorityCreateInfoEXT; + + + +// VK_EXT_external_memory_host is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_external_memory_host 1 +#define VK_EXT_EXTERNAL_MEMORY_HOST_SPEC_VERSION 1 +#define VK_EXT_EXTERNAL_MEMORY_HOST_EXTENSION_NAME "VK_EXT_external_memory_host" +typedef struct VkImportMemoryHostPointerInfoEXT { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagBits handleType; + void* pHostPointer; +} VkImportMemoryHostPointerInfoEXT; + +typedef struct VkMemoryHostPointerPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t memoryTypeBits; +} VkMemoryHostPointerPropertiesEXT; + +typedef struct VkPhysicalDeviceExternalMemoryHostPropertiesEXT { + VkStructureType sType; + void* pNext; + VkDeviceSize minImportedHostPointerAlignment; +} VkPhysicalDeviceExternalMemoryHostPropertiesEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryHostPointerPropertiesEXT)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryHostPointerPropertiesEXT( + VkDevice device, + VkExternalMemoryHandleTypeFlagBits handleType, + const void* pHostPointer, + VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties); +#endif +#endif + + +// VK_AMD_buffer_marker is a preprocessor guard. Do not pass it to API calls. 
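+//
+// Illustrative usage sketch (not part of the generated declarations below;
+// `cmd` and `markerBuffer` are assumed, previously created handles). The
+// command writes the 32-bit marker into the buffer only after all preceding
+// commands have completed the given pipeline stage, which helps narrow down
+// where a device loss occurred:
+//
+//     vkCmdWriteBufferMarkerAMD(cmd, VK_PIPELINE_STAGE_TRANSFER_BIT,
+//                               markerBuffer, 0 /* dstOffset */,
+//                               0xCAFED00Du /* marker */);
+//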
+#define VK_AMD_buffer_marker 1 +#define VK_AMD_BUFFER_MARKER_SPEC_VERSION 1 +#define VK_AMD_BUFFER_MARKER_EXTENSION_NAME "VK_AMD_buffer_marker" +typedef void (VKAPI_PTR *PFN_vkCmdWriteBufferMarkerAMD)(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker); +typedef void (VKAPI_PTR *PFN_vkCmdWriteBufferMarker2AMD)(VkCommandBuffer commandBuffer, VkPipelineStageFlags2 stage, VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdWriteBufferMarkerAMD( + VkCommandBuffer commandBuffer, + VkPipelineStageFlagBits pipelineStage, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + uint32_t marker); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdWriteBufferMarker2AMD( + VkCommandBuffer commandBuffer, + VkPipelineStageFlags2 stage, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + uint32_t marker); +#endif +#endif + + +// VK_AMD_pipeline_compiler_control is a preprocessor guard. Do not pass it to API calls. +#define VK_AMD_pipeline_compiler_control 1 +#define VK_AMD_PIPELINE_COMPILER_CONTROL_SPEC_VERSION 1 +#define VK_AMD_PIPELINE_COMPILER_CONTROL_EXTENSION_NAME "VK_AMD_pipeline_compiler_control" + +typedef enum VkPipelineCompilerControlFlagBitsAMD { + VK_PIPELINE_COMPILER_CONTROL_FLAG_BITS_MAX_ENUM_AMD = 0x7FFFFFFF +} VkPipelineCompilerControlFlagBitsAMD; +typedef VkFlags VkPipelineCompilerControlFlagsAMD; +typedef struct VkPipelineCompilerControlCreateInfoAMD { + VkStructureType sType; + const void* pNext; + VkPipelineCompilerControlFlagsAMD compilerControlFlags; +} VkPipelineCompilerControlCreateInfoAMD; + + + +// VK_EXT_calibrated_timestamps is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_calibrated_timestamps 1 +#define VK_EXT_CALIBRATED_TIMESTAMPS_SPEC_VERSION 2 +#define VK_EXT_CALIBRATED_TIMESTAMPS_EXTENSION_NAME "VK_EXT_calibrated_timestamps" +typedef VkTimeDomainKHR VkTimeDomainEXT; + +typedef VkCalibratedTimestampInfoKHR VkCalibratedTimestampInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT)(VkPhysicalDevice physicalDevice, uint32_t* pTimeDomainCount, VkTimeDomainKHR* pTimeDomains); +typedef VkResult (VKAPI_PTR *PFN_vkGetCalibratedTimestampsEXT)(VkDevice device, uint32_t timestampCount, const VkCalibratedTimestampInfoKHR* pTimestampInfos, uint64_t* pTimestamps, uint64_t* pMaxDeviation); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceCalibrateableTimeDomainsEXT( + VkPhysicalDevice physicalDevice, + uint32_t* pTimeDomainCount, + VkTimeDomainKHR* pTimeDomains); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetCalibratedTimestampsEXT( + VkDevice device, + uint32_t timestampCount, + const VkCalibratedTimestampInfoKHR* pTimestampInfos, + uint64_t* pTimestamps, + uint64_t* pMaxDeviation); +#endif +#endif + + +// VK_AMD_shader_core_properties is a preprocessor guard. Do not pass it to API calls. 
+#define VK_AMD_shader_core_properties 1 +#define VK_AMD_SHADER_CORE_PROPERTIES_SPEC_VERSION 2 +#define VK_AMD_SHADER_CORE_PROPERTIES_EXTENSION_NAME "VK_AMD_shader_core_properties" +typedef struct VkPhysicalDeviceShaderCorePropertiesAMD { + VkStructureType sType; + void* pNext; + uint32_t shaderEngineCount; + uint32_t shaderArraysPerEngineCount; + uint32_t computeUnitsPerShaderArray; + uint32_t simdPerComputeUnit; + uint32_t wavefrontsPerSimd; + uint32_t wavefrontSize; + uint32_t sgprsPerSimd; + uint32_t minSgprAllocation; + uint32_t maxSgprAllocation; + uint32_t sgprAllocationGranularity; + uint32_t vgprsPerSimd; + uint32_t minVgprAllocation; + uint32_t maxVgprAllocation; + uint32_t vgprAllocationGranularity; +} VkPhysicalDeviceShaderCorePropertiesAMD; + + + +// VK_AMD_memory_overallocation_behavior is a preprocessor guard. Do not pass it to API calls. +#define VK_AMD_memory_overallocation_behavior 1 +#define VK_AMD_MEMORY_OVERALLOCATION_BEHAVIOR_SPEC_VERSION 1 +#define VK_AMD_MEMORY_OVERALLOCATION_BEHAVIOR_EXTENSION_NAME "VK_AMD_memory_overallocation_behavior" + +typedef enum VkMemoryOverallocationBehaviorAMD { + VK_MEMORY_OVERALLOCATION_BEHAVIOR_DEFAULT_AMD = 0, + VK_MEMORY_OVERALLOCATION_BEHAVIOR_ALLOWED_AMD = 1, + VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD = 2, + VK_MEMORY_OVERALLOCATION_BEHAVIOR_MAX_ENUM_AMD = 0x7FFFFFFF +} VkMemoryOverallocationBehaviorAMD; +typedef struct VkDeviceMemoryOverallocationCreateInfoAMD { + VkStructureType sType; + const void* pNext; + VkMemoryOverallocationBehaviorAMD overallocationBehavior; +} VkDeviceMemoryOverallocationCreateInfoAMD; + + + +// VK_EXT_vertex_attribute_divisor is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_vertex_attribute_divisor 1 +#define VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_SPEC_VERSION 3 +#define VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME "VK_EXT_vertex_attribute_divisor" +typedef struct VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxVertexAttribDivisor; +} VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT; + +typedef VkVertexInputBindingDivisorDescription VkVertexInputBindingDivisorDescriptionEXT; + +typedef VkPipelineVertexInputDivisorStateCreateInfo VkPipelineVertexInputDivisorStateCreateInfoEXT; + +typedef VkPhysicalDeviceVertexAttributeDivisorFeatures VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT; + + + +// VK_EXT_pipeline_creation_feedback is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_pipeline_creation_feedback 1 +#define VK_EXT_PIPELINE_CREATION_FEEDBACK_SPEC_VERSION 1 +#define VK_EXT_PIPELINE_CREATION_FEEDBACK_EXTENSION_NAME "VK_EXT_pipeline_creation_feedback" +typedef VkPipelineCreationFeedbackFlagBits VkPipelineCreationFeedbackFlagBitsEXT; + +typedef VkPipelineCreationFeedbackFlags VkPipelineCreationFeedbackFlagsEXT; + +typedef VkPipelineCreationFeedbackCreateInfo VkPipelineCreationFeedbackCreateInfoEXT; + +typedef VkPipelineCreationFeedback VkPipelineCreationFeedbackEXT; + + + +// VK_NV_shader_subgroup_partitioned is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_shader_subgroup_partitioned 1 +#define VK_NV_SHADER_SUBGROUP_PARTITIONED_SPEC_VERSION 1 +#define VK_NV_SHADER_SUBGROUP_PARTITIONED_EXTENSION_NAME "VK_NV_shader_subgroup_partitioned" + + +// VK_NV_compute_shader_derivatives is a preprocessor guard. Do not pass it to API calls. 
+#define VK_NV_compute_shader_derivatives 1 +#define VK_NV_COMPUTE_SHADER_DERIVATIVES_SPEC_VERSION 1 +#define VK_NV_COMPUTE_SHADER_DERIVATIVES_EXTENSION_NAME "VK_NV_compute_shader_derivatives" +typedef VkPhysicalDeviceComputeShaderDerivativesFeaturesKHR VkPhysicalDeviceComputeShaderDerivativesFeaturesNV; + + + +// VK_NV_mesh_shader is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_mesh_shader 1 +#define VK_NV_MESH_SHADER_SPEC_VERSION 1 +#define VK_NV_MESH_SHADER_EXTENSION_NAME "VK_NV_mesh_shader" +typedef struct VkPhysicalDeviceMeshShaderFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 taskShader; + VkBool32 meshShader; +} VkPhysicalDeviceMeshShaderFeaturesNV; + +typedef struct VkPhysicalDeviceMeshShaderPropertiesNV { + VkStructureType sType; + void* pNext; + uint32_t maxDrawMeshTasksCount; + uint32_t maxTaskWorkGroupInvocations; + uint32_t maxTaskWorkGroupSize[3]; + uint32_t maxTaskTotalMemorySize; + uint32_t maxTaskOutputCount; + uint32_t maxMeshWorkGroupInvocations; + uint32_t maxMeshWorkGroupSize[3]; + uint32_t maxMeshTotalMemorySize; + uint32_t maxMeshOutputVertices; + uint32_t maxMeshOutputPrimitives; + uint32_t maxMeshMultiviewViewCount; + uint32_t meshOutputPerVertexGranularity; + uint32_t meshOutputPerPrimitiveGranularity; +} VkPhysicalDeviceMeshShaderPropertiesNV; + +typedef struct VkDrawMeshTasksIndirectCommandNV { + uint32_t taskCount; + uint32_t firstTask; +} VkDrawMeshTasksIndirectCommandNV; + +typedef void (VKAPI_PTR *PFN_vkCmdDrawMeshTasksNV)(VkCommandBuffer commandBuffer, uint32_t taskCount, uint32_t firstTask); +typedef void (VKAPI_PTR *PFN_vkCmdDrawMeshTasksIndirectNV)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawMeshTasksIndirectCountNV)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawMeshTasksNV( + VkCommandBuffer commandBuffer, + uint32_t taskCount, + uint32_t firstTask); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawMeshTasksIndirectNV( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + uint32_t drawCount, + uint32_t stride); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawMeshTasksIndirectCountNV( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); +#endif +#endif + + +// VK_NV_fragment_shader_barycentric is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_fragment_shader_barycentric 1 +#define VK_NV_FRAGMENT_SHADER_BARYCENTRIC_SPEC_VERSION 1 +#define VK_NV_FRAGMENT_SHADER_BARYCENTRIC_EXTENSION_NAME "VK_NV_fragment_shader_barycentric" +typedef VkPhysicalDeviceFragmentShaderBarycentricFeaturesKHR VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV; + + + +// VK_NV_shader_image_footprint is a preprocessor guard. Do not pass it to API calls. 
+#define VK_NV_shader_image_footprint 1 +#define VK_NV_SHADER_IMAGE_FOOTPRINT_SPEC_VERSION 2 +#define VK_NV_SHADER_IMAGE_FOOTPRINT_EXTENSION_NAME "VK_NV_shader_image_footprint" +typedef struct VkPhysicalDeviceShaderImageFootprintFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 imageFootprint; +} VkPhysicalDeviceShaderImageFootprintFeaturesNV; + + + +// VK_NV_scissor_exclusive is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_scissor_exclusive 1 +#define VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION 2 +#define VK_NV_SCISSOR_EXCLUSIVE_EXTENSION_NAME "VK_NV_scissor_exclusive" +typedef struct VkPipelineViewportExclusiveScissorStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + uint32_t exclusiveScissorCount; + const VkRect2D* pExclusiveScissors; +} VkPipelineViewportExclusiveScissorStateCreateInfoNV; + +typedef struct VkPhysicalDeviceExclusiveScissorFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 exclusiveScissor; +} VkPhysicalDeviceExclusiveScissorFeaturesNV; + +typedef void (VKAPI_PTR *PFN_vkCmdSetExclusiveScissorEnableNV)(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount, const VkBool32* pExclusiveScissorEnables); +typedef void (VKAPI_PTR *PFN_vkCmdSetExclusiveScissorNV)(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount, const VkRect2D* pExclusiveScissors); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetExclusiveScissorEnableNV( + VkCommandBuffer commandBuffer, + uint32_t firstExclusiveScissor, + uint32_t exclusiveScissorCount, + const VkBool32* pExclusiveScissorEnables); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetExclusiveScissorNV( + VkCommandBuffer commandBuffer, + uint32_t firstExclusiveScissor, + uint32_t exclusiveScissorCount, + const VkRect2D* pExclusiveScissors); +#endif +#endif + + +// VK_NV_device_diagnostic_checkpoints is a preprocessor guard. Do not pass it to API calls. 
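+//
+// Illustrative usage sketch (not part of the generated declarations below;
+// `cmd` and `queue` are assumed handles). Checkpoints are cheap markers
+// recorded into a command buffer; after a VK_ERROR_DEVICE_LOST, the last
+// checkpoints the GPU reached can be read back per queue:
+//
+//     vkCmdSetCheckpointNV(cmd, (const void*)(uintptr_t)0x1);  // record marker
+//     ...
+//     uint32_t n = 0;
+//     vkGetQueueCheckpointDataNV(queue, &n, NULL);             // query count
+//     // then fill an array of VkCheckpointDataNV (each with sType =
+//     // VK_STRUCTURE_TYPE_CHECKPOINT_DATA_NV) via a second call.
+//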
+#define VK_NV_device_diagnostic_checkpoints 1 +#define VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_SPEC_VERSION 2 +#define VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_EXTENSION_NAME "VK_NV_device_diagnostic_checkpoints" +typedef struct VkQueueFamilyCheckpointPropertiesNV { + VkStructureType sType; + void* pNext; + VkPipelineStageFlags checkpointExecutionStageMask; +} VkQueueFamilyCheckpointPropertiesNV; + +typedef struct VkCheckpointDataNV { + VkStructureType sType; + void* pNext; + VkPipelineStageFlagBits stage; + void* pCheckpointMarker; +} VkCheckpointDataNV; + +typedef struct VkQueueFamilyCheckpointProperties2NV { + VkStructureType sType; + void* pNext; + VkPipelineStageFlags2 checkpointExecutionStageMask; +} VkQueueFamilyCheckpointProperties2NV; + +typedef struct VkCheckpointData2NV { + VkStructureType sType; + void* pNext; + VkPipelineStageFlags2 stage; + void* pCheckpointMarker; +} VkCheckpointData2NV; + +typedef void (VKAPI_PTR *PFN_vkCmdSetCheckpointNV)(VkCommandBuffer commandBuffer, const void* pCheckpointMarker); +typedef void (VKAPI_PTR *PFN_vkGetQueueCheckpointDataNV)(VkQueue queue, uint32_t* pCheckpointDataCount, VkCheckpointDataNV* pCheckpointData); +typedef void (VKAPI_PTR *PFN_vkGetQueueCheckpointData2NV)(VkQueue queue, uint32_t* pCheckpointDataCount, VkCheckpointData2NV* pCheckpointData); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetCheckpointNV( + VkCommandBuffer commandBuffer, + const void* pCheckpointMarker); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetQueueCheckpointDataNV( + VkQueue queue, + uint32_t* pCheckpointDataCount, + VkCheckpointDataNV* pCheckpointData); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetQueueCheckpointData2NV( + VkQueue queue, + uint32_t* pCheckpointDataCount, + VkCheckpointData2NV* pCheckpointData); +#endif +#endif + + +// VK_EXT_present_timing is a preprocessor guard. Do not pass it to API calls. 
+#define VK_EXT_present_timing 1 +#define VK_EXT_PRESENT_TIMING_SPEC_VERSION 3 +#define VK_EXT_PRESENT_TIMING_EXTENSION_NAME "VK_EXT_present_timing" + +typedef enum VkPresentStageFlagBitsEXT { + VK_PRESENT_STAGE_QUEUE_OPERATIONS_END_BIT_EXT = 0x00000001, + VK_PRESENT_STAGE_REQUEST_DEQUEUED_BIT_EXT = 0x00000002, + VK_PRESENT_STAGE_IMAGE_FIRST_PIXEL_OUT_BIT_EXT = 0x00000004, + VK_PRESENT_STAGE_IMAGE_FIRST_PIXEL_VISIBLE_BIT_EXT = 0x00000008, + VK_PRESENT_STAGE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkPresentStageFlagBitsEXT; +typedef VkFlags VkPresentStageFlagsEXT; + +typedef enum VkPastPresentationTimingFlagBitsEXT { + VK_PAST_PRESENTATION_TIMING_ALLOW_PARTIAL_RESULTS_BIT_EXT = 0x00000001, + VK_PAST_PRESENTATION_TIMING_ALLOW_OUT_OF_ORDER_RESULTS_BIT_EXT = 0x00000002, + VK_PAST_PRESENTATION_TIMING_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkPastPresentationTimingFlagBitsEXT; +typedef VkFlags VkPastPresentationTimingFlagsEXT; + +typedef enum VkPresentTimingInfoFlagBitsEXT { + VK_PRESENT_TIMING_INFO_PRESENT_AT_RELATIVE_TIME_BIT_EXT = 0x00000001, + VK_PRESENT_TIMING_INFO_PRESENT_AT_NEAREST_REFRESH_CYCLE_BIT_EXT = 0x00000002, + VK_PRESENT_TIMING_INFO_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkPresentTimingInfoFlagBitsEXT; +typedef VkFlags VkPresentTimingInfoFlagsEXT; +typedef struct VkPhysicalDevicePresentTimingFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 presentTiming; + VkBool32 presentAtAbsoluteTime; + VkBool32 presentAtRelativeTime; +} VkPhysicalDevicePresentTimingFeaturesEXT; + +typedef struct VkPresentTimingSurfaceCapabilitiesEXT { + VkStructureType sType; + void* pNext; + VkBool32 presentTimingSupported; + VkBool32 presentAtAbsoluteTimeSupported; + VkBool32 presentAtRelativeTimeSupported; + VkPresentStageFlagsEXT presentStageQueries; +} VkPresentTimingSurfaceCapabilitiesEXT; + +typedef struct VkSwapchainCalibratedTimestampInfoEXT { + VkStructureType sType; + const void* pNext; + VkSwapchainKHR swapchain; + VkPresentStageFlagsEXT presentStage; + uint64_t timeDomainId; +} VkSwapchainCalibratedTimestampInfoEXT; + +typedef struct VkSwapchainTimingPropertiesEXT { + VkStructureType sType; + void* pNext; + uint64_t refreshDuration; + uint64_t refreshInterval; +} VkSwapchainTimingPropertiesEXT; + +typedef struct VkSwapchainTimeDomainPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t timeDomainCount; + VkTimeDomainKHR* pTimeDomains; + uint64_t* pTimeDomainIds; +} VkSwapchainTimeDomainPropertiesEXT; + +typedef struct VkPastPresentationTimingInfoEXT { + VkStructureType sType; + const void* pNext; + VkPastPresentationTimingFlagsEXT flags; + VkSwapchainKHR swapchain; +} VkPastPresentationTimingInfoEXT; + +typedef struct VkPresentStageTimeEXT { + VkPresentStageFlagsEXT stage; + uint64_t time; +} VkPresentStageTimeEXT; + +typedef struct VkPastPresentationTimingEXT { + VkStructureType sType; + void* pNext; + uint64_t presentId; + uint64_t targetTime; + uint32_t presentStageCount; + VkPresentStageTimeEXT* pPresentStages; + VkTimeDomainKHR timeDomain; + uint64_t timeDomainId; + VkBool32 reportComplete; +} VkPastPresentationTimingEXT; + +typedef struct VkPastPresentationTimingPropertiesEXT { + VkStructureType sType; + void* pNext; + uint64_t timingPropertiesCounter; + uint64_t timeDomainsCounter; + uint32_t presentationTimingCount; + VkPastPresentationTimingEXT* pPresentationTimings; +} VkPastPresentationTimingPropertiesEXT; + +typedef struct VkPresentTimingInfoEXT { + VkStructureType sType; + const void* pNext; + VkPresentTimingInfoFlagsEXT flags; + uint64_t targetTime; + 
uint64_t timeDomainId; + VkPresentStageFlagsEXT presentStageQueries; + VkPresentStageFlagsEXT targetTimeDomainPresentStage; +} VkPresentTimingInfoEXT; + +typedef struct VkPresentTimingsInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t swapchainCount; + const VkPresentTimingInfoEXT* pTimingInfos; +} VkPresentTimingsInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkSetSwapchainPresentTimingQueueSizeEXT)(VkDevice device, VkSwapchainKHR swapchain, uint32_t size); +typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainTimingPropertiesEXT)(VkDevice device, VkSwapchainKHR swapchain, VkSwapchainTimingPropertiesEXT* pSwapchainTimingProperties, uint64_t* pSwapchainTimingPropertiesCounter); +typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainTimeDomainPropertiesEXT)(VkDevice device, VkSwapchainKHR swapchain, VkSwapchainTimeDomainPropertiesEXT* pSwapchainTimeDomainProperties, uint64_t* pTimeDomainsCounter); +typedef VkResult (VKAPI_PTR *PFN_vkGetPastPresentationTimingEXT)(VkDevice device, const VkPastPresentationTimingInfoEXT* pPastPresentationTimingInfo, VkPastPresentationTimingPropertiesEXT* pPastPresentationTimingProperties); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkSetSwapchainPresentTimingQueueSizeEXT( + VkDevice device, + VkSwapchainKHR swapchain, + uint32_t size); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainTimingPropertiesEXT( + VkDevice device, + VkSwapchainKHR swapchain, + VkSwapchainTimingPropertiesEXT* pSwapchainTimingProperties, + uint64_t* pSwapchainTimingPropertiesCounter); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainTimeDomainPropertiesEXT( + VkDevice device, + VkSwapchainKHR swapchain, + VkSwapchainTimeDomainPropertiesEXT* pSwapchainTimeDomainProperties, + uint64_t* pTimeDomainsCounter); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPastPresentationTimingEXT( + VkDevice device, + const VkPastPresentationTimingInfoEXT* pPastPresentationTimingInfo, + VkPastPresentationTimingPropertiesEXT* pPastPresentationTimingProperties); +#endif +#endif + + +// VK_INTEL_shader_integer_functions2 is a preprocessor guard. Do not pass it to API calls. +#define VK_INTEL_shader_integer_functions2 1 +#define VK_INTEL_SHADER_INTEGER_FUNCTIONS_2_SPEC_VERSION 1 +#define VK_INTEL_SHADER_INTEGER_FUNCTIONS_2_EXTENSION_NAME "VK_INTEL_shader_integer_functions2" +typedef struct VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL { + VkStructureType sType; + void* pNext; + VkBool32 shaderIntegerFunctions2; +} VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL; + + + +// VK_INTEL_performance_query is a preprocessor guard. Do not pass it to API calls. 
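+//
+// Illustrative usage sketch (not part of the generated declarations below;
+// `device` and `cmd` are assumed handles). The Intel performance-query API
+// must be initialized on the device before markers can be recorded:
+//
+//     VkInitializePerformanceApiInfoINTEL init =
+//         { VK_STRUCTURE_TYPE_INITIALIZE_PERFORMANCE_API_INFO_INTEL };
+//     vkInitializePerformanceApiINTEL(device, &init);
+//
+//     VkPerformanceMarkerInfoINTEL marker =
+//         { VK_STRUCTURE_TYPE_PERFORMANCE_MARKER_INFO_INTEL };
+//     marker.marker = 42;                   // user-defined 64-bit value
+//     vkCmdSetPerformanceMarkerINTEL(cmd, &marker);
+//     ...
+//     vkUninitializePerformanceApiINTEL(device);
+//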
+#define VK_INTEL_performance_query 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPerformanceConfigurationINTEL) +#define VK_INTEL_PERFORMANCE_QUERY_SPEC_VERSION 2 +#define VK_INTEL_PERFORMANCE_QUERY_EXTENSION_NAME "VK_INTEL_performance_query" + +typedef enum VkPerformanceConfigurationTypeINTEL { + VK_PERFORMANCE_CONFIGURATION_TYPE_COMMAND_QUEUE_METRICS_DISCOVERY_ACTIVATED_INTEL = 0, + VK_PERFORMANCE_CONFIGURATION_TYPE_MAX_ENUM_INTEL = 0x7FFFFFFF +} VkPerformanceConfigurationTypeINTEL; + +typedef enum VkQueryPoolSamplingModeINTEL { + VK_QUERY_POOL_SAMPLING_MODE_MANUAL_INTEL = 0, + VK_QUERY_POOL_SAMPLING_MODE_MAX_ENUM_INTEL = 0x7FFFFFFF +} VkQueryPoolSamplingModeINTEL; + +typedef enum VkPerformanceOverrideTypeINTEL { + VK_PERFORMANCE_OVERRIDE_TYPE_NULL_HARDWARE_INTEL = 0, + VK_PERFORMANCE_OVERRIDE_TYPE_FLUSH_GPU_CACHES_INTEL = 1, + VK_PERFORMANCE_OVERRIDE_TYPE_MAX_ENUM_INTEL = 0x7FFFFFFF +} VkPerformanceOverrideTypeINTEL; + +typedef enum VkPerformanceParameterTypeINTEL { + VK_PERFORMANCE_PARAMETER_TYPE_HW_COUNTERS_SUPPORTED_INTEL = 0, + VK_PERFORMANCE_PARAMETER_TYPE_STREAM_MARKER_VALID_BITS_INTEL = 1, + VK_PERFORMANCE_PARAMETER_TYPE_MAX_ENUM_INTEL = 0x7FFFFFFF +} VkPerformanceParameterTypeINTEL; + +typedef enum VkPerformanceValueTypeINTEL { + VK_PERFORMANCE_VALUE_TYPE_UINT32_INTEL = 0, + VK_PERFORMANCE_VALUE_TYPE_UINT64_INTEL = 1, + VK_PERFORMANCE_VALUE_TYPE_FLOAT_INTEL = 2, + VK_PERFORMANCE_VALUE_TYPE_BOOL_INTEL = 3, + VK_PERFORMANCE_VALUE_TYPE_STRING_INTEL = 4, + VK_PERFORMANCE_VALUE_TYPE_MAX_ENUM_INTEL = 0x7FFFFFFF +} VkPerformanceValueTypeINTEL; +typedef union VkPerformanceValueDataINTEL { + uint32_t value32; + uint64_t value64; + float valueFloat; + VkBool32 valueBool; + const char* valueString; +} VkPerformanceValueDataINTEL; + +typedef struct VkPerformanceValueINTEL { + VkPerformanceValueTypeINTEL type; + VkPerformanceValueDataINTEL data; +} VkPerformanceValueINTEL; + +typedef struct VkInitializePerformanceApiInfoINTEL { + VkStructureType sType; + const void* pNext; + void* pUserData; +} VkInitializePerformanceApiInfoINTEL; + +typedef struct VkQueryPoolPerformanceQueryCreateInfoINTEL { + VkStructureType sType; + const void* pNext; + VkQueryPoolSamplingModeINTEL performanceCountersSampling; +} VkQueryPoolPerformanceQueryCreateInfoINTEL; + +typedef VkQueryPoolPerformanceQueryCreateInfoINTEL VkQueryPoolCreateInfoINTEL; + +typedef struct VkPerformanceMarkerInfoINTEL { + VkStructureType sType; + const void* pNext; + uint64_t marker; +} VkPerformanceMarkerInfoINTEL; + +typedef struct VkPerformanceStreamMarkerInfoINTEL { + VkStructureType sType; + const void* pNext; + uint32_t marker; +} VkPerformanceStreamMarkerInfoINTEL; + +typedef struct VkPerformanceOverrideInfoINTEL { + VkStructureType sType; + const void* pNext; + VkPerformanceOverrideTypeINTEL type; + VkBool32 enable; + uint64_t parameter; +} VkPerformanceOverrideInfoINTEL; + +typedef struct VkPerformanceConfigurationAcquireInfoINTEL { + VkStructureType sType; + const void* pNext; + VkPerformanceConfigurationTypeINTEL type; +} VkPerformanceConfigurationAcquireInfoINTEL; + +typedef VkResult (VKAPI_PTR *PFN_vkInitializePerformanceApiINTEL)(VkDevice device, const VkInitializePerformanceApiInfoINTEL* pInitializeInfo); +typedef void (VKAPI_PTR *PFN_vkUninitializePerformanceApiINTEL)(VkDevice device); +typedef VkResult (VKAPI_PTR *PFN_vkCmdSetPerformanceMarkerINTEL)(VkCommandBuffer commandBuffer, const VkPerformanceMarkerInfoINTEL* pMarkerInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCmdSetPerformanceStreamMarkerINTEL)(VkCommandBuffer commandBuffer, 
const VkPerformanceStreamMarkerInfoINTEL* pMarkerInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCmdSetPerformanceOverrideINTEL)(VkCommandBuffer commandBuffer, const VkPerformanceOverrideInfoINTEL* pOverrideInfo); +typedef VkResult (VKAPI_PTR *PFN_vkAcquirePerformanceConfigurationINTEL)(VkDevice device, const VkPerformanceConfigurationAcquireInfoINTEL* pAcquireInfo, VkPerformanceConfigurationINTEL* pConfiguration); +typedef VkResult (VKAPI_PTR *PFN_vkReleasePerformanceConfigurationINTEL)(VkDevice device, VkPerformanceConfigurationINTEL configuration); +typedef VkResult (VKAPI_PTR *PFN_vkQueueSetPerformanceConfigurationINTEL)(VkQueue queue, VkPerformanceConfigurationINTEL configuration); +typedef VkResult (VKAPI_PTR *PFN_vkGetPerformanceParameterINTEL)(VkDevice device, VkPerformanceParameterTypeINTEL parameter, VkPerformanceValueINTEL* pValue); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkInitializePerformanceApiINTEL( + VkDevice device, + const VkInitializePerformanceApiInfoINTEL* pInitializeInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkUninitializePerformanceApiINTEL( + VkDevice device); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCmdSetPerformanceMarkerINTEL( + VkCommandBuffer commandBuffer, + const VkPerformanceMarkerInfoINTEL* pMarkerInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCmdSetPerformanceStreamMarkerINTEL( + VkCommandBuffer commandBuffer, + const VkPerformanceStreamMarkerInfoINTEL* pMarkerInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCmdSetPerformanceOverrideINTEL( + VkCommandBuffer commandBuffer, + const VkPerformanceOverrideInfoINTEL* pOverrideInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkAcquirePerformanceConfigurationINTEL( + VkDevice device, + const VkPerformanceConfigurationAcquireInfoINTEL* pAcquireInfo, + VkPerformanceConfigurationINTEL* pConfiguration); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkReleasePerformanceConfigurationINTEL( + VkDevice device, + VkPerformanceConfigurationINTEL configuration); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkQueueSetPerformanceConfigurationINTEL( + VkQueue queue, + VkPerformanceConfigurationINTEL configuration); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPerformanceParameterINTEL( + VkDevice device, + VkPerformanceParameterTypeINTEL parameter, + VkPerformanceValueINTEL* pValue); +#endif +#endif + + +// VK_EXT_pci_bus_info is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_pci_bus_info 1 +#define VK_EXT_PCI_BUS_INFO_SPEC_VERSION 2 +#define VK_EXT_PCI_BUS_INFO_EXTENSION_NAME "VK_EXT_pci_bus_info" +typedef struct VkPhysicalDevicePCIBusInfoPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t pciDomain; + uint32_t pciBus; + uint32_t pciDevice; + uint32_t pciFunction; +} VkPhysicalDevicePCIBusInfoPropertiesEXT; + + + +// VK_AMD_display_native_hdr is a preprocessor guard. Do not pass it to API calls. 
+#define VK_AMD_display_native_hdr 1 +#define VK_AMD_DISPLAY_NATIVE_HDR_SPEC_VERSION 1 +#define VK_AMD_DISPLAY_NATIVE_HDR_EXTENSION_NAME "VK_AMD_display_native_hdr" +typedef struct VkDisplayNativeHdrSurfaceCapabilitiesAMD { + VkStructureType sType; + void* pNext; + VkBool32 localDimmingSupport; +} VkDisplayNativeHdrSurfaceCapabilitiesAMD; + +typedef struct VkSwapchainDisplayNativeHdrCreateInfoAMD { + VkStructureType sType; + const void* pNext; + VkBool32 localDimmingEnable; +} VkSwapchainDisplayNativeHdrCreateInfoAMD; + +typedef void (VKAPI_PTR *PFN_vkSetLocalDimmingAMD)(VkDevice device, VkSwapchainKHR swapChain, VkBool32 localDimmingEnable); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkSetLocalDimmingAMD( + VkDevice device, + VkSwapchainKHR swapChain, + VkBool32 localDimmingEnable); +#endif +#endif + + +// VK_EXT_fragment_density_map is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_fragment_density_map 1 +#define VK_EXT_FRAGMENT_DENSITY_MAP_SPEC_VERSION 3 +#define VK_EXT_FRAGMENT_DENSITY_MAP_EXTENSION_NAME "VK_EXT_fragment_density_map" +typedef struct VkPhysicalDeviceFragmentDensityMapFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 fragmentDensityMap; + VkBool32 fragmentDensityMapDynamic; + VkBool32 fragmentDensityMapNonSubsampledImages; +} VkPhysicalDeviceFragmentDensityMapFeaturesEXT; + +typedef struct VkPhysicalDeviceFragmentDensityMapPropertiesEXT { + VkStructureType sType; + void* pNext; + VkExtent2D minFragmentDensityTexelSize; + VkExtent2D maxFragmentDensityTexelSize; + VkBool32 fragmentDensityInvocations; +} VkPhysicalDeviceFragmentDensityMapPropertiesEXT; + +typedef struct VkRenderPassFragmentDensityMapCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkAttachmentReference fragmentDensityMapAttachment; +} VkRenderPassFragmentDensityMapCreateInfoEXT; + +typedef struct VkRenderingFragmentDensityMapAttachmentInfoEXT { + VkStructureType sType; + const void* pNext; + VkImageView imageView; + VkImageLayout imageLayout; +} VkRenderingFragmentDensityMapAttachmentInfoEXT; + + + +// VK_EXT_scalar_block_layout is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_scalar_block_layout 1 +#define VK_EXT_SCALAR_BLOCK_LAYOUT_SPEC_VERSION 1 +#define VK_EXT_SCALAR_BLOCK_LAYOUT_EXTENSION_NAME "VK_EXT_scalar_block_layout" +typedef VkPhysicalDeviceScalarBlockLayoutFeatures VkPhysicalDeviceScalarBlockLayoutFeaturesEXT; + + + +// VK_GOOGLE_hlsl_functionality1 is a preprocessor guard. Do not pass it to API calls. +#define VK_GOOGLE_hlsl_functionality1 1 +#define VK_GOOGLE_HLSL_FUNCTIONALITY_1_SPEC_VERSION 1 +#define VK_GOOGLE_HLSL_FUNCTIONALITY_1_EXTENSION_NAME "VK_GOOGLE_hlsl_functionality1" +// VK_GOOGLE_HLSL_FUNCTIONALITY1_SPEC_VERSION is a legacy alias +#define VK_GOOGLE_HLSL_FUNCTIONALITY1_SPEC_VERSION VK_GOOGLE_HLSL_FUNCTIONALITY_1_SPEC_VERSION +// VK_GOOGLE_HLSL_FUNCTIONALITY1_EXTENSION_NAME is a legacy alias +#define VK_GOOGLE_HLSL_FUNCTIONALITY1_EXTENSION_NAME VK_GOOGLE_HLSL_FUNCTIONALITY_1_EXTENSION_NAME + + +// VK_GOOGLE_decorate_string is a preprocessor guard. Do not pass it to API calls. +#define VK_GOOGLE_decorate_string 1 +#define VK_GOOGLE_DECORATE_STRING_SPEC_VERSION 1 +#define VK_GOOGLE_DECORATE_STRING_EXTENSION_NAME "VK_GOOGLE_decorate_string" + + +// VK_EXT_subgroup_size_control is a preprocessor guard. Do not pass it to API calls. 
+#define VK_EXT_subgroup_size_control 1 +#define VK_EXT_SUBGROUP_SIZE_CONTROL_SPEC_VERSION 2 +#define VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME "VK_EXT_subgroup_size_control" +typedef VkPhysicalDeviceSubgroupSizeControlFeatures VkPhysicalDeviceSubgroupSizeControlFeaturesEXT; + +typedef VkPhysicalDeviceSubgroupSizeControlProperties VkPhysicalDeviceSubgroupSizeControlPropertiesEXT; + +typedef VkPipelineShaderStageRequiredSubgroupSizeCreateInfo VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT; + + + +// VK_AMD_shader_core_properties2 is a preprocessor guard. Do not pass it to API calls. +#define VK_AMD_shader_core_properties2 1 +#define VK_AMD_SHADER_CORE_PROPERTIES_2_SPEC_VERSION 1 +#define VK_AMD_SHADER_CORE_PROPERTIES_2_EXTENSION_NAME "VK_AMD_shader_core_properties2" + +typedef enum VkShaderCorePropertiesFlagBitsAMD { + VK_SHADER_CORE_PROPERTIES_FLAG_BITS_MAX_ENUM_AMD = 0x7FFFFFFF +} VkShaderCorePropertiesFlagBitsAMD; +typedef VkFlags VkShaderCorePropertiesFlagsAMD; +typedef struct VkPhysicalDeviceShaderCoreProperties2AMD { + VkStructureType sType; + void* pNext; + VkShaderCorePropertiesFlagsAMD shaderCoreFeatures; + uint32_t activeComputeUnitCount; +} VkPhysicalDeviceShaderCoreProperties2AMD; + + + +// VK_AMD_device_coherent_memory is a preprocessor guard. Do not pass it to API calls. +#define VK_AMD_device_coherent_memory 1 +#define VK_AMD_DEVICE_COHERENT_MEMORY_SPEC_VERSION 1 +#define VK_AMD_DEVICE_COHERENT_MEMORY_EXTENSION_NAME "VK_AMD_device_coherent_memory" +typedef struct VkPhysicalDeviceCoherentMemoryFeaturesAMD { + VkStructureType sType; + void* pNext; + VkBool32 deviceCoherentMemory; +} VkPhysicalDeviceCoherentMemoryFeaturesAMD; + + + +// VK_EXT_shader_image_atomic_int64 is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_shader_image_atomic_int64 1 +#define VK_EXT_SHADER_IMAGE_ATOMIC_INT64_SPEC_VERSION 1 +#define VK_EXT_SHADER_IMAGE_ATOMIC_INT64_EXTENSION_NAME "VK_EXT_shader_image_atomic_int64" +typedef struct VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 shaderImageInt64Atomics; + VkBool32 sparseImageInt64Atomics; +} VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT; + + + +// VK_EXT_memory_budget is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_memory_budget 1 +#define VK_EXT_MEMORY_BUDGET_SPEC_VERSION 1 +#define VK_EXT_MEMORY_BUDGET_EXTENSION_NAME "VK_EXT_memory_budget" +typedef struct VkPhysicalDeviceMemoryBudgetPropertiesEXT { + VkStructureType sType; + void* pNext; + VkDeviceSize heapBudget[VK_MAX_MEMORY_HEAPS]; + VkDeviceSize heapUsage[VK_MAX_MEMORY_HEAPS]; +} VkPhysicalDeviceMemoryBudgetPropertiesEXT; + + + +// VK_EXT_memory_priority is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_memory_priority 1 +#define VK_EXT_MEMORY_PRIORITY_SPEC_VERSION 1 +#define VK_EXT_MEMORY_PRIORITY_EXTENSION_NAME "VK_EXT_memory_priority" +typedef struct VkPhysicalDeviceMemoryPriorityFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 memoryPriority; +} VkPhysicalDeviceMemoryPriorityFeaturesEXT; + +typedef struct VkMemoryPriorityAllocateInfoEXT { + VkStructureType sType; + const void* pNext; + float priority; +} VkMemoryPriorityAllocateInfoEXT; + + + +// VK_NV_dedicated_allocation_image_aliasing is a preprocessor guard. Do not pass it to API calls. 
+#define VK_NV_dedicated_allocation_image_aliasing 1 +#define VK_NV_DEDICATED_ALLOCATION_IMAGE_ALIASING_SPEC_VERSION 1 +#define VK_NV_DEDICATED_ALLOCATION_IMAGE_ALIASING_EXTENSION_NAME "VK_NV_dedicated_allocation_image_aliasing" +typedef struct VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 dedicatedAllocationImageAliasing; +} VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV; + + + +// VK_EXT_buffer_device_address is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_buffer_device_address 1 +#define VK_EXT_BUFFER_DEVICE_ADDRESS_SPEC_VERSION 2 +#define VK_EXT_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME "VK_EXT_buffer_device_address" +typedef struct VkPhysicalDeviceBufferDeviceAddressFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 bufferDeviceAddress; + VkBool32 bufferDeviceAddressCaptureReplay; + VkBool32 bufferDeviceAddressMultiDevice; +} VkPhysicalDeviceBufferDeviceAddressFeaturesEXT; + +typedef VkPhysicalDeviceBufferDeviceAddressFeaturesEXT VkPhysicalDeviceBufferAddressFeaturesEXT; + +typedef VkBufferDeviceAddressInfo VkBufferDeviceAddressInfoEXT; + +typedef struct VkBufferDeviceAddressCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkDeviceAddress deviceAddress; +} VkBufferDeviceAddressCreateInfoEXT; + +typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetBufferDeviceAddressEXT)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetBufferDeviceAddressEXT( + VkDevice device, + const VkBufferDeviceAddressInfo* pInfo); +#endif +#endif + + +// VK_EXT_tooling_info is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_tooling_info 1 +#define VK_EXT_TOOLING_INFO_SPEC_VERSION 1 +#define VK_EXT_TOOLING_INFO_EXTENSION_NAME "VK_EXT_tooling_info" +typedef VkToolPurposeFlagBits VkToolPurposeFlagBitsEXT; + +typedef VkToolPurposeFlags VkToolPurposeFlagsEXT; + +typedef VkPhysicalDeviceToolProperties VkPhysicalDeviceToolPropertiesEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceToolPropertiesEXT)(VkPhysicalDevice physicalDevice, uint32_t* pToolCount, VkPhysicalDeviceToolProperties* pToolProperties); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceToolPropertiesEXT( + VkPhysicalDevice physicalDevice, + uint32_t* pToolCount, + VkPhysicalDeviceToolProperties* pToolProperties); +#endif +#endif + + +// VK_EXT_separate_stencil_usage is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_separate_stencil_usage 1 +#define VK_EXT_SEPARATE_STENCIL_USAGE_SPEC_VERSION 1 +#define VK_EXT_SEPARATE_STENCIL_USAGE_EXTENSION_NAME "VK_EXT_separate_stencil_usage" +typedef VkImageStencilUsageCreateInfo VkImageStencilUsageCreateInfoEXT; + + + +// VK_EXT_validation_features is a preprocessor guard. Do not pass it to API calls. 
+#define VK_EXT_validation_features 1 +#define VK_EXT_VALIDATION_FEATURES_SPEC_VERSION 6 +#define VK_EXT_VALIDATION_FEATURES_EXTENSION_NAME "VK_EXT_validation_features" + +typedef enum VkValidationFeatureEnableEXT { + VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT = 0, + VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT = 1, + VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT = 2, + VK_VALIDATION_FEATURE_ENABLE_DEBUG_PRINTF_EXT = 3, + VK_VALIDATION_FEATURE_ENABLE_SYNCHRONIZATION_VALIDATION_EXT = 4, + VK_VALIDATION_FEATURE_ENABLE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkValidationFeatureEnableEXT; + +typedef enum VkValidationFeatureDisableEXT { + VK_VALIDATION_FEATURE_DISABLE_ALL_EXT = 0, + VK_VALIDATION_FEATURE_DISABLE_SHADERS_EXT = 1, + VK_VALIDATION_FEATURE_DISABLE_THREAD_SAFETY_EXT = 2, + VK_VALIDATION_FEATURE_DISABLE_API_PARAMETERS_EXT = 3, + VK_VALIDATION_FEATURE_DISABLE_OBJECT_LIFETIMES_EXT = 4, + VK_VALIDATION_FEATURE_DISABLE_CORE_CHECKS_EXT = 5, + VK_VALIDATION_FEATURE_DISABLE_UNIQUE_HANDLES_EXT = 6, + VK_VALIDATION_FEATURE_DISABLE_SHADER_VALIDATION_CACHE_EXT = 7, + VK_VALIDATION_FEATURE_DISABLE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkValidationFeatureDisableEXT; +typedef struct VkValidationFeaturesEXT { + VkStructureType sType; + const void* pNext; + uint32_t enabledValidationFeatureCount; + const VkValidationFeatureEnableEXT* pEnabledValidationFeatures; + uint32_t disabledValidationFeatureCount; + const VkValidationFeatureDisableEXT* pDisabledValidationFeatures; +} VkValidationFeaturesEXT; + + + +// VK_NV_cooperative_matrix is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_cooperative_matrix 1 +#define VK_NV_COOPERATIVE_MATRIX_SPEC_VERSION 1 +#define VK_NV_COOPERATIVE_MATRIX_EXTENSION_NAME "VK_NV_cooperative_matrix" +typedef VkComponentTypeKHR VkComponentTypeNV; + +typedef VkScopeKHR VkScopeNV; + +typedef struct VkCooperativeMatrixPropertiesNV { + VkStructureType sType; + void* pNext; + uint32_t MSize; + uint32_t NSize; + uint32_t KSize; + VkComponentTypeNV AType; + VkComponentTypeNV BType; + VkComponentTypeNV CType; + VkComponentTypeNV DType; + VkScopeNV scope; +} VkCooperativeMatrixPropertiesNV; + +typedef struct VkPhysicalDeviceCooperativeMatrixFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 cooperativeMatrix; + VkBool32 cooperativeMatrixRobustBufferAccess; +} VkPhysicalDeviceCooperativeMatrixFeaturesNV; + +typedef struct VkPhysicalDeviceCooperativeMatrixPropertiesNV { + VkStructureType sType; + void* pNext; + VkShaderStageFlags cooperativeMatrixSupportedStages; +} VkPhysicalDeviceCooperativeMatrixPropertiesNV; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkCooperativeMatrixPropertiesNV* pProperties); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceCooperativeMatrixPropertiesNV( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkCooperativeMatrixPropertiesNV* pProperties); +#endif +#endif + + +// VK_NV_coverage_reduction_mode is a preprocessor guard. Do not pass it to API calls. 
+#define VK_NV_coverage_reduction_mode 1 +#define VK_NV_COVERAGE_REDUCTION_MODE_SPEC_VERSION 1 +#define VK_NV_COVERAGE_REDUCTION_MODE_EXTENSION_NAME "VK_NV_coverage_reduction_mode" + +typedef enum VkCoverageReductionModeNV { + VK_COVERAGE_REDUCTION_MODE_MERGE_NV = 0, + VK_COVERAGE_REDUCTION_MODE_TRUNCATE_NV = 1, + VK_COVERAGE_REDUCTION_MODE_MAX_ENUM_NV = 0x7FFFFFFF +} VkCoverageReductionModeNV; +typedef VkFlags VkPipelineCoverageReductionStateCreateFlagsNV; +typedef struct VkPhysicalDeviceCoverageReductionModeFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 coverageReductionMode; +} VkPhysicalDeviceCoverageReductionModeFeaturesNV; + +typedef struct VkPipelineCoverageReductionStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineCoverageReductionStateCreateFlagsNV flags; + VkCoverageReductionModeNV coverageReductionMode; +} VkPipelineCoverageReductionStateCreateInfoNV; + +typedef struct VkFramebufferMixedSamplesCombinationNV { + VkStructureType sType; + void* pNext; + VkCoverageReductionModeNV coverageReductionMode; + VkSampleCountFlagBits rasterizationSamples; + VkSampleCountFlags depthStencilSamples; + VkSampleCountFlags colorSamples; +} VkFramebufferMixedSamplesCombinationNV; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV)(VkPhysicalDevice physicalDevice, uint32_t* pCombinationCount, VkFramebufferMixedSamplesCombinationNV* pCombinations); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV( + VkPhysicalDevice physicalDevice, + uint32_t* pCombinationCount, + VkFramebufferMixedSamplesCombinationNV* pCombinations); +#endif +#endif + + +// VK_EXT_fragment_shader_interlock is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_fragment_shader_interlock 1 +#define VK_EXT_FRAGMENT_SHADER_INTERLOCK_SPEC_VERSION 1 +#define VK_EXT_FRAGMENT_SHADER_INTERLOCK_EXTENSION_NAME "VK_EXT_fragment_shader_interlock" +typedef struct VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 fragmentShaderSampleInterlock; + VkBool32 fragmentShaderPixelInterlock; + VkBool32 fragmentShaderShadingRateInterlock; +} VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT; + + + +// VK_EXT_ycbcr_image_arrays is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_ycbcr_image_arrays 1 +#define VK_EXT_YCBCR_IMAGE_ARRAYS_SPEC_VERSION 1 +#define VK_EXT_YCBCR_IMAGE_ARRAYS_EXTENSION_NAME "VK_EXT_ycbcr_image_arrays" +typedef struct VkPhysicalDeviceYcbcrImageArraysFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 ycbcrImageArrays; +} VkPhysicalDeviceYcbcrImageArraysFeaturesEXT; + + + +// VK_EXT_provoking_vertex is a preprocessor guard. Do not pass it to API calls. 
+#define VK_EXT_provoking_vertex 1 +#define VK_EXT_PROVOKING_VERTEX_SPEC_VERSION 1 +#define VK_EXT_PROVOKING_VERTEX_EXTENSION_NAME "VK_EXT_provoking_vertex" + +typedef enum VkProvokingVertexModeEXT { + VK_PROVOKING_VERTEX_MODE_FIRST_VERTEX_EXT = 0, + VK_PROVOKING_VERTEX_MODE_LAST_VERTEX_EXT = 1, + VK_PROVOKING_VERTEX_MODE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkProvokingVertexModeEXT; +typedef struct VkPhysicalDeviceProvokingVertexFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 provokingVertexLast; + VkBool32 transformFeedbackPreservesProvokingVertex; +} VkPhysicalDeviceProvokingVertexFeaturesEXT; + +typedef struct VkPhysicalDeviceProvokingVertexPropertiesEXT { + VkStructureType sType; + void* pNext; + VkBool32 provokingVertexModePerPipeline; + VkBool32 transformFeedbackPreservesTriangleFanProvokingVertex; +} VkPhysicalDeviceProvokingVertexPropertiesEXT; + +typedef struct VkPipelineRasterizationProvokingVertexStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkProvokingVertexModeEXT provokingVertexMode; +} VkPipelineRasterizationProvokingVertexStateCreateInfoEXT; + + + +// VK_EXT_headless_surface is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_headless_surface 1 +#define VK_EXT_HEADLESS_SURFACE_SPEC_VERSION 1 +#define VK_EXT_HEADLESS_SURFACE_EXTENSION_NAME "VK_EXT_headless_surface" +typedef VkFlags VkHeadlessSurfaceCreateFlagsEXT; +typedef struct VkHeadlessSurfaceCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkHeadlessSurfaceCreateFlagsEXT flags; +} VkHeadlessSurfaceCreateInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateHeadlessSurfaceEXT)(VkInstance instance, const VkHeadlessSurfaceCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateHeadlessSurfaceEXT( + VkInstance instance, + const VkHeadlessSurfaceCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif +#endif + + +// VK_EXT_line_rasterization is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_line_rasterization 1 +#define VK_EXT_LINE_RASTERIZATION_SPEC_VERSION 1 +#define VK_EXT_LINE_RASTERIZATION_EXTENSION_NAME "VK_EXT_line_rasterization" +typedef VkLineRasterizationMode VkLineRasterizationModeEXT; + +typedef VkPhysicalDeviceLineRasterizationFeatures VkPhysicalDeviceLineRasterizationFeaturesEXT; + +typedef VkPhysicalDeviceLineRasterizationProperties VkPhysicalDeviceLineRasterizationPropertiesEXT; + +typedef VkPipelineRasterizationLineStateCreateInfo VkPipelineRasterizationLineStateCreateInfoEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdSetLineStippleEXT)(VkCommandBuffer commandBuffer, uint32_t lineStippleFactor, uint16_t lineStipplePattern); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetLineStippleEXT( + VkCommandBuffer commandBuffer, + uint32_t lineStippleFactor, + uint16_t lineStipplePattern); +#endif +#endif + + +// VK_EXT_shader_atomic_float is a preprocessor guard. Do not pass it to API calls. 
+#define VK_EXT_shader_atomic_float 1 +#define VK_EXT_SHADER_ATOMIC_FLOAT_SPEC_VERSION 1 +#define VK_EXT_SHADER_ATOMIC_FLOAT_EXTENSION_NAME "VK_EXT_shader_atomic_float" +typedef struct VkPhysicalDeviceShaderAtomicFloatFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 shaderBufferFloat32Atomics; + VkBool32 shaderBufferFloat32AtomicAdd; + VkBool32 shaderBufferFloat64Atomics; + VkBool32 shaderBufferFloat64AtomicAdd; + VkBool32 shaderSharedFloat32Atomics; + VkBool32 shaderSharedFloat32AtomicAdd; + VkBool32 shaderSharedFloat64Atomics; + VkBool32 shaderSharedFloat64AtomicAdd; + VkBool32 shaderImageFloat32Atomics; + VkBool32 shaderImageFloat32AtomicAdd; + VkBool32 sparseImageFloat32Atomics; + VkBool32 sparseImageFloat32AtomicAdd; +} VkPhysicalDeviceShaderAtomicFloatFeaturesEXT; + + + +// VK_EXT_host_query_reset is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_host_query_reset 1 +#define VK_EXT_HOST_QUERY_RESET_SPEC_VERSION 1 +#define VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME "VK_EXT_host_query_reset" +typedef VkPhysicalDeviceHostQueryResetFeatures VkPhysicalDeviceHostQueryResetFeaturesEXT; + +typedef void (VKAPI_PTR *PFN_vkResetQueryPoolEXT)(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkResetQueryPoolEXT( + VkDevice device, + VkQueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount); +#endif +#endif + + +// VK_EXT_index_type_uint8 is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_index_type_uint8 1 +#define VK_EXT_INDEX_TYPE_UINT8_SPEC_VERSION 1 +#define VK_EXT_INDEX_TYPE_UINT8_EXTENSION_NAME "VK_EXT_index_type_uint8" +typedef VkPhysicalDeviceIndexTypeUint8Features VkPhysicalDeviceIndexTypeUint8FeaturesEXT; + + + +// VK_EXT_extended_dynamic_state is a preprocessor guard. Do not pass it to API calls. 
+#define VK_EXT_extended_dynamic_state 1 +#define VK_EXT_EXTENDED_DYNAMIC_STATE_SPEC_VERSION 1 +#define VK_EXT_EXTENDED_DYNAMIC_STATE_EXTENSION_NAME "VK_EXT_extended_dynamic_state" +typedef struct VkPhysicalDeviceExtendedDynamicStateFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 extendedDynamicState; +} VkPhysicalDeviceExtendedDynamicStateFeaturesEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdSetCullModeEXT)(VkCommandBuffer commandBuffer, VkCullModeFlags cullMode); +typedef void (VKAPI_PTR *PFN_vkCmdSetFrontFaceEXT)(VkCommandBuffer commandBuffer, VkFrontFace frontFace); +typedef void (VKAPI_PTR *PFN_vkCmdSetPrimitiveTopologyEXT)(VkCommandBuffer commandBuffer, VkPrimitiveTopology primitiveTopology); +typedef void (VKAPI_PTR *PFN_vkCmdSetViewportWithCountEXT)(VkCommandBuffer commandBuffer, uint32_t viewportCount, const VkViewport* pViewports); +typedef void (VKAPI_PTR *PFN_vkCmdSetScissorWithCountEXT)(VkCommandBuffer commandBuffer, uint32_t scissorCount, const VkRect2D* pScissors); +typedef void (VKAPI_PTR *PFN_vkCmdBindVertexBuffers2EXT)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets, const VkDeviceSize* pSizes, const VkDeviceSize* pStrides); +typedef void (VKAPI_PTR *PFN_vkCmdSetDepthTestEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 depthTestEnable); +typedef void (VKAPI_PTR *PFN_vkCmdSetDepthWriteEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 depthWriteEnable); +typedef void (VKAPI_PTR *PFN_vkCmdSetDepthCompareOpEXT)(VkCommandBuffer commandBuffer, VkCompareOp depthCompareOp); +typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBoundsTestEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 depthBoundsTestEnable); +typedef void (VKAPI_PTR *PFN_vkCmdSetStencilTestEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 stencilTestEnable); +typedef void (VKAPI_PTR *PFN_vkCmdSetStencilOpEXT)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, VkStencilOp failOp, VkStencilOp passOp, VkStencilOp depthFailOp, VkCompareOp compareOp); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetCullModeEXT( + VkCommandBuffer commandBuffer, + VkCullModeFlags cullMode); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetFrontFaceEXT( + VkCommandBuffer commandBuffer, + VkFrontFace frontFace); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetPrimitiveTopologyEXT( + VkCommandBuffer commandBuffer, + VkPrimitiveTopology primitiveTopology); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetViewportWithCountEXT( + VkCommandBuffer commandBuffer, + uint32_t viewportCount, + const VkViewport* pViewports); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetScissorWithCountEXT( + VkCommandBuffer commandBuffer, + uint32_t scissorCount, + const VkRect2D* pScissors); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers2EXT( + VkCommandBuffer commandBuffer, + uint32_t firstBinding, + uint32_t bindingCount, + const VkBuffer* pBuffers, + const VkDeviceSize* pOffsets, + const VkDeviceSize* pSizes, + const VkDeviceSize* pStrides); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthTestEnableEXT( + VkCommandBuffer commandBuffer, + VkBool32 depthTestEnable); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthWriteEnableEXT( + VkCommandBuffer 
commandBuffer, + VkBool32 depthWriteEnable); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthCompareOpEXT( + VkCommandBuffer commandBuffer, + VkCompareOp depthCompareOp); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBoundsTestEnableEXT( + VkCommandBuffer commandBuffer, + VkBool32 depthBoundsTestEnable); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilTestEnableEXT( + VkCommandBuffer commandBuffer, + VkBool32 stencilTestEnable); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilOpEXT( + VkCommandBuffer commandBuffer, + VkStencilFaceFlags faceMask, + VkStencilOp failOp, + VkStencilOp passOp, + VkStencilOp depthFailOp, + VkCompareOp compareOp); +#endif +#endif + + +// VK_EXT_host_image_copy is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_host_image_copy 1 +#define VK_EXT_HOST_IMAGE_COPY_SPEC_VERSION 1 +#define VK_EXT_HOST_IMAGE_COPY_EXTENSION_NAME "VK_EXT_host_image_copy" +typedef VkHostImageCopyFlagBits VkHostImageCopyFlagBitsEXT; + +typedef VkHostImageCopyFlags VkHostImageCopyFlagsEXT; + +typedef VkPhysicalDeviceHostImageCopyFeatures VkPhysicalDeviceHostImageCopyFeaturesEXT; + +typedef VkPhysicalDeviceHostImageCopyProperties VkPhysicalDeviceHostImageCopyPropertiesEXT; + +typedef VkMemoryToImageCopy VkMemoryToImageCopyEXT; + +typedef VkImageToMemoryCopy VkImageToMemoryCopyEXT; + +typedef VkCopyMemoryToImageInfo VkCopyMemoryToImageInfoEXT; + +typedef VkCopyImageToMemoryInfo VkCopyImageToMemoryInfoEXT; + +typedef VkCopyImageToImageInfo VkCopyImageToImageInfoEXT; + +typedef VkHostImageLayoutTransitionInfo VkHostImageLayoutTransitionInfoEXT; + +typedef VkSubresourceHostMemcpySize VkSubresourceHostMemcpySizeEXT; + +typedef VkHostImageCopyDevicePerformanceQuery VkHostImageCopyDevicePerformanceQueryEXT; + +typedef VkSubresourceLayout2 VkSubresourceLayout2EXT; + +typedef VkImageSubresource2 VkImageSubresource2EXT; + +typedef VkResult (VKAPI_PTR *PFN_vkCopyMemoryToImageEXT)(VkDevice device, const VkCopyMemoryToImageInfo* pCopyMemoryToImageInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCopyImageToMemoryEXT)(VkDevice device, const VkCopyImageToMemoryInfo* pCopyImageToMemoryInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCopyImageToImageEXT)(VkDevice device, const VkCopyImageToImageInfo* pCopyImageToImageInfo); +typedef VkResult (VKAPI_PTR *PFN_vkTransitionImageLayoutEXT)(VkDevice device, uint32_t transitionCount, const VkHostImageLayoutTransitionInfo* pTransitions); +typedef void (VKAPI_PTR *PFN_vkGetImageSubresourceLayout2EXT)(VkDevice device, VkImage image, const VkImageSubresource2* pSubresource, VkSubresourceLayout2* pLayout); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCopyMemoryToImageEXT( + VkDevice device, + const VkCopyMemoryToImageInfo* pCopyMemoryToImageInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCopyImageToMemoryEXT( + VkDevice device, + const VkCopyImageToMemoryInfo* pCopyImageToMemoryInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCopyImageToImageEXT( + VkDevice device, + const VkCopyImageToImageInfo* pCopyImageToImageInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkTransitionImageLayoutEXT( + VkDevice device, + uint32_t transitionCount, + const VkHostImageLayoutTransitionInfo* pTransitions); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES 
+VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout2EXT(
+    VkDevice device,
+    VkImage image,
+    const VkImageSubresource2* pSubresource,
+    VkSubresourceLayout2* pLayout);
+#endif
+#endif
+
+
+// VK_EXT_map_memory_placed is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_map_memory_placed 1
+#define VK_EXT_MAP_MEMORY_PLACED_SPEC_VERSION 1
+#define VK_EXT_MAP_MEMORY_PLACED_EXTENSION_NAME "VK_EXT_map_memory_placed"
+typedef struct VkPhysicalDeviceMapMemoryPlacedFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 memoryMapPlaced;
+    VkBool32 memoryMapRangePlaced;
+    VkBool32 memoryUnmapReserve;
+} VkPhysicalDeviceMapMemoryPlacedFeaturesEXT;
+
+typedef struct VkPhysicalDeviceMapMemoryPlacedPropertiesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkDeviceSize minPlacedMemoryMapAlignment;
+} VkPhysicalDeviceMapMemoryPlacedPropertiesEXT;
+
+typedef struct VkMemoryMapPlacedInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    void* pPlacedAddress;
+} VkMemoryMapPlacedInfoEXT;
+
+
+
+// VK_EXT_shader_atomic_float2 is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_shader_atomic_float2 1
+#define VK_EXT_SHADER_ATOMIC_FLOAT_2_SPEC_VERSION 1
+#define VK_EXT_SHADER_ATOMIC_FLOAT_2_EXTENSION_NAME "VK_EXT_shader_atomic_float2"
+typedef struct VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 shaderBufferFloat16Atomics;
+    VkBool32 shaderBufferFloat16AtomicAdd;
+    VkBool32 shaderBufferFloat16AtomicMinMax;
+    VkBool32 shaderBufferFloat32AtomicMinMax;
+    VkBool32 shaderBufferFloat64AtomicMinMax;
+    VkBool32 shaderSharedFloat16Atomics;
+    VkBool32 shaderSharedFloat16AtomicAdd;
+    VkBool32 shaderSharedFloat16AtomicMinMax;
+    VkBool32 shaderSharedFloat32AtomicMinMax;
+    VkBool32 shaderSharedFloat64AtomicMinMax;
+    VkBool32 shaderImageFloat32AtomicMinMax;
+    VkBool32 sparseImageFloat32AtomicMinMax;
+} VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT;
+
+
+
+// VK_EXT_surface_maintenance1 is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_surface_maintenance1 1
+#define VK_EXT_SURFACE_MAINTENANCE_1_SPEC_VERSION 1
+#define VK_EXT_SURFACE_MAINTENANCE_1_EXTENSION_NAME "VK_EXT_surface_maintenance1"
+typedef VkPresentScalingFlagBitsKHR VkPresentScalingFlagBitsEXT;
+
+typedef VkPresentScalingFlagsKHR VkPresentScalingFlagsEXT;
+
+typedef VkPresentGravityFlagBitsKHR VkPresentGravityFlagBitsEXT;
+
+typedef VkPresentGravityFlagsKHR VkPresentGravityFlagsEXT;
+
+typedef VkSurfacePresentModeKHR VkSurfacePresentModeEXT;
+
+typedef VkSurfacePresentScalingCapabilitiesKHR VkSurfacePresentScalingCapabilitiesEXT;
+
+typedef VkSurfacePresentModeCompatibilityKHR VkSurfacePresentModeCompatibilityEXT;
+
+
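Editorial note, not part of the vendored header: the vkCopyMemoryToImageEXT entry point declared earlier in this hunk lets an engine upload texels without a staging buffer. A minimal sketch, assuming a VkDevice `device`, a VkImage `image` created with VK_IMAGE_USAGE_HOST_TRANSFER_BIT_EXT and already in VK_IMAGE_LAYOUT_GENERAL, tightly packed `pixels` of extent `w`x`h`, the hostImageCopy feature enabled at device creation, and the function pointer fetched with vkGetDeviceProcAddr:

    PFN_vkCopyMemoryToImageEXT pfnCopyMemoryToImage =
        (PFN_vkCopyMemoryToImageEXT)vkGetDeviceProcAddr(device, "vkCopyMemoryToImageEXT");

    VkMemoryToImageCopy region = {0};
    region.sType = VK_STRUCTURE_TYPE_MEMORY_TO_IMAGE_COPY;
    region.pHostPointer = pixels;                     /* source texels in host memory */
    region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    region.imageSubresource.layerCount = 1;
    region.imageExtent = (VkExtent3D){ w, h, 1 };

    VkCopyMemoryToImageInfo info = {0};
    info.sType = VK_STRUCTURE_TYPE_COPY_MEMORY_TO_IMAGE_INFO;
    info.dstImage = image;
    info.dstImageLayout = VK_IMAGE_LAYOUT_GENERAL;    /* must match the image's current layout */
    info.regionCount = 1;
    info.pRegions = &region;

    if (pfnCopyMemoryToImage(device, &info) != VK_SUCCESS) {
        /* fall back to the classic staging-buffer upload path */
    }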
+// VK_EXT_swapchain_maintenance1 is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_swapchain_maintenance1 1
+#define VK_EXT_SWAPCHAIN_MAINTENANCE_1_SPEC_VERSION 1
+#define VK_EXT_SWAPCHAIN_MAINTENANCE_1_EXTENSION_NAME "VK_EXT_swapchain_maintenance1"
+typedef VkPhysicalDeviceSwapchainMaintenance1FeaturesKHR VkPhysicalDeviceSwapchainMaintenance1FeaturesEXT;
+
+typedef VkSwapchainPresentFenceInfoKHR VkSwapchainPresentFenceInfoEXT;
+
+typedef VkSwapchainPresentModesCreateInfoKHR VkSwapchainPresentModesCreateInfoEXT;
+
+typedef VkSwapchainPresentModeInfoKHR VkSwapchainPresentModeInfoEXT;
+
+typedef VkSwapchainPresentScalingCreateInfoKHR VkSwapchainPresentScalingCreateInfoEXT;
+
+typedef VkReleaseSwapchainImagesInfoKHR VkReleaseSwapchainImagesInfoEXT;
+
+typedef VkResult (VKAPI_PTR *PFN_vkReleaseSwapchainImagesEXT)(VkDevice device, const VkReleaseSwapchainImagesInfoKHR* pReleaseInfo);
+
+#ifndef VK_NO_PROTOTYPES
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkReleaseSwapchainImagesEXT(
+    VkDevice device,
+    const VkReleaseSwapchainImagesInfoKHR* pReleaseInfo);
+#endif
+#endif
+
+
+// VK_EXT_shader_demote_to_helper_invocation is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_shader_demote_to_helper_invocation 1
+#define VK_EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_SPEC_VERSION 1
+#define VK_EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_EXTENSION_NAME "VK_EXT_shader_demote_to_helper_invocation"
+typedef VkPhysicalDeviceShaderDemoteToHelperInvocationFeatures VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT;
+
+
+
+// VK_NV_device_generated_commands is a preprocessor guard. Do not pass it to API calls.
+#define VK_NV_device_generated_commands 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkIndirectCommandsLayoutNV)
+#define VK_NV_DEVICE_GENERATED_COMMANDS_SPEC_VERSION 3
+#define VK_NV_DEVICE_GENERATED_COMMANDS_EXTENSION_NAME "VK_NV_device_generated_commands"
+
+typedef enum VkIndirectCommandsTokenTypeNV {
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_SHADER_GROUP_NV = 0,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_STATE_FLAGS_NV = 1,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_INDEX_BUFFER_NV = 2,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_VERTEX_BUFFER_NV = 3,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_PUSH_CONSTANT_NV = 4,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_INDEXED_NV = 5,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_NV = 6,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_TASKS_NV = 7,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_PUSH_DATA_NV = 1000135000,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_MESH_TASKS_NV = 1000328000,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_PIPELINE_NV = 1000428003,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_DISPATCH_NV = 1000428004,
+    VK_INDIRECT_COMMANDS_TOKEN_TYPE_MAX_ENUM_NV = 0x7FFFFFFF
+} VkIndirectCommandsTokenTypeNV;
+
+typedef enum VkIndirectStateFlagBitsNV {
+    VK_INDIRECT_STATE_FLAG_FRONTFACE_BIT_NV = 0x00000001,
+    VK_INDIRECT_STATE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF
+} VkIndirectStateFlagBitsNV;
+typedef VkFlags VkIndirectStateFlagsNV;
+
+typedef enum VkIndirectCommandsLayoutUsageFlagBitsNV {
+    VK_INDIRECT_COMMANDS_LAYOUT_USAGE_EXPLICIT_PREPROCESS_BIT_NV = 0x00000001,
+    VK_INDIRECT_COMMANDS_LAYOUT_USAGE_INDEXED_SEQUENCES_BIT_NV = 0x00000002,
+    VK_INDIRECT_COMMANDS_LAYOUT_USAGE_UNORDERED_SEQUENCES_BIT_NV = 0x00000004,
+    VK_INDIRECT_COMMANDS_LAYOUT_USAGE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF
+} VkIndirectCommandsLayoutUsageFlagBitsNV;
+typedef VkFlags VkIndirectCommandsLayoutUsageFlagsNV;
+typedef struct VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t maxGraphicsShaderGroupCount;
+    uint32_t maxIndirectSequenceCount;
+    uint32_t maxIndirectCommandsTokenCount;
+    uint32_t maxIndirectCommandsStreamCount;
+    uint32_t maxIndirectCommandsTokenOffset;
+    uint32_t maxIndirectCommandsStreamStride;
+    uint32_t minSequencesCountBufferOffsetAlignment;
+    uint32_t minSequencesIndexBufferOffsetAlignment;
+    uint32_t minIndirectCommandsBufferOffsetAlignment;
+} VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV;
+
+typedef struct VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 deviceGeneratedCommands;
+} VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV;
+
+typedef struct VkGraphicsShaderGroupCreateInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t stageCount;
+    const VkPipelineShaderStageCreateInfo* pStages;
+    const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
+    const VkPipelineTessellationStateCreateInfo* pTessellationState;
+} VkGraphicsShaderGroupCreateInfoNV;
+
+typedef struct VkGraphicsPipelineShaderGroupsCreateInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t groupCount;
+    const VkGraphicsShaderGroupCreateInfoNV* pGroups;
+    uint32_t pipelineCount;
+    const VkPipeline* pPipelines;
+} VkGraphicsPipelineShaderGroupsCreateInfoNV;
+
+typedef struct VkBindShaderGroupIndirectCommandNV {
+    uint32_t groupIndex;
+} VkBindShaderGroupIndirectCommandNV;
+
+typedef struct VkBindIndexBufferIndirectCommandNV {
+    VkDeviceAddress bufferAddress;
+    uint32_t size;
+    VkIndexType indexType;
+} VkBindIndexBufferIndirectCommandNV;
+
+typedef struct VkBindVertexBufferIndirectCommandNV {
+    VkDeviceAddress bufferAddress;
+    uint32_t size;
+    uint32_t stride;
+} VkBindVertexBufferIndirectCommandNV;
+
+typedef struct VkSetStateFlagsIndirectCommandNV {
+    uint32_t data;
+} VkSetStateFlagsIndirectCommandNV;
+
+typedef struct VkIndirectCommandsStreamNV {
+    VkBuffer buffer;
+    VkDeviceSize offset;
+} VkIndirectCommandsStreamNV;
+
+typedef struct VkIndirectCommandsLayoutTokenNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkIndirectCommandsTokenTypeNV tokenType;
+    uint32_t stream;
+    uint32_t offset;
+    uint32_t vertexBindingUnit;
+    VkBool32 vertexDynamicStride;
+    VkPipelineLayout pushconstantPipelineLayout;
+    VkShaderStageFlags pushconstantShaderStageFlags;
+    uint32_t pushconstantOffset;
+    uint32_t pushconstantSize;
+    VkIndirectStateFlagsNV indirectStateFlags;
+    uint32_t indexTypeCount;
+    const VkIndexType* pIndexTypes;
+    const uint32_t* pIndexTypeValues;
+} VkIndirectCommandsLayoutTokenNV;
+
+typedef struct VkIndirectCommandsLayoutCreateInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkIndirectCommandsLayoutUsageFlagsNV flags;
+    VkPipelineBindPoint pipelineBindPoint;
+    uint32_t tokenCount;
+    const VkIndirectCommandsLayoutTokenNV* pTokens;
+    uint32_t streamCount;
+    const uint32_t* pStreamStrides;
+} VkIndirectCommandsLayoutCreateInfoNV;
+
+typedef struct VkGeneratedCommandsInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineBindPoint pipelineBindPoint;
+    VkPipeline pipeline;
+    VkIndirectCommandsLayoutNV indirectCommandsLayout;
+    uint32_t streamCount;
+    const VkIndirectCommandsStreamNV* pStreams;
+    uint32_t sequencesCount;
+    VkBuffer preprocessBuffer;
+    VkDeviceSize preprocessOffset;
+    VkDeviceSize preprocessSize;
+    VkBuffer sequencesCountBuffer;
+    VkDeviceSize sequencesCountOffset;
+    VkBuffer sequencesIndexBuffer;
+    VkDeviceSize sequencesIndexOffset;
+} VkGeneratedCommandsInfoNV;
+
+typedef struct VkGeneratedCommandsMemoryRequirementsInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineBindPoint pipelineBindPoint;
+    VkPipeline pipeline;
+    VkIndirectCommandsLayoutNV indirectCommandsLayout;
+    uint32_t maxSequencesCount;
+} VkGeneratedCommandsMemoryRequirementsInfoNV;
+
+typedef void (VKAPI_PTR *PFN_vkGetGeneratedCommandsMemoryRequirementsNV)(VkDevice device, const VkGeneratedCommandsMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkCmdPreprocessGeneratedCommandsNV)(VkCommandBuffer commandBuffer, const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdExecuteGeneratedCommandsNV)(VkCommandBuffer commandBuffer, VkBool32 isPreprocessed, const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdBindPipelineShaderGroupNV)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline, uint32_t groupIndex);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateIndirectCommandsLayoutNV)(VkDevice device, const VkIndirectCommandsLayoutCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkIndirectCommandsLayoutNV* pIndirectCommandsLayout);
+typedef void (VKAPI_PTR *PFN_vkDestroyIndirectCommandsLayoutNV)(VkDevice device, VkIndirectCommandsLayoutNV indirectCommandsLayout, const VkAllocationCallbacks* pAllocator);
+
+#ifndef VK_NO_PROTOTYPES
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetGeneratedCommandsMemoryRequirementsNV(
+    VkDevice device,
+    const VkGeneratedCommandsMemoryRequirementsInfoNV* pInfo,
+    VkMemoryRequirements2* pMemoryRequirements);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdPreprocessGeneratedCommandsNV(
+    VkCommandBuffer commandBuffer,
+    const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdExecuteGeneratedCommandsNV(
+    VkCommandBuffer commandBuffer,
+    VkBool32 isPreprocessed,
+    const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdBindPipelineShaderGroupNV(
+    VkCommandBuffer commandBuffer,
+    VkPipelineBindPoint pipelineBindPoint,
+    VkPipeline pipeline,
+    uint32_t groupIndex);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateIndirectCommandsLayoutNV(
+    VkDevice device,
+    const VkIndirectCommandsLayoutCreateInfoNV* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkIndirectCommandsLayoutNV* pIndirectCommandsLayout);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkDestroyIndirectCommandsLayoutNV(
+    VkDevice device,
+    VkIndirectCommandsLayoutNV indirectCommandsLayout,
+    const VkAllocationCallbacks* pAllocator);
+#endif
+#endif
+
+
+// VK_NV_inherited_viewport_scissor is a preprocessor guard. Do not pass it to API calls.
+#define VK_NV_inherited_viewport_scissor 1
+#define VK_NV_INHERITED_VIEWPORT_SCISSOR_SPEC_VERSION 1
+#define VK_NV_INHERITED_VIEWPORT_SCISSOR_EXTENSION_NAME "VK_NV_inherited_viewport_scissor"
+typedef struct VkPhysicalDeviceInheritedViewportScissorFeaturesNV {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 inheritedViewportScissor2D;
+} VkPhysicalDeviceInheritedViewportScissorFeaturesNV;
+
+typedef struct VkCommandBufferInheritanceViewportScissorInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkBool32 viewportScissor2D;
+    uint32_t viewportDepthCount;
+    const VkViewport* pViewportDepths;
+} VkCommandBufferInheritanceViewportScissorInfoNV;
+
+
+
+// VK_EXT_texel_buffer_alignment is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_texel_buffer_alignment 1
+#define VK_EXT_TEXEL_BUFFER_ALIGNMENT_SPEC_VERSION 1
+#define VK_EXT_TEXEL_BUFFER_ALIGNMENT_EXTENSION_NAME "VK_EXT_texel_buffer_alignment"
+typedef struct VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 texelBufferAlignment;
+} VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT;
+
+typedef VkPhysicalDeviceTexelBufferAlignmentProperties VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT;
+
+
+
+// VK_QCOM_render_pass_transform is a preprocessor guard. Do not pass it to API calls.
+#define VK_QCOM_render_pass_transform 1
+#define VK_QCOM_RENDER_PASS_TRANSFORM_SPEC_VERSION 5
+#define VK_QCOM_RENDER_PASS_TRANSFORM_EXTENSION_NAME "VK_QCOM_render_pass_transform"
+typedef struct VkRenderPassTransformBeginInfoQCOM {
+    VkStructureType sType;
+    const void* pNext;
+    VkSurfaceTransformFlagBitsKHR transform;
+} VkRenderPassTransformBeginInfoQCOM;
+
+typedef struct VkCommandBufferInheritanceRenderPassTransformInfoQCOM {
+    VkStructureType sType;
+    const void* pNext;
+    VkSurfaceTransformFlagBitsKHR transform;
+    VkRect2D renderArea;
+} VkCommandBufferInheritanceRenderPassTransformInfoQCOM;
+
+
+
+// VK_EXT_depth_bias_control is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_depth_bias_control 1
+#define VK_EXT_DEPTH_BIAS_CONTROL_SPEC_VERSION 1
+#define VK_EXT_DEPTH_BIAS_CONTROL_EXTENSION_NAME "VK_EXT_depth_bias_control"
+
+typedef enum VkDepthBiasRepresentationEXT {
+    VK_DEPTH_BIAS_REPRESENTATION_LEAST_REPRESENTABLE_VALUE_FORMAT_EXT = 0,
+    VK_DEPTH_BIAS_REPRESENTATION_LEAST_REPRESENTABLE_VALUE_FORCE_UNORM_EXT = 1,
+    VK_DEPTH_BIAS_REPRESENTATION_FLOAT_EXT = 2,
+    VK_DEPTH_BIAS_REPRESENTATION_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkDepthBiasRepresentationEXT;
+typedef struct VkPhysicalDeviceDepthBiasControlFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 depthBiasControl;
+    VkBool32 leastRepresentableValueForceUnormRepresentation;
+    VkBool32 floatRepresentation;
+    VkBool32 depthBiasExact;
+} VkPhysicalDeviceDepthBiasControlFeaturesEXT;
+
+typedef struct VkDepthBiasInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    float depthBiasConstantFactor;
+    float depthBiasClamp;
+    float depthBiasSlopeFactor;
+} VkDepthBiasInfoEXT;
+
+typedef struct VkDepthBiasRepresentationInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkDepthBiasRepresentationEXT depthBiasRepresentation;
+    VkBool32 depthBiasExact;
+} VkDepthBiasRepresentationInfoEXT;
+
+typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBias2EXT)(VkCommandBuffer commandBuffer, const VkDepthBiasInfoEXT* pDepthBiasInfo);
+
+#ifndef VK_NO_PROTOTYPES
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBias2EXT(
+    VkCommandBuffer commandBuffer,
+    const VkDepthBiasInfoEXT* pDepthBiasInfo);
+#endif
+#endif
+
+
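Editorial note, not part of the vendored header: vkCmdSetDepthBias2EXT (declared just above) is the extensible replacement for vkCmdSetDepthBias. A minimal sketch, assuming a recording VkCommandBuffer `cmd` and a function pointer `pfnCmdSetDepthBias2` fetched with vkGetDeviceProcAddr:

    VkDepthBiasRepresentationInfoEXT rep = {0};
    rep.sType = VK_STRUCTURE_TYPE_DEPTH_BIAS_REPRESENTATION_INFO_EXT;
    rep.depthBiasRepresentation = VK_DEPTH_BIAS_REPRESENTATION_FLOAT_EXT;

    VkDepthBiasInfoEXT bias = {0};
    bias.sType = VK_STRUCTURE_TYPE_DEPTH_BIAS_INFO_EXT;
    bias.pNext = &rep;                      /* optional: request the float representation */
    bias.depthBiasConstantFactor = 1.25f;
    bias.depthBiasSlopeFactor = 1.75f;

    pfnCmdSetDepthBias2(cmd, &bias);        /* same dynamic state as vkCmdSetDepthBias */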
+// VK_EXT_device_memory_report is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_device_memory_report 1
+#define VK_EXT_DEVICE_MEMORY_REPORT_SPEC_VERSION 2
+#define VK_EXT_DEVICE_MEMORY_REPORT_EXTENSION_NAME "VK_EXT_device_memory_report"
+
+typedef enum VkDeviceMemoryReportEventTypeEXT {
+    VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT = 0,
+    VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT = 1,
+    VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT = 2,
+    VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT = 3,
+    VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT = 4,
+    VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkDeviceMemoryReportEventTypeEXT;
+typedef VkFlags VkDeviceMemoryReportFlagsEXT;
+typedef struct VkPhysicalDeviceDeviceMemoryReportFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 deviceMemoryReport;
+} VkPhysicalDeviceDeviceMemoryReportFeaturesEXT;
+
+typedef struct VkDeviceMemoryReportCallbackDataEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkDeviceMemoryReportFlagsEXT flags;
+    VkDeviceMemoryReportEventTypeEXT type;
+    uint64_t memoryObjectId;
+    VkDeviceSize size;
+    VkObjectType objectType;
+    uint64_t objectHandle;
+    uint32_t heapIndex;
+} VkDeviceMemoryReportCallbackDataEXT;
+
+typedef void (VKAPI_PTR *PFN_vkDeviceMemoryReportCallbackEXT)(
+    const VkDeviceMemoryReportCallbackDataEXT* pCallbackData,
+    void* pUserData);
+
+typedef struct VkDeviceDeviceMemoryReportCreateInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkDeviceMemoryReportFlagsEXT flags;
+    PFN_vkDeviceMemoryReportCallbackEXT pfnUserCallback;
+    void* pUserData;
+} VkDeviceDeviceMemoryReportCreateInfoEXT;
+
+
+
+// VK_EXT_acquire_drm_display is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_acquire_drm_display 1
+#define VK_EXT_ACQUIRE_DRM_DISPLAY_SPEC_VERSION 1
+#define VK_EXT_ACQUIRE_DRM_DISPLAY_EXTENSION_NAME "VK_EXT_acquire_drm_display"
+typedef VkResult (VKAPI_PTR *PFN_vkAcquireDrmDisplayEXT)(VkPhysicalDevice physicalDevice, int32_t drmFd, VkDisplayKHR display);
+typedef VkResult (VKAPI_PTR *PFN_vkGetDrmDisplayEXT)(VkPhysicalDevice physicalDevice, int32_t drmFd, uint32_t connectorId, VkDisplayKHR* display);
+
+#ifndef VK_NO_PROTOTYPES
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkAcquireDrmDisplayEXT(
+    VkPhysicalDevice physicalDevice,
+    int32_t drmFd,
+    VkDisplayKHR display);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetDrmDisplayEXT(
+    VkPhysicalDevice physicalDevice,
+    int32_t drmFd,
+    uint32_t connectorId,
+    VkDisplayKHR* display);
+#endif
+#endif
+
+
+// VK_EXT_robustness2 is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_robustness2 1
+#define VK_EXT_ROBUSTNESS_2_SPEC_VERSION 1
+#define VK_EXT_ROBUSTNESS_2_EXTENSION_NAME "VK_EXT_robustness2"
+typedef VkPhysicalDeviceRobustness2FeaturesKHR VkPhysicalDeviceRobustness2FeaturesEXT;
+
+typedef VkPhysicalDeviceRobustness2PropertiesKHR VkPhysicalDeviceRobustness2PropertiesEXT;
+
+
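Editorial note, not part of the vendored header: the VK_EXT_acquire_drm_display entry points declared above follow a lookup-then-acquire pattern. A minimal sketch, assuming a VkPhysicalDevice `physicalDevice`, an open DRM file descriptor `drmFd`, a `connectorId` obtained from libdrm, and function pointers fetched with vkGetInstanceProcAddr:

    VkDisplayKHR display = VK_NULL_HANDLE;
    if (pfnGetDrmDisplay(physicalDevice, drmFd, connectorId, &display) == VK_SUCCESS &&
        pfnAcquireDrmDisplay(physicalDevice, drmFd, display) == VK_SUCCESS) {
        /* the connector's display is now under exclusive Vulkan control
           and can back a direct-to-display swapchain */
    }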
+// VK_EXT_custom_border_color is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_custom_border_color 1
+#define VK_EXT_CUSTOM_BORDER_COLOR_SPEC_VERSION 12
+#define VK_EXT_CUSTOM_BORDER_COLOR_EXTENSION_NAME "VK_EXT_custom_border_color"
+typedef struct VkPhysicalDeviceCustomBorderColorPropertiesEXT {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t maxCustomBorderColorSamplers;
+} VkPhysicalDeviceCustomBorderColorPropertiesEXT;
+
+typedef struct VkPhysicalDeviceCustomBorderColorFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 customBorderColors;
+    VkBool32 customBorderColorWithoutFormat;
+} VkPhysicalDeviceCustomBorderColorFeaturesEXT;
+
+
+
+// VK_EXT_texture_compression_astc_3d is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_texture_compression_astc_3d 1
+#define VK_EXT_TEXTURE_COMPRESSION_ASTC_3D_SPEC_VERSION 1
+#define VK_EXT_TEXTURE_COMPRESSION_ASTC_3D_EXTENSION_NAME "VK_EXT_texture_compression_astc_3d"
+typedef struct VkPhysicalDeviceTextureCompressionASTC3DFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 textureCompressionASTC_3D;
+} VkPhysicalDeviceTextureCompressionASTC3DFeaturesEXT;
+
+
+
+// VK_GOOGLE_user_type is a preprocessor guard. Do not pass it to API calls.
+#define VK_GOOGLE_user_type 1
+#define VK_GOOGLE_USER_TYPE_SPEC_VERSION 1
+#define VK_GOOGLE_USER_TYPE_EXTENSION_NAME "VK_GOOGLE_user_type"
+
+
+// VK_NV_present_barrier is a preprocessor guard. Do not pass it to API calls.
+#define VK_NV_present_barrier 1
+#define VK_NV_PRESENT_BARRIER_SPEC_VERSION 1
+#define VK_NV_PRESENT_BARRIER_EXTENSION_NAME "VK_NV_present_barrier"
+typedef struct VkPhysicalDevicePresentBarrierFeaturesNV {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 presentBarrier;
+} VkPhysicalDevicePresentBarrierFeaturesNV;
+
+typedef struct VkSurfaceCapabilitiesPresentBarrierNV {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 presentBarrierSupported;
+} VkSurfaceCapabilitiesPresentBarrierNV;
+
+typedef struct VkSwapchainPresentBarrierCreateInfoNV {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 presentBarrierEnable;
+} VkSwapchainPresentBarrierCreateInfoNV;
+
+
+
+// VK_EXT_private_data is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_private_data 1
+typedef VkPrivateDataSlot VkPrivateDataSlotEXT;
+
+#define VK_EXT_PRIVATE_DATA_SPEC_VERSION 1
+#define VK_EXT_PRIVATE_DATA_EXTENSION_NAME "VK_EXT_private_data"
+typedef VkPrivateDataSlotCreateFlags VkPrivateDataSlotCreateFlagsEXT;
+
+typedef VkPhysicalDevicePrivateDataFeatures VkPhysicalDevicePrivateDataFeaturesEXT;
+
+typedef VkDevicePrivateDataCreateInfo VkDevicePrivateDataCreateInfoEXT;
+
+typedef VkPrivateDataSlotCreateInfo VkPrivateDataSlotCreateInfoEXT;
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreatePrivateDataSlotEXT)(VkDevice device, const VkPrivateDataSlotCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPrivateDataSlot* pPrivateDataSlot);
+typedef void (VKAPI_PTR *PFN_vkDestroyPrivateDataSlotEXT)(VkDevice device, VkPrivateDataSlot privateDataSlot, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkSetPrivateDataEXT)(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t data);
+typedef void (VKAPI_PTR *PFN_vkGetPrivateDataEXT)(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlot privateDataSlot, uint64_t* pData);
+
+#ifndef VK_NO_PROTOTYPES
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreatePrivateDataSlotEXT(
+    VkDevice device,
+    const VkPrivateDataSlotCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkPrivateDataSlot* pPrivateDataSlot);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkDestroyPrivateDataSlotEXT(
+    VkDevice device,
+    VkPrivateDataSlot privateDataSlot,
+    const VkAllocationCallbacks* pAllocator);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkSetPrivateDataEXT(
+    VkDevice device,
+    VkObjectType objectType,
+    uint64_t objectHandle,
+    VkPrivateDataSlot privateDataSlot,
+    uint64_t data);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetPrivateDataEXT(
+    VkDevice device,
+    VkObjectType objectType,
+    uint64_t objectHandle,
+    VkPrivateDataSlot privateDataSlot,
+    uint64_t* pData);
+#endif
+#endif
+
+
+// VK_EXT_pipeline_creation_cache_control is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_pipeline_creation_cache_control 1
+#define VK_EXT_PIPELINE_CREATION_CACHE_CONTROL_SPEC_VERSION 3
+#define VK_EXT_PIPELINE_CREATION_CACHE_CONTROL_EXTENSION_NAME "VK_EXT_pipeline_creation_cache_control"
+typedef VkPhysicalDevicePipelineCreationCacheControlFeatures VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT;
+
+
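Editorial note, not part of the vendored header: the private data functions declared above attach an application-defined 64-bit value to any Vulkan object (the core vkCreatePrivateDataSlot path works identically on Vulkan 1.3+). A minimal sketch, assuming a VkDevice `device`, a VkImage `image`, and function pointers fetched with vkGetDeviceProcAddr:

    VkPrivateDataSlotCreateInfo slotInfo = {0};
    slotInfo.sType = VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO;

    VkPrivateDataSlot slot;
    if (pfnCreatePrivateDataSlot(device, &slotInfo, NULL, &slot) == VK_SUCCESS) {
        /* tag the image, e.g. with an index into an engine-side table */
        pfnSetPrivateData(device, VK_OBJECT_TYPE_IMAGE, (uint64_t)image, slot, 42u);

        uint64_t tag = 0;
        pfnGetPrivateData(device, VK_OBJECT_TYPE_IMAGE, (uint64_t)image, slot, &tag);  /* tag == 42 */

        pfnDestroyPrivateDataSlot(device, slot, NULL);
    }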
+// VK_NV_device_diagnostics_config is a preprocessor guard. Do not pass it to API calls.
+#define VK_NV_device_diagnostics_config 1
+#define VK_NV_DEVICE_DIAGNOSTICS_CONFIG_SPEC_VERSION 2
+#define VK_NV_DEVICE_DIAGNOSTICS_CONFIG_EXTENSION_NAME "VK_NV_device_diagnostics_config"
+
+typedef enum VkDeviceDiagnosticsConfigFlagBitsNV {
+    VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_SHADER_DEBUG_INFO_BIT_NV = 0x00000001,
+    VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_RESOURCE_TRACKING_BIT_NV = 0x00000002,
+    VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_AUTOMATIC_CHECKPOINTS_BIT_NV = 0x00000004,
+    VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_SHADER_ERROR_REPORTING_BIT_NV = 0x00000008,
+    VK_DEVICE_DIAGNOSTICS_CONFIG_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF
+} VkDeviceDiagnosticsConfigFlagBitsNV;
+typedef VkFlags VkDeviceDiagnosticsConfigFlagsNV;
+typedef struct VkPhysicalDeviceDiagnosticsConfigFeaturesNV {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 diagnosticsConfig;
+} VkPhysicalDeviceDiagnosticsConfigFeaturesNV;
+
+typedef struct VkDeviceDiagnosticsConfigCreateInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkDeviceDiagnosticsConfigFlagsNV flags;
+} VkDeviceDiagnosticsConfigCreateInfoNV;
+
+
+
+// VK_QCOM_render_pass_store_ops is a preprocessor guard. Do not pass it to API calls.
+#define VK_QCOM_render_pass_store_ops 1
+#define VK_QCOM_RENDER_PASS_STORE_OPS_SPEC_VERSION 2
+#define VK_QCOM_RENDER_PASS_STORE_OPS_EXTENSION_NAME "VK_QCOM_render_pass_store_ops"
+
+
+// VK_QCOM_tile_shading is a preprocessor guard. Do not pass it to API calls.
+#define VK_QCOM_tile_shading 1
+#define VK_QCOM_TILE_SHADING_SPEC_VERSION 2
+#define VK_QCOM_TILE_SHADING_EXTENSION_NAME "VK_QCOM_tile_shading"
+
+typedef enum VkTileShadingRenderPassFlagBitsQCOM {
+    VK_TILE_SHADING_RENDER_PASS_ENABLE_BIT_QCOM = 0x00000001,
+    VK_TILE_SHADING_RENDER_PASS_PER_TILE_EXECUTION_BIT_QCOM = 0x00000002,
+    VK_TILE_SHADING_RENDER_PASS_FLAG_BITS_MAX_ENUM_QCOM = 0x7FFFFFFF
+} VkTileShadingRenderPassFlagBitsQCOM;
+typedef VkFlags VkTileShadingRenderPassFlagsQCOM;
+typedef struct VkPhysicalDeviceTileShadingFeaturesQCOM {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 tileShading;
+    VkBool32 tileShadingFragmentStage;
+    VkBool32 tileShadingColorAttachments;
+    VkBool32 tileShadingDepthAttachments;
+    VkBool32 tileShadingStencilAttachments;
+    VkBool32 tileShadingInputAttachments;
+    VkBool32 tileShadingSampledAttachments;
+    VkBool32 tileShadingPerTileDraw;
+    VkBool32 tileShadingPerTileDispatch;
+    VkBool32 tileShadingDispatchTile;
+    VkBool32 tileShadingApron;
+    VkBool32 tileShadingAnisotropicApron;
+    VkBool32 tileShadingAtomicOps;
+    VkBool32 tileShadingImageProcessing;
+} VkPhysicalDeviceTileShadingFeaturesQCOM;
+
+typedef struct VkPhysicalDeviceTileShadingPropertiesQCOM {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t maxApronSize;
+    VkBool32 preferNonCoherent;
+    VkExtent2D tileGranularity;
+    VkExtent2D maxTileShadingRate;
+} VkPhysicalDeviceTileShadingPropertiesQCOM;
+
+typedef struct VkRenderPassTileShadingCreateInfoQCOM {
+    VkStructureType sType;
+    const void* pNext;
+    VkTileShadingRenderPassFlagsQCOM flags;
+    VkExtent2D tileApronSize;
+} VkRenderPassTileShadingCreateInfoQCOM;
+
+typedef struct VkPerTileBeginInfoQCOM {
+    VkStructureType sType;
+    const void* pNext;
+} VkPerTileBeginInfoQCOM;
+
+typedef struct VkPerTileEndInfoQCOM {
+    VkStructureType sType;
+    const void* pNext;
+} VkPerTileEndInfoQCOM;
+
+typedef struct VkDispatchTileInfoQCOM {
+    VkStructureType sType;
+    const void* pNext;
+} VkDispatchTileInfoQCOM;
+
+typedef void (VKAPI_PTR *PFN_vkCmdDispatchTileQCOM)(VkCommandBuffer commandBuffer, const VkDispatchTileInfoQCOM* pDispatchTileInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdBeginPerTileExecutionQCOM)(VkCommandBuffer commandBuffer, const VkPerTileBeginInfoQCOM* pPerTileBeginInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdEndPerTileExecutionQCOM)(VkCommandBuffer commandBuffer, const VkPerTileEndInfoQCOM* pPerTileEndInfo);
+
+#ifndef VK_NO_PROTOTYPES
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdDispatchTileQCOM(
+    VkCommandBuffer commandBuffer,
+    const VkDispatchTileInfoQCOM* pDispatchTileInfo);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdBeginPerTileExecutionQCOM(
+    VkCommandBuffer commandBuffer,
+    const VkPerTileBeginInfoQCOM* pPerTileBeginInfo);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdEndPerTileExecutionQCOM(
+    VkCommandBuffer commandBuffer,
+    const VkPerTileEndInfoQCOM* pPerTileEndInfo);
+#endif
+#endif
+
+
+// VK_NV_low_latency is a preprocessor guard. Do not pass it to API calls.
+#define VK_NV_low_latency 1
+#define VK_NV_LOW_LATENCY_SPEC_VERSION 1
+#define VK_NV_LOW_LATENCY_EXTENSION_NAME "VK_NV_low_latency"
+typedef struct VkQueryLowLatencySupportNV {
+    VkStructureType sType;
+    const void* pNext;
+    void* pQueriedLowLatencyData;
+} VkQueryLowLatencySupportNV;
+
+
+
+// VK_EXT_descriptor_buffer is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_descriptor_buffer 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkAccelerationStructureKHR)
+#define VK_EXT_DESCRIPTOR_BUFFER_SPEC_VERSION 1
+#define VK_EXT_DESCRIPTOR_BUFFER_EXTENSION_NAME "VK_EXT_descriptor_buffer"
+typedef struct VkPhysicalDeviceDescriptorBufferPropertiesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 combinedImageSamplerDescriptorSingleArray;
+    VkBool32 bufferlessPushDescriptors;
+    VkBool32 allowSamplerImageViewPostSubmitCreation;
+    VkDeviceSize descriptorBufferOffsetAlignment;
+    uint32_t maxDescriptorBufferBindings;
+    uint32_t maxResourceDescriptorBufferBindings;
+    uint32_t maxSamplerDescriptorBufferBindings;
+    uint32_t maxEmbeddedImmutableSamplerBindings;
+    uint32_t maxEmbeddedImmutableSamplers;
+    size_t bufferCaptureReplayDescriptorDataSize;
+    size_t imageCaptureReplayDescriptorDataSize;
+    size_t imageViewCaptureReplayDescriptorDataSize;
+    size_t samplerCaptureReplayDescriptorDataSize;
+    size_t accelerationStructureCaptureReplayDescriptorDataSize;
+    size_t samplerDescriptorSize;
+    size_t combinedImageSamplerDescriptorSize;
+    size_t sampledImageDescriptorSize;
+    size_t storageImageDescriptorSize;
+    size_t uniformTexelBufferDescriptorSize;
+    size_t robustUniformTexelBufferDescriptorSize;
+    size_t storageTexelBufferDescriptorSize;
+    size_t robustStorageTexelBufferDescriptorSize;
+    size_t uniformBufferDescriptorSize;
+    size_t robustUniformBufferDescriptorSize;
+    size_t storageBufferDescriptorSize;
+    size_t robustStorageBufferDescriptorSize;
+    size_t inputAttachmentDescriptorSize;
+    size_t accelerationStructureDescriptorSize;
+    VkDeviceSize maxSamplerDescriptorBufferRange;
+    VkDeviceSize maxResourceDescriptorBufferRange;
+    VkDeviceSize samplerDescriptorBufferAddressSpaceSize;
+    VkDeviceSize resourceDescriptorBufferAddressSpaceSize;
+    VkDeviceSize descriptorBufferAddressSpaceSize;
+} VkPhysicalDeviceDescriptorBufferPropertiesEXT;
+
+typedef struct VkPhysicalDeviceDescriptorBufferDensityMapPropertiesEXT {
+    VkStructureType sType;
+    void* pNext;
+    size_t combinedImageSamplerDensityMapDescriptorSize;
+} VkPhysicalDeviceDescriptorBufferDensityMapPropertiesEXT;
+
+typedef struct VkPhysicalDeviceDescriptorBufferFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 descriptorBuffer;
+    VkBool32 descriptorBufferCaptureReplay;
+    VkBool32 descriptorBufferImageLayoutIgnored;
+    VkBool32 descriptorBufferPushDescriptors;
+} VkPhysicalDeviceDescriptorBufferFeaturesEXT;
+
+typedef struct VkDescriptorAddressInfoEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkDeviceAddress address;
+    VkDeviceSize range;
+    VkFormat format;
+} VkDescriptorAddressInfoEXT;
+
+typedef struct VkDescriptorBufferBindingInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkDeviceAddress address;
+    VkBufferUsageFlags usage;
+} VkDescriptorBufferBindingInfoEXT;
+
+typedef struct VkDescriptorBufferBindingPushDescriptorBufferHandleEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkBuffer buffer;
+} VkDescriptorBufferBindingPushDescriptorBufferHandleEXT;
+
+typedef union VkDescriptorDataEXT {
+    const VkSampler* pSampler;
+    const VkDescriptorImageInfo* pCombinedImageSampler;
+    const VkDescriptorImageInfo* pInputAttachmentImage;
+    const VkDescriptorImageInfo* pSampledImage;
+    const VkDescriptorImageInfo* pStorageImage;
+    const VkDescriptorAddressInfoEXT* pUniformTexelBuffer;
+    const VkDescriptorAddressInfoEXT* pStorageTexelBuffer;
+    const VkDescriptorAddressInfoEXT* pUniformBuffer;
+    const VkDescriptorAddressInfoEXT* pStorageBuffer;
+    VkDeviceAddress accelerationStructure;
+} VkDescriptorDataEXT;
+
+typedef struct VkDescriptorGetInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkDescriptorType type;
+    VkDescriptorDataEXT data;
+} VkDescriptorGetInfoEXT;
+
+typedef struct VkBufferCaptureDescriptorDataInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkBuffer buffer;
+} VkBufferCaptureDescriptorDataInfoEXT;
+
+typedef struct VkImageCaptureDescriptorDataInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkImage image;
+} VkImageCaptureDescriptorDataInfoEXT;
+
+typedef struct VkImageViewCaptureDescriptorDataInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkImageView imageView;
+} VkImageViewCaptureDescriptorDataInfoEXT;
+
+typedef struct VkSamplerCaptureDescriptorDataInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkSampler sampler;
+} VkSamplerCaptureDescriptorDataInfoEXT;
+
+typedef struct VkOpaqueCaptureDescriptorDataCreateInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    const void* opaqueCaptureDescriptorData;
+} VkOpaqueCaptureDescriptorDataCreateInfoEXT;
+
+typedef struct VkAccelerationStructureCaptureDescriptorDataInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkAccelerationStructureKHR accelerationStructure;
+    VkAccelerationStructureNV accelerationStructureNV;
+} VkAccelerationStructureCaptureDescriptorDataInfoEXT;
+
+typedef void (VKAPI_PTR *PFN_vkGetDescriptorSetLayoutSizeEXT)(VkDevice device, VkDescriptorSetLayout layout, VkDeviceSize* pLayoutSizeInBytes);
+typedef void (VKAPI_PTR *PFN_vkGetDescriptorSetLayoutBindingOffsetEXT)(VkDevice device, VkDescriptorSetLayout layout, uint32_t binding, VkDeviceSize* pOffset);
+typedef void (VKAPI_PTR *PFN_vkGetDescriptorEXT)(VkDevice device, const VkDescriptorGetInfoEXT* pDescriptorInfo, size_t dataSize, void* pDescriptor);
+typedef void (VKAPI_PTR *PFN_vkCmdBindDescriptorBuffersEXT)(VkCommandBuffer commandBuffer, uint32_t bufferCount, const VkDescriptorBufferBindingInfoEXT* pBindingInfos);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDescriptorBufferOffsetsEXT)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount, const uint32_t* pBufferIndices, const VkDeviceSize* pOffsets);
+typedef void (VKAPI_PTR *PFN_vkCmdBindDescriptorBufferEmbeddedSamplersEXT)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set);
+typedef VkResult (VKAPI_PTR *PFN_vkGetBufferOpaqueCaptureDescriptorDataEXT)(VkDevice device, const VkBufferCaptureDescriptorDataInfoEXT* pInfo, void* pData);
+typedef VkResult (VKAPI_PTR *PFN_vkGetImageOpaqueCaptureDescriptorDataEXT)(VkDevice device, const VkImageCaptureDescriptorDataInfoEXT* pInfo, void* pData);
+typedef VkResult (VKAPI_PTR *PFN_vkGetImageViewOpaqueCaptureDescriptorDataEXT)(VkDevice device, const VkImageViewCaptureDescriptorDataInfoEXT* pInfo, void* pData);
+typedef VkResult (VKAPI_PTR *PFN_vkGetSamplerOpaqueCaptureDescriptorDataEXT)(VkDevice device, const VkSamplerCaptureDescriptorDataInfoEXT* pInfo, void* pData);
+typedef VkResult (VKAPI_PTR *PFN_vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT)(VkDevice device, const VkAccelerationStructureCaptureDescriptorDataInfoEXT* pInfo, void* pData);
+
+#ifndef VK_NO_PROTOTYPES
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutSizeEXT(
+    VkDevice device,
+    VkDescriptorSetLayout layout,
+    VkDeviceSize* pLayoutSizeInBytes);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutBindingOffsetEXT(
+    VkDevice device,
+    VkDescriptorSetLayout layout,
+    uint32_t binding,
+    VkDeviceSize* pOffset);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetDescriptorEXT(
+    VkDevice device,
+    const VkDescriptorGetInfoEXT* pDescriptorInfo,
+    size_t dataSize,
+    void* pDescriptor);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorBuffersEXT(
+    VkCommandBuffer commandBuffer,
+    uint32_t bufferCount,
+    const VkDescriptorBufferBindingInfoEXT* pBindingInfos);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDescriptorBufferOffsetsEXT(
+    VkCommandBuffer commandBuffer,
+    VkPipelineBindPoint pipelineBindPoint,
+    VkPipelineLayout layout,
+    uint32_t firstSet,
+    uint32_t setCount,
+    const uint32_t* pBufferIndices,
+    const VkDeviceSize* pOffsets);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorBufferEmbeddedSamplersEXT(
+    VkCommandBuffer commandBuffer,
+    VkPipelineBindPoint pipelineBindPoint,
+    VkPipelineLayout layout,
+    uint32_t set);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetBufferOpaqueCaptureDescriptorDataEXT(
+    VkDevice device,
+    const VkBufferCaptureDescriptorDataInfoEXT* pInfo,
+    void* pData);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetImageOpaqueCaptureDescriptorDataEXT(
+    VkDevice device,
+    const VkImageCaptureDescriptorDataInfoEXT* pInfo,
+    void* pData);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetImageViewOpaqueCaptureDescriptorDataEXT(
+    VkDevice device,
+    const VkImageViewCaptureDescriptorDataInfoEXT* pInfo,
+    void* pData);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetSamplerOpaqueCaptureDescriptorDataEXT(
+    VkDevice device,
+    const VkSamplerCaptureDescriptorDataInfoEXT* pInfo,
+    void* pData);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetAccelerationStructureOpaqueCaptureDescriptorDataEXT(
+    VkDevice device,
+    const VkAccelerationStructureCaptureDescriptorDataInfoEXT* pInfo,
+    void* pData);
+#endif
+#endif
+
+
+// VK_EXT_graphics_pipeline_library is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_graphics_pipeline_library 1
+#define VK_EXT_GRAPHICS_PIPELINE_LIBRARY_SPEC_VERSION 1
+#define VK_EXT_GRAPHICS_PIPELINE_LIBRARY_EXTENSION_NAME "VK_EXT_graphics_pipeline_library"
+
+typedef enum VkGraphicsPipelineLibraryFlagBitsEXT {
+    VK_GRAPHICS_PIPELINE_LIBRARY_VERTEX_INPUT_INTERFACE_BIT_EXT = 0x00000001,
+    VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT = 0x00000002,
+    VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT = 0x00000004,
+    VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_OUTPUT_INTERFACE_BIT_EXT = 0x00000008,
+    VK_GRAPHICS_PIPELINE_LIBRARY_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkGraphicsPipelineLibraryFlagBitsEXT;
+typedef VkFlags VkGraphicsPipelineLibraryFlagsEXT;
+typedef struct VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 graphicsPipelineLibrary;
+} VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT;
+
+typedef struct VkPhysicalDeviceGraphicsPipelineLibraryPropertiesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 graphicsPipelineLibraryFastLinking;
+    VkBool32 graphicsPipelineLibraryIndependentInterpolationDecoration;
+} VkPhysicalDeviceGraphicsPipelineLibraryPropertiesEXT;
+
+typedef struct VkGraphicsPipelineLibraryCreateInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkGraphicsPipelineLibraryFlagsEXT flags;
+} VkGraphicsPipelineLibraryCreateInfoEXT;
+
+
+
+// VK_AMD_shader_early_and_late_fragment_tests is a preprocessor guard. Do not pass it to API calls.
+#define VK_AMD_shader_early_and_late_fragment_tests 1
+#define VK_AMD_SHADER_EARLY_AND_LATE_FRAGMENT_TESTS_SPEC_VERSION 1
+#define VK_AMD_SHADER_EARLY_AND_LATE_FRAGMENT_TESTS_EXTENSION_NAME "VK_AMD_shader_early_and_late_fragment_tests"
+typedef struct VkPhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 shaderEarlyAndLateFragmentTests;
+} VkPhysicalDeviceShaderEarlyAndLateFragmentTestsFeaturesAMD;
+
+
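Editorial note, not part of the vendored header: VkGraphicsPipelineLibraryCreateInfoEXT (declared above) marks which subset of graphics state a pipeline-library part contains. A minimal sketch of building the pre-rasterization part; the remaining VkGraphicsPipelineCreateInfo fields (stages, layout, and so on) are assumed to be filled in elsewhere:

    VkGraphicsPipelineLibraryCreateInfoEXT libInfo = {0};
    libInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_LIBRARY_CREATE_INFO_EXT;
    libInfo.flags = VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT;

    VkGraphicsPipelineCreateInfo pipelineInfo = {0};
    pipelineInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
    pipelineInfo.pNext = &libInfo;
    pipelineInfo.flags = VK_PIPELINE_CREATE_LIBRARY_BIT_KHR;  /* from VK_KHR_pipeline_library */
    /* ...vertex/geometry stages, layout, and the state relevant to this subset... */

The parts are later combined into an executable pipeline via VkPipelineLibraryCreateInfoKHR, which can be much cheaper than recompiling whole pipelines.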
+// VK_NV_fragment_shading_rate_enums is a preprocessor guard. Do not pass it to API calls.
+#define VK_NV_fragment_shading_rate_enums 1
+#define VK_NV_FRAGMENT_SHADING_RATE_ENUMS_SPEC_VERSION 1
+#define VK_NV_FRAGMENT_SHADING_RATE_ENUMS_EXTENSION_NAME "VK_NV_fragment_shading_rate_enums"
+
+typedef enum VkFragmentShadingRateTypeNV {
+    VK_FRAGMENT_SHADING_RATE_TYPE_FRAGMENT_SIZE_NV = 0,
+    VK_FRAGMENT_SHADING_RATE_TYPE_ENUMS_NV = 1,
+    VK_FRAGMENT_SHADING_RATE_TYPE_MAX_ENUM_NV = 0x7FFFFFFF
+} VkFragmentShadingRateTypeNV;
+
+typedef enum VkFragmentShadingRateNV {
+    VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_PIXEL_NV = 0,
+    VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_1X2_PIXELS_NV = 1,
+    VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X1_PIXELS_NV = 4,
+    VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X2_PIXELS_NV = 5,
+    VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X4_PIXELS_NV = 6,
+    VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_4X2_PIXELS_NV = 9,
+    VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_4X4_PIXELS_NV = 10,
+    VK_FRAGMENT_SHADING_RATE_2_INVOCATIONS_PER_PIXEL_NV = 11,
+    VK_FRAGMENT_SHADING_RATE_4_INVOCATIONS_PER_PIXEL_NV = 12,
+    VK_FRAGMENT_SHADING_RATE_8_INVOCATIONS_PER_PIXEL_NV = 13,
+    VK_FRAGMENT_SHADING_RATE_16_INVOCATIONS_PER_PIXEL_NV = 14,
+    VK_FRAGMENT_SHADING_RATE_NO_INVOCATIONS_NV = 15,
+    VK_FRAGMENT_SHADING_RATE_MAX_ENUM_NV = 0x7FFFFFFF
+} VkFragmentShadingRateNV;
+typedef struct VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 fragmentShadingRateEnums;
+    VkBool32 supersampleFragmentShadingRates;
+    VkBool32 noInvocationFragmentShadingRates;
+} VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV;
+
+typedef struct VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV {
+    VkStructureType sType;
+    void* pNext;
+    VkSampleCountFlagBits maxFragmentShadingRateInvocationCount;
+} VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV;
+
+typedef struct VkPipelineFragmentShadingRateEnumStateCreateInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkFragmentShadingRateTypeNV shadingRateType;
+    VkFragmentShadingRateNV shadingRate;
+    VkFragmentShadingRateCombinerOpKHR combinerOps[2];
+} VkPipelineFragmentShadingRateEnumStateCreateInfoNV;
+
+typedef void (VKAPI_PTR *PFN_vkCmdSetFragmentShadingRateEnumNV)(VkCommandBuffer commandBuffer, VkFragmentShadingRateNV shadingRate, const VkFragmentShadingRateCombinerOpKHR combinerOps[2]);
+
+#ifndef VK_NO_PROTOTYPES
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetFragmentShadingRateEnumNV(
+    VkCommandBuffer commandBuffer,
+    VkFragmentShadingRateNV shadingRate,
+    const VkFragmentShadingRateCombinerOpKHR combinerOps[2]);
+#endif
+#endif
+
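Editorial note, not part of the vendored header: a minimal sketch of the entry point just above, dropping to one invocation per 2x2 pixel block. It assumes a recording VkCommandBuffer `cmd`, the fragmentShadingRateEnums feature enabled, and a function pointer `pfnCmdSetFragmentShadingRateEnum` fetched with vkGetDeviceProcAddr:

    const VkFragmentShadingRateCombinerOpKHR ops[2] = {
        VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR,   /* keep the pipeline/primitive rate */
        VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR    /* keep the attachment rate */
    };
    pfnCmdSetFragmentShadingRateEnum(cmd,
        VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X2_PIXELS_NV, ops);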
+// VK_NV_ray_tracing_motion_blur is a preprocessor guard. Do not pass it to API calls.
+#define VK_NV_ray_tracing_motion_blur 1
+#define VK_NV_RAY_TRACING_MOTION_BLUR_SPEC_VERSION 1
+#define VK_NV_RAY_TRACING_MOTION_BLUR_EXTENSION_NAME "VK_NV_ray_tracing_motion_blur"
+
+typedef enum VkAccelerationStructureMotionInstanceTypeNV {
+    VK_ACCELERATION_STRUCTURE_MOTION_INSTANCE_TYPE_STATIC_NV = 0,
+    VK_ACCELERATION_STRUCTURE_MOTION_INSTANCE_TYPE_MATRIX_MOTION_NV = 1,
+    VK_ACCELERATION_STRUCTURE_MOTION_INSTANCE_TYPE_SRT_MOTION_NV = 2,
+    VK_ACCELERATION_STRUCTURE_MOTION_INSTANCE_TYPE_MAX_ENUM_NV = 0x7FFFFFFF
+} VkAccelerationStructureMotionInstanceTypeNV;
+typedef VkFlags VkAccelerationStructureMotionInfoFlagsNV;
+typedef VkFlags VkAccelerationStructureMotionInstanceFlagsNV;
+typedef union VkDeviceOrHostAddressConstKHR {
+    VkDeviceAddress deviceAddress;
+    const void* hostAddress;
+} VkDeviceOrHostAddressConstKHR;
+
+typedef struct VkAccelerationStructureGeometryMotionTrianglesDataNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkDeviceOrHostAddressConstKHR vertexData;
+} VkAccelerationStructureGeometryMotionTrianglesDataNV;
+
+typedef struct VkAccelerationStructureMotionInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t maxInstances;
+    VkAccelerationStructureMotionInfoFlagsNV flags;
+} VkAccelerationStructureMotionInfoNV;
+
+typedef struct VkAccelerationStructureMatrixMotionInstanceNV {
+    VkTransformMatrixKHR transformT0;
+    VkTransformMatrixKHR transformT1;
+    uint32_t instanceCustomIndex:24;
+    uint32_t mask:8;
+    uint32_t instanceShaderBindingTableRecordOffset:24;
+    VkGeometryInstanceFlagsKHR flags:8;
+    uint64_t accelerationStructureReference;
+} VkAccelerationStructureMatrixMotionInstanceNV;
+
+typedef struct VkSRTDataNV {
+    float sx;
+    float a;
+    float b;
+    float pvx;
+    float sy;
+    float c;
+    float pvy;
+    float sz;
+    float pvz;
+    float qx;
+    float qy;
+    float qz;
+    float qw;
+    float tx;
+    float ty;
+    float tz;
+} VkSRTDataNV;
+
+typedef struct VkAccelerationStructureSRTMotionInstanceNV {
+    VkSRTDataNV transformT0;
+    VkSRTDataNV transformT1;
+    uint32_t instanceCustomIndex:24;
+    uint32_t mask:8;
+    uint32_t instanceShaderBindingTableRecordOffset:24;
+    VkGeometryInstanceFlagsKHR flags:8;
+    uint64_t accelerationStructureReference;
+} VkAccelerationStructureSRTMotionInstanceNV;
+
+typedef union VkAccelerationStructureMotionInstanceDataNV {
+    VkAccelerationStructureInstanceKHR staticInstance;
+    VkAccelerationStructureMatrixMotionInstanceNV matrixMotionInstance;
+    VkAccelerationStructureSRTMotionInstanceNV srtMotionInstance;
+} VkAccelerationStructureMotionInstanceDataNV;
+
+typedef struct VkAccelerationStructureMotionInstanceNV {
+    VkAccelerationStructureMotionInstanceTypeNV type;
+    VkAccelerationStructureMotionInstanceFlagsNV flags;
+    VkAccelerationStructureMotionInstanceDataNV data;
+} VkAccelerationStructureMotionInstanceNV;
+
+typedef struct VkPhysicalDeviceRayTracingMotionBlurFeaturesNV {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 rayTracingMotionBlur;
+    VkBool32 rayTracingMotionBlurPipelineTraceRaysIndirect;
+} VkPhysicalDeviceRayTracingMotionBlurFeaturesNV;
+
+
+
+// VK_EXT_ycbcr_2plane_444_formats is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_ycbcr_2plane_444_formats 1
+#define VK_EXT_YCBCR_2PLANE_444_FORMATS_SPEC_VERSION 1
+#define VK_EXT_YCBCR_2PLANE_444_FORMATS_EXTENSION_NAME "VK_EXT_ycbcr_2plane_444_formats"
+typedef struct VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 ycbcr2plane444Formats;
+} VkPhysicalDeviceYcbcr2Plane444FormatsFeaturesEXT;
+
+
+
+// VK_EXT_fragment_density_map2 is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_fragment_density_map2 1
+#define VK_EXT_FRAGMENT_DENSITY_MAP_2_SPEC_VERSION 1
+#define VK_EXT_FRAGMENT_DENSITY_MAP_2_EXTENSION_NAME "VK_EXT_fragment_density_map2"
+typedef struct VkPhysicalDeviceFragmentDensityMap2FeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 fragmentDensityMapDeferred;
+} VkPhysicalDeviceFragmentDensityMap2FeaturesEXT;
+
+typedef struct VkPhysicalDeviceFragmentDensityMap2PropertiesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 subsampledLoads;
+    VkBool32 subsampledCoarseReconstructionEarlyAccess;
+    uint32_t maxSubsampledArrayLayers;
+    uint32_t maxDescriptorSetSubsampledSamplers;
+} VkPhysicalDeviceFragmentDensityMap2PropertiesEXT;
+
+
+
+// VK_QCOM_rotated_copy_commands is a preprocessor guard. Do not pass it to API calls.
+#define VK_QCOM_rotated_copy_commands 1
+#define VK_QCOM_ROTATED_COPY_COMMANDS_SPEC_VERSION 2
+#define VK_QCOM_ROTATED_COPY_COMMANDS_EXTENSION_NAME "VK_QCOM_rotated_copy_commands"
+typedef struct VkCopyCommandTransformInfoQCOM {
+    VkStructureType sType;
+    const void* pNext;
+    VkSurfaceTransformFlagBitsKHR transform;
+} VkCopyCommandTransformInfoQCOM;
+
+
+
+// VK_EXT_image_robustness is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_image_robustness 1
+#define VK_EXT_IMAGE_ROBUSTNESS_SPEC_VERSION 1
+#define VK_EXT_IMAGE_ROBUSTNESS_EXTENSION_NAME "VK_EXT_image_robustness"
+typedef VkPhysicalDeviceImageRobustnessFeatures VkPhysicalDeviceImageRobustnessFeaturesEXT;
+
+
+
+// VK_EXT_image_compression_control is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_image_compression_control 1
+#define VK_EXT_IMAGE_COMPRESSION_CONTROL_SPEC_VERSION 1
+#define VK_EXT_IMAGE_COMPRESSION_CONTROL_EXTENSION_NAME "VK_EXT_image_compression_control"
+
+typedef enum VkImageCompressionFlagBitsEXT {
+    VK_IMAGE_COMPRESSION_DEFAULT_EXT = 0,
+    VK_IMAGE_COMPRESSION_FIXED_RATE_DEFAULT_EXT = 0x00000001,
+    VK_IMAGE_COMPRESSION_FIXED_RATE_EXPLICIT_EXT = 0x00000002,
+    VK_IMAGE_COMPRESSION_DISABLED_EXT = 0x00000004,
+    VK_IMAGE_COMPRESSION_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkImageCompressionFlagBitsEXT;
+typedef VkFlags VkImageCompressionFlagsEXT;
+
+typedef enum VkImageCompressionFixedRateFlagBitsEXT {
+    VK_IMAGE_COMPRESSION_FIXED_RATE_NONE_EXT = 0,
+    VK_IMAGE_COMPRESSION_FIXED_RATE_1BPC_BIT_EXT = 0x00000001,
+    VK_IMAGE_COMPRESSION_FIXED_RATE_2BPC_BIT_EXT = 0x00000002,
+    VK_IMAGE_COMPRESSION_FIXED_RATE_3BPC_BIT_EXT = 0x00000004,
+    VK_IMAGE_COMPRESSION_FIXED_RATE_4BPC_BIT_EXT = 0x00000008,
+    VK_IMAGE_COMPRESSION_FIXED_RATE_5BPC_BIT_EXT = 0x00000010,
+    VK_IMAGE_COMPRESSION_FIXED_RATE_6BPC_BIT_EXT = 0x00000020,
+    VK_IMAGE_COMPRESSION_FIXED_RATE_7BPC_BIT_EXT = 0x00000040,
+    VK_IMAGE_COMPRESSION_FIXED_RATE_8BPC_BIT_EXT = 0x00000080,
+    VK_IMAGE_COMPRESSION_FIXED_RATE_9BPC_BIT_EXT = 0x00000100,
+    VK_IMAGE_COMPRESSION_FIXED_RATE_10BPC_BIT_EXT = 0x00000200,
+    VK_IMAGE_COMPRESSION_FIXED_RATE_11BPC_BIT_EXT = 0x00000400,
+    VK_IMAGE_COMPRESSION_FIXED_RATE_12BPC_BIT_EXT = 0x00000800,
+    VK_IMAGE_COMPRESSION_FIXED_RATE_13BPC_BIT_EXT = 0x00001000,
+    VK_IMAGE_COMPRESSION_FIXED_RATE_14BPC_BIT_EXT = 0x00002000,
+    VK_IMAGE_COMPRESSION_FIXED_RATE_15BPC_BIT_EXT = 0x00004000,
+    VK_IMAGE_COMPRESSION_FIXED_RATE_16BPC_BIT_EXT = 0x00008000,
+    VK_IMAGE_COMPRESSION_FIXED_RATE_17BPC_BIT_EXT = 0x00010000,
+    VK_IMAGE_COMPRESSION_FIXED_RATE_18BPC_BIT_EXT = 0x00020000,
+    VK_IMAGE_COMPRESSION_FIXED_RATE_19BPC_BIT_EXT = 0x00040000,
+    VK_IMAGE_COMPRESSION_FIXED_RATE_20BPC_BIT_EXT = 0x00080000,
+    VK_IMAGE_COMPRESSION_FIXED_RATE_21BPC_BIT_EXT = 0x00100000,
+    VK_IMAGE_COMPRESSION_FIXED_RATE_22BPC_BIT_EXT = 0x00200000,
+    VK_IMAGE_COMPRESSION_FIXED_RATE_23BPC_BIT_EXT = 0x00400000,
+    VK_IMAGE_COMPRESSION_FIXED_RATE_24BPC_BIT_EXT = 0x00800000,
+    VK_IMAGE_COMPRESSION_FIXED_RATE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkImageCompressionFixedRateFlagBitsEXT;
+typedef VkFlags VkImageCompressionFixedRateFlagsEXT;
+typedef struct VkPhysicalDeviceImageCompressionControlFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 imageCompressionControl;
+} VkPhysicalDeviceImageCompressionControlFeaturesEXT;
+
+typedef struct VkImageCompressionControlEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkImageCompressionFlagsEXT flags;
+    uint32_t compressionControlPlaneCount;
+    VkImageCompressionFixedRateFlagsEXT* pFixedRateFlags;
+} VkImageCompressionControlEXT;
+
+typedef struct VkImageCompressionPropertiesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkImageCompressionFlagsEXT imageCompressionFlags;
+    VkImageCompressionFixedRateFlagsEXT imageCompressionFixedRateFlags;
+} VkImageCompressionPropertiesEXT;
+
+
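Editorial note, not part of the vendored header: compression is requested at image creation by chaining VkImageCompressionControlEXT into VkImageCreateInfo. A minimal sketch asking for explicit 4-bits-per-component fixed-rate compression on a single-plane image; the remaining VkImageCreateInfo fields are assumed to be set as usual:

    VkImageCompressionFixedRateFlagsEXT planeRate = VK_IMAGE_COMPRESSION_FIXED_RATE_4BPC_BIT_EXT;

    VkImageCompressionControlEXT compression = {0};
    compression.sType = VK_STRUCTURE_TYPE_IMAGE_COMPRESSION_CONTROL_EXT;
    compression.flags = VK_IMAGE_COMPRESSION_FIXED_RATE_EXPLICIT_EXT;
    compression.compressionControlPlaneCount = 1;   /* one entry per image plane */
    compression.pFixedRateFlags = &planeRate;

    VkImageCreateInfo imageInfo = {0};
    imageInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    imageInfo.pNext = &compression;
    /* ...format, extent, usage, etc. as usual... */

The rate actually applied can then be read back through VkImageCompressionPropertiesEXT.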
+// VK_EXT_attachment_feedback_loop_layout is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_attachment_feedback_loop_layout 1
+#define VK_EXT_ATTACHMENT_FEEDBACK_LOOP_LAYOUT_SPEC_VERSION 2
+#define VK_EXT_ATTACHMENT_FEEDBACK_LOOP_LAYOUT_EXTENSION_NAME "VK_EXT_attachment_feedback_loop_layout"
+typedef struct VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 attachmentFeedbackLoopLayout;
+} VkPhysicalDeviceAttachmentFeedbackLoopLayoutFeaturesEXT;
+
+
+
+// VK_EXT_4444_formats is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_4444_formats 1
+#define VK_EXT_4444_FORMATS_SPEC_VERSION 1
+#define VK_EXT_4444_FORMATS_EXTENSION_NAME "VK_EXT_4444_formats"
+typedef struct VkPhysicalDevice4444FormatsFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 formatA4R4G4B4;
+    VkBool32 formatA4B4G4R4;
+} VkPhysicalDevice4444FormatsFeaturesEXT;
+
+
+
+// VK_EXT_device_fault is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_device_fault 1
+#define VK_EXT_DEVICE_FAULT_SPEC_VERSION 2
+#define VK_EXT_DEVICE_FAULT_EXTENSION_NAME "VK_EXT_device_fault"
+
+typedef enum VkDeviceFaultAddressTypeEXT {
+    VK_DEVICE_FAULT_ADDRESS_TYPE_NONE_EXT = 0,
+    VK_DEVICE_FAULT_ADDRESS_TYPE_READ_INVALID_EXT = 1,
+    VK_DEVICE_FAULT_ADDRESS_TYPE_WRITE_INVALID_EXT = 2,
+    VK_DEVICE_FAULT_ADDRESS_TYPE_EXECUTE_INVALID_EXT = 3,
+    VK_DEVICE_FAULT_ADDRESS_TYPE_INSTRUCTION_POINTER_UNKNOWN_EXT = 4,
+    VK_DEVICE_FAULT_ADDRESS_TYPE_INSTRUCTION_POINTER_INVALID_EXT = 5,
+    VK_DEVICE_FAULT_ADDRESS_TYPE_INSTRUCTION_POINTER_FAULT_EXT = 6,
+    VK_DEVICE_FAULT_ADDRESS_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkDeviceFaultAddressTypeEXT;
+
+typedef enum VkDeviceFaultVendorBinaryHeaderVersionEXT {
+    VK_DEVICE_FAULT_VENDOR_BINARY_HEADER_VERSION_ONE_EXT = 1,
+    VK_DEVICE_FAULT_VENDOR_BINARY_HEADER_VERSION_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkDeviceFaultVendorBinaryHeaderVersionEXT;
+typedef struct VkPhysicalDeviceFaultFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 deviceFault;
+    VkBool32 deviceFaultVendorBinary;
+} VkPhysicalDeviceFaultFeaturesEXT;
+
+typedef struct VkDeviceFaultCountsEXT {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t addressInfoCount;
+    uint32_t vendorInfoCount;
+    VkDeviceSize vendorBinarySize;
+} VkDeviceFaultCountsEXT;
+
+typedef struct VkDeviceFaultAddressInfoEXT {
+    VkDeviceFaultAddressTypeEXT addressType;
+    VkDeviceAddress reportedAddress;
+    VkDeviceSize addressPrecision;
+} VkDeviceFaultAddressInfoEXT;
+
+typedef struct VkDeviceFaultVendorInfoEXT {
+    char description[VK_MAX_DESCRIPTION_SIZE];
+    uint64_t vendorFaultCode;
+    uint64_t vendorFaultData;
+} VkDeviceFaultVendorInfoEXT;
+
+typedef struct VkDeviceFaultInfoEXT {
+    VkStructureType sType;
+    void* pNext;
+    char description[VK_MAX_DESCRIPTION_SIZE];
+    VkDeviceFaultAddressInfoEXT* pAddressInfos;
+    VkDeviceFaultVendorInfoEXT* pVendorInfos;
+    void* pVendorBinaryData;
+} VkDeviceFaultInfoEXT;
+
+typedef struct VkDeviceFaultVendorBinaryHeaderVersionOneEXT {
+    uint32_t headerSize;
+    VkDeviceFaultVendorBinaryHeaderVersionEXT headerVersion;
+    uint32_t vendorID;
+    uint32_t deviceID;
+    uint32_t driverVersion;
+    uint8_t pipelineCacheUUID[VK_UUID_SIZE];
+    uint32_t applicationNameOffset;
+    uint32_t applicationVersion;
+    uint32_t engineNameOffset;
+    uint32_t engineVersion;
+    uint32_t apiVersion;
+} VkDeviceFaultVendorBinaryHeaderVersionOneEXT;
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceFaultInfoEXT)(VkDevice device, VkDeviceFaultCountsEXT* pFaultCounts, VkDeviceFaultInfoEXT* pFaultInfo);
+
+#ifndef VK_NO_PROTOTYPES
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceFaultInfoEXT(
+    VkDevice device,
+    VkDeviceFaultCountsEXT* pFaultCounts,
+    VkDeviceFaultInfoEXT* pFaultInfo);
+#endif
+#endif
+
+
+// VK_ARM_rasterization_order_attachment_access is a preprocessor guard. Do not pass it to API calls.
+#define VK_ARM_rasterization_order_attachment_access 1
+#define VK_ARM_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_SPEC_VERSION 1
+#define VK_ARM_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_EXTENSION_NAME "VK_ARM_rasterization_order_attachment_access"
+typedef struct VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 rasterizationOrderColorAttachmentAccess;
+    VkBool32 rasterizationOrderDepthAttachmentAccess;
+    VkBool32 rasterizationOrderStencilAttachmentAccess;
+} VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT;
+
+typedef VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesEXT VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM;
+
+
+
+// VK_EXT_rgba10x6_formats is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_rgba10x6_formats 1
+#define VK_EXT_RGBA10X6_FORMATS_SPEC_VERSION 1
+#define VK_EXT_RGBA10X6_FORMATS_EXTENSION_NAME "VK_EXT_rgba10x6_formats"
+typedef struct VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 formatRgba10x6WithoutYCbCrSampler;
+} VkPhysicalDeviceRGBA10X6FormatsFeaturesEXT;
+
+
+
+// VK_VALVE_mutable_descriptor_type is a preprocessor guard. Do not pass it to API calls.
+#define VK_VALVE_mutable_descriptor_type 1
+#define VK_VALVE_MUTABLE_DESCRIPTOR_TYPE_SPEC_VERSION 1
+#define VK_VALVE_MUTABLE_DESCRIPTOR_TYPE_EXTENSION_NAME "VK_VALVE_mutable_descriptor_type"
+typedef struct VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 mutableDescriptorType;
+} VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT;
+
+typedef VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT VkPhysicalDeviceMutableDescriptorTypeFeaturesVALVE;
+
+typedef struct VkMutableDescriptorTypeListEXT {
+    uint32_t descriptorTypeCount;
+    const VkDescriptorType* pDescriptorTypes;
+} VkMutableDescriptorTypeListEXT;
+
+typedef VkMutableDescriptorTypeListEXT VkMutableDescriptorTypeListVALVE;
+
+typedef struct VkMutableDescriptorTypeCreateInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t mutableDescriptorTypeListCount;
+    const VkMutableDescriptorTypeListEXT* pMutableDescriptorTypeLists;
+} VkMutableDescriptorTypeCreateInfoEXT;
+
+typedef VkMutableDescriptorTypeCreateInfoEXT VkMutableDescriptorTypeCreateInfoVALVE;
+
+
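Editorial note, not part of the vendored header: vkGetDeviceFaultInfoEXT (declared at the start of this hunk) follows the usual Vulkan two-call pattern and is typically invoked after a VK_ERROR_DEVICE_LOST. A minimal sketch, assuming a VkDevice `device`, a function pointer fetched with vkGetDeviceProcAddr, and <stdlib.h> for malloc; allocation and error handling are elided:

    VkDeviceFaultCountsEXT counts = {0};
    counts.sType = VK_STRUCTURE_TYPE_DEVICE_FAULT_COUNTS_EXT;
    pfnGetDeviceFaultInfo(device, &counts, NULL);    /* first call: query array sizes */

    VkDeviceFaultInfoEXT info = {0};
    info.sType = VK_STRUCTURE_TYPE_DEVICE_FAULT_INFO_EXT;
    info.pAddressInfos = malloc(counts.addressInfoCount * sizeof(*info.pAddressInfos));
    info.pVendorInfos = malloc(counts.vendorInfoCount * sizeof(*info.pVendorInfos));
    pfnGetDeviceFaultInfo(device, &counts, &info);   /* second call: fill description and arrays */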
+#define VK_EXT_vertex_input_dynamic_state 1 +#define VK_EXT_VERTEX_INPUT_DYNAMIC_STATE_SPEC_VERSION 2 +#define VK_EXT_VERTEX_INPUT_DYNAMIC_STATE_EXTENSION_NAME "VK_EXT_vertex_input_dynamic_state" +typedef struct VkPhysicalDeviceVertexInputDynamicStateFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 vertexInputDynamicState; +} VkPhysicalDeviceVertexInputDynamicStateFeaturesEXT; + +typedef struct VkVertexInputBindingDescription2EXT { + VkStructureType sType; + void* pNext; + uint32_t binding; + uint32_t stride; + VkVertexInputRate inputRate; + uint32_t divisor; +} VkVertexInputBindingDescription2EXT; + +typedef struct VkVertexInputAttributeDescription2EXT { + VkStructureType sType; + void* pNext; + uint32_t location; + uint32_t binding; + VkFormat format; + uint32_t offset; +} VkVertexInputAttributeDescription2EXT; + +typedef void (VKAPI_PTR *PFN_vkCmdSetVertexInputEXT)(VkCommandBuffer commandBuffer, uint32_t vertexBindingDescriptionCount, const VkVertexInputBindingDescription2EXT* pVertexBindingDescriptions, uint32_t vertexAttributeDescriptionCount, const VkVertexInputAttributeDescription2EXT* pVertexAttributeDescriptions); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetVertexInputEXT( + VkCommandBuffer commandBuffer, + uint32_t vertexBindingDescriptionCount, + const VkVertexInputBindingDescription2EXT* pVertexBindingDescriptions, + uint32_t vertexAttributeDescriptionCount, + const VkVertexInputAttributeDescription2EXT* pVertexAttributeDescriptions); +#endif +#endif + + +// VK_EXT_physical_device_drm is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_physical_device_drm 1 +#define VK_EXT_PHYSICAL_DEVICE_DRM_SPEC_VERSION 1 +#define VK_EXT_PHYSICAL_DEVICE_DRM_EXTENSION_NAME "VK_EXT_physical_device_drm" +typedef struct VkPhysicalDeviceDrmPropertiesEXT { + VkStructureType sType; + void* pNext; + VkBool32 hasPrimary; + VkBool32 hasRender; + int64_t primaryMajor; + int64_t primaryMinor; + int64_t renderMajor; + int64_t renderMinor; +} VkPhysicalDeviceDrmPropertiesEXT; + + + +// VK_EXT_device_address_binding_report is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_device_address_binding_report 1 +#define VK_EXT_DEVICE_ADDRESS_BINDING_REPORT_SPEC_VERSION 1 +#define VK_EXT_DEVICE_ADDRESS_BINDING_REPORT_EXTENSION_NAME "VK_EXT_device_address_binding_report" + +typedef enum VkDeviceAddressBindingTypeEXT { + VK_DEVICE_ADDRESS_BINDING_TYPE_BIND_EXT = 0, + VK_DEVICE_ADDRESS_BINDING_TYPE_UNBIND_EXT = 1, + VK_DEVICE_ADDRESS_BINDING_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDeviceAddressBindingTypeEXT; + +typedef enum VkDeviceAddressBindingFlagBitsEXT { + VK_DEVICE_ADDRESS_BINDING_INTERNAL_OBJECT_BIT_EXT = 0x00000001, + VK_DEVICE_ADDRESS_BINDING_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDeviceAddressBindingFlagBitsEXT; +typedef VkFlags VkDeviceAddressBindingFlagsEXT; +typedef struct VkPhysicalDeviceAddressBindingReportFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 reportAddressBinding; +} VkPhysicalDeviceAddressBindingReportFeaturesEXT; + +typedef struct VkDeviceAddressBindingCallbackDataEXT { + VkStructureType sType; + void* pNext; + VkDeviceAddressBindingFlagsEXT flags; + VkDeviceAddress baseAddress; + VkDeviceSize size; + VkDeviceAddressBindingTypeEXT bindingType; +} VkDeviceAddressBindingCallbackDataEXT; + + + +// VK_EXT_depth_clip_control is a preprocessor guard. Do not pass it to API calls. 
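+// Editor's note (illustrative sketch, not part of the generated Khronos
+// header): VkPipelineViewportDepthClipControlCreateInfoEXT, declared below,
+// is chained into VkPipelineViewportStateCreateInfo at pipeline creation to
+// request an OpenGL-style [-1, 1] clip-space depth range:
+//
+//     VkPipelineViewportDepthClipControlCreateInfoEXT clipControl = {
+//         .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_DEPTH_CLIP_CONTROL_CREATE_INFO_EXT,
+//         .negativeOneToOne = VK_TRUE };
+//     viewportState.pNext = &clipControl;  /* hypothetical viewport state */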
+#define VK_EXT_depth_clip_control 1 +#define VK_EXT_DEPTH_CLIP_CONTROL_SPEC_VERSION 1 +#define VK_EXT_DEPTH_CLIP_CONTROL_EXTENSION_NAME "VK_EXT_depth_clip_control" +typedef struct VkPhysicalDeviceDepthClipControlFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 depthClipControl; +} VkPhysicalDeviceDepthClipControlFeaturesEXT; + +typedef struct VkPipelineViewportDepthClipControlCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkBool32 negativeOneToOne; +} VkPipelineViewportDepthClipControlCreateInfoEXT; + + + +// VK_EXT_primitive_topology_list_restart is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_primitive_topology_list_restart 1 +#define VK_EXT_PRIMITIVE_TOPOLOGY_LIST_RESTART_SPEC_VERSION 1 +#define VK_EXT_PRIMITIVE_TOPOLOGY_LIST_RESTART_EXTENSION_NAME "VK_EXT_primitive_topology_list_restart" +typedef struct VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 primitiveTopologyListRestart; + VkBool32 primitiveTopologyPatchListRestart; +} VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT; + + + +// VK_EXT_present_mode_fifo_latest_ready is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_present_mode_fifo_latest_ready 1 +#define VK_EXT_PRESENT_MODE_FIFO_LATEST_READY_SPEC_VERSION 1 +#define VK_EXT_PRESENT_MODE_FIFO_LATEST_READY_EXTENSION_NAME "VK_EXT_present_mode_fifo_latest_ready" +typedef VkPhysicalDevicePresentModeFifoLatestReadyFeaturesKHR VkPhysicalDevicePresentModeFifoLatestReadyFeaturesEXT; + + + +// VK_HUAWEI_subpass_shading is a preprocessor guard. Do not pass it to API calls. +#define VK_HUAWEI_subpass_shading 1 +#define VK_HUAWEI_SUBPASS_SHADING_SPEC_VERSION 3 +#define VK_HUAWEI_SUBPASS_SHADING_EXTENSION_NAME "VK_HUAWEI_subpass_shading" +typedef struct VkSubpassShadingPipelineCreateInfoHUAWEI { + VkStructureType sType; + void* pNext; + VkRenderPass renderPass; + uint32_t subpass; +} VkSubpassShadingPipelineCreateInfoHUAWEI; + +typedef struct VkPhysicalDeviceSubpassShadingFeaturesHUAWEI { + VkStructureType sType; + void* pNext; + VkBool32 subpassShading; +} VkPhysicalDeviceSubpassShadingFeaturesHUAWEI; + +typedef struct VkPhysicalDeviceSubpassShadingPropertiesHUAWEI { + VkStructureType sType; + void* pNext; + uint32_t maxSubpassShadingWorkgroupSizeAspectRatio; +} VkPhysicalDeviceSubpassShadingPropertiesHUAWEI; + +typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI)(VkDevice device, VkRenderPass renderpass, VkExtent2D* pMaxWorkgroupSize); +typedef void (VKAPI_PTR *PFN_vkCmdSubpassShadingHUAWEI)(VkCommandBuffer commandBuffer); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI( + VkDevice device, + VkRenderPass renderpass, + VkExtent2D* pMaxWorkgroupSize); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSubpassShadingHUAWEI( + VkCommandBuffer commandBuffer); +#endif +#endif + + +// VK_HUAWEI_invocation_mask is a preprocessor guard. Do not pass it to API calls. 
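+// Editor's note (illustrative sketch, not part of the generated Khronos
+// header): the invocation-mask image view declared below is bound before a
+// ray tracing dispatch; the spec expects VK_IMAGE_LAYOUT_GENERAL here. `cmd`
+// and `maskView` are hypothetical handles:
+//
+//     vkCmdBindInvocationMaskHUAWEI(cmd, maskView, VK_IMAGE_LAYOUT_GENERAL);
+//     vkCmdTraceRaysKHR(cmd, ...);  /* subsequent trace honors the mask */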
+#define VK_HUAWEI_invocation_mask 1 +#define VK_HUAWEI_INVOCATION_MASK_SPEC_VERSION 1 +#define VK_HUAWEI_INVOCATION_MASK_EXTENSION_NAME "VK_HUAWEI_invocation_mask" +typedef struct VkPhysicalDeviceInvocationMaskFeaturesHUAWEI { + VkStructureType sType; + void* pNext; + VkBool32 invocationMask; +} VkPhysicalDeviceInvocationMaskFeaturesHUAWEI; + +typedef void (VKAPI_PTR *PFN_vkCmdBindInvocationMaskHUAWEI)(VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBindInvocationMaskHUAWEI( + VkCommandBuffer commandBuffer, + VkImageView imageView, + VkImageLayout imageLayout); +#endif +#endif + + +// VK_NV_external_memory_rdma is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_external_memory_rdma 1 +typedef void* VkRemoteAddressNV; +#define VK_NV_EXTERNAL_MEMORY_RDMA_SPEC_VERSION 1 +#define VK_NV_EXTERNAL_MEMORY_RDMA_EXTENSION_NAME "VK_NV_external_memory_rdma" +typedef struct VkMemoryGetRemoteAddressInfoNV { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; + VkExternalMemoryHandleTypeFlagBits handleType; +} VkMemoryGetRemoteAddressInfoNV; + +typedef struct VkPhysicalDeviceExternalMemoryRDMAFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 externalMemoryRDMA; +} VkPhysicalDeviceExternalMemoryRDMAFeaturesNV; + +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryRemoteAddressNV)(VkDevice device, const VkMemoryGetRemoteAddressInfoNV* pMemoryGetRemoteAddressInfo, VkRemoteAddressNV* pAddress); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryRemoteAddressNV( + VkDevice device, + const VkMemoryGetRemoteAddressInfoNV* pMemoryGetRemoteAddressInfo, + VkRemoteAddressNV* pAddress); +#endif +#endif + + +// VK_EXT_pipeline_properties is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_pipeline_properties 1 +#define VK_EXT_PIPELINE_PROPERTIES_SPEC_VERSION 1 +#define VK_EXT_PIPELINE_PROPERTIES_EXTENSION_NAME "VK_EXT_pipeline_properties" +typedef VkPipelineInfoKHR VkPipelineInfoEXT; + +typedef struct VkPipelinePropertiesIdentifierEXT { + VkStructureType sType; + void* pNext; + uint8_t pipelineIdentifier[VK_UUID_SIZE]; +} VkPipelinePropertiesIdentifierEXT; + +typedef struct VkPhysicalDevicePipelinePropertiesFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 pipelinePropertiesIdentifier; +} VkPhysicalDevicePipelinePropertiesFeaturesEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPipelinePropertiesEXT)(VkDevice device, const VkPipelineInfoEXT* pPipelineInfo, VkBaseOutStructure* pPipelineProperties); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelinePropertiesEXT( + VkDevice device, + const VkPipelineInfoEXT* pPipelineInfo, + VkBaseOutStructure* pPipelineProperties); +#endif +#endif + + +// VK_EXT_frame_boundary is a preprocessor guard. Do not pass it to API calls. 
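+// Editor's note (illustrative sketch, not part of the generated Khronos
+// header): a VkFrameBoundaryEXT, declared below, can be chained into a queue
+// submission to tell tools and drivers which submission ends a frame.
+// `frameIndex` and `swapchainImage` are hypothetical:
+//
+//     VkFrameBoundaryEXT fb = {
+//         .sType = VK_STRUCTURE_TYPE_FRAME_BOUNDARY_EXT,
+//         .flags = VK_FRAME_BOUNDARY_FRAME_END_BIT_EXT,
+//         .frameID = frameIndex,
+//         .imageCount = 1, .pImages = &swapchainImage };
+//     submitInfo.pNext = &fb;  /* last submission of the frame */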
+#define VK_EXT_frame_boundary 1 +#define VK_EXT_FRAME_BOUNDARY_SPEC_VERSION 1 +#define VK_EXT_FRAME_BOUNDARY_EXTENSION_NAME "VK_EXT_frame_boundary" + +typedef enum VkFrameBoundaryFlagBitsEXT { + VK_FRAME_BOUNDARY_FRAME_END_BIT_EXT = 0x00000001, + VK_FRAME_BOUNDARY_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkFrameBoundaryFlagBitsEXT; +typedef VkFlags VkFrameBoundaryFlagsEXT; +typedef struct VkPhysicalDeviceFrameBoundaryFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 frameBoundary; +} VkPhysicalDeviceFrameBoundaryFeaturesEXT; + +typedef struct VkFrameBoundaryEXT { + VkStructureType sType; + const void* pNext; + VkFrameBoundaryFlagsEXT flags; + uint64_t frameID; + uint32_t imageCount; + const VkImage* pImages; + uint32_t bufferCount; + const VkBuffer* pBuffers; + uint64_t tagName; + size_t tagSize; + const void* pTag; +} VkFrameBoundaryEXT; + + + +// VK_EXT_multisampled_render_to_single_sampled is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_multisampled_render_to_single_sampled 1 +#define VK_EXT_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_SPEC_VERSION 1 +#define VK_EXT_MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_EXTENSION_NAME "VK_EXT_multisampled_render_to_single_sampled" +typedef struct VkPhysicalDeviceMultisampledRenderToSingleSampledFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 multisampledRenderToSingleSampled; +} VkPhysicalDeviceMultisampledRenderToSingleSampledFeaturesEXT; + +typedef struct VkSubpassResolvePerformanceQueryEXT { + VkStructureType sType; + void* pNext; + VkBool32 optimal; +} VkSubpassResolvePerformanceQueryEXT; + +typedef struct VkMultisampledRenderToSingleSampledInfoEXT { + VkStructureType sType; + const void* pNext; + VkBool32 multisampledRenderToSingleSampledEnable; + VkSampleCountFlagBits rasterizationSamples; +} VkMultisampledRenderToSingleSampledInfoEXT; + + + +// VK_EXT_extended_dynamic_state2 is a preprocessor guard. Do not pass it to API calls. 
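+// Editor's note (illustrative sketch, not part of the generated Khronos
+// header): each vkCmdSet*EXT entry point declared below overrides one piece
+// of baked pipeline state, provided the pipeline was created with the
+// matching VK_DYNAMIC_STATE_* value and the corresponding feature bit is on:
+//
+//     vkCmdSetRasterizerDiscardEnableEXT(cmd, VK_FALSE);
+//     vkCmdSetDepthBiasEnableEXT(cmd, VK_TRUE);
+//     vkCmdSetPrimitiveRestartEnableEXT(cmd, VK_FALSE);
+//     vkCmdSetLogicOpEXT(cmd, VK_LOGIC_OP_COPY); /* extendedDynamicState2LogicOp */
+//     vkCmdSetPatchControlPointsEXT(cmd, 3);     /* ...PatchControlPoints */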
+#define VK_EXT_extended_dynamic_state2 1 +#define VK_EXT_EXTENDED_DYNAMIC_STATE_2_SPEC_VERSION 1 +#define VK_EXT_EXTENDED_DYNAMIC_STATE_2_EXTENSION_NAME "VK_EXT_extended_dynamic_state2" +typedef struct VkPhysicalDeviceExtendedDynamicState2FeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 extendedDynamicState2; + VkBool32 extendedDynamicState2LogicOp; + VkBool32 extendedDynamicState2PatchControlPoints; +} VkPhysicalDeviceExtendedDynamicState2FeaturesEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdSetPatchControlPointsEXT)(VkCommandBuffer commandBuffer, uint32_t patchControlPoints); +typedef void (VKAPI_PTR *PFN_vkCmdSetRasterizerDiscardEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 rasterizerDiscardEnable); +typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBiasEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 depthBiasEnable); +typedef void (VKAPI_PTR *PFN_vkCmdSetLogicOpEXT)(VkCommandBuffer commandBuffer, VkLogicOp logicOp); +typedef void (VKAPI_PTR *PFN_vkCmdSetPrimitiveRestartEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 primitiveRestartEnable); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetPatchControlPointsEXT( + VkCommandBuffer commandBuffer, + uint32_t patchControlPoints); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetRasterizerDiscardEnableEXT( + VkCommandBuffer commandBuffer, + VkBool32 rasterizerDiscardEnable); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBiasEnableEXT( + VkCommandBuffer commandBuffer, + VkBool32 depthBiasEnable); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetLogicOpEXT( + VkCommandBuffer commandBuffer, + VkLogicOp logicOp); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetPrimitiveRestartEnableEXT( + VkCommandBuffer commandBuffer, + VkBool32 primitiveRestartEnable); +#endif +#endif + + +// VK_EXT_color_write_enable is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_color_write_enable 1 +#define VK_EXT_COLOR_WRITE_ENABLE_SPEC_VERSION 1 +#define VK_EXT_COLOR_WRITE_ENABLE_EXTENSION_NAME "VK_EXT_color_write_enable" +typedef struct VkPhysicalDeviceColorWriteEnableFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 colorWriteEnable; +} VkPhysicalDeviceColorWriteEnableFeaturesEXT; + +typedef struct VkPipelineColorWriteCreateInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t attachmentCount; + const VkBool32* pColorWriteEnables; +} VkPipelineColorWriteCreateInfoEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdSetColorWriteEnableEXT)(VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkBool32* pColorWriteEnables); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetColorWriteEnableEXT( + VkCommandBuffer commandBuffer, + uint32_t attachmentCount, + const VkBool32* pColorWriteEnables); +#endif +#endif + + +// VK_EXT_primitives_generated_query is a preprocessor guard. Do not pass it to API calls. 
+#define VK_EXT_primitives_generated_query 1
+#define VK_EXT_PRIMITIVES_GENERATED_QUERY_SPEC_VERSION 1
+#define VK_EXT_PRIMITIVES_GENERATED_QUERY_EXTENSION_NAME "VK_EXT_primitives_generated_query"
+typedef struct VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 primitivesGeneratedQuery;
+    VkBool32 primitivesGeneratedQueryWithRasterizerDiscard;
+    VkBool32 primitivesGeneratedQueryWithNonZeroStreams;
+} VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT;
+
+
+
+// VK_EXT_global_priority_query is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_global_priority_query 1
+#define VK_EXT_GLOBAL_PRIORITY_QUERY_SPEC_VERSION 1
+#define VK_EXT_GLOBAL_PRIORITY_QUERY_EXTENSION_NAME "VK_EXT_global_priority_query"
+#define VK_MAX_GLOBAL_PRIORITY_SIZE_EXT VK_MAX_GLOBAL_PRIORITY_SIZE
+typedef VkPhysicalDeviceGlobalPriorityQueryFeatures VkPhysicalDeviceGlobalPriorityQueryFeaturesEXT;
+
+typedef VkQueueFamilyGlobalPriorityProperties VkQueueFamilyGlobalPriorityPropertiesEXT;
+
+
+
+// VK_VALVE_video_encode_rgb_conversion is a preprocessor guard. Do not pass it to API calls.
+#define VK_VALVE_video_encode_rgb_conversion 1
+#define VK_VALVE_VIDEO_ENCODE_RGB_CONVERSION_SPEC_VERSION 1
+#define VK_VALVE_VIDEO_ENCODE_RGB_CONVERSION_EXTENSION_NAME "VK_VALVE_video_encode_rgb_conversion"
+
+typedef enum VkVideoEncodeRgbModelConversionFlagBitsVALVE {
+    VK_VIDEO_ENCODE_RGB_MODEL_CONVERSION_RGB_IDENTITY_BIT_VALVE = 0x00000001,
+    VK_VIDEO_ENCODE_RGB_MODEL_CONVERSION_YCBCR_IDENTITY_BIT_VALVE = 0x00000002,
+    VK_VIDEO_ENCODE_RGB_MODEL_CONVERSION_YCBCR_709_BIT_VALVE = 0x00000004,
+    VK_VIDEO_ENCODE_RGB_MODEL_CONVERSION_YCBCR_601_BIT_VALVE = 0x00000008,
+    VK_VIDEO_ENCODE_RGB_MODEL_CONVERSION_YCBCR_2020_BIT_VALVE = 0x00000010,
+    VK_VIDEO_ENCODE_RGB_MODEL_CONVERSION_FLAG_BITS_MAX_ENUM_VALVE = 0x7FFFFFFF
+} VkVideoEncodeRgbModelConversionFlagBitsVALVE;
+typedef VkFlags VkVideoEncodeRgbModelConversionFlagsVALVE;
+
+typedef enum VkVideoEncodeRgbRangeCompressionFlagBitsVALVE {
+    VK_VIDEO_ENCODE_RGB_RANGE_COMPRESSION_FULL_RANGE_BIT_VALVE = 0x00000001,
+    VK_VIDEO_ENCODE_RGB_RANGE_COMPRESSION_NARROW_RANGE_BIT_VALVE = 0x00000002,
+    VK_VIDEO_ENCODE_RGB_RANGE_COMPRESSION_FLAG_BITS_MAX_ENUM_VALVE = 0x7FFFFFFF
+} VkVideoEncodeRgbRangeCompressionFlagBitsVALVE;
+typedef VkFlags VkVideoEncodeRgbRangeCompressionFlagsVALVE;
+
+typedef enum VkVideoEncodeRgbChromaOffsetFlagBitsVALVE {
+    VK_VIDEO_ENCODE_RGB_CHROMA_OFFSET_COSITED_EVEN_BIT_VALVE = 0x00000001,
+    VK_VIDEO_ENCODE_RGB_CHROMA_OFFSET_MIDPOINT_BIT_VALVE = 0x00000002,
+    VK_VIDEO_ENCODE_RGB_CHROMA_OFFSET_FLAG_BITS_MAX_ENUM_VALVE = 0x7FFFFFFF
+} VkVideoEncodeRgbChromaOffsetFlagBitsVALVE;
+typedef VkFlags VkVideoEncodeRgbChromaOffsetFlagsVALVE;
+typedef struct VkPhysicalDeviceVideoEncodeRgbConversionFeaturesVALVE {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 videoEncodeRgbConversion;
+} VkPhysicalDeviceVideoEncodeRgbConversionFeaturesVALVE;
+
+typedef struct VkVideoEncodeRgbConversionCapabilitiesVALVE {
+    VkStructureType sType;
+    void* pNext;
+    VkVideoEncodeRgbModelConversionFlagsVALVE rgbModels;
+    VkVideoEncodeRgbRangeCompressionFlagsVALVE rgbRanges;
+    VkVideoEncodeRgbChromaOffsetFlagsVALVE xChromaOffsets;
+    VkVideoEncodeRgbChromaOffsetFlagsVALVE yChromaOffsets;
+} VkVideoEncodeRgbConversionCapabilitiesVALVE;
+
+typedef struct VkVideoEncodeProfileRgbConversionInfoVALVE {
+    VkStructureType sType;
+    const void* pNext;
+    VkBool32 performEncodeRgbConversion;
+} VkVideoEncodeProfileRgbConversionInfoVALVE;
+
+typedef struct VkVideoEncodeSessionRgbConversionCreateInfoVALVE {
+    VkStructureType sType;
+    const void* pNext;
+    VkVideoEncodeRgbModelConversionFlagBitsVALVE rgbModel;
+    VkVideoEncodeRgbRangeCompressionFlagBitsVALVE rgbRange;
+    VkVideoEncodeRgbChromaOffsetFlagBitsVALVE xChromaOffset;
+    VkVideoEncodeRgbChromaOffsetFlagBitsVALVE yChromaOffset;
+} VkVideoEncodeSessionRgbConversionCreateInfoVALVE;
+
+
+
+// VK_EXT_image_view_min_lod is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_image_view_min_lod 1
+#define VK_EXT_IMAGE_VIEW_MIN_LOD_SPEC_VERSION 1
+#define VK_EXT_IMAGE_VIEW_MIN_LOD_EXTENSION_NAME "VK_EXT_image_view_min_lod"
+typedef struct VkPhysicalDeviceImageViewMinLodFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 minLod;
+} VkPhysicalDeviceImageViewMinLodFeaturesEXT;
+
+typedef struct VkImageViewMinLodCreateInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    float minLod;
+} VkImageViewMinLodCreateInfoEXT;
+
+
+
+// VK_EXT_multi_draw is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_multi_draw 1
+#define VK_EXT_MULTI_DRAW_SPEC_VERSION 1
+#define VK_EXT_MULTI_DRAW_EXTENSION_NAME "VK_EXT_multi_draw"
+typedef struct VkPhysicalDeviceMultiDrawFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 multiDraw;
+} VkPhysicalDeviceMultiDrawFeaturesEXT;
+
+typedef struct VkPhysicalDeviceMultiDrawPropertiesEXT {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t maxMultiDrawCount;
+} VkPhysicalDeviceMultiDrawPropertiesEXT;
+
+typedef struct VkMultiDrawInfoEXT {
+    uint32_t firstVertex;
+    uint32_t vertexCount;
+} VkMultiDrawInfoEXT;
+
+typedef struct VkMultiDrawIndexedInfoEXT {
+    uint32_t firstIndex;
+    uint32_t indexCount;
+    int32_t vertexOffset;
+} VkMultiDrawIndexedInfoEXT;
+
+typedef void (VKAPI_PTR *PFN_vkCmdDrawMultiEXT)(VkCommandBuffer commandBuffer, uint32_t drawCount, const VkMultiDrawInfoEXT* pVertexInfo, uint32_t instanceCount, uint32_t firstInstance, uint32_t stride);
+typedef void (VKAPI_PTR *PFN_vkCmdDrawMultiIndexedEXT)(VkCommandBuffer commandBuffer, uint32_t drawCount, const VkMultiDrawIndexedInfoEXT* pIndexInfo, uint32_t instanceCount, uint32_t firstInstance, uint32_t stride, const int32_t* pVertexOffset);
+
+#ifndef VK_NO_PROTOTYPES
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawMultiEXT(
+    VkCommandBuffer commandBuffer,
+    uint32_t drawCount,
+    const VkMultiDrawInfoEXT* pVertexInfo,
+    uint32_t instanceCount,
+    uint32_t firstInstance,
+    uint32_t stride);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawMultiIndexedEXT(
+    VkCommandBuffer commandBuffer,
+    uint32_t drawCount,
+    const VkMultiDrawIndexedInfoEXT* pIndexInfo,
+    uint32_t instanceCount,
+    uint32_t firstInstance,
+    uint32_t stride,
+    const int32_t* pVertexOffset);
+#endif
+#endif
+
+
+// VK_EXT_image_2d_view_of_3d is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_image_2d_view_of_3d 1
+#define VK_EXT_IMAGE_2D_VIEW_OF_3D_SPEC_VERSION 1
+#define VK_EXT_IMAGE_2D_VIEW_OF_3D_EXTENSION_NAME "VK_EXT_image_2d_view_of_3d"
+typedef struct VkPhysicalDeviceImage2DViewOf3DFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 image2DViewOf3D;
+    VkBool32 sampler2DViewOf3D;
+} VkPhysicalDeviceImage2DViewOf3DFeaturesEXT;
+
+
+
+// VK_EXT_shader_tile_image is a preprocessor guard. Do not pass it to API calls.
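+// Editor's note (illustrative sketch, not part of the generated Khronos
+// header): like most feature structs in this header, the tile-image features
+// below are queried by chaining into VkPhysicalDeviceFeatures2:
+//
+//     VkPhysicalDeviceShaderTileImageFeaturesEXT tileImage = {
+//         .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TILE_IMAGE_FEATURES_EXT };
+//     VkPhysicalDeviceFeatures2 features2 = {
+//         .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
+//         .pNext = &tileImage };
+//     vkGetPhysicalDeviceFeatures2(physicalDevice, &features2);
+//     /* tileImage.shaderTileImageColorReadAccess etc. are now populated */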
+#define VK_EXT_shader_tile_image 1
+#define VK_EXT_SHADER_TILE_IMAGE_SPEC_VERSION 1
+#define VK_EXT_SHADER_TILE_IMAGE_EXTENSION_NAME "VK_EXT_shader_tile_image"
+typedef struct VkPhysicalDeviceShaderTileImageFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 shaderTileImageColorReadAccess;
+    VkBool32 shaderTileImageDepthReadAccess;
+    VkBool32 shaderTileImageStencilReadAccess;
+} VkPhysicalDeviceShaderTileImageFeaturesEXT;
+
+typedef struct VkPhysicalDeviceShaderTileImagePropertiesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 shaderTileImageCoherentReadAccelerated;
+    VkBool32 shaderTileImageReadSampleFromPixelRateInvocation;
+    VkBool32 shaderTileImageReadFromHelperInvocation;
+} VkPhysicalDeviceShaderTileImagePropertiesEXT;
+
+
+
+// VK_EXT_opacity_micromap is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_opacity_micromap 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkMicromapEXT)
+#define VK_EXT_OPACITY_MICROMAP_SPEC_VERSION 2
+#define VK_EXT_OPACITY_MICROMAP_EXTENSION_NAME "VK_EXT_opacity_micromap"
+
+typedef enum VkMicromapTypeEXT {
+    VK_MICROMAP_TYPE_OPACITY_MICROMAP_EXT = 0,
+#ifdef VK_ENABLE_BETA_EXTENSIONS
+    VK_MICROMAP_TYPE_DISPLACEMENT_MICROMAP_NV = 1000397000,
+#endif
+    VK_MICROMAP_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkMicromapTypeEXT;
+
+typedef enum VkBuildMicromapModeEXT {
+    VK_BUILD_MICROMAP_MODE_BUILD_EXT = 0,
+    VK_BUILD_MICROMAP_MODE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkBuildMicromapModeEXT;
+
+typedef enum VkCopyMicromapModeEXT {
+    VK_COPY_MICROMAP_MODE_CLONE_EXT = 0,
+    VK_COPY_MICROMAP_MODE_SERIALIZE_EXT = 1,
+    VK_COPY_MICROMAP_MODE_DESERIALIZE_EXT = 2,
+    VK_COPY_MICROMAP_MODE_COMPACT_EXT = 3,
+    VK_COPY_MICROMAP_MODE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkCopyMicromapModeEXT;
+
+typedef enum VkOpacityMicromapFormatEXT {
+    VK_OPACITY_MICROMAP_FORMAT_2_STATE_EXT = 1,
+    VK_OPACITY_MICROMAP_FORMAT_4_STATE_EXT = 2,
+    VK_OPACITY_MICROMAP_FORMAT_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkOpacityMicromapFormatEXT;
+
+typedef enum VkOpacityMicromapSpecialIndexEXT {
+    VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_TRANSPARENT_EXT = -1,
+    VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_OPAQUE_EXT = -2,
+    VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_UNKNOWN_TRANSPARENT_EXT = -3,
+    VK_OPACITY_MICROMAP_SPECIAL_INDEX_FULLY_UNKNOWN_OPAQUE_EXT = -4,
+    VK_OPACITY_MICROMAP_SPECIAL_INDEX_CLUSTER_GEOMETRY_DISABLE_OPACITY_MICROMAP_NV = -5,
+    VK_OPACITY_MICROMAP_SPECIAL_INDEX_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkOpacityMicromapSpecialIndexEXT;
+
+typedef enum VkAccelerationStructureCompatibilityKHR {
+    VK_ACCELERATION_STRUCTURE_COMPATIBILITY_COMPATIBLE_KHR = 0,
+    VK_ACCELERATION_STRUCTURE_COMPATIBILITY_INCOMPATIBLE_KHR = 1,
+    VK_ACCELERATION_STRUCTURE_COMPATIBILITY_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkAccelerationStructureCompatibilityKHR;
+
+typedef enum VkAccelerationStructureBuildTypeKHR {
+    VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_KHR = 0,
+    VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR = 1,
+    VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_OR_DEVICE_KHR = 2,
+    VK_ACCELERATION_STRUCTURE_BUILD_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF
+} VkAccelerationStructureBuildTypeKHR;
+
+typedef enum VkBuildMicromapFlagBitsEXT {
+    VK_BUILD_MICROMAP_PREFER_FAST_TRACE_BIT_EXT = 0x00000001,
+    VK_BUILD_MICROMAP_PREFER_FAST_BUILD_BIT_EXT = 0x00000002,
+    VK_BUILD_MICROMAP_ALLOW_COMPACTION_BIT_EXT = 0x00000004,
+    VK_BUILD_MICROMAP_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkBuildMicromapFlagBitsEXT;
+typedef VkFlags VkBuildMicromapFlagsEXT;
+
+typedef enum VkMicromapCreateFlagBitsEXT {
+    VK_MICROMAP_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT = 0x00000001,
+    VK_MICROMAP_CREATE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkMicromapCreateFlagBitsEXT;
+typedef VkFlags VkMicromapCreateFlagsEXT;
+typedef struct VkMicromapUsageEXT {
+    uint32_t count;
+    uint32_t subdivisionLevel;
+    uint32_t format;
+} VkMicromapUsageEXT;
+
+typedef union VkDeviceOrHostAddressKHR {
+    VkDeviceAddress deviceAddress;
+    void* hostAddress;
+} VkDeviceOrHostAddressKHR;
+
+typedef struct VkMicromapBuildInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkMicromapTypeEXT type;
+    VkBuildMicromapFlagsEXT flags;
+    VkBuildMicromapModeEXT mode;
+    VkMicromapEXT dstMicromap;
+    uint32_t usageCountsCount;
+    const VkMicromapUsageEXT* pUsageCounts;
+    const VkMicromapUsageEXT* const* ppUsageCounts;
+    VkDeviceOrHostAddressConstKHR data;
+    VkDeviceOrHostAddressKHR scratchData;
+    VkDeviceOrHostAddressConstKHR triangleArray;
+    VkDeviceSize triangleArrayStride;
+} VkMicromapBuildInfoEXT;
+
+typedef struct VkMicromapCreateInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkMicromapCreateFlagsEXT createFlags;
+    VkBuffer buffer;
+    VkDeviceSize offset;
+    VkDeviceSize size;
+    VkMicromapTypeEXT type;
+    VkDeviceAddress deviceAddress;
+} VkMicromapCreateInfoEXT;
+
+typedef struct VkPhysicalDeviceOpacityMicromapFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 micromap;
+    VkBool32 micromapCaptureReplay;
+    VkBool32 micromapHostCommands;
+} VkPhysicalDeviceOpacityMicromapFeaturesEXT;
+
+typedef struct VkPhysicalDeviceOpacityMicromapPropertiesEXT {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t maxOpacity2StateSubdivisionLevel;
+    uint32_t maxOpacity4StateSubdivisionLevel;
+} VkPhysicalDeviceOpacityMicromapPropertiesEXT;
+
+typedef struct VkMicromapVersionInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    const uint8_t* pVersionData;
+} VkMicromapVersionInfoEXT;
+
+typedef struct VkCopyMicromapToMemoryInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkMicromapEXT src;
+    VkDeviceOrHostAddressKHR dst;
+    VkCopyMicromapModeEXT mode;
+} VkCopyMicromapToMemoryInfoEXT;
+
+typedef struct VkCopyMemoryToMicromapInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkDeviceOrHostAddressConstKHR src;
+    VkMicromapEXT dst;
+    VkCopyMicromapModeEXT mode;
+} VkCopyMemoryToMicromapInfoEXT;
+
+typedef struct VkCopyMicromapInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkMicromapEXT src;
+    VkMicromapEXT dst;
+    VkCopyMicromapModeEXT mode;
+} VkCopyMicromapInfoEXT;
+
+typedef struct VkMicromapBuildSizesInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkDeviceSize micromapSize;
+    VkDeviceSize buildScratchSize;
+    VkBool32 discardable;
+} VkMicromapBuildSizesInfoEXT;
+
+typedef struct VkAccelerationStructureTrianglesOpacityMicromapEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkIndexType indexType;
+    VkDeviceOrHostAddressConstKHR indexBuffer;
+    VkDeviceSize indexStride;
+    uint32_t baseTriangle;
+    uint32_t usageCountsCount;
+    const VkMicromapUsageEXT* pUsageCounts;
+    const VkMicromapUsageEXT* const* ppUsageCounts;
+    VkMicromapEXT micromap;
+} VkAccelerationStructureTrianglesOpacityMicromapEXT;
+
+typedef struct VkMicromapTriangleEXT {
+    uint32_t dataOffset;
+    uint16_t subdivisionLevel;
+    uint16_t format;
+} VkMicromapTriangleEXT;
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateMicromapEXT)(VkDevice device, const VkMicromapCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkMicromapEXT* pMicromap);
+typedef void (VKAPI_PTR *PFN_vkDestroyMicromapEXT)(VkDevice device, VkMicromapEXT micromap, const VkAllocationCallbacks* pAllocator);
+typedef void (VKAPI_PTR *PFN_vkCmdBuildMicromapsEXT)(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkMicromapBuildInfoEXT* pInfos); +typedef VkResult (VKAPI_PTR *PFN_vkBuildMicromapsEXT)(VkDevice device, VkDeferredOperationKHR deferredOperation, uint32_t infoCount, const VkMicromapBuildInfoEXT* pInfos); +typedef VkResult (VKAPI_PTR *PFN_vkCopyMicromapEXT)(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyMicromapInfoEXT* pInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCopyMicromapToMemoryEXT)(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyMicromapToMemoryInfoEXT* pInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCopyMemoryToMicromapEXT)(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyMemoryToMicromapInfoEXT* pInfo); +typedef VkResult (VKAPI_PTR *PFN_vkWriteMicromapsPropertiesEXT)(VkDevice device, uint32_t micromapCount, const VkMicromapEXT* pMicromaps, VkQueryType queryType, size_t dataSize, void* pData, size_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdCopyMicromapEXT)(VkCommandBuffer commandBuffer, const VkCopyMicromapInfoEXT* pInfo); +typedef void (VKAPI_PTR *PFN_vkCmdCopyMicromapToMemoryEXT)(VkCommandBuffer commandBuffer, const VkCopyMicromapToMemoryInfoEXT* pInfo); +typedef void (VKAPI_PTR *PFN_vkCmdCopyMemoryToMicromapEXT)(VkCommandBuffer commandBuffer, const VkCopyMemoryToMicromapInfoEXT* pInfo); +typedef void (VKAPI_PTR *PFN_vkCmdWriteMicromapsPropertiesEXT)(VkCommandBuffer commandBuffer, uint32_t micromapCount, const VkMicromapEXT* pMicromaps, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery); +typedef void (VKAPI_PTR *PFN_vkGetDeviceMicromapCompatibilityEXT)(VkDevice device, const VkMicromapVersionInfoEXT* pVersionInfo, VkAccelerationStructureCompatibilityKHR* pCompatibility); +typedef void (VKAPI_PTR *PFN_vkGetMicromapBuildSizesEXT)(VkDevice device, VkAccelerationStructureBuildTypeKHR buildType, const VkMicromapBuildInfoEXT* pBuildInfo, VkMicromapBuildSizesInfoEXT* pSizeInfo); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateMicromapEXT( + VkDevice device, + const VkMicromapCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkMicromapEXT* pMicromap); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkDestroyMicromapEXT( + VkDevice device, + VkMicromapEXT micromap, + const VkAllocationCallbacks* pAllocator); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBuildMicromapsEXT( + VkCommandBuffer commandBuffer, + uint32_t infoCount, + const VkMicromapBuildInfoEXT* pInfos); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkBuildMicromapsEXT( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + uint32_t infoCount, + const VkMicromapBuildInfoEXT* pInfos); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCopyMicromapEXT( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + const VkCopyMicromapInfoEXT* pInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCopyMicromapToMemoryEXT( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + const VkCopyMicromapToMemoryInfoEXT* pInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCopyMemoryToMicromapEXT( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + const VkCopyMemoryToMicromapInfoEXT* pInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES 
+VKAPI_ATTR VkResult VKAPI_CALL vkWriteMicromapsPropertiesEXT( + VkDevice device, + uint32_t micromapCount, + const VkMicromapEXT* pMicromaps, + VkQueryType queryType, + size_t dataSize, + void* pData, + size_t stride); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdCopyMicromapEXT( + VkCommandBuffer commandBuffer, + const VkCopyMicromapInfoEXT* pInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdCopyMicromapToMemoryEXT( + VkCommandBuffer commandBuffer, + const VkCopyMicromapToMemoryInfoEXT* pInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdCopyMemoryToMicromapEXT( + VkCommandBuffer commandBuffer, + const VkCopyMemoryToMicromapInfoEXT* pInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdWriteMicromapsPropertiesEXT( + VkCommandBuffer commandBuffer, + uint32_t micromapCount, + const VkMicromapEXT* pMicromaps, + VkQueryType queryType, + VkQueryPool queryPool, + uint32_t firstQuery); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetDeviceMicromapCompatibilityEXT( + VkDevice device, + const VkMicromapVersionInfoEXT* pVersionInfo, + VkAccelerationStructureCompatibilityKHR* pCompatibility); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetMicromapBuildSizesEXT( + VkDevice device, + VkAccelerationStructureBuildTypeKHR buildType, + const VkMicromapBuildInfoEXT* pBuildInfo, + VkMicromapBuildSizesInfoEXT* pSizeInfo); +#endif +#endif + + +// VK_EXT_load_store_op_none is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_load_store_op_none 1 +#define VK_EXT_LOAD_STORE_OP_NONE_SPEC_VERSION 1 +#define VK_EXT_LOAD_STORE_OP_NONE_EXTENSION_NAME "VK_EXT_load_store_op_none" + + +// VK_HUAWEI_cluster_culling_shader is a preprocessor guard. Do not pass it to API calls. 
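+// Editor's note (illustrative sketch, not part of the generated Khronos
+// header): the cluster-culling draw declared below dispatches workgroups in a
+// compute-like fashion; the indirect variant reads the three counts from a
+// buffer whose offset must honor indirectBufferOffsetAlignment:
+//
+//     vkCmdDrawClusterHUAWEI(cmd, groupCountX, 1, 1);
+//     vkCmdDrawClusterIndirectHUAWEI(cmd, indirectBuffer, 0);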
+#define VK_HUAWEI_cluster_culling_shader 1 +#define VK_HUAWEI_CLUSTER_CULLING_SHADER_SPEC_VERSION 3 +#define VK_HUAWEI_CLUSTER_CULLING_SHADER_EXTENSION_NAME "VK_HUAWEI_cluster_culling_shader" +typedef struct VkPhysicalDeviceClusterCullingShaderFeaturesHUAWEI { + VkStructureType sType; + void* pNext; + VkBool32 clustercullingShader; + VkBool32 multiviewClusterCullingShader; +} VkPhysicalDeviceClusterCullingShaderFeaturesHUAWEI; + +typedef struct VkPhysicalDeviceClusterCullingShaderPropertiesHUAWEI { + VkStructureType sType; + void* pNext; + uint32_t maxWorkGroupCount[3]; + uint32_t maxWorkGroupSize[3]; + uint32_t maxOutputClusterCount; + VkDeviceSize indirectBufferOffsetAlignment; +} VkPhysicalDeviceClusterCullingShaderPropertiesHUAWEI; + +typedef struct VkPhysicalDeviceClusterCullingShaderVrsFeaturesHUAWEI { + VkStructureType sType; + void* pNext; + VkBool32 clusterShadingRate; +} VkPhysicalDeviceClusterCullingShaderVrsFeaturesHUAWEI; + +typedef void (VKAPI_PTR *PFN_vkCmdDrawClusterHUAWEI)(VkCommandBuffer commandBuffer, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ); +typedef void (VKAPI_PTR *PFN_vkCmdDrawClusterIndirectHUAWEI)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawClusterHUAWEI( + VkCommandBuffer commandBuffer, + uint32_t groupCountX, + uint32_t groupCountY, + uint32_t groupCountZ); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawClusterIndirectHUAWEI( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset); +#endif +#endif + + +// VK_EXT_border_color_swizzle is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_border_color_swizzle 1 +#define VK_EXT_BORDER_COLOR_SWIZZLE_SPEC_VERSION 1 +#define VK_EXT_BORDER_COLOR_SWIZZLE_EXTENSION_NAME "VK_EXT_border_color_swizzle" +typedef struct VkPhysicalDeviceBorderColorSwizzleFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 borderColorSwizzle; + VkBool32 borderColorSwizzleFromImage; +} VkPhysicalDeviceBorderColorSwizzleFeaturesEXT; + +typedef struct VkSamplerBorderColorComponentMappingCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkComponentMapping components; + VkBool32 srgb; +} VkSamplerBorderColorComponentMappingCreateInfoEXT; + + + +// VK_EXT_pageable_device_local_memory is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_pageable_device_local_memory 1 +#define VK_EXT_PAGEABLE_DEVICE_LOCAL_MEMORY_SPEC_VERSION 1 +#define VK_EXT_PAGEABLE_DEVICE_LOCAL_MEMORY_EXTENSION_NAME "VK_EXT_pageable_device_local_memory" +typedef struct VkPhysicalDevicePageableDeviceLocalMemoryFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 pageableDeviceLocalMemory; +} VkPhysicalDevicePageableDeviceLocalMemoryFeaturesEXT; + +typedef void (VKAPI_PTR *PFN_vkSetDeviceMemoryPriorityEXT)(VkDevice device, VkDeviceMemory memory, float priority); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkSetDeviceMemoryPriorityEXT( + VkDevice device, + VkDeviceMemory memory, + float priority); +#endif +#endif + + +// VK_ARM_shader_core_properties is a preprocessor guard. Do not pass it to API calls. 
+#define VK_ARM_shader_core_properties 1 +#define VK_ARM_SHADER_CORE_PROPERTIES_SPEC_VERSION 1 +#define VK_ARM_SHADER_CORE_PROPERTIES_EXTENSION_NAME "VK_ARM_shader_core_properties" +typedef struct VkPhysicalDeviceShaderCorePropertiesARM { + VkStructureType sType; + void* pNext; + uint32_t pixelRate; + uint32_t texelRate; + uint32_t fmaRate; +} VkPhysicalDeviceShaderCorePropertiesARM; + + + +// VK_ARM_scheduling_controls is a preprocessor guard. Do not pass it to API calls. +#define VK_ARM_scheduling_controls 1 +#define VK_ARM_SCHEDULING_CONTROLS_SPEC_VERSION 1 +#define VK_ARM_SCHEDULING_CONTROLS_EXTENSION_NAME "VK_ARM_scheduling_controls" +typedef VkFlags64 VkPhysicalDeviceSchedulingControlsFlagsARM; + +// Flag bits for VkPhysicalDeviceSchedulingControlsFlagBitsARM +typedef VkFlags64 VkPhysicalDeviceSchedulingControlsFlagBitsARM; +static const VkPhysicalDeviceSchedulingControlsFlagBitsARM VK_PHYSICAL_DEVICE_SCHEDULING_CONTROLS_SHADER_CORE_COUNT_ARM = 0x00000001ULL; + +typedef struct VkDeviceQueueShaderCoreControlCreateInfoARM { + VkStructureType sType; + void* pNext; + uint32_t shaderCoreCount; +} VkDeviceQueueShaderCoreControlCreateInfoARM; + +typedef struct VkPhysicalDeviceSchedulingControlsFeaturesARM { + VkStructureType sType; + void* pNext; + VkBool32 schedulingControls; +} VkPhysicalDeviceSchedulingControlsFeaturesARM; + +typedef struct VkPhysicalDeviceSchedulingControlsPropertiesARM { + VkStructureType sType; + void* pNext; + VkPhysicalDeviceSchedulingControlsFlagsARM schedulingControlsFlags; +} VkPhysicalDeviceSchedulingControlsPropertiesARM; + + + +// VK_EXT_image_sliced_view_of_3d is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_image_sliced_view_of_3d 1 +#define VK_EXT_IMAGE_SLICED_VIEW_OF_3D_SPEC_VERSION 1 +#define VK_EXT_IMAGE_SLICED_VIEW_OF_3D_EXTENSION_NAME "VK_EXT_image_sliced_view_of_3d" +#define VK_REMAINING_3D_SLICES_EXT (~0U) +typedef struct VkPhysicalDeviceImageSlicedViewOf3DFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 imageSlicedViewOf3D; +} VkPhysicalDeviceImageSlicedViewOf3DFeaturesEXT; + +typedef struct VkImageViewSlicedCreateInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t sliceOffset; + uint32_t sliceCount; +} VkImageViewSlicedCreateInfoEXT; + + + +// VK_VALVE_descriptor_set_host_mapping is a preprocessor guard. Do not pass it to API calls. 
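+// Editor's note (illustrative sketch, not part of the generated Khronos
+// header): the two entry points declared below resolve a binding to an offset
+// and expose a descriptor set's backing memory for direct CPU writes.
+// `layout` and `set` are hypothetical handles:
+//
+//     VkDescriptorSetBindingReferenceVALVE ref = {
+//         .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_BINDING_REFERENCE_VALVE,
+//         .descriptorSetLayout = layout, .binding = 0 };
+//     VkDescriptorSetLayoutHostMappingInfoVALVE info = {
+//         .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_HOST_MAPPING_INFO_VALVE };
+//     vkGetDescriptorSetLayoutHostMappingInfoVALVE(device, &ref, &info);
+//     void* base = NULL;
+//     vkGetDescriptorSetHostMappingVALVE(device, set, &base);
+//     /* binding 0 lives at (char*)base + info.descriptorOffset,
+//        info.descriptorSize bytes per descriptor */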
+#define VK_VALVE_descriptor_set_host_mapping 1 +#define VK_VALVE_DESCRIPTOR_SET_HOST_MAPPING_SPEC_VERSION 1 +#define VK_VALVE_DESCRIPTOR_SET_HOST_MAPPING_EXTENSION_NAME "VK_VALVE_descriptor_set_host_mapping" +typedef struct VkPhysicalDeviceDescriptorSetHostMappingFeaturesVALVE { + VkStructureType sType; + void* pNext; + VkBool32 descriptorSetHostMapping; +} VkPhysicalDeviceDescriptorSetHostMappingFeaturesVALVE; + +typedef struct VkDescriptorSetBindingReferenceVALVE { + VkStructureType sType; + const void* pNext; + VkDescriptorSetLayout descriptorSetLayout; + uint32_t binding; +} VkDescriptorSetBindingReferenceVALVE; + +typedef struct VkDescriptorSetLayoutHostMappingInfoVALVE { + VkStructureType sType; + void* pNext; + size_t descriptorOffset; + uint32_t descriptorSize; +} VkDescriptorSetLayoutHostMappingInfoVALVE; + +typedef void (VKAPI_PTR *PFN_vkGetDescriptorSetLayoutHostMappingInfoVALVE)(VkDevice device, const VkDescriptorSetBindingReferenceVALVE* pBindingReference, VkDescriptorSetLayoutHostMappingInfoVALVE* pHostMapping); +typedef void (VKAPI_PTR *PFN_vkGetDescriptorSetHostMappingVALVE)(VkDevice device, VkDescriptorSet descriptorSet, void** ppData); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutHostMappingInfoVALVE( + VkDevice device, + const VkDescriptorSetBindingReferenceVALVE* pBindingReference, + VkDescriptorSetLayoutHostMappingInfoVALVE* pHostMapping); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetHostMappingVALVE( + VkDevice device, + VkDescriptorSet descriptorSet, + void** ppData); +#endif +#endif + + +// VK_EXT_depth_clamp_zero_one is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_depth_clamp_zero_one 1 +#define VK_EXT_DEPTH_CLAMP_ZERO_ONE_SPEC_VERSION 1 +#define VK_EXT_DEPTH_CLAMP_ZERO_ONE_EXTENSION_NAME "VK_EXT_depth_clamp_zero_one" +typedef VkPhysicalDeviceDepthClampZeroOneFeaturesKHR VkPhysicalDeviceDepthClampZeroOneFeaturesEXT; + + + +// VK_EXT_non_seamless_cube_map is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_non_seamless_cube_map 1 +#define VK_EXT_NON_SEAMLESS_CUBE_MAP_SPEC_VERSION 1 +#define VK_EXT_NON_SEAMLESS_CUBE_MAP_EXTENSION_NAME "VK_EXT_non_seamless_cube_map" +typedef struct VkPhysicalDeviceNonSeamlessCubeMapFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 nonSeamlessCubeMap; +} VkPhysicalDeviceNonSeamlessCubeMapFeaturesEXT; + + + +// VK_ARM_render_pass_striped is a preprocessor guard. Do not pass it to API calls. 
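+// Editor's note (illustrative sketch, not part of the generated Khronos
+// header): striped rendering splits a render pass into stripes whose
+// completion can be signalled individually; the begin-info below is chained
+// into the render pass / rendering begin structure. `width` and
+// `stripeHeight` are hypothetical and must respect the reported granularity:
+//
+//     VkRenderPassStripeInfoARM stripe = {
+//         .sType = VK_STRUCTURE_TYPE_RENDER_PASS_STRIPE_INFO_ARM,
+//         .stripeArea = { { 0, 0 }, { width, stripeHeight } } };
+//     VkRenderPassStripeBeginInfoARM stripes = {
+//         .sType = VK_STRUCTURE_TYPE_RENDER_PASS_STRIPE_BEGIN_INFO_ARM,
+//         .stripeInfoCount = 1, .pStripeInfos = &stripe };
+//     renderingInfo.pNext = &stripes;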
+#define VK_ARM_render_pass_striped 1 +#define VK_ARM_RENDER_PASS_STRIPED_SPEC_VERSION 1 +#define VK_ARM_RENDER_PASS_STRIPED_EXTENSION_NAME "VK_ARM_render_pass_striped" +typedef struct VkPhysicalDeviceRenderPassStripedFeaturesARM { + VkStructureType sType; + void* pNext; + VkBool32 renderPassStriped; +} VkPhysicalDeviceRenderPassStripedFeaturesARM; + +typedef struct VkPhysicalDeviceRenderPassStripedPropertiesARM { + VkStructureType sType; + void* pNext; + VkExtent2D renderPassStripeGranularity; + uint32_t maxRenderPassStripes; +} VkPhysicalDeviceRenderPassStripedPropertiesARM; + +typedef struct VkRenderPassStripeInfoARM { + VkStructureType sType; + const void* pNext; + VkRect2D stripeArea; +} VkRenderPassStripeInfoARM; + +typedef struct VkRenderPassStripeBeginInfoARM { + VkStructureType sType; + const void* pNext; + uint32_t stripeInfoCount; + const VkRenderPassStripeInfoARM* pStripeInfos; +} VkRenderPassStripeBeginInfoARM; + +typedef struct VkRenderPassStripeSubmitInfoARM { + VkStructureType sType; + const void* pNext; + uint32_t stripeSemaphoreInfoCount; + const VkSemaphoreSubmitInfo* pStripeSemaphoreInfos; +} VkRenderPassStripeSubmitInfoARM; + + + +// VK_QCOM_fragment_density_map_offset is a preprocessor guard. Do not pass it to API calls. +#define VK_QCOM_fragment_density_map_offset 1 +#define VK_QCOM_FRAGMENT_DENSITY_MAP_OFFSET_SPEC_VERSION 3 +#define VK_QCOM_FRAGMENT_DENSITY_MAP_OFFSET_EXTENSION_NAME "VK_QCOM_fragment_density_map_offset" +typedef struct VkPhysicalDeviceFragmentDensityMapOffsetFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 fragmentDensityMapOffset; +} VkPhysicalDeviceFragmentDensityMapOffsetFeaturesEXT; + +typedef VkPhysicalDeviceFragmentDensityMapOffsetFeaturesEXT VkPhysicalDeviceFragmentDensityMapOffsetFeaturesQCOM; + +typedef struct VkPhysicalDeviceFragmentDensityMapOffsetPropertiesEXT { + VkStructureType sType; + void* pNext; + VkExtent2D fragmentDensityOffsetGranularity; +} VkPhysicalDeviceFragmentDensityMapOffsetPropertiesEXT; + +typedef VkPhysicalDeviceFragmentDensityMapOffsetPropertiesEXT VkPhysicalDeviceFragmentDensityMapOffsetPropertiesQCOM; + +typedef struct VkRenderPassFragmentDensityMapOffsetEndInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t fragmentDensityOffsetCount; + const VkOffset2D* pFragmentDensityOffsets; +} VkRenderPassFragmentDensityMapOffsetEndInfoEXT; + +typedef VkRenderPassFragmentDensityMapOffsetEndInfoEXT VkSubpassFragmentDensityMapOffsetEndInfoQCOM; + + + +// VK_NV_copy_memory_indirect is a preprocessor guard. Do not pass it to API calls. 
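+// Editor's note (illustrative sketch, not part of the generated Khronos
+// header): the indirect copy below reads `copyCount` copy commands from
+// device memory at `copyBufferAddress` (a VkDeviceAddress into a buffer of
+// tightly packed command structs):
+//
+//     vkCmdCopyMemoryIndirectNV(cmd, copyBufferAddress, copyCount,
+//                               sizeof(VkCopyMemoryIndirectCommandNV));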
+#define VK_NV_copy_memory_indirect 1
+#define VK_NV_COPY_MEMORY_INDIRECT_SPEC_VERSION 1
+#define VK_NV_COPY_MEMORY_INDIRECT_EXTENSION_NAME "VK_NV_copy_memory_indirect"
+typedef VkCopyMemoryIndirectCommandKHR VkCopyMemoryIndirectCommandNV;
+
+typedef VkCopyMemoryToImageIndirectCommandKHR VkCopyMemoryToImageIndirectCommandNV;
+
+typedef struct VkPhysicalDeviceCopyMemoryIndirectFeaturesNV {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 indirectCopy;
+} VkPhysicalDeviceCopyMemoryIndirectFeaturesNV;
+
+typedef VkPhysicalDeviceCopyMemoryIndirectPropertiesKHR VkPhysicalDeviceCopyMemoryIndirectPropertiesNV;
+
+typedef void (VKAPI_PTR *PFN_vkCmdCopyMemoryIndirectNV)(VkCommandBuffer commandBuffer, VkDeviceAddress copyBufferAddress, uint32_t copyCount, uint32_t stride);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyMemoryToImageIndirectNV)(VkCommandBuffer commandBuffer, VkDeviceAddress copyBufferAddress, uint32_t copyCount, uint32_t stride, VkImage dstImage, VkImageLayout dstImageLayout, const VkImageSubresourceLayers* pImageSubresources);
+
+#ifndef VK_NO_PROTOTYPES
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyMemoryIndirectNV(
+    VkCommandBuffer commandBuffer,
+    VkDeviceAddress copyBufferAddress,
+    uint32_t copyCount,
+    uint32_t stride);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyMemoryToImageIndirectNV(
+    VkCommandBuffer commandBuffer,
+    VkDeviceAddress copyBufferAddress,
+    uint32_t copyCount,
+    uint32_t stride,
+    VkImage dstImage,
+    VkImageLayout dstImageLayout,
+    const VkImageSubresourceLayers* pImageSubresources);
+#endif
+#endif
+
+
+// VK_NV_memory_decompression is a preprocessor guard. Do not pass it to API calls.
+#define VK_NV_memory_decompression 1
+#define VK_NV_MEMORY_DECOMPRESSION_SPEC_VERSION 1
+#define VK_NV_MEMORY_DECOMPRESSION_EXTENSION_NAME "VK_NV_memory_decompression"
+
+// Flag bits for VkMemoryDecompressionMethodFlagBitsEXT
+typedef VkFlags64 VkMemoryDecompressionMethodFlagBitsEXT;
+static const VkMemoryDecompressionMethodFlagBitsEXT VK_MEMORY_DECOMPRESSION_METHOD_GDEFLATE_1_0_BIT_EXT = 0x00000001ULL;
+static const VkMemoryDecompressionMethodFlagBitsEXT VK_MEMORY_DECOMPRESSION_METHOD_GDEFLATE_1_0_BIT_NV = 0x00000001ULL;
+
+typedef VkMemoryDecompressionMethodFlagBitsEXT VkMemoryDecompressionMethodFlagBitsNV;
+
+typedef VkFlags64 VkMemoryDecompressionMethodFlagsEXT;
+typedef VkMemoryDecompressionMethodFlagsEXT VkMemoryDecompressionMethodFlagsNV;
+
+typedef struct VkDecompressMemoryRegionNV {
+    VkDeviceAddress srcAddress;
+    VkDeviceAddress dstAddress;
+    VkDeviceSize compressedSize;
+    VkDeviceSize decompressedSize;
+    VkMemoryDecompressionMethodFlagsNV decompressionMethod;
+} VkDecompressMemoryRegionNV;
+
+typedef struct VkPhysicalDeviceMemoryDecompressionFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 memoryDecompression;
+} VkPhysicalDeviceMemoryDecompressionFeaturesEXT;
+
+typedef VkPhysicalDeviceMemoryDecompressionFeaturesEXT VkPhysicalDeviceMemoryDecompressionFeaturesNV;
+
+typedef struct VkPhysicalDeviceMemoryDecompressionPropertiesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkMemoryDecompressionMethodFlagsEXT decompressionMethods;
+    uint64_t maxDecompressionIndirectCount;
+} VkPhysicalDeviceMemoryDecompressionPropertiesEXT;
+
+typedef VkPhysicalDeviceMemoryDecompressionPropertiesEXT VkPhysicalDeviceMemoryDecompressionPropertiesNV;
+
+typedef void (VKAPI_PTR *PFN_vkCmdDecompressMemoryNV)(VkCommandBuffer commandBuffer, uint32_t decompressRegionCount, const VkDecompressMemoryRegionNV* pDecompressMemoryRegions);
+typedef void (VKAPI_PTR *PFN_vkCmdDecompressMemoryIndirectCountNV)(VkCommandBuffer commandBuffer, VkDeviceAddress indirectCommandsAddress, VkDeviceAddress indirectCommandsCountAddress, uint32_t stride);
+
+#ifndef VK_NO_PROTOTYPES
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdDecompressMemoryNV(
+    VkCommandBuffer commandBuffer,
+    uint32_t decompressRegionCount,
+    const VkDecompressMemoryRegionNV* pDecompressMemoryRegions);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdDecompressMemoryIndirectCountNV(
+    VkCommandBuffer commandBuffer,
+    VkDeviceAddress indirectCommandsAddress,
+    VkDeviceAddress indirectCommandsCountAddress,
+    uint32_t stride);
+#endif
+#endif
+
+
+// VK_NV_device_generated_commands_compute is a preprocessor guard. Do not pass it to API calls.
+#define VK_NV_device_generated_commands_compute 1
+#define VK_NV_DEVICE_GENERATED_COMMANDS_COMPUTE_SPEC_VERSION 2
+#define VK_NV_DEVICE_GENERATED_COMMANDS_COMPUTE_EXTENSION_NAME "VK_NV_device_generated_commands_compute"
+typedef struct VkPhysicalDeviceDeviceGeneratedCommandsComputeFeaturesNV {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 deviceGeneratedCompute;
+    VkBool32 deviceGeneratedComputePipelines;
+    VkBool32 deviceGeneratedComputeCaptureReplay;
+} VkPhysicalDeviceDeviceGeneratedCommandsComputeFeaturesNV;
+
+typedef struct VkComputePipelineIndirectBufferInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkDeviceAddress deviceAddress;
+    VkDeviceSize size;
+    VkDeviceAddress pipelineDeviceAddressCaptureReplay;
+} VkComputePipelineIndirectBufferInfoNV;
+
+typedef struct VkPipelineIndirectDeviceAddressInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineBindPoint pipelineBindPoint;
+    VkPipeline pipeline;
+} VkPipelineIndirectDeviceAddressInfoNV;
+
+typedef struct VkBindPipelineIndirectCommandNV {
+    VkDeviceAddress pipelineAddress;
+} VkBindPipelineIndirectCommandNV;
+
+typedef void (VKAPI_PTR *PFN_vkGetPipelineIndirectMemoryRequirementsNV)(VkDevice device, const VkComputePipelineCreateInfo* pCreateInfo, VkMemoryRequirements2* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkCmdUpdatePipelineIndirectBufferNV)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline);
+typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetPipelineIndirectDeviceAddressNV)(VkDevice device, const VkPipelineIndirectDeviceAddressInfoNV* pInfo);
+
+#ifndef VK_NO_PROTOTYPES
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetPipelineIndirectMemoryRequirementsNV(
+    VkDevice device,
+    const VkComputePipelineCreateInfo* pCreateInfo,
+    VkMemoryRequirements2* pMemoryRequirements);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdUpdatePipelineIndirectBufferNV(
+    VkCommandBuffer commandBuffer,
+    VkPipelineBindPoint pipelineBindPoint,
+    VkPipeline pipeline);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetPipelineIndirectDeviceAddressNV(
+    VkDevice device,
+    const VkPipelineIndirectDeviceAddressInfoNV* pInfo);
+#endif
+#endif
+
+
+// VK_NV_ray_tracing_linear_swept_spheres is a preprocessor guard. Do not pass it to API calls.
+#define VK_NV_ray_tracing_linear_swept_spheres 1 +#define VK_NV_RAY_TRACING_LINEAR_SWEPT_SPHERES_SPEC_VERSION 1 +#define VK_NV_RAY_TRACING_LINEAR_SWEPT_SPHERES_EXTENSION_NAME "VK_NV_ray_tracing_linear_swept_spheres" + +typedef enum VkRayTracingLssIndexingModeNV { + VK_RAY_TRACING_LSS_INDEXING_MODE_LIST_NV = 0, + VK_RAY_TRACING_LSS_INDEXING_MODE_SUCCESSIVE_NV = 1, + VK_RAY_TRACING_LSS_INDEXING_MODE_MAX_ENUM_NV = 0x7FFFFFFF +} VkRayTracingLssIndexingModeNV; + +typedef enum VkRayTracingLssPrimitiveEndCapsModeNV { + VK_RAY_TRACING_LSS_PRIMITIVE_END_CAPS_MODE_NONE_NV = 0, + VK_RAY_TRACING_LSS_PRIMITIVE_END_CAPS_MODE_CHAINED_NV = 1, + VK_RAY_TRACING_LSS_PRIMITIVE_END_CAPS_MODE_MAX_ENUM_NV = 0x7FFFFFFF +} VkRayTracingLssPrimitiveEndCapsModeNV; +typedef struct VkPhysicalDeviceRayTracingLinearSweptSpheresFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 spheres; + VkBool32 linearSweptSpheres; +} VkPhysicalDeviceRayTracingLinearSweptSpheresFeaturesNV; + +typedef struct VkAccelerationStructureGeometryLinearSweptSpheresDataNV { + VkStructureType sType; + const void* pNext; + VkFormat vertexFormat; + VkDeviceOrHostAddressConstKHR vertexData; + VkDeviceSize vertexStride; + VkFormat radiusFormat; + VkDeviceOrHostAddressConstKHR radiusData; + VkDeviceSize radiusStride; + VkIndexType indexType; + VkDeviceOrHostAddressConstKHR indexData; + VkDeviceSize indexStride; + VkRayTracingLssIndexingModeNV indexingMode; + VkRayTracingLssPrimitiveEndCapsModeNV endCapsMode; +} VkAccelerationStructureGeometryLinearSweptSpheresDataNV; + +typedef struct VkAccelerationStructureGeometrySpheresDataNV { + VkStructureType sType; + const void* pNext; + VkFormat vertexFormat; + VkDeviceOrHostAddressConstKHR vertexData; + VkDeviceSize vertexStride; + VkFormat radiusFormat; + VkDeviceOrHostAddressConstKHR radiusData; + VkDeviceSize radiusStride; + VkIndexType indexType; + VkDeviceOrHostAddressConstKHR indexData; + VkDeviceSize indexStride; +} VkAccelerationStructureGeometrySpheresDataNV; + + + +// VK_NV_linear_color_attachment is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_linear_color_attachment 1 +#define VK_NV_LINEAR_COLOR_ATTACHMENT_SPEC_VERSION 1 +#define VK_NV_LINEAR_COLOR_ATTACHMENT_EXTENSION_NAME "VK_NV_linear_color_attachment" +typedef struct VkPhysicalDeviceLinearColorAttachmentFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 linearColorAttachment; +} VkPhysicalDeviceLinearColorAttachmentFeaturesNV; + + + +// VK_GOOGLE_surfaceless_query is a preprocessor guard. Do not pass it to API calls. +#define VK_GOOGLE_surfaceless_query 1 +#define VK_GOOGLE_SURFACELESS_QUERY_SPEC_VERSION 2 +#define VK_GOOGLE_SURFACELESS_QUERY_EXTENSION_NAME "VK_GOOGLE_surfaceless_query" + + +// VK_EXT_image_compression_control_swapchain is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_image_compression_control_swapchain 1 +#define VK_EXT_IMAGE_COMPRESSION_CONTROL_SWAPCHAIN_SPEC_VERSION 1 +#define VK_EXT_IMAGE_COMPRESSION_CONTROL_SWAPCHAIN_EXTENSION_NAME "VK_EXT_image_compression_control_swapchain" +typedef struct VkPhysicalDeviceImageCompressionControlSwapchainFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 imageCompressionControlSwapchain; +} VkPhysicalDeviceImageCompressionControlSwapchainFeaturesEXT; + + + +// VK_QCOM_image_processing is a preprocessor guard. Do not pass it to API calls. 
+#define VK_QCOM_image_processing 1 +#define VK_QCOM_IMAGE_PROCESSING_SPEC_VERSION 1 +#define VK_QCOM_IMAGE_PROCESSING_EXTENSION_NAME "VK_QCOM_image_processing" +typedef struct VkImageViewSampleWeightCreateInfoQCOM { + VkStructureType sType; + const void* pNext; + VkOffset2D filterCenter; + VkExtent2D filterSize; + uint32_t numPhases; +} VkImageViewSampleWeightCreateInfoQCOM; + +typedef struct VkPhysicalDeviceImageProcessingFeaturesQCOM { + VkStructureType sType; + void* pNext; + VkBool32 textureSampleWeighted; + VkBool32 textureBoxFilter; + VkBool32 textureBlockMatch; +} VkPhysicalDeviceImageProcessingFeaturesQCOM; + +typedef struct VkPhysicalDeviceImageProcessingPropertiesQCOM { + VkStructureType sType; + void* pNext; + uint32_t maxWeightFilterPhases; + VkExtent2D maxWeightFilterDimension; + VkExtent2D maxBlockMatchRegion; + VkExtent2D maxBoxFilterBlockSize; +} VkPhysicalDeviceImageProcessingPropertiesQCOM; + + + +// VK_EXT_nested_command_buffer is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_nested_command_buffer 1 +#define VK_EXT_NESTED_COMMAND_BUFFER_SPEC_VERSION 1 +#define VK_EXT_NESTED_COMMAND_BUFFER_EXTENSION_NAME "VK_EXT_nested_command_buffer" +typedef struct VkPhysicalDeviceNestedCommandBufferFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 nestedCommandBuffer; + VkBool32 nestedCommandBufferRendering; + VkBool32 nestedCommandBufferSimultaneousUse; +} VkPhysicalDeviceNestedCommandBufferFeaturesEXT; + +typedef struct VkPhysicalDeviceNestedCommandBufferPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxCommandBufferNestingLevel; +} VkPhysicalDeviceNestedCommandBufferPropertiesEXT; + + + +// VK_EXT_external_memory_acquire_unmodified is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_external_memory_acquire_unmodified 1 +#define VK_EXT_EXTERNAL_MEMORY_ACQUIRE_UNMODIFIED_SPEC_VERSION 1 +#define VK_EXT_EXTERNAL_MEMORY_ACQUIRE_UNMODIFIED_EXTENSION_NAME "VK_EXT_external_memory_acquire_unmodified" +typedef struct VkExternalMemoryAcquireUnmodifiedEXT { + VkStructureType sType; + const void* pNext; + VkBool32 acquireUnmodifiedMemory; +} VkExternalMemoryAcquireUnmodifiedEXT; + + + +// VK_EXT_extended_dynamic_state3 is a preprocessor guard. Do not pass it to API calls. 
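+// Editor's note (illustrative sketch, not part of the generated Khronos
+// header): each feature bit in the struct below gates one vkCmdSet*EXT entry
+// point; per-attachment state takes a (firstAttachment, attachmentCount)
+// range. `cmd` is a hypothetical recording VkCommandBuffer:
+//
+//     vkCmdSetPolygonModeEXT(cmd, VK_POLYGON_MODE_LINE);
+//     vkCmdSetRasterizationSamplesEXT(cmd, VK_SAMPLE_COUNT_4_BIT);
+//     VkBool32 blendEnable = VK_TRUE;
+//     vkCmdSetColorBlendEnableEXT(cmd, 0, 1, &blendEnable);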
+#define VK_EXT_extended_dynamic_state3 1
+#define VK_EXT_EXTENDED_DYNAMIC_STATE_3_SPEC_VERSION 2
+#define VK_EXT_EXTENDED_DYNAMIC_STATE_3_EXTENSION_NAME "VK_EXT_extended_dynamic_state3"
+typedef struct VkPhysicalDeviceExtendedDynamicState3FeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 extendedDynamicState3TessellationDomainOrigin;
+    VkBool32 extendedDynamicState3DepthClampEnable;
+    VkBool32 extendedDynamicState3PolygonMode;
+    VkBool32 extendedDynamicState3RasterizationSamples;
+    VkBool32 extendedDynamicState3SampleMask;
+    VkBool32 extendedDynamicState3AlphaToCoverageEnable;
+    VkBool32 extendedDynamicState3AlphaToOneEnable;
+    VkBool32 extendedDynamicState3LogicOpEnable;
+    VkBool32 extendedDynamicState3ColorBlendEnable;
+    VkBool32 extendedDynamicState3ColorBlendEquation;
+    VkBool32 extendedDynamicState3ColorWriteMask;
+    VkBool32 extendedDynamicState3RasterizationStream;
+    VkBool32 extendedDynamicState3ConservativeRasterizationMode;
+    VkBool32 extendedDynamicState3ExtraPrimitiveOverestimationSize;
+    VkBool32 extendedDynamicState3DepthClipEnable;
+    VkBool32 extendedDynamicState3SampleLocationsEnable;
+    VkBool32 extendedDynamicState3ColorBlendAdvanced;
+    VkBool32 extendedDynamicState3ProvokingVertexMode;
+    VkBool32 extendedDynamicState3LineRasterizationMode;
+    VkBool32 extendedDynamicState3LineStippleEnable;
+    VkBool32 extendedDynamicState3DepthClipNegativeOneToOne;
+    VkBool32 extendedDynamicState3ViewportWScalingEnable;
+    VkBool32 extendedDynamicState3ViewportSwizzle;
+    VkBool32 extendedDynamicState3CoverageToColorEnable;
+    VkBool32 extendedDynamicState3CoverageToColorLocation;
+    VkBool32 extendedDynamicState3CoverageModulationMode;
+    VkBool32 extendedDynamicState3CoverageModulationTableEnable;
+    VkBool32 extendedDynamicState3CoverageModulationTable;
+    VkBool32 extendedDynamicState3CoverageReductionMode;
+    VkBool32 extendedDynamicState3RepresentativeFragmentTestEnable;
+    VkBool32 extendedDynamicState3ShadingRateImageEnable;
+} VkPhysicalDeviceExtendedDynamicState3FeaturesEXT;
+
+typedef struct VkPhysicalDeviceExtendedDynamicState3PropertiesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 dynamicPrimitiveTopologyUnrestricted;
+} VkPhysicalDeviceExtendedDynamicState3PropertiesEXT;
+
+typedef struct VkColorBlendEquationEXT {
+    VkBlendFactor srcColorBlendFactor;
+    VkBlendFactor dstColorBlendFactor;
+    VkBlendOp colorBlendOp;
+    VkBlendFactor srcAlphaBlendFactor;
+    VkBlendFactor dstAlphaBlendFactor;
+    VkBlendOp alphaBlendOp;
+} VkColorBlendEquationEXT;
+
+typedef struct VkColorBlendAdvancedEXT {
+    VkBlendOp advancedBlendOp;
+    VkBool32 srcPremultiplied;
+    VkBool32 dstPremultiplied;
+    VkBlendOverlapEXT blendOverlap;
+    VkBool32 clampResults;
+} VkColorBlendAdvancedEXT;
+
+typedef void (VKAPI_PTR *PFN_vkCmdSetDepthClampEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 depthClampEnable);
+typedef void (VKAPI_PTR *PFN_vkCmdSetPolygonModeEXT)(VkCommandBuffer commandBuffer, VkPolygonMode polygonMode);
+typedef void (VKAPI_PTR *PFN_vkCmdSetRasterizationSamplesEXT)(VkCommandBuffer commandBuffer, VkSampleCountFlagBits rasterizationSamples);
+typedef void (VKAPI_PTR *PFN_vkCmdSetSampleMaskEXT)(VkCommandBuffer commandBuffer, VkSampleCountFlagBits samples, const VkSampleMask* pSampleMask);
+typedef void (VKAPI_PTR *PFN_vkCmdSetAlphaToCoverageEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 alphaToCoverageEnable);
+typedef void (VKAPI_PTR *PFN_vkCmdSetAlphaToOneEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 alphaToOneEnable);
+typedef void (VKAPI_PTR *PFN_vkCmdSetLogicOpEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 logicOpEnable);
+typedef void (VKAPI_PTR *PFN_vkCmdSetColorBlendEnableEXT)(VkCommandBuffer commandBuffer, uint32_t firstAttachment, uint32_t attachmentCount, const VkBool32* pColorBlendEnables);
+typedef void (VKAPI_PTR *PFN_vkCmdSetColorBlendEquationEXT)(VkCommandBuffer commandBuffer, uint32_t firstAttachment, uint32_t attachmentCount, const VkColorBlendEquationEXT* pColorBlendEquations);
+typedef void (VKAPI_PTR *PFN_vkCmdSetColorWriteMaskEXT)(VkCommandBuffer commandBuffer, uint32_t firstAttachment, uint32_t attachmentCount, const VkColorComponentFlags* pColorWriteMasks);
+typedef void (VKAPI_PTR *PFN_vkCmdSetTessellationDomainOriginEXT)(VkCommandBuffer commandBuffer, VkTessellationDomainOrigin domainOrigin);
+typedef void (VKAPI_PTR *PFN_vkCmdSetRasterizationStreamEXT)(VkCommandBuffer commandBuffer, uint32_t rasterizationStream);
+typedef void (VKAPI_PTR *PFN_vkCmdSetConservativeRasterizationModeEXT)(VkCommandBuffer commandBuffer, VkConservativeRasterizationModeEXT conservativeRasterizationMode);
+typedef void (VKAPI_PTR *PFN_vkCmdSetExtraPrimitiveOverestimationSizeEXT)(VkCommandBuffer commandBuffer, float extraPrimitiveOverestimationSize);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDepthClipEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 depthClipEnable);
+typedef void (VKAPI_PTR *PFN_vkCmdSetSampleLocationsEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 sampleLocationsEnable);
+typedef void (VKAPI_PTR *PFN_vkCmdSetColorBlendAdvancedEXT)(VkCommandBuffer commandBuffer, uint32_t firstAttachment, uint32_t attachmentCount, const VkColorBlendAdvancedEXT* pColorBlendAdvanced);
+typedef void (VKAPI_PTR *PFN_vkCmdSetProvokingVertexModeEXT)(VkCommandBuffer commandBuffer, VkProvokingVertexModeEXT provokingVertexMode);
+typedef void (VKAPI_PTR *PFN_vkCmdSetLineRasterizationModeEXT)(VkCommandBuffer commandBuffer, VkLineRasterizationModeEXT lineRasterizationMode);
+typedef void (VKAPI_PTR *PFN_vkCmdSetLineStippleEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 stippledLineEnable);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDepthClipNegativeOneToOneEXT)(VkCommandBuffer commandBuffer, VkBool32 negativeOneToOne);
+typedef void (VKAPI_PTR *PFN_vkCmdSetViewportWScalingEnableNV)(VkCommandBuffer commandBuffer, VkBool32 viewportWScalingEnable);
+typedef void (VKAPI_PTR *PFN_vkCmdSetViewportSwizzleNV)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewportSwizzleNV* pViewportSwizzles);
+typedef void (VKAPI_PTR *PFN_vkCmdSetCoverageToColorEnableNV)(VkCommandBuffer commandBuffer, VkBool32 coverageToColorEnable);
+typedef void (VKAPI_PTR *PFN_vkCmdSetCoverageToColorLocationNV)(VkCommandBuffer commandBuffer, uint32_t coverageToColorLocation);
+typedef void (VKAPI_PTR *PFN_vkCmdSetCoverageModulationModeNV)(VkCommandBuffer commandBuffer, VkCoverageModulationModeNV coverageModulationMode);
+typedef void (VKAPI_PTR *PFN_vkCmdSetCoverageModulationTableEnableNV)(VkCommandBuffer commandBuffer, VkBool32 coverageModulationTableEnable);
+typedef void (VKAPI_PTR *PFN_vkCmdSetCoverageModulationTableNV)(VkCommandBuffer commandBuffer, uint32_t coverageModulationTableCount, const float* pCoverageModulationTable);
+typedef void (VKAPI_PTR *PFN_vkCmdSetShadingRateImageEnableNV)(VkCommandBuffer commandBuffer, VkBool32 shadingRateImageEnable);
+typedef void (VKAPI_PTR *PFN_vkCmdSetRepresentativeFragmentTestEnableNV)(VkCommandBuffer commandBuffer, VkBool32 representativeFragmentTestEnable);
+typedef void (VKAPI_PTR *PFN_vkCmdSetCoverageReductionModeNV)(VkCommandBuffer commandBuffer, VkCoverageReductionModeNV coverageReductionMode);
+
+#ifndef VK_NO_PROTOTYPES
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthClampEnableEXT(
+    VkCommandBuffer commandBuffer,
+    VkBool32 depthClampEnable);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetPolygonModeEXT(
+    VkCommandBuffer commandBuffer,
+    VkPolygonMode polygonMode);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetRasterizationSamplesEXT(
+    VkCommandBuffer commandBuffer,
+    VkSampleCountFlagBits rasterizationSamples);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetSampleMaskEXT(
+    VkCommandBuffer commandBuffer,
+    VkSampleCountFlagBits samples,
+    const VkSampleMask* pSampleMask);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetAlphaToCoverageEnableEXT(
+    VkCommandBuffer commandBuffer,
+    VkBool32 alphaToCoverageEnable);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetAlphaToOneEnableEXT(
+    VkCommandBuffer commandBuffer,
+    VkBool32 alphaToOneEnable);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetLogicOpEnableEXT(
+    VkCommandBuffer commandBuffer,
+    VkBool32 logicOpEnable);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetColorBlendEnableEXT(
+    VkCommandBuffer commandBuffer,
+    uint32_t firstAttachment,
+    uint32_t attachmentCount,
+    const VkBool32* pColorBlendEnables);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetColorBlendEquationEXT(
+    VkCommandBuffer commandBuffer,
+    uint32_t firstAttachment,
+    uint32_t attachmentCount,
+    const VkColorBlendEquationEXT* pColorBlendEquations);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetColorWriteMaskEXT(
+    VkCommandBuffer commandBuffer,
+    uint32_t firstAttachment,
+    uint32_t attachmentCount,
+    const VkColorComponentFlags* pColorWriteMasks);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetTessellationDomainOriginEXT(
+    VkCommandBuffer commandBuffer,
+    VkTessellationDomainOrigin domainOrigin);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetRasterizationStreamEXT(
+    VkCommandBuffer commandBuffer,
+    uint32_t rasterizationStream);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetConservativeRasterizationModeEXT(
+    VkCommandBuffer commandBuffer,
+    VkConservativeRasterizationModeEXT conservativeRasterizationMode);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetExtraPrimitiveOverestimationSizeEXT(
+    VkCommandBuffer commandBuffer,
+    float extraPrimitiveOverestimationSize);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthClipEnableEXT(
+    VkCommandBuffer commandBuffer,
+    VkBool32 depthClipEnable);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetSampleLocationsEnableEXT(
+    VkCommandBuffer commandBuffer,
+    VkBool32 sampleLocationsEnable);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetColorBlendAdvancedEXT(
+    VkCommandBuffer commandBuffer,
+    uint32_t firstAttachment,
+    uint32_t attachmentCount,
+    const VkColorBlendAdvancedEXT* pColorBlendAdvanced);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetProvokingVertexModeEXT(
+    VkCommandBuffer commandBuffer,
+    VkProvokingVertexModeEXT provokingVertexMode);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetLineRasterizationModeEXT(
+    VkCommandBuffer commandBuffer,
+    VkLineRasterizationModeEXT lineRasterizationMode);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetLineStippleEnableEXT(
+    VkCommandBuffer commandBuffer,
+    VkBool32 stippledLineEnable);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthClipNegativeOneToOneEXT(
+    VkCommandBuffer commandBuffer,
+    VkBool32 negativeOneToOne);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetViewportWScalingEnableNV(
+    VkCommandBuffer commandBuffer,
+    VkBool32 viewportWScalingEnable);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetViewportSwizzleNV(
+    VkCommandBuffer commandBuffer,
+    uint32_t firstViewport,
+    uint32_t viewportCount,
+    const VkViewportSwizzleNV* pViewportSwizzles);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetCoverageToColorEnableNV(
+    VkCommandBuffer commandBuffer,
+    VkBool32 coverageToColorEnable);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetCoverageToColorLocationNV(
+    VkCommandBuffer commandBuffer,
+    uint32_t coverageToColorLocation);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetCoverageModulationModeNV(
+    VkCommandBuffer commandBuffer,
+    VkCoverageModulationModeNV coverageModulationMode);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetCoverageModulationTableEnableNV(
+    VkCommandBuffer commandBuffer,
+    VkBool32 coverageModulationTableEnable);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetCoverageModulationTableNV(
+    VkCommandBuffer commandBuffer,
+    uint32_t coverageModulationTableCount,
+    const float* pCoverageModulationTable);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetShadingRateImageEnableNV(
+    VkCommandBuffer commandBuffer,
+    VkBool32 shadingRateImageEnable);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetRepresentativeFragmentTestEnableNV(
+    VkCommandBuffer commandBuffer,
+    VkBool32 representativeFragmentTestEnable);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetCoverageReductionModeNV(
+    VkCommandBuffer commandBuffer,
+    VkCoverageReductionModeNV coverageReductionMode);
+#endif
+#endif
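+
+#if 0
+/* [Editor's note] Illustrative sketch only -- not part of the upstream Vulkan
+ * header, kept inert behind #if 0. Extension commands such as the
+ * VK_EXT_extended_dynamic_state3 entry points above are not exported by the
+ * loader, so they are normally fetched through vkGetDeviceProcAddr before use. */
+static void setDynamicFillMode(VkDevice device, VkCommandBuffer cmd)
+{
+    PFN_vkCmdSetPolygonModeEXT pfnCmdSetPolygonModeEXT =
+        (PFN_vkCmdSetPolygonModeEXT)vkGetDeviceProcAddr(device, "vkCmdSetPolygonModeEXT");
+
+    if (pfnCmdSetPolygonModeEXT != NULL) {
+        /* Requires VK_EXT_extended_dynamic_state3 enabled with the
+         * extendedDynamicState3PolygonMode feature. */
+        pfnCmdSetPolygonModeEXT(cmd, VK_POLYGON_MODE_FILL);
+    }
+}
+#endif
+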
+
+// VK_EXT_subpass_merge_feedback is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_subpass_merge_feedback 1
+#define VK_EXT_SUBPASS_MERGE_FEEDBACK_SPEC_VERSION 2
+#define VK_EXT_SUBPASS_MERGE_FEEDBACK_EXTENSION_NAME "VK_EXT_subpass_merge_feedback"
+
+typedef enum VkSubpassMergeStatusEXT {
+    VK_SUBPASS_MERGE_STATUS_MERGED_EXT = 0,
+    VK_SUBPASS_MERGE_STATUS_DISALLOWED_EXT = 1,
+    VK_SUBPASS_MERGE_STATUS_NOT_MERGED_SIDE_EFFECTS_EXT = 2,
+    VK_SUBPASS_MERGE_STATUS_NOT_MERGED_SAMPLES_MISMATCH_EXT = 3,
+    VK_SUBPASS_MERGE_STATUS_NOT_MERGED_VIEWS_MISMATCH_EXT = 4,
+    VK_SUBPASS_MERGE_STATUS_NOT_MERGED_ALIASING_EXT = 5,
+    VK_SUBPASS_MERGE_STATUS_NOT_MERGED_DEPENDENCIES_EXT = 6,
+    VK_SUBPASS_MERGE_STATUS_NOT_MERGED_INCOMPATIBLE_INPUT_ATTACHMENT_EXT = 7,
+    VK_SUBPASS_MERGE_STATUS_NOT_MERGED_TOO_MANY_ATTACHMENTS_EXT = 8,
+    VK_SUBPASS_MERGE_STATUS_NOT_MERGED_INSUFFICIENT_STORAGE_EXT = 9,
+    VK_SUBPASS_MERGE_STATUS_NOT_MERGED_DEPTH_STENCIL_COUNT_EXT = 10,
+    VK_SUBPASS_MERGE_STATUS_NOT_MERGED_RESOLVE_ATTACHMENT_REUSE_EXT = 11,
+    VK_SUBPASS_MERGE_STATUS_NOT_MERGED_SINGLE_SUBPASS_EXT = 12,
+    VK_SUBPASS_MERGE_STATUS_NOT_MERGED_UNSPECIFIED_EXT = 13,
+    VK_SUBPASS_MERGE_STATUS_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkSubpassMergeStatusEXT;
+typedef struct VkPhysicalDeviceSubpassMergeFeedbackFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 subpassMergeFeedback;
+} VkPhysicalDeviceSubpassMergeFeedbackFeaturesEXT;
+
+typedef struct VkRenderPassCreationControlEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkBool32 disallowMerging;
+} VkRenderPassCreationControlEXT;
+
+typedef struct VkRenderPassCreationFeedbackInfoEXT {
+    uint32_t postMergeSubpassCount;
+} VkRenderPassCreationFeedbackInfoEXT;
+
+typedef struct VkRenderPassCreationFeedbackCreateInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkRenderPassCreationFeedbackInfoEXT* pRenderPassFeedback;
+} VkRenderPassCreationFeedbackCreateInfoEXT;
+
+typedef struct VkRenderPassSubpassFeedbackInfoEXT {
+    VkSubpassMergeStatusEXT subpassMergeStatus;
+    char description[VK_MAX_DESCRIPTION_SIZE];
+    uint32_t postMergeIndex;
+} VkRenderPassSubpassFeedbackInfoEXT;
+
+typedef struct VkRenderPassSubpassFeedbackCreateInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkRenderPassSubpassFeedbackInfoEXT* pSubpassFeedback;
+} VkRenderPassSubpassFeedbackCreateInfoEXT;
+
+
+// VK_LUNARG_direct_driver_loading is a preprocessor guard. Do not pass it to API calls.
+#define VK_LUNARG_direct_driver_loading 1
+#define VK_LUNARG_DIRECT_DRIVER_LOADING_SPEC_VERSION 1
+#define VK_LUNARG_DIRECT_DRIVER_LOADING_EXTENSION_NAME "VK_LUNARG_direct_driver_loading"
+
+typedef enum VkDirectDriverLoadingModeLUNARG {
+    VK_DIRECT_DRIVER_LOADING_MODE_EXCLUSIVE_LUNARG = 0,
+    VK_DIRECT_DRIVER_LOADING_MODE_INCLUSIVE_LUNARG = 1,
+    VK_DIRECT_DRIVER_LOADING_MODE_MAX_ENUM_LUNARG = 0x7FFFFFFF
+} VkDirectDriverLoadingModeLUNARG;
+typedef VkFlags VkDirectDriverLoadingFlagsLUNARG;
+typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetInstanceProcAddrLUNARG)(
+    VkInstance instance,
+    const char* pName);
+
+typedef struct VkDirectDriverLoadingInfoLUNARG {
+    VkStructureType sType;
+    void* pNext;
+    VkDirectDriverLoadingFlagsLUNARG flags;
+    PFN_vkGetInstanceProcAddrLUNARG pfnGetInstanceProcAddr;
+} VkDirectDriverLoadingInfoLUNARG;
+
+typedef struct VkDirectDriverLoadingListLUNARG {
+    VkStructureType sType;
+    const void* pNext;
+    VkDirectDriverLoadingModeLUNARG mode;
+    uint32_t driverCount;
+    const VkDirectDriverLoadingInfoLUNARG* pDrivers;
+} VkDirectDriverLoadingListLUNARG;
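+
+#if 0
+/* [Editor's note] Illustrative sketch only -- not part of the upstream Vulkan
+ * header, kept inert behind #if 0. Shows how the VK_LUNARG_direct_driver_loading
+ * structs above are chained into instance creation; `myDriverGipa` stands in
+ * for a driver-provided vkGetInstanceProcAddr entry point. */
+static VkResult createInstanceWithDirectDriver(PFN_vkGetInstanceProcAddrLUNARG myDriverGipa,
+                                               VkInstance* pInstance)
+{
+    const char* exts[1];
+    VkDirectDriverLoadingInfoLUNARG driver;
+    VkDirectDriverLoadingListLUNARG list;
+    VkInstanceCreateInfo ci = {0};
+
+    driver.sType = VK_STRUCTURE_TYPE_DIRECT_DRIVER_LOADING_INFO_LUNARG;
+    driver.pNext = NULL;
+    driver.flags = 0;
+    driver.pfnGetInstanceProcAddr = myDriverGipa;
+
+    list.sType = VK_STRUCTURE_TYPE_DIRECT_DRIVER_LOADING_LIST_LUNARG;
+    list.pNext = NULL;
+    list.mode = VK_DIRECT_DRIVER_LOADING_MODE_EXCLUSIVE_LUNARG;  /* use only this driver */
+    list.driverCount = 1;
+    list.pDrivers = &driver;
+
+    exts[0] = VK_LUNARG_DIRECT_DRIVER_LOADING_EXTENSION_NAME;
+    ci.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
+    ci.pNext = &list;
+    ci.enabledExtensionCount = 1;
+    ci.ppEnabledExtensionNames = exts;
+    return vkCreateInstance(&ci, NULL, pInstance);
+}
+#endif
+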
+
+// VK_ARM_tensors is a preprocessor guard. Do not pass it to API calls.
+#define VK_ARM_tensors 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkTensorARM)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkTensorViewARM)
+#define VK_ARM_TENSORS_SPEC_VERSION 1
+#define VK_ARM_TENSORS_EXTENSION_NAME "VK_ARM_tensors"
+
+typedef enum VkTensorTilingARM {
+    VK_TENSOR_TILING_OPTIMAL_ARM = 0,
+    VK_TENSOR_TILING_LINEAR_ARM = 1,
+    VK_TENSOR_TILING_MAX_ENUM_ARM = 0x7FFFFFFF
+} VkTensorTilingARM;
+typedef VkFlags64 VkTensorCreateFlagsARM;
+
+// Flag bits for VkTensorCreateFlagBitsARM
+typedef VkFlags64 VkTensorCreateFlagBitsARM;
+static const VkTensorCreateFlagBitsARM VK_TENSOR_CREATE_MUTABLE_FORMAT_BIT_ARM = 0x00000001ULL;
+static const VkTensorCreateFlagBitsARM VK_TENSOR_CREATE_PROTECTED_BIT_ARM = 0x00000002ULL;
+static const VkTensorCreateFlagBitsARM VK_TENSOR_CREATE_DESCRIPTOR_HEAP_CAPTURE_REPLAY_BIT_ARM = 0x00000008ULL;
+static const VkTensorCreateFlagBitsARM VK_TENSOR_CREATE_DESCRIPTOR_BUFFER_CAPTURE_REPLAY_BIT_ARM = 0x00000004ULL;
+
+typedef VkFlags64 VkTensorUsageFlagsARM;
+
+// Flag bits for VkTensorUsageFlagBitsARM
+typedef VkFlags64 VkTensorUsageFlagBitsARM;
+static const VkTensorUsageFlagBitsARM VK_TENSOR_USAGE_SHADER_BIT_ARM = 0x00000002ULL;
+static const VkTensorUsageFlagBitsARM VK_TENSOR_USAGE_TRANSFER_SRC_BIT_ARM = 0x00000004ULL;
+static const VkTensorUsageFlagBitsARM VK_TENSOR_USAGE_TRANSFER_DST_BIT_ARM = 0x00000008ULL;
+static const VkTensorUsageFlagBitsARM VK_TENSOR_USAGE_IMAGE_ALIASING_BIT_ARM = 0x00000010ULL;
+static const VkTensorUsageFlagBitsARM VK_TENSOR_USAGE_DATA_GRAPH_BIT_ARM = 0x00000020ULL;
+
+typedef struct VkTensorDescriptionARM {
+    VkStructureType sType;
+    const void* pNext;
+    VkTensorTilingARM tiling;
+    VkFormat format;
+    uint32_t dimensionCount;
+    const int64_t* pDimensions;
+    const int64_t* pStrides;
+    VkTensorUsageFlagsARM usage;
+} VkTensorDescriptionARM;
+
+typedef struct VkTensorCreateInfoARM {
+    VkStructureType sType;
+    const void* pNext;
+    VkTensorCreateFlagsARM flags;
+    const VkTensorDescriptionARM* pDescription;
+    VkSharingMode sharingMode;
+    uint32_t queueFamilyIndexCount;
+    const uint32_t* pQueueFamilyIndices;
+} VkTensorCreateInfoARM;
+
+typedef struct VkTensorMemoryRequirementsInfoARM {
+    VkStructureType sType;
+    const void* pNext;
+    VkTensorARM tensor;
+} VkTensorMemoryRequirementsInfoARM;
+
+typedef struct VkBindTensorMemoryInfoARM {
+    VkStructureType sType;
+    const void* pNext;
+    VkTensorARM tensor;
+    VkDeviceMemory memory;
+    VkDeviceSize memoryOffset;
+} VkBindTensorMemoryInfoARM;
+
+typedef struct VkWriteDescriptorSetTensorARM {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t tensorViewCount;
+    const VkTensorViewARM* pTensorViews;
+} VkWriteDescriptorSetTensorARM;
+
+typedef struct VkTensorFormatPropertiesARM {
+    VkStructureType sType;
+    void* pNext;
+    VkFormatFeatureFlags2 optimalTilingTensorFeatures;
+    VkFormatFeatureFlags2 linearTilingTensorFeatures;
+} VkTensorFormatPropertiesARM;
+
+typedef struct VkPhysicalDeviceTensorPropertiesARM {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t maxTensorDimensionCount;
+    uint64_t maxTensorElements;
+    uint64_t maxPerDimensionTensorElements;
+    int64_t maxTensorStride;
+    uint64_t maxTensorSize;
+    uint32_t maxTensorShaderAccessArrayLength;
+    uint32_t maxTensorShaderAccessSize;
+    uint32_t maxDescriptorSetStorageTensors;
+    uint32_t maxPerStageDescriptorSetStorageTensors;
+    uint32_t maxDescriptorSetUpdateAfterBindStorageTensors;
+    uint32_t maxPerStageDescriptorUpdateAfterBindStorageTensors;
+    VkBool32 shaderStorageTensorArrayNonUniformIndexingNative;
+    VkShaderStageFlags shaderTensorSupportedStages;
+} VkPhysicalDeviceTensorPropertiesARM;
+
+typedef struct VkTensorMemoryBarrierARM {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineStageFlags2 srcStageMask;
+    VkAccessFlags2 srcAccessMask;
+    VkPipelineStageFlags2 dstStageMask;
+    VkAccessFlags2 dstAccessMask;
+    uint32_t srcQueueFamilyIndex;
+    uint32_t dstQueueFamilyIndex;
+    VkTensorARM tensor;
+} VkTensorMemoryBarrierARM;
+
+typedef struct VkTensorDependencyInfoARM {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t tensorMemoryBarrierCount;
+    const VkTensorMemoryBarrierARM* pTensorMemoryBarriers;
+} VkTensorDependencyInfoARM;
+
+typedef struct VkPhysicalDeviceTensorFeaturesARM {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 tensorNonPacked;
+    VkBool32 shaderTensorAccess;
+    VkBool32 shaderStorageTensorArrayDynamicIndexing;
+    VkBool32 shaderStorageTensorArrayNonUniformIndexing;
+    VkBool32 descriptorBindingStorageTensorUpdateAfterBind;
+    VkBool32 tensors;
+} VkPhysicalDeviceTensorFeaturesARM;
+
+typedef struct VkDeviceTensorMemoryRequirementsARM {
+    VkStructureType sType;
+    const void* pNext;
+    const VkTensorCreateInfoARM* pCreateInfo;
+} VkDeviceTensorMemoryRequirementsARM;
+
+typedef struct VkTensorCopyARM {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t dimensionCount;
+    const uint64_t* pSrcOffset;
+    const uint64_t* pDstOffset;
+    const uint64_t* pExtent;
+} VkTensorCopyARM;
+
+typedef struct VkCopyTensorInfoARM {
+    VkStructureType sType;
+    const void* pNext;
+    VkTensorARM srcTensor;
+    VkTensorARM dstTensor;
+    uint32_t regionCount;
+    const VkTensorCopyARM* pRegions;
+} VkCopyTensorInfoARM;
+
+typedef struct VkMemoryDedicatedAllocateInfoTensorARM {
+    VkStructureType sType;
+    const void* pNext;
+    VkTensorARM tensor;
+} VkMemoryDedicatedAllocateInfoTensorARM;
+
+typedef struct VkPhysicalDeviceExternalTensorInfoARM {
+    VkStructureType sType;
+    const void* pNext;
+    VkTensorCreateFlagsARM flags;
+    const VkTensorDescriptionARM* pDescription;
+    VkExternalMemoryHandleTypeFlagBits handleType;
+} VkPhysicalDeviceExternalTensorInfoARM;
+
+typedef struct VkExternalTensorPropertiesARM {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalMemoryProperties externalMemoryProperties;
+} VkExternalTensorPropertiesARM;
+
+typedef struct VkExternalMemoryTensorCreateInfoARM {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalMemoryHandleTypeFlags handleTypes;
+} VkExternalMemoryTensorCreateInfoARM;
+
+typedef struct VkPhysicalDeviceDescriptorBufferTensorFeaturesARM {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 descriptorBufferTensorDescriptors;
+} VkPhysicalDeviceDescriptorBufferTensorFeaturesARM;
+
+typedef struct VkPhysicalDeviceDescriptorBufferTensorPropertiesARM {
+    VkStructureType sType;
+    void* pNext;
+    size_t tensorCaptureReplayDescriptorDataSize;
+    size_t tensorViewCaptureReplayDescriptorDataSize;
+    size_t tensorDescriptorSize;
+} VkPhysicalDeviceDescriptorBufferTensorPropertiesARM;
+
+typedef struct VkDescriptorGetTensorInfoARM {
+    VkStructureType sType;
+    const void* pNext;
+    VkTensorViewARM tensorView;
+} VkDescriptorGetTensorInfoARM;
+
+typedef struct VkTensorCaptureDescriptorDataInfoARM {
+    VkStructureType sType;
+    const void* pNext;
+    VkTensorARM tensor;
+} VkTensorCaptureDescriptorDataInfoARM;
+
+typedef struct VkTensorViewCaptureDescriptorDataInfoARM {
+    VkStructureType sType;
+    const void* pNext;
+    VkTensorViewARM tensorView;
+} VkTensorViewCaptureDescriptorDataInfoARM;
+
+typedef struct VkFrameBoundaryTensorsARM {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t tensorCount;
+    const VkTensorARM* pTensors;
+} VkFrameBoundaryTensorsARM;
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateTensorARM)(VkDevice device, const VkTensorCreateInfoARM* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkTensorARM* pTensor);
+typedef void (VKAPI_PTR *PFN_vkDestroyTensorARM)(VkDevice device, VkTensorARM tensor, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateTensorViewARM)(VkDevice device, const VkTensorViewCreateInfoARM* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkTensorViewARM* pView);
+typedef void (VKAPI_PTR *PFN_vkDestroyTensorViewARM)(VkDevice device, VkTensorViewARM tensorView, const VkAllocationCallbacks* pAllocator);
+typedef void (VKAPI_PTR *PFN_vkGetTensorMemoryRequirementsARM)(VkDevice device, const VkTensorMemoryRequirementsInfoARM* pInfo, VkMemoryRequirements2* pMemoryRequirements);
+typedef VkResult (VKAPI_PTR *PFN_vkBindTensorMemoryARM)(VkDevice device, uint32_t bindInfoCount, const VkBindTensorMemoryInfoARM* pBindInfos);
+typedef void (VKAPI_PTR *PFN_vkGetDeviceTensorMemoryRequirementsARM)(VkDevice device, const VkDeviceTensorMemoryRequirementsARM* pInfo, VkMemoryRequirements2* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyTensorARM)(VkCommandBuffer commandBuffer, const VkCopyTensorInfoARM* pCopyTensorInfo);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalTensorPropertiesARM)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalTensorInfoARM* pExternalTensorInfo, VkExternalTensorPropertiesARM* pExternalTensorProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetTensorOpaqueCaptureDescriptorDataARM)(VkDevice device, const VkTensorCaptureDescriptorDataInfoARM* pInfo, void* pData);
+typedef VkResult (VKAPI_PTR *PFN_vkGetTensorViewOpaqueCaptureDescriptorDataARM)(VkDevice device, const VkTensorViewCaptureDescriptorDataInfoARM* pInfo, void* pData);
+
+#ifndef VK_NO_PROTOTYPES
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateTensorARM(
+    VkDevice device,
+    const VkTensorCreateInfoARM* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkTensorARM* pTensor);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkDestroyTensorARM(
+    VkDevice device,
+    VkTensorARM tensor,
+    const VkAllocationCallbacks* pAllocator);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateTensorViewARM(
+    VkDevice device,
+    const VkTensorViewCreateInfoARM* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkTensorViewARM* pView);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkDestroyTensorViewARM(
+    VkDevice device,
+    VkTensorViewARM tensorView,
+    const VkAllocationCallbacks* pAllocator);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetTensorMemoryRequirementsARM(
+    VkDevice device,
+    const VkTensorMemoryRequirementsInfoARM* pInfo,
+    VkMemoryRequirements2* pMemoryRequirements);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkBindTensorMemoryARM(
+    VkDevice device,
+    uint32_t bindInfoCount,
+    const VkBindTensorMemoryInfoARM* pBindInfos);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceTensorMemoryRequirementsARM(
+    VkDevice device,
+    const VkDeviceTensorMemoryRequirementsARM* pInfo,
+    VkMemoryRequirements2* pMemoryRequirements);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyTensorARM(
+    VkCommandBuffer commandBuffer,
+    const VkCopyTensorInfoARM* pCopyTensorInfo);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalTensorPropertiesARM(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceExternalTensorInfoARM* pExternalTensorInfo,
+    VkExternalTensorPropertiesARM* pExternalTensorProperties);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetTensorOpaqueCaptureDescriptorDataARM(
+    VkDevice device,
+    const VkTensorCaptureDescriptorDataInfoARM* pInfo,
+    void* pData);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetTensorViewOpaqueCaptureDescriptorDataARM(
+    VkDevice device,
+    const VkTensorViewCaptureDescriptorDataInfoARM* pInfo,
+    void* pData);
+#endif
+#endif
+
+
+// VK_EXT_shader_module_identifier is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_shader_module_identifier 1
+#define VK_MAX_SHADER_MODULE_IDENTIFIER_SIZE_EXT 32U
+#define VK_EXT_SHADER_MODULE_IDENTIFIER_SPEC_VERSION 1
+#define VK_EXT_SHADER_MODULE_IDENTIFIER_EXTENSION_NAME "VK_EXT_shader_module_identifier"
+typedef struct VkPhysicalDeviceShaderModuleIdentifierFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 shaderModuleIdentifier;
+} VkPhysicalDeviceShaderModuleIdentifierFeaturesEXT;
+
+typedef struct VkPhysicalDeviceShaderModuleIdentifierPropertiesEXT {
+    VkStructureType sType;
+    void* pNext;
+    uint8_t shaderModuleIdentifierAlgorithmUUID[VK_UUID_SIZE];
+} VkPhysicalDeviceShaderModuleIdentifierPropertiesEXT;
+
+typedef struct VkPipelineShaderStageModuleIdentifierCreateInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t identifierSize;
+    const uint8_t* pIdentifier;
+} VkPipelineShaderStageModuleIdentifierCreateInfoEXT;
+
+typedef struct VkShaderModuleIdentifierEXT {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t identifierSize;
+    uint8_t identifier[VK_MAX_SHADER_MODULE_IDENTIFIER_SIZE_EXT];
+} VkShaderModuleIdentifierEXT;
+
+typedef void (VKAPI_PTR *PFN_vkGetShaderModuleIdentifierEXT)(VkDevice device, VkShaderModule shaderModule, VkShaderModuleIdentifierEXT* pIdentifier);
+typedef void (VKAPI_PTR *PFN_vkGetShaderModuleCreateInfoIdentifierEXT)(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, VkShaderModuleIdentifierEXT* pIdentifier);
+
+#ifndef VK_NO_PROTOTYPES
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetShaderModuleIdentifierEXT(
+    VkDevice device,
+    VkShaderModule shaderModule,
+    VkShaderModuleIdentifierEXT* pIdentifier);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetShaderModuleCreateInfoIdentifierEXT(
+    VkDevice device,
+    const VkShaderModuleCreateInfo* pCreateInfo,
+    VkShaderModuleIdentifierEXT* pIdentifier);
+#endif
+#endif
+
+
+// VK_EXT_rasterization_order_attachment_access is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_rasterization_order_attachment_access 1
+#define VK_EXT_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_SPEC_VERSION 1
+#define VK_EXT_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_EXTENSION_NAME "VK_EXT_rasterization_order_attachment_access"
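+
+#if 0
+/* [Editor's note] Illustrative sketch only -- not part of the upstream Vulkan
+ * header, kept inert behind #if 0. Captures a VK_EXT_shader_module_identifier
+ * identifier (declared above) so a later run can reference the pipeline-cache
+ * entry via VkPipelineShaderStageModuleIdentifierCreateInfoEXT instead of
+ * shipping the SPIR-V itself. */
+static void captureModuleIdentifier(VkDevice device, VkShaderModule module,
+                                    VkShaderModuleIdentifierEXT* pOut)
+{
+    PFN_vkGetShaderModuleIdentifierEXT pfn =
+        (PFN_vkGetShaderModuleIdentifierEXT)vkGetDeviceProcAddr(device, "vkGetShaderModuleIdentifierEXT");
+
+    pOut->sType = VK_STRUCTURE_TYPE_SHADER_MODULE_IDENTIFIER_EXT;
+    pOut->pNext = NULL;
+    pOut->identifierSize = 0;
+    if (pfn != NULL) {
+        pfn(device, module, pOut);
+    }
+    /* pOut->identifier / pOut->identifierSize can later feed
+     * VkPipelineShaderStageModuleIdentifierCreateInfoEXT::pIdentifier. */
+}
+#endif
+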
+
+// VK_NV_optical_flow is a preprocessor guard. Do not pass it to API calls.
+#define VK_NV_optical_flow 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkOpticalFlowSessionNV)
+#define VK_NV_OPTICAL_FLOW_SPEC_VERSION 1
+#define VK_NV_OPTICAL_FLOW_EXTENSION_NAME "VK_NV_optical_flow"
+
+typedef enum VkOpticalFlowPerformanceLevelNV {
+    VK_OPTICAL_FLOW_PERFORMANCE_LEVEL_UNKNOWN_NV = 0,
+    VK_OPTICAL_FLOW_PERFORMANCE_LEVEL_SLOW_NV = 1,
+    VK_OPTICAL_FLOW_PERFORMANCE_LEVEL_MEDIUM_NV = 2,
+    VK_OPTICAL_FLOW_PERFORMANCE_LEVEL_FAST_NV = 3,
+    VK_OPTICAL_FLOW_PERFORMANCE_LEVEL_MAX_ENUM_NV = 0x7FFFFFFF
+} VkOpticalFlowPerformanceLevelNV;
+
+typedef enum VkOpticalFlowSessionBindingPointNV {
+    VK_OPTICAL_FLOW_SESSION_BINDING_POINT_UNKNOWN_NV = 0,
+    VK_OPTICAL_FLOW_SESSION_BINDING_POINT_INPUT_NV = 1,
+    VK_OPTICAL_FLOW_SESSION_BINDING_POINT_REFERENCE_NV = 2,
+    VK_OPTICAL_FLOW_SESSION_BINDING_POINT_HINT_NV = 3,
+    VK_OPTICAL_FLOW_SESSION_BINDING_POINT_FLOW_VECTOR_NV = 4,
+    VK_OPTICAL_FLOW_SESSION_BINDING_POINT_BACKWARD_FLOW_VECTOR_NV = 5,
+    VK_OPTICAL_FLOW_SESSION_BINDING_POINT_COST_NV = 6,
+    VK_OPTICAL_FLOW_SESSION_BINDING_POINT_BACKWARD_COST_NV = 7,
+    VK_OPTICAL_FLOW_SESSION_BINDING_POINT_GLOBAL_FLOW_NV = 8,
+    VK_OPTICAL_FLOW_SESSION_BINDING_POINT_MAX_ENUM_NV = 0x7FFFFFFF
+} VkOpticalFlowSessionBindingPointNV;
+
+typedef enum VkOpticalFlowGridSizeFlagBitsNV {
+    VK_OPTICAL_FLOW_GRID_SIZE_UNKNOWN_NV = 0,
+    VK_OPTICAL_FLOW_GRID_SIZE_1X1_BIT_NV = 0x00000001,
+    VK_OPTICAL_FLOW_GRID_SIZE_2X2_BIT_NV = 0x00000002,
+    VK_OPTICAL_FLOW_GRID_SIZE_4X4_BIT_NV = 0x00000004,
+    VK_OPTICAL_FLOW_GRID_SIZE_8X8_BIT_NV = 0x00000008,
+    VK_OPTICAL_FLOW_GRID_SIZE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF
+} VkOpticalFlowGridSizeFlagBitsNV;
+typedef VkFlags VkOpticalFlowGridSizeFlagsNV;
+
+typedef enum VkOpticalFlowUsageFlagBitsNV {
+    VK_OPTICAL_FLOW_USAGE_UNKNOWN_NV = 0,
+    VK_OPTICAL_FLOW_USAGE_INPUT_BIT_NV = 0x00000001,
+    VK_OPTICAL_FLOW_USAGE_OUTPUT_BIT_NV = 0x00000002,
+    VK_OPTICAL_FLOW_USAGE_HINT_BIT_NV = 0x00000004,
+    VK_OPTICAL_FLOW_USAGE_COST_BIT_NV = 0x00000008,
+    VK_OPTICAL_FLOW_USAGE_GLOBAL_FLOW_BIT_NV = 0x00000010,
+    VK_OPTICAL_FLOW_USAGE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF
+} VkOpticalFlowUsageFlagBitsNV;
+typedef VkFlags VkOpticalFlowUsageFlagsNV;
+
+typedef enum VkOpticalFlowSessionCreateFlagBitsNV {
+    VK_OPTICAL_FLOW_SESSION_CREATE_ENABLE_HINT_BIT_NV = 0x00000001,
+    VK_OPTICAL_FLOW_SESSION_CREATE_ENABLE_COST_BIT_NV = 0x00000002,
+    VK_OPTICAL_FLOW_SESSION_CREATE_ENABLE_GLOBAL_FLOW_BIT_NV = 0x00000004,
+    VK_OPTICAL_FLOW_SESSION_CREATE_ALLOW_REGIONS_BIT_NV = 0x00000008,
+    VK_OPTICAL_FLOW_SESSION_CREATE_BOTH_DIRECTIONS_BIT_NV = 0x00000010,
+    VK_OPTICAL_FLOW_SESSION_CREATE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF
+} VkOpticalFlowSessionCreateFlagBitsNV;
+typedef VkFlags VkOpticalFlowSessionCreateFlagsNV;
+
+typedef enum VkOpticalFlowExecuteFlagBitsNV {
+    VK_OPTICAL_FLOW_EXECUTE_DISABLE_TEMPORAL_HINTS_BIT_NV = 0x00000001,
+    VK_OPTICAL_FLOW_EXECUTE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF
+} VkOpticalFlowExecuteFlagBitsNV;
+typedef VkFlags VkOpticalFlowExecuteFlagsNV;
+typedef struct VkPhysicalDeviceOpticalFlowFeaturesNV {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 opticalFlow;
+} VkPhysicalDeviceOpticalFlowFeaturesNV;
+
+typedef struct VkPhysicalDeviceOpticalFlowPropertiesNV {
+    VkStructureType sType;
+    void* pNext;
+    VkOpticalFlowGridSizeFlagsNV supportedOutputGridSizes;
+    VkOpticalFlowGridSizeFlagsNV supportedHintGridSizes;
+    VkBool32 hintSupported;
+    VkBool32 costSupported;
+    VkBool32 bidirectionalFlowSupported;
+    VkBool32 globalFlowSupported;
+    uint32_t minWidth;
+    uint32_t minHeight;
+    uint32_t maxWidth;
+    uint32_t maxHeight;
+    uint32_t maxNumRegionsOfInterest;
+} VkPhysicalDeviceOpticalFlowPropertiesNV;
+
+typedef struct VkOpticalFlowImageFormatInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkOpticalFlowUsageFlagsNV usage;
+} VkOpticalFlowImageFormatInfoNV;
+
+typedef struct VkOpticalFlowImageFormatPropertiesNV {
+    VkStructureType sType;
+    void* pNext;
+    VkFormat format;
+} VkOpticalFlowImageFormatPropertiesNV;
+
+typedef struct VkOpticalFlowSessionCreateInfoNV {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t width;
+    uint32_t height;
+    VkFormat imageFormat;
+    VkFormat flowVectorFormat;
+    VkFormat costFormat;
+    VkOpticalFlowGridSizeFlagsNV outputGridSize;
+    VkOpticalFlowGridSizeFlagsNV hintGridSize;
+    VkOpticalFlowPerformanceLevelNV performanceLevel;
+    VkOpticalFlowSessionCreateFlagsNV flags;
+} VkOpticalFlowSessionCreateInfoNV;
+
+typedef struct VkOpticalFlowSessionCreatePrivateDataInfoNV {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t id;
+    uint32_t size;
+    const void* pPrivateData;
+} VkOpticalFlowSessionCreatePrivateDataInfoNV;
+
+typedef struct VkOpticalFlowExecuteInfoNV {
+    VkStructureType sType;
+    void* pNext;
+    VkOpticalFlowExecuteFlagsNV flags;
+    uint32_t regionCount;
+    const VkRect2D* pRegions;
+} VkOpticalFlowExecuteInfoNV;
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceOpticalFlowImageFormatsNV)(VkPhysicalDevice physicalDevice, const VkOpticalFlowImageFormatInfoNV* pOpticalFlowImageFormatInfo, uint32_t* pFormatCount, VkOpticalFlowImageFormatPropertiesNV* pImageFormatProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateOpticalFlowSessionNV)(VkDevice device, const VkOpticalFlowSessionCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkOpticalFlowSessionNV* pSession);
+typedef void (VKAPI_PTR *PFN_vkDestroyOpticalFlowSessionNV)(VkDevice device, VkOpticalFlowSessionNV session, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkBindOpticalFlowSessionImageNV)(VkDevice device, VkOpticalFlowSessionNV session, VkOpticalFlowSessionBindingPointNV bindingPoint, VkImageView view, VkImageLayout layout);
+typedef void (VKAPI_PTR *PFN_vkCmdOpticalFlowExecuteNV)(VkCommandBuffer commandBuffer, VkOpticalFlowSessionNV session, const VkOpticalFlowExecuteInfoNV* pExecuteInfo);
+
+#ifndef VK_NO_PROTOTYPES
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceOpticalFlowImageFormatsNV(
+    VkPhysicalDevice physicalDevice,
+    const VkOpticalFlowImageFormatInfoNV* pOpticalFlowImageFormatInfo,
+    uint32_t* pFormatCount,
+    VkOpticalFlowImageFormatPropertiesNV* pImageFormatProperties);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateOpticalFlowSessionNV(
+    VkDevice device,
+    const VkOpticalFlowSessionCreateInfoNV* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkOpticalFlowSessionNV* pSession);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkDestroyOpticalFlowSessionNV(
+    VkDevice device,
+    VkOpticalFlowSessionNV session,
+    const VkAllocationCallbacks* pAllocator);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkBindOpticalFlowSessionImageNV(
+    VkDevice device,
+    VkOpticalFlowSessionNV session,
+    VkOpticalFlowSessionBindingPointNV bindingPoint,
+    VkImageView view,
+    VkImageLayout layout);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdOpticalFlowExecuteNV(
+    VkCommandBuffer commandBuffer,
+    VkOpticalFlowSessionNV session,
+    const VkOpticalFlowExecuteInfoNV* pExecuteInfo);
+#endif
+#endif
+
+
+// VK_EXT_legacy_dithering is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_legacy_dithering 1
+#define VK_EXT_LEGACY_DITHERING_SPEC_VERSION 2
+#define VK_EXT_LEGACY_DITHERING_EXTENSION_NAME "VK_EXT_legacy_dithering"
+typedef struct VkPhysicalDeviceLegacyDitheringFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 legacyDithering;
+} VkPhysicalDeviceLegacyDitheringFeaturesEXT;
+
+
+// VK_EXT_pipeline_protected_access is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_pipeline_protected_access 1
+#define VK_EXT_PIPELINE_PROTECTED_ACCESS_SPEC_VERSION 1
+#define VK_EXT_PIPELINE_PROTECTED_ACCESS_EXTENSION_NAME "VK_EXT_pipeline_protected_access"
+typedef VkPhysicalDevicePipelineProtectedAccessFeatures VkPhysicalDevicePipelineProtectedAccessFeaturesEXT;
+
+
+// VK_AMD_anti_lag is a preprocessor guard. Do not pass it to API calls.
+#define VK_AMD_anti_lag 1
+#define VK_AMD_ANTI_LAG_SPEC_VERSION 1
+#define VK_AMD_ANTI_LAG_EXTENSION_NAME "VK_AMD_anti_lag"
+
+typedef enum VkAntiLagModeAMD {
+    VK_ANTI_LAG_MODE_DRIVER_CONTROL_AMD = 0,
+    VK_ANTI_LAG_MODE_ON_AMD = 1,
+    VK_ANTI_LAG_MODE_OFF_AMD = 2,
+    VK_ANTI_LAG_MODE_MAX_ENUM_AMD = 0x7FFFFFFF
+} VkAntiLagModeAMD;
+
+typedef enum VkAntiLagStageAMD {
+    VK_ANTI_LAG_STAGE_INPUT_AMD = 0,
+    VK_ANTI_LAG_STAGE_PRESENT_AMD = 1,
+    VK_ANTI_LAG_STAGE_MAX_ENUM_AMD = 0x7FFFFFFF
+} VkAntiLagStageAMD;
+typedef struct VkPhysicalDeviceAntiLagFeaturesAMD {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 antiLag;
+} VkPhysicalDeviceAntiLagFeaturesAMD;
+
+typedef struct VkAntiLagPresentationInfoAMD {
+    VkStructureType sType;
+    void* pNext;
+    VkAntiLagStageAMD stage;
+    uint64_t frameIndex;
+} VkAntiLagPresentationInfoAMD;
+
+typedef struct VkAntiLagDataAMD {
+    VkStructureType sType;
+    const void* pNext;
+    VkAntiLagModeAMD mode;
+    uint32_t maxFPS;
+    const VkAntiLagPresentationInfoAMD* pPresentationInfo;
+} VkAntiLagDataAMD;
+
+typedef void (VKAPI_PTR *PFN_vkAntiLagUpdateAMD)(VkDevice device, const VkAntiLagDataAMD* pData);
+
+#ifndef VK_NO_PROTOTYPES
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkAntiLagUpdateAMD(
+    VkDevice device,
+    const VkAntiLagDataAMD* pData);
+#endif
+#endif
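+
+#if 0
+/* [Editor's note] Illustrative sketch only -- not part of the upstream Vulkan
+ * header, kept inert behind #if 0. Marks the input-sampling stage of a frame
+ * with the VK_AMD_anti_lag structs declared above; `frameIndex` is the
+ * application's running frame counter, and maxFPS = 0 is assumed here to
+ * mean "no framerate limit". */
+static void antiLagMarkInput(VkDevice device, uint64_t frameIndex)
+{
+    PFN_vkAntiLagUpdateAMD pfn =
+        (PFN_vkAntiLagUpdateAMD)vkGetDeviceProcAddr(device, "vkAntiLagUpdateAMD");
+    VkAntiLagPresentationInfoAMD present;
+    VkAntiLagDataAMD data;
+
+    present.sType = VK_STRUCTURE_TYPE_ANTI_LAG_PRESENTATION_INFO_AMD;
+    present.pNext = NULL;
+    present.stage = VK_ANTI_LAG_STAGE_INPUT_AMD;
+    present.frameIndex = frameIndex;
+
+    data.sType = VK_STRUCTURE_TYPE_ANTI_LAG_DATA_AMD;
+    data.pNext = NULL;
+    data.mode = VK_ANTI_LAG_MODE_ON_AMD;
+    data.maxFPS = 0;
+    data.pPresentationInfo = &present;
+
+    if (pfn != NULL) {
+        pfn(device, &data);
+    }
+}
+#endif
+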
+
+// VK_EXT_shader_object is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_shader_object 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkShaderEXT)
+#define VK_EXT_SHADER_OBJECT_SPEC_VERSION 1
+#define VK_EXT_SHADER_OBJECT_EXTENSION_NAME "VK_EXT_shader_object"
+
+typedef enum VkShaderCodeTypeEXT {
+    VK_SHADER_CODE_TYPE_BINARY_EXT = 0,
+    VK_SHADER_CODE_TYPE_SPIRV_EXT = 1,
+    VK_SHADER_CODE_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkShaderCodeTypeEXT;
+
+typedef enum VkDepthClampModeEXT {
+    VK_DEPTH_CLAMP_MODE_VIEWPORT_RANGE_EXT = 0,
+    VK_DEPTH_CLAMP_MODE_USER_DEFINED_RANGE_EXT = 1,
+    VK_DEPTH_CLAMP_MODE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkDepthClampModeEXT;
+
+typedef enum VkShaderCreateFlagBitsEXT {
+    VK_SHADER_CREATE_LINK_STAGE_BIT_EXT = 0x00000001,
+    VK_SHADER_CREATE_DESCRIPTOR_HEAP_BIT_EXT = 0x00000400,
+    VK_SHADER_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT = 0x00000002,
+    VK_SHADER_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT = 0x00000004,
+    VK_SHADER_CREATE_NO_TASK_SHADER_BIT_EXT = 0x00000008,
+    VK_SHADER_CREATE_DISPATCH_BASE_BIT_EXT = 0x00000010,
+    VK_SHADER_CREATE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_EXT = 0x00000020,
+    VK_SHADER_CREATE_FRAGMENT_DENSITY_MAP_ATTACHMENT_BIT_EXT = 0x00000040,
+    VK_SHADER_CREATE_INDIRECT_BINDABLE_BIT_EXT = 0x00000080,
+    VK_SHADER_CREATE_64_BIT_INDEXING_BIT_EXT = 0x00008000,
+    VK_SHADER_CREATE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkShaderCreateFlagBitsEXT;
+typedef VkFlags VkShaderCreateFlagsEXT;
+typedef struct VkPhysicalDeviceShaderObjectFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 shaderObject;
+} VkPhysicalDeviceShaderObjectFeaturesEXT;
+
+typedef struct VkPhysicalDeviceShaderObjectPropertiesEXT {
+    VkStructureType sType;
+    void* pNext;
+    uint8_t shaderBinaryUUID[VK_UUID_SIZE];
+    uint32_t shaderBinaryVersion;
+} VkPhysicalDeviceShaderObjectPropertiesEXT;
+
+typedef struct VkShaderCreateInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    VkShaderCreateFlagsEXT flags;
+    VkShaderStageFlagBits stage;
+    VkShaderStageFlags nextStage;
+    VkShaderCodeTypeEXT codeType;
+    size_t codeSize;
+    const void* pCode;
+    const char* pName;
+    uint32_t setLayoutCount;
+    const VkDescriptorSetLayout* pSetLayouts;
+    uint32_t pushConstantRangeCount;
+    const VkPushConstantRange* pPushConstantRanges;
+    const VkSpecializationInfo* pSpecializationInfo;
+} VkShaderCreateInfoEXT;
+
+typedef VkPipelineShaderStageRequiredSubgroupSizeCreateInfo VkShaderRequiredSubgroupSizeCreateInfoEXT;
+
+typedef struct VkDepthClampRangeEXT {
+    float minDepthClamp;
+    float maxDepthClamp;
+} VkDepthClampRangeEXT;
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateShadersEXT)(VkDevice device, uint32_t createInfoCount, const VkShaderCreateInfoEXT* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkShaderEXT* pShaders);
+typedef void (VKAPI_PTR *PFN_vkDestroyShaderEXT)(VkDevice device, VkShaderEXT shader, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkGetShaderBinaryDataEXT)(VkDevice device, VkShaderEXT shader, size_t* pDataSize, void* pData);
+typedef void (VKAPI_PTR *PFN_vkCmdBindShadersEXT)(VkCommandBuffer commandBuffer, uint32_t stageCount, const VkShaderStageFlagBits* pStages, const VkShaderEXT* pShaders);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDepthClampRangeEXT)(VkCommandBuffer commandBuffer, VkDepthClampModeEXT depthClampMode, const VkDepthClampRangeEXT* pDepthClampRange);
+
+#ifndef VK_NO_PROTOTYPES
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateShadersEXT(
+    VkDevice device,
+    uint32_t createInfoCount,
+    const VkShaderCreateInfoEXT* pCreateInfos,
+    const VkAllocationCallbacks* pAllocator,
+    VkShaderEXT* pShaders);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkDestroyShaderEXT(
+    VkDevice device,
+    VkShaderEXT shader,
+    const VkAllocationCallbacks* pAllocator);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetShaderBinaryDataEXT(
+    VkDevice device,
+    VkShaderEXT shader,
+    size_t* pDataSize,
+    void* pData);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdBindShadersEXT(
+    VkCommandBuffer commandBuffer,
+    uint32_t stageCount,
+    const VkShaderStageFlagBits* pStages,
+    const VkShaderEXT* pShaders);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthClampRangeEXT(
+    VkCommandBuffer commandBuffer,
+    VkDepthClampModeEXT depthClampMode,
+    const VkDepthClampRangeEXT* pDepthClampRange);
+#endif
+#endif
+
+
+// VK_QCOM_tile_properties is a preprocessor guard. Do not pass it to API calls.
+#define VK_QCOM_tile_properties 1
+#define VK_QCOM_TILE_PROPERTIES_SPEC_VERSION 1
+#define VK_QCOM_TILE_PROPERTIES_EXTENSION_NAME "VK_QCOM_tile_properties"
+typedef struct VkPhysicalDeviceTilePropertiesFeaturesQCOM {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 tileProperties;
+} VkPhysicalDeviceTilePropertiesFeaturesQCOM;
+
+typedef struct VkTilePropertiesQCOM {
+    VkStructureType sType;
+    void* pNext;
+    VkExtent3D tileSize;
+    VkExtent2D apronSize;
+    VkOffset2D origin;
+} VkTilePropertiesQCOM;
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetFramebufferTilePropertiesQCOM)(VkDevice device, VkFramebuffer framebuffer, uint32_t* pPropertiesCount, VkTilePropertiesQCOM* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetDynamicRenderingTilePropertiesQCOM)(VkDevice device, const VkRenderingInfo* pRenderingInfo, VkTilePropertiesQCOM* pProperties);
+
+#ifndef VK_NO_PROTOTYPES
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetFramebufferTilePropertiesQCOM(
+    VkDevice device,
+    VkFramebuffer framebuffer,
+    uint32_t* pPropertiesCount,
+    VkTilePropertiesQCOM* pProperties);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetDynamicRenderingTilePropertiesQCOM(
+    VkDevice device,
+    const VkRenderingInfo* pRenderingInfo,
+    VkTilePropertiesQCOM* pProperties);
+#endif
+#endif
+
+
+// VK_SEC_amigo_profiling is a preprocessor guard. Do not pass it to API calls.
+#define VK_SEC_amigo_profiling 1
+#define VK_SEC_AMIGO_PROFILING_SPEC_VERSION 1
+#define VK_SEC_AMIGO_PROFILING_EXTENSION_NAME "VK_SEC_amigo_profiling"
+typedef struct VkPhysicalDeviceAmigoProfilingFeaturesSEC {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 amigoProfiling;
+} VkPhysicalDeviceAmigoProfilingFeaturesSEC;
+
+typedef struct VkAmigoProfilingSubmitInfoSEC {
+    VkStructureType sType;
+    const void* pNext;
+    uint64_t firstDrawTimestamp;
+    uint64_t swapBufferTimestamp;
+} VkAmigoProfilingSubmitInfoSEC;
+
+
+// VK_QCOM_multiview_per_view_viewports is a preprocessor guard. Do not pass it to API calls.
+#define VK_QCOM_multiview_per_view_viewports 1
+#define VK_QCOM_MULTIVIEW_PER_VIEW_VIEWPORTS_SPEC_VERSION 1
+#define VK_QCOM_MULTIVIEW_PER_VIEW_VIEWPORTS_EXTENSION_NAME "VK_QCOM_multiview_per_view_viewports"
+typedef struct VkPhysicalDeviceMultiviewPerViewViewportsFeaturesQCOM {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 multiviewPerViewViewports;
+} VkPhysicalDeviceMultiviewPerViewViewportsFeaturesQCOM;
+
+
+// VK_NV_ray_tracing_invocation_reorder is a preprocessor guard. Do not pass it to API calls.
+#define VK_NV_ray_tracing_invocation_reorder 1
+#define VK_NV_RAY_TRACING_INVOCATION_REORDER_SPEC_VERSION 1
+#define VK_NV_RAY_TRACING_INVOCATION_REORDER_EXTENSION_NAME "VK_NV_ray_tracing_invocation_reorder"
+
+typedef enum VkRayTracingInvocationReorderModeEXT {
+    VK_RAY_TRACING_INVOCATION_REORDER_MODE_NONE_EXT = 0,
+    VK_RAY_TRACING_INVOCATION_REORDER_MODE_REORDER_EXT = 1,
+    VK_RAY_TRACING_INVOCATION_REORDER_MODE_NONE_NV = VK_RAY_TRACING_INVOCATION_REORDER_MODE_NONE_EXT,
+    VK_RAY_TRACING_INVOCATION_REORDER_MODE_REORDER_NV = VK_RAY_TRACING_INVOCATION_REORDER_MODE_REORDER_EXT,
+    VK_RAY_TRACING_INVOCATION_REORDER_MODE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkRayTracingInvocationReorderModeEXT;
+typedef VkRayTracingInvocationReorderModeEXT VkRayTracingInvocationReorderModeNV;
+
+typedef struct VkPhysicalDeviceRayTracingInvocationReorderPropertiesNV {
+    VkStructureType sType;
+    void* pNext;
+    VkRayTracingInvocationReorderModeEXT rayTracingInvocationReorderReorderingHint;
+} VkPhysicalDeviceRayTracingInvocationReorderPropertiesNV;
+
+typedef struct VkPhysicalDeviceRayTracingInvocationReorderFeaturesNV {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 rayTracingInvocationReorder;
+} VkPhysicalDeviceRayTracingInvocationReorderFeaturesNV;
+
+
+// VK_NV_cooperative_vector is a preprocessor guard. Do not pass it to API calls.
+#define VK_NV_cooperative_vector 1
+#define VK_NV_COOPERATIVE_VECTOR_SPEC_VERSION 4
+#define VK_NV_COOPERATIVE_VECTOR_EXTENSION_NAME "VK_NV_cooperative_vector"
+
+typedef enum VkCooperativeVectorMatrixLayoutNV {
+    VK_COOPERATIVE_VECTOR_MATRIX_LAYOUT_ROW_MAJOR_NV = 0,
+    VK_COOPERATIVE_VECTOR_MATRIX_LAYOUT_COLUMN_MAJOR_NV = 1,
+    VK_COOPERATIVE_VECTOR_MATRIX_LAYOUT_INFERENCING_OPTIMAL_NV = 2,
+    VK_COOPERATIVE_VECTOR_MATRIX_LAYOUT_TRAINING_OPTIMAL_NV = 3,
+    VK_COOPERATIVE_VECTOR_MATRIX_LAYOUT_MAX_ENUM_NV = 0x7FFFFFFF
+} VkCooperativeVectorMatrixLayoutNV;
+typedef struct VkPhysicalDeviceCooperativeVectorPropertiesNV {
+    VkStructureType sType;
+    void* pNext;
+    VkShaderStageFlags cooperativeVectorSupportedStages;
+    VkBool32 cooperativeVectorTrainingFloat16Accumulation;
+    VkBool32 cooperativeVectorTrainingFloat32Accumulation;
+    uint32_t maxCooperativeVectorComponents;
+} VkPhysicalDeviceCooperativeVectorPropertiesNV;
+
+typedef struct VkPhysicalDeviceCooperativeVectorFeaturesNV {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 cooperativeVector;
+    VkBool32 cooperativeVectorTraining;
+} VkPhysicalDeviceCooperativeVectorFeaturesNV;
+
+typedef struct VkCooperativeVectorPropertiesNV {
+    VkStructureType sType;
+    void* pNext;
+    VkComponentTypeKHR inputType;
+    VkComponentTypeKHR inputInterpretation;
+    VkComponentTypeKHR matrixInterpretation;
+    VkComponentTypeKHR biasInterpretation;
+    VkComponentTypeKHR resultType;
+    VkBool32 transpose;
+} VkCooperativeVectorPropertiesNV;
+
+typedef struct VkConvertCooperativeVectorMatrixInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    size_t srcSize;
+    VkDeviceOrHostAddressConstKHR srcData;
+    size_t* pDstSize;
+    VkDeviceOrHostAddressKHR dstData;
+    VkComponentTypeKHR srcComponentType;
+    VkComponentTypeKHR dstComponentType;
+    uint32_t numRows;
+    uint32_t numColumns;
+    VkCooperativeVectorMatrixLayoutNV srcLayout;
+    size_t srcStride;
+    VkCooperativeVectorMatrixLayoutNV dstLayout;
+    size_t dstStride;
+} VkConvertCooperativeVectorMatrixInfoNV;
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceCooperativeVectorPropertiesNV)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkCooperativeVectorPropertiesNV* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkConvertCooperativeVectorMatrixNV)(VkDevice device, const VkConvertCooperativeVectorMatrixInfoNV* pInfo);
+typedef void (VKAPI_PTR *PFN_vkCmdConvertCooperativeVectorMatrixNV)(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkConvertCooperativeVectorMatrixInfoNV* pInfos);
+
+#ifndef VK_NO_PROTOTYPES
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceCooperativeVectorPropertiesNV(
+    VkPhysicalDevice physicalDevice,
+    uint32_t* pPropertyCount,
+    VkCooperativeVectorPropertiesNV* pProperties);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkConvertCooperativeVectorMatrixNV(
+    VkDevice device,
+    const VkConvertCooperativeVectorMatrixInfoNV* pInfo);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkCmdConvertCooperativeVectorMatrixNV(
+    VkCommandBuffer commandBuffer,
+    uint32_t infoCount,
+    const VkConvertCooperativeVectorMatrixInfoNV* pInfos);
+#endif
+#endif
+
+
+// VK_NV_extended_sparse_address_space is a preprocessor guard. Do not pass it to API calls.
+#define VK_NV_extended_sparse_address_space 1
+#define VK_NV_EXTENDED_SPARSE_ADDRESS_SPACE_SPEC_VERSION 1
+#define VK_NV_EXTENDED_SPARSE_ADDRESS_SPACE_EXTENSION_NAME "VK_NV_extended_sparse_address_space"
+typedef struct VkPhysicalDeviceExtendedSparseAddressSpaceFeaturesNV {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 extendedSparseAddressSpace;
+} VkPhysicalDeviceExtendedSparseAddressSpaceFeaturesNV;
+
+typedef struct VkPhysicalDeviceExtendedSparseAddressSpacePropertiesNV {
+    VkStructureType sType;
+    void* pNext;
+    VkDeviceSize extendedSparseAddressSpaceSize;
+    VkImageUsageFlags extendedSparseImageUsageFlags;
+    VkBufferUsageFlags extendedSparseBufferUsageFlags;
+} VkPhysicalDeviceExtendedSparseAddressSpacePropertiesNV;
+
+
+// VK_EXT_mutable_descriptor_type is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_mutable_descriptor_type 1
+#define VK_EXT_MUTABLE_DESCRIPTOR_TYPE_SPEC_VERSION 1
+#define VK_EXT_MUTABLE_DESCRIPTOR_TYPE_EXTENSION_NAME "VK_EXT_mutable_descriptor_type"
+
+
+// VK_EXT_legacy_vertex_attributes is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_legacy_vertex_attributes 1
+#define VK_EXT_LEGACY_VERTEX_ATTRIBUTES_SPEC_VERSION 1
+#define VK_EXT_LEGACY_VERTEX_ATTRIBUTES_EXTENSION_NAME "VK_EXT_legacy_vertex_attributes"
+typedef struct VkPhysicalDeviceLegacyVertexAttributesFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 legacyVertexAttributes;
+} VkPhysicalDeviceLegacyVertexAttributesFeaturesEXT;
+
+typedef struct VkPhysicalDeviceLegacyVertexAttributesPropertiesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 nativeUnalignedPerformance;
+} VkPhysicalDeviceLegacyVertexAttributesPropertiesEXT;
+
+
+// VK_EXT_layer_settings is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_layer_settings 1
+#define VK_EXT_LAYER_SETTINGS_SPEC_VERSION 2
+#define VK_EXT_LAYER_SETTINGS_EXTENSION_NAME "VK_EXT_layer_settings"
+
+typedef enum VkLayerSettingTypeEXT {
+    VK_LAYER_SETTING_TYPE_BOOL32_EXT = 0,
+    VK_LAYER_SETTING_TYPE_INT32_EXT = 1,
+    VK_LAYER_SETTING_TYPE_INT64_EXT = 2,
+    VK_LAYER_SETTING_TYPE_UINT32_EXT = 3,
+    VK_LAYER_SETTING_TYPE_UINT64_EXT = 4,
+    VK_LAYER_SETTING_TYPE_FLOAT32_EXT = 5,
+    VK_LAYER_SETTING_TYPE_FLOAT64_EXT = 6,
+    VK_LAYER_SETTING_TYPE_STRING_EXT = 7,
+    VK_LAYER_SETTING_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkLayerSettingTypeEXT;
+typedef struct VkLayerSettingEXT {
+    const char* pLayerName;
+    const char* pSettingName;
+    VkLayerSettingTypeEXT type;
+    uint32_t valueCount;
+    const void* pValues;
+} VkLayerSettingEXT;
+
+typedef struct VkLayerSettingsCreateInfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t settingCount;
+    const VkLayerSettingEXT* pSettings;
+} VkLayerSettingsCreateInfoEXT;
+
+
+// VK_ARM_shader_core_builtins is a preprocessor guard. Do not pass it to API calls.
+#define VK_ARM_shader_core_builtins 1
+#define VK_ARM_SHADER_CORE_BUILTINS_SPEC_VERSION 2
+#define VK_ARM_SHADER_CORE_BUILTINS_EXTENSION_NAME "VK_ARM_shader_core_builtins"
+typedef struct VkPhysicalDeviceShaderCoreBuiltinsFeaturesARM {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 shaderCoreBuiltins;
+} VkPhysicalDeviceShaderCoreBuiltinsFeaturesARM;
+
+typedef struct VkPhysicalDeviceShaderCoreBuiltinsPropertiesARM {
+    VkStructureType sType;
+    void* pNext;
+    uint64_t shaderCoreMask;
+    uint32_t shaderCoreCount;
+    uint32_t shaderWarpsPerCore;
+} VkPhysicalDeviceShaderCoreBuiltinsPropertiesARM;
+
+
+// VK_EXT_pipeline_library_group_handles is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_pipeline_library_group_handles 1
+#define VK_EXT_PIPELINE_LIBRARY_GROUP_HANDLES_SPEC_VERSION 1
+#define VK_EXT_PIPELINE_LIBRARY_GROUP_HANDLES_EXTENSION_NAME "VK_EXT_pipeline_library_group_handles"
+typedef struct VkPhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 pipelineLibraryGroupHandles;
+} VkPhysicalDevicePipelineLibraryGroupHandlesFeaturesEXT;
+
+
+// VK_EXT_dynamic_rendering_unused_attachments is a preprocessor guard. Do not pass it to API calls.
+#define VK_EXT_dynamic_rendering_unused_attachments 1
+#define VK_EXT_DYNAMIC_RENDERING_UNUSED_ATTACHMENTS_SPEC_VERSION 1
+#define VK_EXT_DYNAMIC_RENDERING_UNUSED_ATTACHMENTS_EXTENSION_NAME "VK_EXT_dynamic_rendering_unused_attachments"
+typedef struct VkPhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 dynamicRenderingUnusedAttachments;
+} VkPhysicalDeviceDynamicRenderingUnusedAttachmentsFeaturesEXT;
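+
+#if 0
+/* [Editor's note] Illustrative sketch only -- not part of the upstream Vulkan
+ * header, kept inert behind #if 0. Shows how the VK_EXT_layer_settings structs
+ * above are chained into VkInstanceCreateInfo; the layer and setting names
+ * here are placeholders, not values defined by this header. */
+static VkResult createInstanceWithLayerSetting(VkInstance* pInstance)
+{
+    const VkBool32 enabled = VK_TRUE;
+    VkLayerSettingEXT setting;
+    VkLayerSettingsCreateInfoEXT settingsInfo;
+    VkInstanceCreateInfo ci = {0};
+
+    setting.pLayerName = "VK_LAYER_KHRONOS_validation"; /* placeholder */
+    setting.pSettingName = "example_bool_setting";      /* placeholder */
+    setting.type = VK_LAYER_SETTING_TYPE_BOOL32_EXT;
+    setting.valueCount = 1;
+    setting.pValues = &enabled;
+
+    settingsInfo.sType = VK_STRUCTURE_TYPE_LAYER_SETTINGS_CREATE_INFO_EXT;
+    settingsInfo.pNext = NULL;
+    settingsInfo.settingCount = 1;
+    settingsInfo.pSettings = &setting;
+
+    ci.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
+    ci.pNext = &settingsInfo; /* consumed by any layer that honors the setting */
+    return vkCreateInstance(&ci, NULL, pInstance);
+}
+#endif
+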
+
+// VK_NV_low_latency2 is a preprocessor guard. Do not pass it to API calls.
+#define VK_NV_low_latency2 1
+#define VK_NV_LOW_LATENCY_2_SPEC_VERSION 2
+#define VK_NV_LOW_LATENCY_2_EXTENSION_NAME "VK_NV_low_latency2"
+
+typedef enum VkLatencyMarkerNV {
+    VK_LATENCY_MARKER_SIMULATION_START_NV = 0,
+    VK_LATENCY_MARKER_SIMULATION_END_NV = 1,
+    VK_LATENCY_MARKER_RENDERSUBMIT_START_NV = 2,
+    VK_LATENCY_MARKER_RENDERSUBMIT_END_NV = 3,
+    VK_LATENCY_MARKER_PRESENT_START_NV = 4,
+    VK_LATENCY_MARKER_PRESENT_END_NV = 5,
+    VK_LATENCY_MARKER_INPUT_SAMPLE_NV = 6,
+    VK_LATENCY_MARKER_TRIGGER_FLASH_NV = 7,
+    VK_LATENCY_MARKER_OUT_OF_BAND_RENDERSUBMIT_START_NV = 8,
+    VK_LATENCY_MARKER_OUT_OF_BAND_RENDERSUBMIT_END_NV = 9,
+    VK_LATENCY_MARKER_OUT_OF_BAND_PRESENT_START_NV = 10,
+    VK_LATENCY_MARKER_OUT_OF_BAND_PRESENT_END_NV = 11,
+    VK_LATENCY_MARKER_MAX_ENUM_NV = 0x7FFFFFFF
+} VkLatencyMarkerNV;
+
+typedef enum VkOutOfBandQueueTypeNV {
+    VK_OUT_OF_BAND_QUEUE_TYPE_RENDER_NV = 0,
+    VK_OUT_OF_BAND_QUEUE_TYPE_PRESENT_NV = 1,
+    VK_OUT_OF_BAND_QUEUE_TYPE_MAX_ENUM_NV = 0x7FFFFFFF
+} VkOutOfBandQueueTypeNV;
+typedef struct VkLatencySleepModeInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkBool32 lowLatencyMode;
+    VkBool32 lowLatencyBoost;
+    uint32_t minimumIntervalUs;
+} VkLatencySleepModeInfoNV;
+
+typedef struct VkLatencySleepInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkSemaphore signalSemaphore;
+    uint64_t value;
+} VkLatencySleepInfoNV;
+
+typedef struct VkSetLatencyMarkerInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    uint64_t presentID;
+    VkLatencyMarkerNV marker;
+} VkSetLatencyMarkerInfoNV;
+
+typedef struct VkLatencyTimingsFrameReportNV {
+    VkStructureType sType;
+    void* pNext;
+    uint64_t presentID;
+    uint64_t inputSampleTimeUs;
+    uint64_t simStartTimeUs;
+    uint64_t simEndTimeUs;
+    uint64_t renderSubmitStartTimeUs;
+    uint64_t renderSubmitEndTimeUs;
+    uint64_t presentStartTimeUs;
+    uint64_t presentEndTimeUs;
+    uint64_t driverStartTimeUs;
+    uint64_t driverEndTimeUs;
+    uint64_t osRenderQueueStartTimeUs;
+    uint64_t osRenderQueueEndTimeUs;
+    uint64_t gpuRenderStartTimeUs;
+    uint64_t gpuRenderEndTimeUs;
+} VkLatencyTimingsFrameReportNV;
+
+typedef struct VkGetLatencyMarkerInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t timingCount;
+    VkLatencyTimingsFrameReportNV* pTimings;
+} VkGetLatencyMarkerInfoNV;
+
+typedef struct VkLatencySubmissionPresentIdNV {
+    VkStructureType sType;
+    const void* pNext;
+    uint64_t presentID;
+} VkLatencySubmissionPresentIdNV;
+
+typedef struct VkSwapchainLatencyCreateInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkBool32 latencyModeEnable;
+} VkSwapchainLatencyCreateInfoNV;
+
+typedef struct VkOutOfBandQueueTypeInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkOutOfBandQueueTypeNV queueType;
+} VkOutOfBandQueueTypeInfoNV;
+
+typedef struct VkLatencySurfaceCapabilitiesNV {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t presentModeCount;
+    VkPresentModeKHR* pPresentModes;
+} VkLatencySurfaceCapabilitiesNV;
+
+typedef VkResult (VKAPI_PTR *PFN_vkSetLatencySleepModeNV)(VkDevice device, VkSwapchainKHR swapchain, const VkLatencySleepModeInfoNV* pSleepModeInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkLatencySleepNV)(VkDevice device, VkSwapchainKHR swapchain, const VkLatencySleepInfoNV* pSleepInfo);
+typedef void (VKAPI_PTR *PFN_vkSetLatencyMarkerNV)(VkDevice device, VkSwapchainKHR swapchain, const VkSetLatencyMarkerInfoNV* pLatencyMarkerInfo);
+typedef void (VKAPI_PTR *PFN_vkGetLatencyTimingsNV)(VkDevice device, VkSwapchainKHR swapchain, VkGetLatencyMarkerInfoNV* pLatencyMarkerInfo);
+typedef void (VKAPI_PTR *PFN_vkQueueNotifyOutOfBandNV)(VkQueue queue, const VkOutOfBandQueueTypeInfoNV* pQueueTypeInfo);
+
+#ifndef VK_NO_PROTOTYPES
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkSetLatencySleepModeNV(
+    VkDevice device,
+    VkSwapchainKHR swapchain,
+    const VkLatencySleepModeInfoNV* pSleepModeInfo);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkLatencySleepNV(
+    VkDevice device,
+    VkSwapchainKHR swapchain,
+    const VkLatencySleepInfoNV* pSleepInfo);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkSetLatencyMarkerNV(
+    VkDevice device,
+    VkSwapchainKHR swapchain,
+    const VkSetLatencyMarkerInfoNV* pLatencyMarkerInfo);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkGetLatencyTimingsNV(
+    VkDevice device,
+    VkSwapchainKHR swapchain,
+    VkGetLatencyMarkerInfoNV* pLatencyMarkerInfo);
+#endif
+
+#ifndef VK_ONLY_EXPORTED_PROTOTYPES
+VKAPI_ATTR void VKAPI_CALL vkQueueNotifyOutOfBandNV(
+    VkQueue queue,
+    const VkOutOfBandQueueTypeInfoNV* pQueueTypeInfo);
+#endif
+#endif
+
+
+// VK_ARM_data_graph is a preprocessor guard. Do not pass it to API calls.
+#define VK_ARM_data_graph 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDataGraphPipelineSessionARM)
+#define VK_MAX_PHYSICAL_DEVICE_DATA_GRAPH_OPERATION_SET_NAME_SIZE_ARM 128U
+#define VK_ARM_DATA_GRAPH_SPEC_VERSION 1
+#define VK_ARM_DATA_GRAPH_EXTENSION_NAME "VK_ARM_data_graph"
+
+typedef enum VkDataGraphPipelineSessionBindPointARM {
+    VK_DATA_GRAPH_PIPELINE_SESSION_BIND_POINT_TRANSIENT_ARM = 0,
+    VK_DATA_GRAPH_PIPELINE_SESSION_BIND_POINT_MAX_ENUM_ARM = 0x7FFFFFFF
+} VkDataGraphPipelineSessionBindPointARM;
+
+typedef enum VkDataGraphPipelineSessionBindPointTypeARM {
+    VK_DATA_GRAPH_PIPELINE_SESSION_BIND_POINT_TYPE_MEMORY_ARM = 0,
+    VK_DATA_GRAPH_PIPELINE_SESSION_BIND_POINT_TYPE_MAX_ENUM_ARM = 0x7FFFFFFF
+} VkDataGraphPipelineSessionBindPointTypeARM;
+
+typedef enum VkDataGraphPipelinePropertyARM {
+    VK_DATA_GRAPH_PIPELINE_PROPERTY_CREATION_LOG_ARM = 0,
+    VK_DATA_GRAPH_PIPELINE_PROPERTY_IDENTIFIER_ARM = 1,
+    VK_DATA_GRAPH_PIPELINE_PROPERTY_MAX_ENUM_ARM = 0x7FFFFFFF
+} VkDataGraphPipelinePropertyARM;
+
+typedef enum VkPhysicalDeviceDataGraphProcessingEngineTypeARM {
+    VK_PHYSICAL_DEVICE_DATA_GRAPH_PROCESSING_ENGINE_TYPE_DEFAULT_ARM = 0,
+    VK_PHYSICAL_DEVICE_DATA_GRAPH_PROCESSING_ENGINE_TYPE_NEURAL_QCOM = 1000629000,
+    VK_PHYSICAL_DEVICE_DATA_GRAPH_PROCESSING_ENGINE_TYPE_COMPUTE_QCOM = 1000629001,
+    VK_PHYSICAL_DEVICE_DATA_GRAPH_PROCESSING_ENGINE_TYPE_MAX_ENUM_ARM = 0x7FFFFFFF
+} VkPhysicalDeviceDataGraphProcessingEngineTypeARM;
+
+typedef enum VkPhysicalDeviceDataGraphOperationTypeARM {
+    VK_PHYSICAL_DEVICE_DATA_GRAPH_OPERATION_TYPE_SPIRV_EXTENDED_INSTRUCTION_SET_ARM = 0,
+    VK_PHYSICAL_DEVICE_DATA_GRAPH_OPERATION_TYPE_NEURAL_MODEL_QCOM = 1000629000,
+    VK_PHYSICAL_DEVICE_DATA_GRAPH_OPERATION_TYPE_BUILTIN_MODEL_QCOM = 1000629001,
+    VK_PHYSICAL_DEVICE_DATA_GRAPH_OPERATION_TYPE_MAX_ENUM_ARM = 0x7FFFFFFF
+} VkPhysicalDeviceDataGraphOperationTypeARM;
+typedef VkFlags64 VkDataGraphPipelineSessionCreateFlagsARM;
+
+// Flag bits for VkDataGraphPipelineSessionCreateFlagBitsARM
+typedef VkFlags64 VkDataGraphPipelineSessionCreateFlagBitsARM;
+static const VkDataGraphPipelineSessionCreateFlagBitsARM VK_DATA_GRAPH_PIPELINE_SESSION_CREATE_PROTECTED_BIT_ARM = 0x00000001ULL;
+
+typedef VkFlags64 VkDataGraphPipelineDispatchFlagsARM;
+
+// Flag bits for VkDataGraphPipelineDispatchFlagBitsARM
+typedef VkFlags64 VkDataGraphPipelineDispatchFlagBitsARM;
+
+typedef struct VkPhysicalDeviceDataGraphFeaturesARM {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 dataGraph;
+    VkBool32 dataGraphUpdateAfterBind;
+    VkBool32 dataGraphSpecializationConstants;
+    VkBool32 dataGraphDescriptorBuffer;
+    VkBool32 dataGraphShaderModule;
+} VkPhysicalDeviceDataGraphFeaturesARM;
+
+typedef struct VkDataGraphPipelineConstantARM {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t id;
+    const void* pConstantData;
+} VkDataGraphPipelineConstantARM;
+
+typedef struct VkDataGraphPipelineResourceInfoARM {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t descriptorSet;
+    uint32_t binding;
+    uint32_t arrayElement;
+} VkDataGraphPipelineResourceInfoARM;
+
+typedef struct VkDataGraphPipelineCompilerControlCreateInfoARM {
+    VkStructureType sType;
+    const void* pNext;
+    const char* pVendorOptions;
+} VkDataGraphPipelineCompilerControlCreateInfoARM;
+
+typedef struct VkDataGraphPipelineCreateInfoARM {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineCreateFlags2KHR flags;
+    VkPipelineLayout layout;
+    uint32_t resourceInfoCount;
+    const VkDataGraphPipelineResourceInfoARM* pResourceInfos;
+} VkDataGraphPipelineCreateInfoARM;
+
+typedef struct VkDataGraphPipelineShaderModuleCreateInfoARM {
+    VkStructureType sType;
+    const void* pNext;
+    VkShaderModule module;
+    const char* pName;
+    const VkSpecializationInfo* pSpecializationInfo;
+    uint32_t constantCount;
+    const VkDataGraphPipelineConstantARM* pConstants;
+} VkDataGraphPipelineShaderModuleCreateInfoARM;
+
+typedef struct VkDataGraphPipelineSessionCreateInfoARM {
+    VkStructureType sType;
+    const void* pNext;
+    VkDataGraphPipelineSessionCreateFlagsARM flags;
+    VkPipeline dataGraphPipeline;
+} VkDataGraphPipelineSessionCreateInfoARM;
+
+typedef struct VkDataGraphPipelineSessionBindPointRequirementsInfoARM {
+    VkStructureType sType;
+    const void* pNext;
+    VkDataGraphPipelineSessionARM session;
+} VkDataGraphPipelineSessionBindPointRequirementsInfoARM;
+
+typedef struct VkDataGraphPipelineSessionBindPointRequirementARM {
+    VkStructureType sType;
+    void* pNext;
+    VkDataGraphPipelineSessionBindPointARM bindPoint;
+    VkDataGraphPipelineSessionBindPointTypeARM bindPointType;
+    uint32_t numObjects;
+} VkDataGraphPipelineSessionBindPointRequirementARM;
+
+typedef struct VkDataGraphPipelineSessionMemoryRequirementsInfoARM {
+    VkStructureType sType;
+    const void* pNext;
+    VkDataGraphPipelineSessionARM session;
+    VkDataGraphPipelineSessionBindPointARM bindPoint;
+    uint32_t objectIndex;
+} VkDataGraphPipelineSessionMemoryRequirementsInfoARM;
+
+typedef struct VkBindDataGraphPipelineSessionMemoryInfoARM {
+    VkStructureType sType;
+    const void* pNext;
+    VkDataGraphPipelineSessionARM session;
+    VkDataGraphPipelineSessionBindPointARM bindPoint;
+    uint32_t objectIndex;
+    VkDeviceMemory memory;
+    VkDeviceSize memoryOffset;
+} VkBindDataGraphPipelineSessionMemoryInfoARM;
+
+typedef struct VkDataGraphPipelineInfoARM {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipeline dataGraphPipeline;
+} VkDataGraphPipelineInfoARM;
+
+typedef struct VkDataGraphPipelinePropertyQueryResultARM {
+    VkStructureType sType;
+    void* pNext;
+    VkDataGraphPipelinePropertyARM property;
+    VkBool32 isText;
+    size_t dataSize;
+    void* pData;
+} VkDataGraphPipelinePropertyQueryResultARM;
+
+typedef struct VkDataGraphPipelineIdentifierCreateInfoARM {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t identifierSize;
+    const uint8_t* pIdentifier;
+} VkDataGraphPipelineIdentifierCreateInfoARM;
+
+typedef struct
VkDataGraphPipelineDispatchInfoARM { + VkStructureType sType; + void* pNext; + VkDataGraphPipelineDispatchFlagsARM flags; +} VkDataGraphPipelineDispatchInfoARM; + +typedef struct VkPhysicalDeviceDataGraphProcessingEngineARM { + VkPhysicalDeviceDataGraphProcessingEngineTypeARM type; + VkBool32 isForeign; +} VkPhysicalDeviceDataGraphProcessingEngineARM; + +typedef struct VkPhysicalDeviceDataGraphOperationSupportARM { + VkPhysicalDeviceDataGraphOperationTypeARM operationType; + char name[VK_MAX_PHYSICAL_DEVICE_DATA_GRAPH_OPERATION_SET_NAME_SIZE_ARM]; + uint32_t version; +} VkPhysicalDeviceDataGraphOperationSupportARM; + +typedef struct VkQueueFamilyDataGraphPropertiesARM { + VkStructureType sType; + void* pNext; + VkPhysicalDeviceDataGraphProcessingEngineARM engine; + VkPhysicalDeviceDataGraphOperationSupportARM operation; +} VkQueueFamilyDataGraphPropertiesARM; + +typedef struct VkDataGraphProcessingEngineCreateInfoARM { + VkStructureType sType; + const void* pNext; + uint32_t processingEngineCount; + VkPhysicalDeviceDataGraphProcessingEngineARM* pProcessingEngines; +} VkDataGraphProcessingEngineCreateInfoARM; + +typedef struct VkPhysicalDeviceQueueFamilyDataGraphProcessingEngineInfoARM { + VkStructureType sType; + const void* pNext; + uint32_t queueFamilyIndex; + VkPhysicalDeviceDataGraphProcessingEngineTypeARM engineType; +} VkPhysicalDeviceQueueFamilyDataGraphProcessingEngineInfoARM; + +typedef struct VkQueueFamilyDataGraphProcessingEnginePropertiesARM { + VkStructureType sType; + void* pNext; + VkExternalSemaphoreHandleTypeFlags foreignSemaphoreHandleTypes; + VkExternalMemoryHandleTypeFlags foreignMemoryHandleTypes; +} VkQueueFamilyDataGraphProcessingEnginePropertiesARM; + +typedef struct VkDataGraphPipelineConstantTensorSemiStructuredSparsityInfoARM { + VkStructureType sType; + const void* pNext; + uint32_t dimension; + uint32_t zeroCount; + uint32_t groupSize; +} VkDataGraphPipelineConstantTensorSemiStructuredSparsityInfoARM; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateDataGraphPipelinesARM)(VkDevice device, VkDeferredOperationKHR deferredOperation, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkDataGraphPipelineCreateInfoARM* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDataGraphPipelineSessionARM)(VkDevice device, const VkDataGraphPipelineSessionCreateInfoARM* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDataGraphPipelineSessionARM* pSession); +typedef VkResult (VKAPI_PTR *PFN_vkGetDataGraphPipelineSessionBindPointRequirementsARM)(VkDevice device, const VkDataGraphPipelineSessionBindPointRequirementsInfoARM* pInfo, uint32_t* pBindPointRequirementCount, VkDataGraphPipelineSessionBindPointRequirementARM* pBindPointRequirements); +typedef void (VKAPI_PTR *PFN_vkGetDataGraphPipelineSessionMemoryRequirementsARM)(VkDevice device, const VkDataGraphPipelineSessionMemoryRequirementsInfoARM* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef VkResult (VKAPI_PTR *PFN_vkBindDataGraphPipelineSessionMemoryARM)(VkDevice device, uint32_t bindInfoCount, const VkBindDataGraphPipelineSessionMemoryInfoARM* pBindInfos); +typedef void (VKAPI_PTR *PFN_vkDestroyDataGraphPipelineSessionARM)(VkDevice device, VkDataGraphPipelineSessionARM session, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkCmdDispatchDataGraphARM)(VkCommandBuffer commandBuffer, VkDataGraphPipelineSessionARM session, const VkDataGraphPipelineDispatchInfoARM* pInfo); +typedef VkResult (VKAPI_PTR 
*PFN_vkGetDataGraphPipelineAvailablePropertiesARM)(VkDevice device, const VkDataGraphPipelineInfoARM* pPipelineInfo, uint32_t* pPropertiesCount, VkDataGraphPipelinePropertyARM* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetDataGraphPipelinePropertiesARM)(VkDevice device, const VkDataGraphPipelineInfoARM* pPipelineInfo, uint32_t propertiesCount, VkDataGraphPipelinePropertyQueryResultARM* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyDataGraphPropertiesARM)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, uint32_t* pQueueFamilyDataGraphPropertyCount, VkQueueFamilyDataGraphPropertiesARM* pQueueFamilyDataGraphProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyDataGraphProcessingEnginePropertiesARM)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceQueueFamilyDataGraphProcessingEngineInfoARM* pQueueFamilyDataGraphProcessingEngineInfo, VkQueueFamilyDataGraphProcessingEnginePropertiesARM* pQueueFamilyDataGraphProcessingEngineProperties); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDataGraphPipelinesARM( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkDataGraphPipelineCreateInfoARM* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDataGraphPipelineSessionARM( + VkDevice device, + const VkDataGraphPipelineSessionCreateInfoARM* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDataGraphPipelineSessionARM* pSession); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetDataGraphPipelineSessionBindPointRequirementsARM( + VkDevice device, + const VkDataGraphPipelineSessionBindPointRequirementsInfoARM* pInfo, + uint32_t* pBindPointRequirementCount, + VkDataGraphPipelineSessionBindPointRequirementARM* pBindPointRequirements); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetDataGraphPipelineSessionMemoryRequirementsARM( + VkDevice device, + const VkDataGraphPipelineSessionMemoryRequirementsInfoARM* pInfo, + VkMemoryRequirements2* pMemoryRequirements); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkBindDataGraphPipelineSessionMemoryARM( + VkDevice device, + uint32_t bindInfoCount, + const VkBindDataGraphPipelineSessionMemoryInfoARM* pBindInfos); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkDestroyDataGraphPipelineSessionARM( + VkDevice device, + VkDataGraphPipelineSessionARM session, + const VkAllocationCallbacks* pAllocator); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDispatchDataGraphARM( + VkCommandBuffer commandBuffer, + VkDataGraphPipelineSessionARM session, + const VkDataGraphPipelineDispatchInfoARM* pInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetDataGraphPipelineAvailablePropertiesARM( + VkDevice device, + const VkDataGraphPipelineInfoARM* pPipelineInfo, + uint32_t* pPropertiesCount, + VkDataGraphPipelinePropertyARM* pProperties); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetDataGraphPipelinePropertiesARM( + VkDevice device, + const VkDataGraphPipelineInfoARM* pPipelineInfo, + uint32_t propertiesCount, + VkDataGraphPipelinePropertyQueryResultARM* pProperties); +#endif + +#ifndef 
VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceQueueFamilyDataGraphPropertiesARM( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + uint32_t* pQueueFamilyDataGraphPropertyCount, + VkQueueFamilyDataGraphPropertiesARM* pQueueFamilyDataGraphProperties); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyDataGraphProcessingEnginePropertiesARM( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceQueueFamilyDataGraphProcessingEngineInfoARM* pQueueFamilyDataGraphProcessingEngineInfo, + VkQueueFamilyDataGraphProcessingEnginePropertiesARM* pQueueFamilyDataGraphProcessingEngineProperties); +#endif +#endif + + +// VK_QCOM_multiview_per_view_render_areas is a preprocessor guard. Do not pass it to API calls. +#define VK_QCOM_multiview_per_view_render_areas 1 +#define VK_QCOM_MULTIVIEW_PER_VIEW_RENDER_AREAS_SPEC_VERSION 1 +#define VK_QCOM_MULTIVIEW_PER_VIEW_RENDER_AREAS_EXTENSION_NAME "VK_QCOM_multiview_per_view_render_areas" +typedef struct VkPhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM { + VkStructureType sType; + void* pNext; + VkBool32 multiviewPerViewRenderAreas; +} VkPhysicalDeviceMultiviewPerViewRenderAreasFeaturesQCOM; + +typedef struct VkMultiviewPerViewRenderAreasRenderPassBeginInfoQCOM { + VkStructureType sType; + const void* pNext; + uint32_t perViewRenderAreaCount; + const VkRect2D* pPerViewRenderAreas; +} VkMultiviewPerViewRenderAreasRenderPassBeginInfoQCOM; + + + +// VK_NV_per_stage_descriptor_set is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_per_stage_descriptor_set 1 +#define VK_NV_PER_STAGE_DESCRIPTOR_SET_SPEC_VERSION 1 +#define VK_NV_PER_STAGE_DESCRIPTOR_SET_EXTENSION_NAME "VK_NV_per_stage_descriptor_set" +typedef struct VkPhysicalDevicePerStageDescriptorSetFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 perStageDescriptorSet; + VkBool32 dynamicPipelineLayout; +} VkPhysicalDevicePerStageDescriptorSetFeaturesNV; + + + +// VK_QCOM_image_processing2 is a preprocessor guard. Do not pass it to API calls. +#define VK_QCOM_image_processing2 1 +#define VK_QCOM_IMAGE_PROCESSING_2_SPEC_VERSION 1 +#define VK_QCOM_IMAGE_PROCESSING_2_EXTENSION_NAME "VK_QCOM_image_processing2" + +typedef enum VkBlockMatchWindowCompareModeQCOM { + VK_BLOCK_MATCH_WINDOW_COMPARE_MODE_MIN_QCOM = 0, + VK_BLOCK_MATCH_WINDOW_COMPARE_MODE_MAX_QCOM = 1, + VK_BLOCK_MATCH_WINDOW_COMPARE_MODE_MAX_ENUM_QCOM = 0x7FFFFFFF +} VkBlockMatchWindowCompareModeQCOM; +typedef struct VkPhysicalDeviceImageProcessing2FeaturesQCOM { + VkStructureType sType; + void* pNext; + VkBool32 textureBlockMatch2; +} VkPhysicalDeviceImageProcessing2FeaturesQCOM; + +typedef struct VkPhysicalDeviceImageProcessing2PropertiesQCOM { + VkStructureType sType; + void* pNext; + VkExtent2D maxBlockMatchWindow; +} VkPhysicalDeviceImageProcessing2PropertiesQCOM; + +typedef struct VkSamplerBlockMatchWindowCreateInfoQCOM { + VkStructureType sType; + const void* pNext; + VkExtent2D windowExtent; + VkBlockMatchWindowCompareModeQCOM windowCompareMode; +} VkSamplerBlockMatchWindowCreateInfoQCOM; + + + +// VK_QCOM_filter_cubic_weights is a preprocessor guard. Do not pass it to API calls. 
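+/* Editor's note: illustrative sketch for VK_QCOM_image_processing2 (declared
+ * above); not part of the generated header and guarded out of compilation. It
+ * shows attaching a block-match window to sampler creation. The sType token is
+ * an assumption based on the extension's naming convention. */
+#if 0
+static VkResult example_block_match_sampler(VkDevice device, VkSampler* pSampler)
+{
+    VkSamplerBlockMatchWindowCreateInfoQCOM window;
+    window.sType = VK_STRUCTURE_TYPE_SAMPLER_BLOCK_MATCH_WINDOW_CREATE_INFO_QCOM; /* assumed token */
+    window.pNext = NULL;
+    window.windowExtent.width  = 4;   /* must not exceed maxBlockMatchWindow */
+    window.windowExtent.height = 4;
+    window.windowCompareMode = VK_BLOCK_MATCH_WINDOW_COMPARE_MODE_MIN_QCOM;
+
+    VkSamplerCreateInfo ci = { VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO };
+    ci.pNext = &window;               /* attach the window description */
+    ci.magFilter = VK_FILTER_NEAREST;
+    ci.minFilter = VK_FILTER_NEAREST;
+    return vkCreateSampler(device, &ci, NULL, pSampler);
+}
+#endif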
+#define VK_QCOM_filter_cubic_weights 1 +#define VK_QCOM_FILTER_CUBIC_WEIGHTS_SPEC_VERSION 1 +#define VK_QCOM_FILTER_CUBIC_WEIGHTS_EXTENSION_NAME "VK_QCOM_filter_cubic_weights" + +typedef enum VkCubicFilterWeightsQCOM { + VK_CUBIC_FILTER_WEIGHTS_CATMULL_ROM_QCOM = 0, + VK_CUBIC_FILTER_WEIGHTS_ZERO_TANGENT_CARDINAL_QCOM = 1, + VK_CUBIC_FILTER_WEIGHTS_B_SPLINE_QCOM = 2, + VK_CUBIC_FILTER_WEIGHTS_MITCHELL_NETRAVALI_QCOM = 3, + VK_CUBIC_FILTER_WEIGHTS_MAX_ENUM_QCOM = 0x7FFFFFFF +} VkCubicFilterWeightsQCOM; +typedef struct VkPhysicalDeviceCubicWeightsFeaturesQCOM { + VkStructureType sType; + void* pNext; + VkBool32 selectableCubicWeights; +} VkPhysicalDeviceCubicWeightsFeaturesQCOM; + +typedef struct VkSamplerCubicWeightsCreateInfoQCOM { + VkStructureType sType; + const void* pNext; + VkCubicFilterWeightsQCOM cubicWeights; +} VkSamplerCubicWeightsCreateInfoQCOM; + +typedef struct VkBlitImageCubicWeightsInfoQCOM { + VkStructureType sType; + const void* pNext; + VkCubicFilterWeightsQCOM cubicWeights; +} VkBlitImageCubicWeightsInfoQCOM; + + + +// VK_QCOM_ycbcr_degamma is a preprocessor guard. Do not pass it to API calls. +#define VK_QCOM_ycbcr_degamma 1 +#define VK_QCOM_YCBCR_DEGAMMA_SPEC_VERSION 1 +#define VK_QCOM_YCBCR_DEGAMMA_EXTENSION_NAME "VK_QCOM_ycbcr_degamma" +typedef struct VkPhysicalDeviceYcbcrDegammaFeaturesQCOM { + VkStructureType sType; + void* pNext; + VkBool32 ycbcrDegamma; +} VkPhysicalDeviceYcbcrDegammaFeaturesQCOM; + +typedef struct VkSamplerYcbcrConversionYcbcrDegammaCreateInfoQCOM { + VkStructureType sType; + void* pNext; + VkBool32 enableYDegamma; + VkBool32 enableCbCrDegamma; +} VkSamplerYcbcrConversionYcbcrDegammaCreateInfoQCOM; + + + +// VK_QCOM_filter_cubic_clamp is a preprocessor guard. Do not pass it to API calls. +#define VK_QCOM_filter_cubic_clamp 1 +#define VK_QCOM_FILTER_CUBIC_CLAMP_SPEC_VERSION 1 +#define VK_QCOM_FILTER_CUBIC_CLAMP_EXTENSION_NAME "VK_QCOM_filter_cubic_clamp" +typedef struct VkPhysicalDeviceCubicClampFeaturesQCOM { + VkStructureType sType; + void* pNext; + VkBool32 cubicRangeClamp; +} VkPhysicalDeviceCubicClampFeaturesQCOM; + + + +// VK_EXT_attachment_feedback_loop_dynamic_state is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_attachment_feedback_loop_dynamic_state 1 +#define VK_EXT_ATTACHMENT_FEEDBACK_LOOP_DYNAMIC_STATE_SPEC_VERSION 1 +#define VK_EXT_ATTACHMENT_FEEDBACK_LOOP_DYNAMIC_STATE_EXTENSION_NAME "VK_EXT_attachment_feedback_loop_dynamic_state" +typedef struct VkPhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 attachmentFeedbackLoopDynamicState; +} VkPhysicalDeviceAttachmentFeedbackLoopDynamicStateFeaturesEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdSetAttachmentFeedbackLoopEnableEXT)(VkCommandBuffer commandBuffer, VkImageAspectFlags aspectMask); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetAttachmentFeedbackLoopEnableEXT( + VkCommandBuffer commandBuffer, + VkImageAspectFlags aspectMask); +#endif +#endif + + +// VK_MSFT_layered_driver is a preprocessor guard. Do not pass it to API calls. 
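+/* Editor's note: illustrative sketch for
+ * VK_EXT_attachment_feedback_loop_dynamic_state (declared above); not part of
+ * the generated header and guarded out of compilation. Extension device
+ * commands are typically resolved through vkGetDeviceProcAddr rather than
+ * called through static prototypes. */
+#if 0
+static void example_set_feedback_loop(VkDevice device, VkCommandBuffer cmd)
+{
+    /* Returns NULL when the extension was not enabled on the device. */
+    PFN_vkCmdSetAttachmentFeedbackLoopEnableEXT fp =
+        (PFN_vkCmdSetAttachmentFeedbackLoopEnableEXT)
+            vkGetDeviceProcAddr(device, "vkCmdSetAttachmentFeedbackLoopEnableEXT");
+    if (fp != NULL)
+        fp(cmd, VK_IMAGE_ASPECT_COLOR_BIT); /* feedback loop on color attachments */
+}
+#endif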
+#define VK_MSFT_layered_driver 1 +#define VK_MSFT_LAYERED_DRIVER_SPEC_VERSION 1 +#define VK_MSFT_LAYERED_DRIVER_EXTENSION_NAME "VK_MSFT_layered_driver" + +typedef enum VkLayeredDriverUnderlyingApiMSFT { + VK_LAYERED_DRIVER_UNDERLYING_API_NONE_MSFT = 0, + VK_LAYERED_DRIVER_UNDERLYING_API_D3D12_MSFT = 1, + VK_LAYERED_DRIVER_UNDERLYING_API_MAX_ENUM_MSFT = 0x7FFFFFFF +} VkLayeredDriverUnderlyingApiMSFT; +typedef struct VkPhysicalDeviceLayeredDriverPropertiesMSFT { + VkStructureType sType; + void* pNext; + VkLayeredDriverUnderlyingApiMSFT underlyingAPI; +} VkPhysicalDeviceLayeredDriverPropertiesMSFT; + + + +// VK_NV_descriptor_pool_overallocation is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_descriptor_pool_overallocation 1 +#define VK_NV_DESCRIPTOR_POOL_OVERALLOCATION_SPEC_VERSION 1 +#define VK_NV_DESCRIPTOR_POOL_OVERALLOCATION_EXTENSION_NAME "VK_NV_descriptor_pool_overallocation" +typedef struct VkPhysicalDeviceDescriptorPoolOverallocationFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 descriptorPoolOverallocation; +} VkPhysicalDeviceDescriptorPoolOverallocationFeaturesNV; + + + +// VK_QCOM_tile_memory_heap is a preprocessor guard. Do not pass it to API calls. +#define VK_QCOM_tile_memory_heap 1 +#define VK_QCOM_TILE_MEMORY_HEAP_SPEC_VERSION 1 +#define VK_QCOM_TILE_MEMORY_HEAP_EXTENSION_NAME "VK_QCOM_tile_memory_heap" +typedef struct VkPhysicalDeviceTileMemoryHeapFeaturesQCOM { + VkStructureType sType; + void* pNext; + VkBool32 tileMemoryHeap; +} VkPhysicalDeviceTileMemoryHeapFeaturesQCOM; + +typedef struct VkPhysicalDeviceTileMemoryHeapPropertiesQCOM { + VkStructureType sType; + void* pNext; + VkBool32 queueSubmitBoundary; + VkBool32 tileBufferTransfers; +} VkPhysicalDeviceTileMemoryHeapPropertiesQCOM; + +typedef struct VkTileMemoryRequirementsQCOM { + VkStructureType sType; + void* pNext; + VkDeviceSize size; + VkDeviceSize alignment; +} VkTileMemoryRequirementsQCOM; + +typedef struct VkTileMemoryBindInfoQCOM { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; +} VkTileMemoryBindInfoQCOM; + +typedef struct VkTileMemorySizeInfoQCOM { + VkStructureType sType; + const void* pNext; + VkDeviceSize size; +} VkTileMemorySizeInfoQCOM; + +typedef void (VKAPI_PTR *PFN_vkCmdBindTileMemoryQCOM)(VkCommandBuffer commandBuffer, const VkTileMemoryBindInfoQCOM* pTileMemoryBindInfo); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBindTileMemoryQCOM( + VkCommandBuffer commandBuffer, + const VkTileMemoryBindInfoQCOM* pTileMemoryBindInfo); +#endif +#endif + + +// VK_EXT_memory_decompression is a preprocessor guard. Do not pass it to API calls. 
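+/* Editor's note: illustrative sketch for VK_QCOM_tile_memory_heap (declared
+ * above); not part of the generated header and guarded out of compilation. It
+ * binds tile memory to a command buffer; the sType token is an assumption
+ * based on the extension's naming convention. */
+#if 0
+static void example_bind_tile_memory(VkDevice device, VkCommandBuffer cmd,
+                                     VkDeviceMemory tileMemory)
+{
+    VkTileMemoryBindInfoQCOM bind;
+    bind.sType = VK_STRUCTURE_TYPE_TILE_MEMORY_BIND_INFO_QCOM; /* assumed token */
+    bind.pNext = NULL;
+    bind.memory = tileMemory;   /* memory allocated from a tile memory heap */
+
+    PFN_vkCmdBindTileMemoryQCOM fp = (PFN_vkCmdBindTileMemoryQCOM)
+        vkGetDeviceProcAddr(device, "vkCmdBindTileMemoryQCOM");
+    if (fp != NULL)
+        fp(cmd, &bind);
+}
+#endif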
+#define VK_EXT_memory_decompression 1 +#define VK_EXT_MEMORY_DECOMPRESSION_SPEC_VERSION 1 +#define VK_EXT_MEMORY_DECOMPRESSION_EXTENSION_NAME "VK_EXT_memory_decompression" +typedef struct VkDecompressMemoryRegionEXT { + VkDeviceAddress srcAddress; + VkDeviceAddress dstAddress; + VkDeviceSize compressedSize; + VkDeviceSize decompressedSize; +} VkDecompressMemoryRegionEXT; + +typedef struct VkDecompressMemoryInfoEXT { + VkStructureType sType; + const void* pNext; + VkMemoryDecompressionMethodFlagsEXT decompressionMethod; + uint32_t regionCount; + const VkDecompressMemoryRegionEXT* pRegions; +} VkDecompressMemoryInfoEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdDecompressMemoryEXT)(VkCommandBuffer commandBuffer, const VkDecompressMemoryInfoEXT* pDecompressMemoryInfoEXT); +typedef void (VKAPI_PTR *PFN_vkCmdDecompressMemoryIndirectCountEXT)(VkCommandBuffer commandBuffer, VkMemoryDecompressionMethodFlagsEXT decompressionMethod, VkDeviceAddress indirectCommandsAddress, VkDeviceAddress indirectCommandsCountAddress, uint32_t maxDecompressionCount, uint32_t stride); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDecompressMemoryEXT( + VkCommandBuffer commandBuffer, + const VkDecompressMemoryInfoEXT* pDecompressMemoryInfoEXT); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDecompressMemoryIndirectCountEXT( + VkCommandBuffer commandBuffer, + VkMemoryDecompressionMethodFlagsEXT decompressionMethod, + VkDeviceAddress indirectCommandsAddress, + VkDeviceAddress indirectCommandsCountAddress, + uint32_t maxDecompressionCount, + uint32_t stride); +#endif +#endif + + +// VK_NV_display_stereo is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_display_stereo 1 +#define VK_NV_DISPLAY_STEREO_SPEC_VERSION 1 +#define VK_NV_DISPLAY_STEREO_EXTENSION_NAME "VK_NV_display_stereo" + +typedef enum VkDisplaySurfaceStereoTypeNV { + VK_DISPLAY_SURFACE_STEREO_TYPE_NONE_NV = 0, + VK_DISPLAY_SURFACE_STEREO_TYPE_ONBOARD_DIN_NV = 1, + VK_DISPLAY_SURFACE_STEREO_TYPE_HDMI_3D_NV = 2, + VK_DISPLAY_SURFACE_STEREO_TYPE_INBAND_DISPLAYPORT_NV = 3, + VK_DISPLAY_SURFACE_STEREO_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkDisplaySurfaceStereoTypeNV; +typedef struct VkDisplaySurfaceStereoCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkDisplaySurfaceStereoTypeNV stereoType; +} VkDisplaySurfaceStereoCreateInfoNV; + +typedef struct VkDisplayModeStereoPropertiesNV { + VkStructureType sType; + void* pNext; + VkBool32 hdmi3DSupported; +} VkDisplayModeStereoPropertiesNV; + + + +// VK_NV_raw_access_chains is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_raw_access_chains 1 +#define VK_NV_RAW_ACCESS_CHAINS_SPEC_VERSION 1 +#define VK_NV_RAW_ACCESS_CHAINS_EXTENSION_NAME "VK_NV_raw_access_chains" +typedef struct VkPhysicalDeviceRawAccessChainsFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 shaderRawAccessChains; +} VkPhysicalDeviceRawAccessChainsFeaturesNV; + + + +// VK_NV_external_compute_queue is a preprocessor guard. Do not pass it to API calls. 
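+/* Editor's note: illustrative sketch for VK_EXT_memory_decompression (declared
+ * above); not part of the generated header and guarded out of compilation. It
+ * records a single-region decompression; the sType token is an assumption, and
+ * the decompression method bit is passed in by the caller (it must be one the
+ * device reports as supported). */
+#if 0
+static void example_decompress(VkDevice device, VkCommandBuffer cmd,
+                               VkDeviceAddress src, VkDeviceAddress dst,
+                               VkDeviceSize compressedSize,
+                               VkDeviceSize decompressedSize,
+                               VkMemoryDecompressionMethodFlagsEXT method)
+{
+    VkDecompressMemoryRegionEXT region;
+    region.srcAddress       = src;
+    region.dstAddress       = dst;
+    region.compressedSize   = compressedSize;
+    region.decompressedSize = decompressedSize;
+
+    VkDecompressMemoryInfoEXT info;
+    info.sType = VK_STRUCTURE_TYPE_DECOMPRESS_MEMORY_INFO_EXT; /* assumed token */
+    info.pNext = NULL;
+    info.decompressionMethod = method;
+    info.regionCount = 1;
+    info.pRegions = &region;
+
+    PFN_vkCmdDecompressMemoryEXT fp = (PFN_vkCmdDecompressMemoryEXT)
+        vkGetDeviceProcAddr(device, "vkCmdDecompressMemoryEXT");
+    if (fp != NULL)
+        fp(cmd, &info);
+}
+#endif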
+#define VK_NV_external_compute_queue 1 +VK_DEFINE_HANDLE(VkExternalComputeQueueNV) +#define VK_NV_EXTERNAL_COMPUTE_QUEUE_SPEC_VERSION 1 +#define VK_NV_EXTERNAL_COMPUTE_QUEUE_EXTENSION_NAME "VK_NV_external_compute_queue" +typedef struct VkExternalComputeQueueDeviceCreateInfoNV { + VkStructureType sType; + const void* pNext; + uint32_t reservedExternalQueues; +} VkExternalComputeQueueDeviceCreateInfoNV; + +typedef struct VkExternalComputeQueueCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkQueue preferredQueue; +} VkExternalComputeQueueCreateInfoNV; + +typedef struct VkExternalComputeQueueDataParamsNV { + VkStructureType sType; + const void* pNext; + uint32_t deviceIndex; +} VkExternalComputeQueueDataParamsNV; + +typedef struct VkPhysicalDeviceExternalComputeQueuePropertiesNV { + VkStructureType sType; + void* pNext; + uint32_t externalDataSize; + uint32_t maxExternalQueues; +} VkPhysicalDeviceExternalComputeQueuePropertiesNV; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateExternalComputeQueueNV)(VkDevice device, const VkExternalComputeQueueCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkExternalComputeQueueNV* pExternalQueue); +typedef void (VKAPI_PTR *PFN_vkDestroyExternalComputeQueueNV)(VkDevice device, VkExternalComputeQueueNV externalQueue, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkGetExternalComputeQueueDataNV)(VkExternalComputeQueueNV externalQueue, VkExternalComputeQueueDataParamsNV* params, void* pData); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateExternalComputeQueueNV( + VkDevice device, + const VkExternalComputeQueueCreateInfoNV* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkExternalComputeQueueNV* pExternalQueue); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkDestroyExternalComputeQueueNV( + VkDevice device, + VkExternalComputeQueueNV externalQueue, + const VkAllocationCallbacks* pAllocator); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetExternalComputeQueueDataNV( + VkExternalComputeQueueNV externalQueue, + VkExternalComputeQueueDataParamsNV* params, + void* pData); +#endif +#endif + + +// VK_NV_command_buffer_inheritance is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_command_buffer_inheritance 1 +#define VK_NV_COMMAND_BUFFER_INHERITANCE_SPEC_VERSION 1 +#define VK_NV_COMMAND_BUFFER_INHERITANCE_EXTENSION_NAME "VK_NV_command_buffer_inheritance" +typedef struct VkPhysicalDeviceCommandBufferInheritanceFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 commandBufferInheritance; +} VkPhysicalDeviceCommandBufferInheritanceFeaturesNV; + + + +// VK_NV_shader_atomic_float16_vector is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_shader_atomic_float16_vector 1 +#define VK_NV_SHADER_ATOMIC_FLOAT16_VECTOR_SPEC_VERSION 1 +#define VK_NV_SHADER_ATOMIC_FLOAT16_VECTOR_EXTENSION_NAME "VK_NV_shader_atomic_float16_vector" +typedef struct VkPhysicalDeviceShaderAtomicFloat16VectorFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 shaderFloat16VectorAtomics; +} VkPhysicalDeviceShaderAtomicFloat16VectorFeaturesNV; + + + +// VK_EXT_shader_replicated_composites is a preprocessor guard. Do not pass it to API calls. 
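+/* Editor's note: illustrative sketch showing the standard feature-query pattern
+ * applied to VK_NV_shader_atomic_float16_vector (declared above); not part of
+ * the generated header and guarded out of compilation. The same pNext-chaining
+ * pattern works for every VkPhysicalDevice*Features* struct in this file. */
+#if 0
+static VkBool32 example_query_f16_vector_atomics(VkPhysicalDevice gpu)
+{
+    VkPhysicalDeviceShaderAtomicFloat16VectorFeaturesNV f16v;
+    f16v.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT16_VECTOR_FEATURES_NV;
+    f16v.pNext = NULL;
+    f16v.shaderFloat16VectorAtomics = VK_FALSE;
+
+    VkPhysicalDeviceFeatures2 features2;
+    features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
+    features2.pNext = &f16v;                        /* chain the query struct */
+    vkGetPhysicalDeviceFeatures2(gpu, &features2);  /* driver fills in support */
+    return f16v.shaderFloat16VectorAtomics;
+}
+#endif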
+#define VK_EXT_shader_replicated_composites 1 +#define VK_EXT_SHADER_REPLICATED_COMPOSITES_SPEC_VERSION 1 +#define VK_EXT_SHADER_REPLICATED_COMPOSITES_EXTENSION_NAME "VK_EXT_shader_replicated_composites" +typedef struct VkPhysicalDeviceShaderReplicatedCompositesFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 shaderReplicatedComposites; +} VkPhysicalDeviceShaderReplicatedCompositesFeaturesEXT; + + + +// VK_EXT_shader_float8 is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_shader_float8 1 +#define VK_EXT_SHADER_FLOAT8_SPEC_VERSION 1 +#define VK_EXT_SHADER_FLOAT8_EXTENSION_NAME "VK_EXT_shader_float8" +typedef struct VkPhysicalDeviceShaderFloat8FeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 shaderFloat8; + VkBool32 shaderFloat8CooperativeMatrix; +} VkPhysicalDeviceShaderFloat8FeaturesEXT; + + + +// VK_NV_ray_tracing_validation is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_ray_tracing_validation 1 +#define VK_NV_RAY_TRACING_VALIDATION_SPEC_VERSION 1 +#define VK_NV_RAY_TRACING_VALIDATION_EXTENSION_NAME "VK_NV_ray_tracing_validation" +typedef struct VkPhysicalDeviceRayTracingValidationFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 rayTracingValidation; +} VkPhysicalDeviceRayTracingValidationFeaturesNV; + + + +// VK_NV_cluster_acceleration_structure is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_cluster_acceleration_structure 1 +#define VK_NV_CLUSTER_ACCELERATION_STRUCTURE_SPEC_VERSION 4 +#define VK_NV_CLUSTER_ACCELERATION_STRUCTURE_EXTENSION_NAME "VK_NV_cluster_acceleration_structure" + +typedef enum VkClusterAccelerationStructureTypeNV { + VK_CLUSTER_ACCELERATION_STRUCTURE_TYPE_CLUSTERS_BOTTOM_LEVEL_NV = 0, + VK_CLUSTER_ACCELERATION_STRUCTURE_TYPE_TRIANGLE_CLUSTER_NV = 1, + VK_CLUSTER_ACCELERATION_STRUCTURE_TYPE_TRIANGLE_CLUSTER_TEMPLATE_NV = 2, + VK_CLUSTER_ACCELERATION_STRUCTURE_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkClusterAccelerationStructureTypeNV; + +typedef enum VkClusterAccelerationStructureOpTypeNV { + VK_CLUSTER_ACCELERATION_STRUCTURE_OP_TYPE_MOVE_OBJECTS_NV = 0, + VK_CLUSTER_ACCELERATION_STRUCTURE_OP_TYPE_BUILD_CLUSTERS_BOTTOM_LEVEL_NV = 1, + VK_CLUSTER_ACCELERATION_STRUCTURE_OP_TYPE_BUILD_TRIANGLE_CLUSTER_NV = 2, + VK_CLUSTER_ACCELERATION_STRUCTURE_OP_TYPE_BUILD_TRIANGLE_CLUSTER_TEMPLATE_NV = 3, + VK_CLUSTER_ACCELERATION_STRUCTURE_OP_TYPE_INSTANTIATE_TRIANGLE_CLUSTER_NV = 4, + VK_CLUSTER_ACCELERATION_STRUCTURE_OP_TYPE_GET_CLUSTER_TEMPLATE_INDICES_NV = 5, + VK_CLUSTER_ACCELERATION_STRUCTURE_OP_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkClusterAccelerationStructureOpTypeNV; + +typedef enum VkClusterAccelerationStructureOpModeNV { + VK_CLUSTER_ACCELERATION_STRUCTURE_OP_MODE_IMPLICIT_DESTINATIONS_NV = 0, + VK_CLUSTER_ACCELERATION_STRUCTURE_OP_MODE_EXPLICIT_DESTINATIONS_NV = 1, + VK_CLUSTER_ACCELERATION_STRUCTURE_OP_MODE_COMPUTE_SIZES_NV = 2, + VK_CLUSTER_ACCELERATION_STRUCTURE_OP_MODE_MAX_ENUM_NV = 0x7FFFFFFF +} VkClusterAccelerationStructureOpModeNV; + +typedef enum VkClusterAccelerationStructureAddressResolutionFlagBitsNV { + VK_CLUSTER_ACCELERATION_STRUCTURE_ADDRESS_RESOLUTION_NONE_NV = 0, + VK_CLUSTER_ACCELERATION_STRUCTURE_ADDRESS_RESOLUTION_INDIRECTED_DST_IMPLICIT_DATA_BIT_NV = 0x00000001, + VK_CLUSTER_ACCELERATION_STRUCTURE_ADDRESS_RESOLUTION_INDIRECTED_SCRATCH_DATA_BIT_NV = 0x00000002, + VK_CLUSTER_ACCELERATION_STRUCTURE_ADDRESS_RESOLUTION_INDIRECTED_DST_ADDRESS_ARRAY_BIT_NV = 0x00000004, + 
VK_CLUSTER_ACCELERATION_STRUCTURE_ADDRESS_RESOLUTION_INDIRECTED_DST_SIZES_ARRAY_BIT_NV = 0x00000008, + VK_CLUSTER_ACCELERATION_STRUCTURE_ADDRESS_RESOLUTION_INDIRECTED_SRC_INFOS_ARRAY_BIT_NV = 0x00000010, + VK_CLUSTER_ACCELERATION_STRUCTURE_ADDRESS_RESOLUTION_INDIRECTED_SRC_INFOS_COUNT_BIT_NV = 0x00000020, + VK_CLUSTER_ACCELERATION_STRUCTURE_ADDRESS_RESOLUTION_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkClusterAccelerationStructureAddressResolutionFlagBitsNV; +typedef VkFlags VkClusterAccelerationStructureAddressResolutionFlagsNV; + +typedef enum VkClusterAccelerationStructureClusterFlagBitsNV { + VK_CLUSTER_ACCELERATION_STRUCTURE_CLUSTER_ALLOW_DISABLE_OPACITY_MICROMAPS_NV = 0x00000001, + VK_CLUSTER_ACCELERATION_STRUCTURE_CLUSTER_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkClusterAccelerationStructureClusterFlagBitsNV; +typedef VkFlags VkClusterAccelerationStructureClusterFlagsNV; + +typedef enum VkClusterAccelerationStructureGeometryFlagBitsNV { + VK_CLUSTER_ACCELERATION_STRUCTURE_GEOMETRY_CULL_DISABLE_BIT_NV = 0x00000001, + VK_CLUSTER_ACCELERATION_STRUCTURE_GEOMETRY_NO_DUPLICATE_ANYHIT_INVOCATION_BIT_NV = 0x00000002, + VK_CLUSTER_ACCELERATION_STRUCTURE_GEOMETRY_OPAQUE_BIT_NV = 0x00000004, + VK_CLUSTER_ACCELERATION_STRUCTURE_GEOMETRY_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkClusterAccelerationStructureGeometryFlagBitsNV; +typedef VkFlags VkClusterAccelerationStructureGeometryFlagsNV; + +typedef enum VkClusterAccelerationStructureIndexFormatFlagBitsNV { + VK_CLUSTER_ACCELERATION_STRUCTURE_INDEX_FORMAT_8BIT_NV = 0x00000001, + VK_CLUSTER_ACCELERATION_STRUCTURE_INDEX_FORMAT_16BIT_NV = 0x00000002, + VK_CLUSTER_ACCELERATION_STRUCTURE_INDEX_FORMAT_32BIT_NV = 0x00000004, + VK_CLUSTER_ACCELERATION_STRUCTURE_INDEX_FORMAT_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkClusterAccelerationStructureIndexFormatFlagBitsNV; +typedef VkFlags VkClusterAccelerationStructureIndexFormatFlagsNV; +typedef struct VkPhysicalDeviceClusterAccelerationStructureFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 clusterAccelerationStructure; +} VkPhysicalDeviceClusterAccelerationStructureFeaturesNV; + +typedef struct VkPhysicalDeviceClusterAccelerationStructurePropertiesNV { + VkStructureType sType; + void* pNext; + uint32_t maxVerticesPerCluster; + uint32_t maxTrianglesPerCluster; + uint32_t clusterScratchByteAlignment; + uint32_t clusterByteAlignment; + uint32_t clusterTemplateByteAlignment; + uint32_t clusterBottomLevelByteAlignment; + uint32_t clusterTemplateBoundsByteAlignment; + uint32_t maxClusterGeometryIndex; +} VkPhysicalDeviceClusterAccelerationStructurePropertiesNV; + +typedef struct VkClusterAccelerationStructureClustersBottomLevelInputNV { + VkStructureType sType; + void* pNext; + uint32_t maxTotalClusterCount; + uint32_t maxClusterCountPerAccelerationStructure; +} VkClusterAccelerationStructureClustersBottomLevelInputNV; + +typedef struct VkClusterAccelerationStructureTriangleClusterInputNV { + VkStructureType sType; + void* pNext; + VkFormat vertexFormat; + uint32_t maxGeometryIndexValue; + uint32_t maxClusterUniqueGeometryCount; + uint32_t maxClusterTriangleCount; + uint32_t maxClusterVertexCount; + uint32_t maxTotalTriangleCount; + uint32_t maxTotalVertexCount; + uint32_t minPositionTruncateBitCount; +} VkClusterAccelerationStructureTriangleClusterInputNV; + +typedef struct VkClusterAccelerationStructureMoveObjectsInputNV { + VkStructureType sType; + void* pNext; + VkClusterAccelerationStructureTypeNV type; + VkBool32 noMoveOverlap; + VkDeviceSize maxMovedBytes; +} 
VkClusterAccelerationStructureMoveObjectsInputNV; + +typedef union VkClusterAccelerationStructureOpInputNV { + VkClusterAccelerationStructureClustersBottomLevelInputNV* pClustersBottomLevel; + VkClusterAccelerationStructureTriangleClusterInputNV* pTriangleClusters; + VkClusterAccelerationStructureMoveObjectsInputNV* pMoveObjects; +} VkClusterAccelerationStructureOpInputNV; + +typedef struct VkClusterAccelerationStructureInputInfoNV { + VkStructureType sType; + void* pNext; + uint32_t maxAccelerationStructureCount; + VkBuildAccelerationStructureFlagsKHR flags; + VkClusterAccelerationStructureOpTypeNV opType; + VkClusterAccelerationStructureOpModeNV opMode; + VkClusterAccelerationStructureOpInputNV opInput; +} VkClusterAccelerationStructureInputInfoNV; + +typedef struct VkStridedDeviceAddressRegionKHR { + VkDeviceAddress deviceAddress; + VkDeviceSize stride; + VkDeviceSize size; +} VkStridedDeviceAddressRegionKHR; + +typedef struct VkClusterAccelerationStructureCommandsInfoNV { + VkStructureType sType; + void* pNext; + VkClusterAccelerationStructureInputInfoNV input; + VkDeviceAddress dstImplicitData; + VkDeviceAddress scratchData; + VkStridedDeviceAddressRegionKHR dstAddressesArray; + VkStridedDeviceAddressRegionKHR dstSizesArray; + VkStridedDeviceAddressRegionKHR srcInfosArray; + VkDeviceAddress srcInfosCount; + VkClusterAccelerationStructureAddressResolutionFlagsNV addressResolutionFlags; +} VkClusterAccelerationStructureCommandsInfoNV; + +typedef struct VkStridedDeviceAddressNV { + VkDeviceAddress startAddress; + VkDeviceSize strideInBytes; +} VkStridedDeviceAddressNV; + +typedef struct VkClusterAccelerationStructureGeometryIndexAndGeometryFlagsNV { + uint32_t geometryIndex:24; + uint32_t reserved:5; + uint32_t geometryFlags:3; +} VkClusterAccelerationStructureGeometryIndexAndGeometryFlagsNV; + +typedef struct VkClusterAccelerationStructureMoveObjectsInfoNV { + VkDeviceAddress srcAccelerationStructure; +} VkClusterAccelerationStructureMoveObjectsInfoNV; + +typedef struct VkClusterAccelerationStructureBuildClustersBottomLevelInfoNV { + uint32_t clusterReferencesCount; + uint32_t clusterReferencesStride; + VkDeviceAddress clusterReferences; +} VkClusterAccelerationStructureBuildClustersBottomLevelInfoNV; + +typedef struct VkClusterAccelerationStructureBuildTriangleClusterInfoNV { + uint32_t clusterID; + VkClusterAccelerationStructureClusterFlagsNV clusterFlags; + uint32_t triangleCount:9; + uint32_t vertexCount:9; + uint32_t positionTruncateBitCount:6; + uint32_t indexType:4; + uint32_t opacityMicromapIndexType:4; + VkClusterAccelerationStructureGeometryIndexAndGeometryFlagsNV baseGeometryIndexAndGeometryFlags; + uint16_t indexBufferStride; + uint16_t vertexBufferStride; + uint16_t geometryIndexAndFlagsBufferStride; + uint16_t opacityMicromapIndexBufferStride; + VkDeviceAddress indexBuffer; + VkDeviceAddress vertexBuffer; + VkDeviceAddress geometryIndexAndFlagsBuffer; + VkDeviceAddress opacityMicromapArray; + VkDeviceAddress opacityMicromapIndexBuffer; +} VkClusterAccelerationStructureBuildTriangleClusterInfoNV; + +typedef struct VkClusterAccelerationStructureBuildTriangleClusterTemplateInfoNV { + uint32_t clusterID; + VkClusterAccelerationStructureClusterFlagsNV clusterFlags; + uint32_t triangleCount:9; + uint32_t vertexCount:9; + uint32_t positionTruncateBitCount:6; + uint32_t indexType:4; + uint32_t opacityMicromapIndexType:4; + VkClusterAccelerationStructureGeometryIndexAndGeometryFlagsNV baseGeometryIndexAndGeometryFlags; + uint16_t indexBufferStride; + uint16_t vertexBufferStride; + 
uint16_t geometryIndexAndFlagsBufferStride; + uint16_t opacityMicromapIndexBufferStride; + VkDeviceAddress indexBuffer; + VkDeviceAddress vertexBuffer; + VkDeviceAddress geometryIndexAndFlagsBuffer; + VkDeviceAddress opacityMicromapArray; + VkDeviceAddress opacityMicromapIndexBuffer; + VkDeviceAddress instantiationBoundingBoxLimit; +} VkClusterAccelerationStructureBuildTriangleClusterTemplateInfoNV; + +typedef struct VkClusterAccelerationStructureInstantiateClusterInfoNV { + uint32_t clusterIdOffset; + uint32_t geometryIndexOffset:24; + uint32_t reserved:8; + VkDeviceAddress clusterTemplateAddress; + VkStridedDeviceAddressNV vertexBuffer; +} VkClusterAccelerationStructureInstantiateClusterInfoNV; + +typedef struct VkClusterAccelerationStructureGetTemplateIndicesInfoNV { + VkDeviceAddress clusterTemplateAddress; +} VkClusterAccelerationStructureGetTemplateIndicesInfoNV; + +typedef struct VkAccelerationStructureBuildSizesInfoKHR { + VkStructureType sType; + void* pNext; + VkDeviceSize accelerationStructureSize; + VkDeviceSize updateScratchSize; + VkDeviceSize buildScratchSize; +} VkAccelerationStructureBuildSizesInfoKHR; + +typedef struct VkRayTracingPipelineClusterAccelerationStructureCreateInfoNV { + VkStructureType sType; + void* pNext; + VkBool32 allowClusterAccelerationStructure; +} VkRayTracingPipelineClusterAccelerationStructureCreateInfoNV; + +typedef void (VKAPI_PTR *PFN_vkGetClusterAccelerationStructureBuildSizesNV)(VkDevice device, const VkClusterAccelerationStructureInputInfoNV* pInfo, VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo); +typedef void (VKAPI_PTR *PFN_vkCmdBuildClusterAccelerationStructureIndirectNV)(VkCommandBuffer commandBuffer, const VkClusterAccelerationStructureCommandsInfoNV* pCommandInfos); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetClusterAccelerationStructureBuildSizesNV( + VkDevice device, + const VkClusterAccelerationStructureInputInfoNV* pInfo, + VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBuildClusterAccelerationStructureIndirectNV( + VkCommandBuffer commandBuffer, + const VkClusterAccelerationStructureCommandsInfoNV* pCommandInfos); +#endif +#endif + + +// VK_NV_partitioned_acceleration_structure is a preprocessor guard. Do not pass it to API calls. 
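+/* Editor's note: illustrative sketch for VK_NV_cluster_acceleration_structure
+ * (declared above); not part of the generated header and guarded out of
+ * compilation. It queries build sizes for a clusters-bottom-level operation;
+ * the two NV sType tokens are assumptions based on the extension's naming
+ * convention, and the counts are arbitrary example values. */
+#if 0
+static void example_cluster_blas_sizes(VkDevice device,
+                                       VkAccelerationStructureBuildSizesInfoKHR* pSizes)
+{
+    VkClusterAccelerationStructureClustersBottomLevelInputNV blas;
+    blas.sType = VK_STRUCTURE_TYPE_CLUSTER_ACCELERATION_STRUCTURE_CLUSTERS_BOTTOM_LEVEL_INPUT_NV; /* assumed */
+    blas.pNext = NULL;
+    blas.maxTotalClusterCount = 1024;                  /* example value */
+    blas.maxClusterCountPerAccelerationStructure = 256;
+
+    VkClusterAccelerationStructureInputInfoNV input;
+    input.sType = VK_STRUCTURE_TYPE_CLUSTER_ACCELERATION_STRUCTURE_INPUT_INFO_NV; /* assumed */
+    input.pNext = NULL;
+    input.maxAccelerationStructureCount = 16;
+    input.flags  = 0;
+    input.opType = VK_CLUSTER_ACCELERATION_STRUCTURE_OP_TYPE_BUILD_CLUSTERS_BOTTOM_LEVEL_NV;
+    input.opMode = VK_CLUSTER_ACCELERATION_STRUCTURE_OP_MODE_IMPLICIT_DESTINATIONS_NV;
+    input.opInput.pClustersBottomLevel = &blas;
+
+    pSizes->sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_SIZES_INFO_KHR;
+    pSizes->pNext = NULL;
+
+    PFN_vkGetClusterAccelerationStructureBuildSizesNV fp =
+        (PFN_vkGetClusterAccelerationStructureBuildSizesNV)
+            vkGetDeviceProcAddr(device, "vkGetClusterAccelerationStructureBuildSizesNV");
+    if (fp != NULL)
+        fp(device, &input, pSizes);
+}
+#endif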
+#define VK_NV_partitioned_acceleration_structure 1 +#define VK_NV_PARTITIONED_ACCELERATION_STRUCTURE_SPEC_VERSION 1 +#define VK_NV_PARTITIONED_ACCELERATION_STRUCTURE_EXTENSION_NAME "VK_NV_partitioned_acceleration_structure" +#define VK_PARTITIONED_ACCELERATION_STRUCTURE_PARTITION_INDEX_GLOBAL_NV (~0U) + +typedef enum VkPartitionedAccelerationStructureOpTypeNV { + VK_PARTITIONED_ACCELERATION_STRUCTURE_OP_TYPE_WRITE_INSTANCE_NV = 0, + VK_PARTITIONED_ACCELERATION_STRUCTURE_OP_TYPE_UPDATE_INSTANCE_NV = 1, + VK_PARTITIONED_ACCELERATION_STRUCTURE_OP_TYPE_WRITE_PARTITION_TRANSLATION_NV = 2, + VK_PARTITIONED_ACCELERATION_STRUCTURE_OP_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkPartitionedAccelerationStructureOpTypeNV; + +typedef enum VkPartitionedAccelerationStructureInstanceFlagBitsNV { + VK_PARTITIONED_ACCELERATION_STRUCTURE_INSTANCE_FLAG_TRIANGLE_FACING_CULL_DISABLE_BIT_NV = 0x00000001, + VK_PARTITIONED_ACCELERATION_STRUCTURE_INSTANCE_FLAG_TRIANGLE_FLIP_FACING_BIT_NV = 0x00000002, + VK_PARTITIONED_ACCELERATION_STRUCTURE_INSTANCE_FLAG_FORCE_OPAQUE_BIT_NV = 0x00000004, + VK_PARTITIONED_ACCELERATION_STRUCTURE_INSTANCE_FLAG_FORCE_NO_OPAQUE_BIT_NV = 0x00000008, + VK_PARTITIONED_ACCELERATION_STRUCTURE_INSTANCE_FLAG_ENABLE_EXPLICIT_BOUNDING_BOX_NV = 0x00000010, + VK_PARTITIONED_ACCELERATION_STRUCTURE_INSTANCE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkPartitionedAccelerationStructureInstanceFlagBitsNV; +typedef VkFlags VkPartitionedAccelerationStructureInstanceFlagsNV; +typedef struct VkPhysicalDevicePartitionedAccelerationStructureFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 partitionedAccelerationStructure; +} VkPhysicalDevicePartitionedAccelerationStructureFeaturesNV; + +typedef struct VkPhysicalDevicePartitionedAccelerationStructurePropertiesNV { + VkStructureType sType; + void* pNext; + uint32_t maxPartitionCount; +} VkPhysicalDevicePartitionedAccelerationStructurePropertiesNV; + +typedef struct VkPartitionedAccelerationStructureFlagsNV { + VkStructureType sType; + void* pNext; + VkBool32 enablePartitionTranslation; +} VkPartitionedAccelerationStructureFlagsNV; + +typedef struct VkBuildPartitionedAccelerationStructureIndirectCommandNV { + VkPartitionedAccelerationStructureOpTypeNV opType; + uint32_t argCount; + VkStridedDeviceAddressNV argData; +} VkBuildPartitionedAccelerationStructureIndirectCommandNV; + +typedef struct VkPartitionedAccelerationStructureWriteInstanceDataNV { + VkTransformMatrixKHR transform; + float explicitAABB[6]; + uint32_t instanceID; + uint32_t instanceMask; + uint32_t instanceContributionToHitGroupIndex; + VkPartitionedAccelerationStructureInstanceFlagsNV instanceFlags; + uint32_t instanceIndex; + uint32_t partitionIndex; + VkDeviceAddress accelerationStructure; +} VkPartitionedAccelerationStructureWriteInstanceDataNV; + +typedef struct VkPartitionedAccelerationStructureUpdateInstanceDataNV { + uint32_t instanceIndex; + uint32_t instanceContributionToHitGroupIndex; + VkDeviceAddress accelerationStructure; +} VkPartitionedAccelerationStructureUpdateInstanceDataNV; + +typedef struct VkPartitionedAccelerationStructureWritePartitionTranslationDataNV { + uint32_t partitionIndex; + float partitionTranslation[3]; +} VkPartitionedAccelerationStructureWritePartitionTranslationDataNV; + +typedef struct VkWriteDescriptorSetPartitionedAccelerationStructureNV { + VkStructureType sType; + void* pNext; + uint32_t accelerationStructureCount; + const VkDeviceAddress* pAccelerationStructures; +} VkWriteDescriptorSetPartitionedAccelerationStructureNV; + +typedef struct 
VkPartitionedAccelerationStructureInstancesInputNV { + VkStructureType sType; + void* pNext; + VkBuildAccelerationStructureFlagsKHR flags; + uint32_t instanceCount; + uint32_t maxInstancePerPartitionCount; + uint32_t partitionCount; + uint32_t maxInstanceInGlobalPartitionCount; +} VkPartitionedAccelerationStructureInstancesInputNV; + +typedef struct VkBuildPartitionedAccelerationStructureInfoNV { + VkStructureType sType; + void* pNext; + VkPartitionedAccelerationStructureInstancesInputNV input; + VkDeviceAddress srcAccelerationStructureData; + VkDeviceAddress dstAccelerationStructureData; + VkDeviceAddress scratchData; + VkDeviceAddress srcInfos; + VkDeviceAddress srcInfosCount; +} VkBuildPartitionedAccelerationStructureInfoNV; + +typedef void (VKAPI_PTR *PFN_vkGetPartitionedAccelerationStructuresBuildSizesNV)(VkDevice device, const VkPartitionedAccelerationStructureInstancesInputNV* pInfo, VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo); +typedef void (VKAPI_PTR *PFN_vkCmdBuildPartitionedAccelerationStructuresNV)(VkCommandBuffer commandBuffer, const VkBuildPartitionedAccelerationStructureInfoNV* pBuildInfo); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPartitionedAccelerationStructuresBuildSizesNV( + VkDevice device, + const VkPartitionedAccelerationStructureInstancesInputNV* pInfo, + VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBuildPartitionedAccelerationStructuresNV( + VkCommandBuffer commandBuffer, + const VkBuildPartitionedAccelerationStructureInfoNV* pBuildInfo); +#endif +#endif + + +// VK_EXT_device_generated_commands is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_device_generated_commands 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkIndirectExecutionSetEXT) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkIndirectCommandsLayoutEXT) +#define VK_EXT_DEVICE_GENERATED_COMMANDS_SPEC_VERSION 1 +#define VK_EXT_DEVICE_GENERATED_COMMANDS_EXTENSION_NAME "VK_EXT_device_generated_commands" + +typedef enum VkIndirectExecutionSetInfoTypeEXT { + VK_INDIRECT_EXECUTION_SET_INFO_TYPE_PIPELINES_EXT = 0, + VK_INDIRECT_EXECUTION_SET_INFO_TYPE_SHADER_OBJECTS_EXT = 1, + VK_INDIRECT_EXECUTION_SET_INFO_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkIndirectExecutionSetInfoTypeEXT; + +typedef enum VkIndirectCommandsTokenTypeEXT { + VK_INDIRECT_COMMANDS_TOKEN_TYPE_EXECUTION_SET_EXT = 0, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_PUSH_CONSTANT_EXT = 1, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_SEQUENCE_INDEX_EXT = 2, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_INDEX_BUFFER_EXT = 3, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_VERTEX_BUFFER_EXT = 4, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_INDEXED_EXT = 5, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_EXT = 6, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_INDEXED_COUNT_EXT = 7, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_COUNT_EXT = 8, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_DISPATCH_EXT = 9, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_PUSH_DATA_EXT = 1000135000, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_PUSH_DATA_SEQUENCE_INDEX_EXT = 1000135001, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_MESH_TASKS_NV_EXT = 1000202002, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_MESH_TASKS_COUNT_NV_EXT = 1000202003, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_MESH_TASKS_EXT = 1000328000, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_MESH_TASKS_COUNT_EXT = 1000328001, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_TRACE_RAYS2_EXT = 1000386004, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkIndirectCommandsTokenTypeEXT; + +typedef enum 
VkIndirectCommandsInputModeFlagBitsEXT { + VK_INDIRECT_COMMANDS_INPUT_MODE_VULKAN_INDEX_BUFFER_EXT = 0x00000001, + VK_INDIRECT_COMMANDS_INPUT_MODE_DXGI_INDEX_BUFFER_EXT = 0x00000002, + VK_INDIRECT_COMMANDS_INPUT_MODE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkIndirectCommandsInputModeFlagBitsEXT; +typedef VkFlags VkIndirectCommandsInputModeFlagsEXT; + +typedef enum VkIndirectCommandsLayoutUsageFlagBitsEXT { + VK_INDIRECT_COMMANDS_LAYOUT_USAGE_EXPLICIT_PREPROCESS_BIT_EXT = 0x00000001, + VK_INDIRECT_COMMANDS_LAYOUT_USAGE_UNORDERED_SEQUENCES_BIT_EXT = 0x00000002, + VK_INDIRECT_COMMANDS_LAYOUT_USAGE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkIndirectCommandsLayoutUsageFlagBitsEXT; +typedef VkFlags VkIndirectCommandsLayoutUsageFlagsEXT; +typedef struct VkPhysicalDeviceDeviceGeneratedCommandsFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 deviceGeneratedCommands; + VkBool32 dynamicGeneratedPipelineLayout; +} VkPhysicalDeviceDeviceGeneratedCommandsFeaturesEXT; + +typedef struct VkPhysicalDeviceDeviceGeneratedCommandsPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxIndirectPipelineCount; + uint32_t maxIndirectShaderObjectCount; + uint32_t maxIndirectSequenceCount; + uint32_t maxIndirectCommandsTokenCount; + uint32_t maxIndirectCommandsTokenOffset; + uint32_t maxIndirectCommandsIndirectStride; + VkIndirectCommandsInputModeFlagsEXT supportedIndirectCommandsInputModes; + VkShaderStageFlags supportedIndirectCommandsShaderStages; + VkShaderStageFlags supportedIndirectCommandsShaderStagesPipelineBinding; + VkShaderStageFlags supportedIndirectCommandsShaderStagesShaderBinding; + VkBool32 deviceGeneratedCommandsTransformFeedback; + VkBool32 deviceGeneratedCommandsMultiDrawIndirectCount; +} VkPhysicalDeviceDeviceGeneratedCommandsPropertiesEXT; + +typedef struct VkGeneratedCommandsMemoryRequirementsInfoEXT { + VkStructureType sType; + const void* pNext; + VkIndirectExecutionSetEXT indirectExecutionSet; + VkIndirectCommandsLayoutEXT indirectCommandsLayout; + uint32_t maxSequenceCount; + uint32_t maxDrawCount; +} VkGeneratedCommandsMemoryRequirementsInfoEXT; + +typedef struct VkIndirectExecutionSetPipelineInfoEXT { + VkStructureType sType; + const void* pNext; + VkPipeline initialPipeline; + uint32_t maxPipelineCount; +} VkIndirectExecutionSetPipelineInfoEXT; + +typedef struct VkIndirectExecutionSetShaderLayoutInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t setLayoutCount; + const VkDescriptorSetLayout* pSetLayouts; +} VkIndirectExecutionSetShaderLayoutInfoEXT; + +typedef struct VkIndirectExecutionSetShaderInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t shaderCount; + const VkShaderEXT* pInitialShaders; + const VkIndirectExecutionSetShaderLayoutInfoEXT* pSetLayoutInfos; + uint32_t maxShaderCount; + uint32_t pushConstantRangeCount; + const VkPushConstantRange* pPushConstantRanges; +} VkIndirectExecutionSetShaderInfoEXT; + +typedef union VkIndirectExecutionSetInfoEXT { + const VkIndirectExecutionSetPipelineInfoEXT* pPipelineInfo; + const VkIndirectExecutionSetShaderInfoEXT* pShaderInfo; +} VkIndirectExecutionSetInfoEXT; + +typedef struct VkIndirectExecutionSetCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkIndirectExecutionSetInfoTypeEXT type; + VkIndirectExecutionSetInfoEXT info; +} VkIndirectExecutionSetCreateInfoEXT; + +typedef struct VkGeneratedCommandsInfoEXT { + VkStructureType sType; + const void* pNext; + VkShaderStageFlags shaderStages; + VkIndirectExecutionSetEXT indirectExecutionSet; + 
VkIndirectCommandsLayoutEXT indirectCommandsLayout; + VkDeviceAddress indirectAddress; + VkDeviceSize indirectAddressSize; + VkDeviceAddress preprocessAddress; + VkDeviceSize preprocessSize; + uint32_t maxSequenceCount; + VkDeviceAddress sequenceCountAddress; + uint32_t maxDrawCount; +} VkGeneratedCommandsInfoEXT; + +typedef struct VkWriteIndirectExecutionSetPipelineEXT { + VkStructureType sType; + const void* pNext; + uint32_t index; + VkPipeline pipeline; +} VkWriteIndirectExecutionSetPipelineEXT; + +typedef struct VkIndirectCommandsPushConstantTokenEXT { + VkPushConstantRange updateRange; +} VkIndirectCommandsPushConstantTokenEXT; + +typedef struct VkIndirectCommandsVertexBufferTokenEXT { + uint32_t vertexBindingUnit; +} VkIndirectCommandsVertexBufferTokenEXT; + +typedef struct VkIndirectCommandsIndexBufferTokenEXT { + VkIndirectCommandsInputModeFlagBitsEXT mode; +} VkIndirectCommandsIndexBufferTokenEXT; + +typedef struct VkIndirectCommandsExecutionSetTokenEXT { + VkIndirectExecutionSetInfoTypeEXT type; + VkShaderStageFlags shaderStages; +} VkIndirectCommandsExecutionSetTokenEXT; + +typedef union VkIndirectCommandsTokenDataEXT { + const VkIndirectCommandsPushConstantTokenEXT* pPushConstant; + const VkIndirectCommandsVertexBufferTokenEXT* pVertexBuffer; + const VkIndirectCommandsIndexBufferTokenEXT* pIndexBuffer; + const VkIndirectCommandsExecutionSetTokenEXT* pExecutionSet; +} VkIndirectCommandsTokenDataEXT; + +typedef struct VkIndirectCommandsLayoutTokenEXT { + VkStructureType sType; + const void* pNext; + VkIndirectCommandsTokenTypeEXT type; + VkIndirectCommandsTokenDataEXT data; + uint32_t offset; +} VkIndirectCommandsLayoutTokenEXT; + +typedef struct VkIndirectCommandsLayoutCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkIndirectCommandsLayoutUsageFlagsEXT flags; + VkShaderStageFlags shaderStages; + uint32_t indirectStride; + VkPipelineLayout pipelineLayout; + uint32_t tokenCount; + const VkIndirectCommandsLayoutTokenEXT* pTokens; +} VkIndirectCommandsLayoutCreateInfoEXT; + +typedef struct VkDrawIndirectCountIndirectCommandEXT { + VkDeviceAddress bufferAddress; + uint32_t stride; + uint32_t commandCount; +} VkDrawIndirectCountIndirectCommandEXT; + +typedef struct VkBindVertexBufferIndirectCommandEXT { + VkDeviceAddress bufferAddress; + uint32_t size; + uint32_t stride; +} VkBindVertexBufferIndirectCommandEXT; + +typedef struct VkBindIndexBufferIndirectCommandEXT { + VkDeviceAddress bufferAddress; + uint32_t size; + VkIndexType indexType; +} VkBindIndexBufferIndirectCommandEXT; + +typedef struct VkGeneratedCommandsPipelineInfoEXT { + VkStructureType sType; + void* pNext; + VkPipeline pipeline; +} VkGeneratedCommandsPipelineInfoEXT; + +typedef struct VkGeneratedCommandsShaderInfoEXT { + VkStructureType sType; + void* pNext; + uint32_t shaderCount; + const VkShaderEXT* pShaders; +} VkGeneratedCommandsShaderInfoEXT; + +typedef struct VkWriteIndirectExecutionSetShaderEXT { + VkStructureType sType; + const void* pNext; + uint32_t index; + VkShaderEXT shader; +} VkWriteIndirectExecutionSetShaderEXT; + +typedef void (VKAPI_PTR *PFN_vkGetGeneratedCommandsMemoryRequirementsEXT)(VkDevice device, const VkGeneratedCommandsMemoryRequirementsInfoEXT* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkCmdPreprocessGeneratedCommandsEXT)(VkCommandBuffer commandBuffer, const VkGeneratedCommandsInfoEXT* pGeneratedCommandsInfo, VkCommandBuffer stateCommandBuffer); +typedef void (VKAPI_PTR *PFN_vkCmdExecuteGeneratedCommandsEXT)(VkCommandBuffer 
commandBuffer, VkBool32 isPreprocessed, const VkGeneratedCommandsInfoEXT* pGeneratedCommandsInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCreateIndirectCommandsLayoutEXT)(VkDevice device, const VkIndirectCommandsLayoutCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkIndirectCommandsLayoutEXT* pIndirectCommandsLayout); +typedef void (VKAPI_PTR *PFN_vkDestroyIndirectCommandsLayoutEXT)(VkDevice device, VkIndirectCommandsLayoutEXT indirectCommandsLayout, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateIndirectExecutionSetEXT)(VkDevice device, const VkIndirectExecutionSetCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkIndirectExecutionSetEXT* pIndirectExecutionSet); +typedef void (VKAPI_PTR *PFN_vkDestroyIndirectExecutionSetEXT)(VkDevice device, VkIndirectExecutionSetEXT indirectExecutionSet, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkUpdateIndirectExecutionSetPipelineEXT)(VkDevice device, VkIndirectExecutionSetEXT indirectExecutionSet, uint32_t executionSetWriteCount, const VkWriteIndirectExecutionSetPipelineEXT* pExecutionSetWrites); +typedef void (VKAPI_PTR *PFN_vkUpdateIndirectExecutionSetShaderEXT)(VkDevice device, VkIndirectExecutionSetEXT indirectExecutionSet, uint32_t executionSetWriteCount, const VkWriteIndirectExecutionSetShaderEXT* pExecutionSetWrites); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetGeneratedCommandsMemoryRequirementsEXT( + VkDevice device, + const VkGeneratedCommandsMemoryRequirementsInfoEXT* pInfo, + VkMemoryRequirements2* pMemoryRequirements); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdPreprocessGeneratedCommandsEXT( + VkCommandBuffer commandBuffer, + const VkGeneratedCommandsInfoEXT* pGeneratedCommandsInfo, + VkCommandBuffer stateCommandBuffer); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdExecuteGeneratedCommandsEXT( + VkCommandBuffer commandBuffer, + VkBool32 isPreprocessed, + const VkGeneratedCommandsInfoEXT* pGeneratedCommandsInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateIndirectCommandsLayoutEXT( + VkDevice device, + const VkIndirectCommandsLayoutCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkIndirectCommandsLayoutEXT* pIndirectCommandsLayout); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkDestroyIndirectCommandsLayoutEXT( + VkDevice device, + VkIndirectCommandsLayoutEXT indirectCommandsLayout, + const VkAllocationCallbacks* pAllocator); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateIndirectExecutionSetEXT( + VkDevice device, + const VkIndirectExecutionSetCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkIndirectExecutionSetEXT* pIndirectExecutionSet); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkDestroyIndirectExecutionSetEXT( + VkDevice device, + VkIndirectExecutionSetEXT indirectExecutionSet, + const VkAllocationCallbacks* pAllocator); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkUpdateIndirectExecutionSetPipelineEXT( + VkDevice device, + VkIndirectExecutionSetEXT indirectExecutionSet, + uint32_t executionSetWriteCount, + const VkWriteIndirectExecutionSetPipelineEXT* pExecutionSetWrites); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL 
vkUpdateIndirectExecutionSetShaderEXT( + VkDevice device, + VkIndirectExecutionSetEXT indirectExecutionSet, + uint32_t executionSetWriteCount, + const VkWriteIndirectExecutionSetShaderEXT* pExecutionSetWrites); +#endif +#endif + + +// VK_MESA_image_alignment_control is a preprocessor guard. Do not pass it to API calls. +#define VK_MESA_image_alignment_control 1 +#define VK_MESA_IMAGE_ALIGNMENT_CONTROL_SPEC_VERSION 1 +#define VK_MESA_IMAGE_ALIGNMENT_CONTROL_EXTENSION_NAME "VK_MESA_image_alignment_control" +typedef struct VkPhysicalDeviceImageAlignmentControlFeaturesMESA { + VkStructureType sType; + void* pNext; + VkBool32 imageAlignmentControl; +} VkPhysicalDeviceImageAlignmentControlFeaturesMESA; + +typedef struct VkPhysicalDeviceImageAlignmentControlPropertiesMESA { + VkStructureType sType; + void* pNext; + uint32_t supportedImageAlignmentMask; +} VkPhysicalDeviceImageAlignmentControlPropertiesMESA; + +typedef struct VkImageAlignmentControlCreateInfoMESA { + VkStructureType sType; + const void* pNext; + uint32_t maximumRequestedAlignment; +} VkImageAlignmentControlCreateInfoMESA; + + + +// VK_NV_push_constant_bank is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_push_constant_bank 1 +#define VK_NV_PUSH_CONSTANT_BANK_SPEC_VERSION 1 +#define VK_NV_PUSH_CONSTANT_BANK_EXTENSION_NAME "VK_NV_push_constant_bank" +typedef struct VkPushConstantBankInfoNV { + VkStructureType sType; + const void* pNext; + uint32_t bank; +} VkPushConstantBankInfoNV; + +typedef struct VkPhysicalDevicePushConstantBankFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 pushConstantBank; +} VkPhysicalDevicePushConstantBankFeaturesNV; + +typedef struct VkPhysicalDevicePushConstantBankPropertiesNV { + VkStructureType sType; + void* pNext; + uint32_t maxGraphicsPushConstantBanks; + uint32_t maxComputePushConstantBanks; + uint32_t maxGraphicsPushDataBanks; + uint32_t maxComputePushDataBanks; +} VkPhysicalDevicePushConstantBankPropertiesNV; + + + +// VK_EXT_ray_tracing_invocation_reorder is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_ray_tracing_invocation_reorder 1 +#define VK_EXT_RAY_TRACING_INVOCATION_REORDER_SPEC_VERSION 1 +#define VK_EXT_RAY_TRACING_INVOCATION_REORDER_EXTENSION_NAME "VK_EXT_ray_tracing_invocation_reorder" +typedef struct VkPhysicalDeviceRayTracingInvocationReorderPropertiesEXT { + VkStructureType sType; + void* pNext; + VkRayTracingInvocationReorderModeEXT rayTracingInvocationReorderReorderingHint; + uint32_t maxShaderBindingTableRecordIndex; +} VkPhysicalDeviceRayTracingInvocationReorderPropertiesEXT; + +typedef struct VkPhysicalDeviceRayTracingInvocationReorderFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 rayTracingInvocationReorder; +} VkPhysicalDeviceRayTracingInvocationReorderFeaturesEXT; + + + +// VK_EXT_depth_clamp_control is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_depth_clamp_control 1 +#define VK_EXT_DEPTH_CLAMP_CONTROL_SPEC_VERSION 1 +#define VK_EXT_DEPTH_CLAMP_CONTROL_EXTENSION_NAME "VK_EXT_depth_clamp_control" +typedef struct VkPhysicalDeviceDepthClampControlFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 depthClampControl; +} VkPhysicalDeviceDepthClampControlFeaturesEXT; + +typedef struct VkPipelineViewportDepthClampControlCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkDepthClampModeEXT depthClampMode; + const VkDepthClampRangeEXT* pDepthClampRange; +} VkPipelineViewportDepthClampControlCreateInfoEXT; + + + +// VK_HUAWEI_hdr_vivid is a preprocessor guard. 
Do not pass it to API calls. +#define VK_HUAWEI_hdr_vivid 1 +#define VK_HUAWEI_HDR_VIVID_SPEC_VERSION 1 +#define VK_HUAWEI_HDR_VIVID_EXTENSION_NAME "VK_HUAWEI_hdr_vivid" +typedef struct VkPhysicalDeviceHdrVividFeaturesHUAWEI { + VkStructureType sType; + void* pNext; + VkBool32 hdrVivid; +} VkPhysicalDeviceHdrVividFeaturesHUAWEI; + +typedef struct VkHdrVividDynamicMetadataHUAWEI { + VkStructureType sType; + const void* pNext; + size_t dynamicMetadataSize; + const void* pDynamicMetadata; +} VkHdrVividDynamicMetadataHUAWEI; + + + +// VK_NV_cooperative_matrix2 is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_cooperative_matrix2 1 +#define VK_NV_COOPERATIVE_MATRIX_2_SPEC_VERSION 1 +#define VK_NV_COOPERATIVE_MATRIX_2_EXTENSION_NAME "VK_NV_cooperative_matrix2" +typedef struct VkCooperativeMatrixFlexibleDimensionsPropertiesNV { + VkStructureType sType; + void* pNext; + uint32_t MGranularity; + uint32_t NGranularity; + uint32_t KGranularity; + VkComponentTypeKHR AType; + VkComponentTypeKHR BType; + VkComponentTypeKHR CType; + VkComponentTypeKHR ResultType; + VkBool32 saturatingAccumulation; + VkScopeKHR scope; + uint32_t workgroupInvocations; +} VkCooperativeMatrixFlexibleDimensionsPropertiesNV; + +typedef struct VkPhysicalDeviceCooperativeMatrix2FeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 cooperativeMatrixWorkgroupScope; + VkBool32 cooperativeMatrixFlexibleDimensions; + VkBool32 cooperativeMatrixReductions; + VkBool32 cooperativeMatrixConversions; + VkBool32 cooperativeMatrixPerElementOperations; + VkBool32 cooperativeMatrixTensorAddressing; + VkBool32 cooperativeMatrixBlockLoads; +} VkPhysicalDeviceCooperativeMatrix2FeaturesNV; + +typedef struct VkPhysicalDeviceCooperativeMatrix2PropertiesNV { + VkStructureType sType; + void* pNext; + uint32_t cooperativeMatrixWorkgroupScopeMaxWorkgroupSize; + uint32_t cooperativeMatrixFlexibleDimensionsMaxDimension; + uint32_t cooperativeMatrixWorkgroupScopeReservedSharedMemory; +} VkPhysicalDeviceCooperativeMatrix2PropertiesNV; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkCooperativeMatrixFlexibleDimensionsPropertiesNV* pProperties); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkCooperativeMatrixFlexibleDimensionsPropertiesNV* pProperties); +#endif +#endif + + +// VK_ARM_pipeline_opacity_micromap is a preprocessor guard. Do not pass it to API calls. +#define VK_ARM_pipeline_opacity_micromap 1 +#define VK_ARM_PIPELINE_OPACITY_MICROMAP_SPEC_VERSION 1 +#define VK_ARM_PIPELINE_OPACITY_MICROMAP_EXTENSION_NAME "VK_ARM_pipeline_opacity_micromap" +typedef struct VkPhysicalDevicePipelineOpacityMicromapFeaturesARM { + VkStructureType sType; + void* pNext; + VkBool32 pipelineOpacityMicromap; +} VkPhysicalDevicePipelineOpacityMicromapFeaturesARM; + + + +// VK_ARM_performance_counters_by_region is a preprocessor guard. Do not pass it to API calls. 
+#define VK_ARM_performance_counters_by_region 1 +#define VK_ARM_PERFORMANCE_COUNTERS_BY_REGION_SPEC_VERSION 1 +#define VK_ARM_PERFORMANCE_COUNTERS_BY_REGION_EXTENSION_NAME "VK_ARM_performance_counters_by_region" +typedef VkFlags VkPerformanceCounterDescriptionFlagsARM; +typedef struct VkPhysicalDevicePerformanceCountersByRegionFeaturesARM { + VkStructureType sType; + void* pNext; + VkBool32 performanceCountersByRegion; +} VkPhysicalDevicePerformanceCountersByRegionFeaturesARM; + +typedef struct VkPhysicalDevicePerformanceCountersByRegionPropertiesARM { + VkStructureType sType; + void* pNext; + uint32_t maxPerRegionPerformanceCounters; + VkExtent2D performanceCounterRegionSize; + uint32_t rowStrideAlignment; + uint32_t regionAlignment; + VkBool32 identityTransformOrder; +} VkPhysicalDevicePerformanceCountersByRegionPropertiesARM; + +typedef struct VkPerformanceCounterARM { + VkStructureType sType; + void* pNext; + uint32_t counterID; +} VkPerformanceCounterARM; + +typedef struct VkPerformanceCounterDescriptionARM { + VkStructureType sType; + void* pNext; + VkPerformanceCounterDescriptionFlagsARM flags; + char name[VK_MAX_DESCRIPTION_SIZE]; +} VkPerformanceCounterDescriptionARM; + +typedef struct VkRenderPassPerformanceCountersByRegionBeginInfoARM { + VkStructureType sType; + void* pNext; + uint32_t counterAddressCount; + const VkDeviceAddress* pCounterAddresses; + VkBool32 serializeRegions; + uint32_t counterIndexCount; + uint32_t* pCounterIndices; +} VkRenderPassPerformanceCountersByRegionBeginInfoARM; + +typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceCountersByRegionARM)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, uint32_t* pCounterCount, VkPerformanceCounterARM* pCounters, VkPerformanceCounterDescriptionARM* pCounterDescriptions); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceQueueFamilyPerformanceCountersByRegionARM( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + uint32_t* pCounterCount, + VkPerformanceCounterARM* pCounters, + VkPerformanceCounterDescriptionARM* pCounterDescriptions); +#endif +#endif + + +// VK_EXT_vertex_attribute_robustness is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_vertex_attribute_robustness 1 +#define VK_EXT_VERTEX_ATTRIBUTE_ROBUSTNESS_SPEC_VERSION 1 +#define VK_EXT_VERTEX_ATTRIBUTE_ROBUSTNESS_EXTENSION_NAME "VK_EXT_vertex_attribute_robustness" +typedef struct VkPhysicalDeviceVertexAttributeRobustnessFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 vertexAttributeRobustness; +} VkPhysicalDeviceVertexAttributeRobustnessFeaturesEXT; + + + +// VK_ARM_format_pack is a preprocessor guard. Do not pass it to API calls. +#define VK_ARM_format_pack 1 +#define VK_ARM_FORMAT_PACK_SPEC_VERSION 1 +#define VK_ARM_FORMAT_PACK_EXTENSION_NAME "VK_ARM_format_pack" +typedef struct VkPhysicalDeviceFormatPackFeaturesARM { + VkStructureType sType; + void* pNext; + VkBool32 formatPack; +} VkPhysicalDeviceFormatPackFeaturesARM; + + + +// VK_VALVE_fragment_density_map_layered is a preprocessor guard. Do not pass it to API calls. 
+#define VK_VALVE_fragment_density_map_layered 1 +#define VK_VALVE_FRAGMENT_DENSITY_MAP_LAYERED_SPEC_VERSION 1 +#define VK_VALVE_FRAGMENT_DENSITY_MAP_LAYERED_EXTENSION_NAME "VK_VALVE_fragment_density_map_layered" +typedef struct VkPhysicalDeviceFragmentDensityMapLayeredFeaturesVALVE { + VkStructureType sType; + void* pNext; + VkBool32 fragmentDensityMapLayered; +} VkPhysicalDeviceFragmentDensityMapLayeredFeaturesVALVE; + +typedef struct VkPhysicalDeviceFragmentDensityMapLayeredPropertiesVALVE { + VkStructureType sType; + void* pNext; + uint32_t maxFragmentDensityMapLayers; +} VkPhysicalDeviceFragmentDensityMapLayeredPropertiesVALVE; + +typedef struct VkPipelineFragmentDensityMapLayeredCreateInfoVALVE { + VkStructureType sType; + const void* pNext; + uint32_t maxFragmentDensityMapLayers; +} VkPipelineFragmentDensityMapLayeredCreateInfoVALVE; + + + +// VK_NV_present_metering is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_present_metering 1 +#define VK_NV_PRESENT_METERING_SPEC_VERSION 1 +#define VK_NV_PRESENT_METERING_EXTENSION_NAME "VK_NV_present_metering" +typedef struct VkSetPresentConfigNV { + VkStructureType sType; + const void* pNext; + uint32_t numFramesPerBatch; + uint32_t presentConfigFeedback; +} VkSetPresentConfigNV; + +typedef struct VkPhysicalDevicePresentMeteringFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 presentMetering; +} VkPhysicalDevicePresentMeteringFeaturesNV; + + + +// VK_EXT_fragment_density_map_offset is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_fragment_density_map_offset 1 +#define VK_EXT_FRAGMENT_DENSITY_MAP_OFFSET_SPEC_VERSION 1 +#define VK_EXT_FRAGMENT_DENSITY_MAP_OFFSET_EXTENSION_NAME "VK_EXT_fragment_density_map_offset" +typedef VkRenderingEndInfoKHR VkRenderingEndInfoEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdEndRendering2EXT)(VkCommandBuffer commandBuffer, const VkRenderingEndInfoKHR* pRenderingEndInfo); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdEndRendering2EXT( + VkCommandBuffer commandBuffer, + const VkRenderingEndInfoKHR* pRenderingEndInfo); +#endif +#endif + + +// VK_EXT_zero_initialize_device_memory is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_zero_initialize_device_memory 1 +#define VK_EXT_ZERO_INITIALIZE_DEVICE_MEMORY_SPEC_VERSION 1 +#define VK_EXT_ZERO_INITIALIZE_DEVICE_MEMORY_EXTENSION_NAME "VK_EXT_zero_initialize_device_memory" +typedef struct VkPhysicalDeviceZeroInitializeDeviceMemoryFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 zeroInitializeDeviceMemory; +} VkPhysicalDeviceZeroInitializeDeviceMemoryFeaturesEXT; + + + +// VK_EXT_shader_64bit_indexing is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_shader_64bit_indexing 1 +#define VK_EXT_SHADER_64BIT_INDEXING_SPEC_VERSION 1 +#define VK_EXT_SHADER_64BIT_INDEXING_EXTENSION_NAME "VK_EXT_shader_64bit_indexing" +typedef struct VkPhysicalDeviceShader64BitIndexingFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 shader64BitIndexing; +} VkPhysicalDeviceShader64BitIndexingFeaturesEXT; + + + +// VK_EXT_custom_resolve is a preprocessor guard. Do not pass it to API calls. 
+#define VK_EXT_custom_resolve 1 +#define VK_EXT_CUSTOM_RESOLVE_SPEC_VERSION 1 +#define VK_EXT_CUSTOM_RESOLVE_EXTENSION_NAME "VK_EXT_custom_resolve" +typedef struct VkPhysicalDeviceCustomResolveFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 customResolve; +} VkPhysicalDeviceCustomResolveFeaturesEXT; + +typedef struct VkBeginCustomResolveInfoEXT { + VkStructureType sType; + void* pNext; +} VkBeginCustomResolveInfoEXT; + +typedef struct VkCustomResolveCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkBool32 customResolve; + uint32_t colorAttachmentCount; + const VkFormat* pColorAttachmentFormats; + VkFormat depthAttachmentFormat; + VkFormat stencilAttachmentFormat; +} VkCustomResolveCreateInfoEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdBeginCustomResolveEXT)(VkCommandBuffer commandBuffer, const VkBeginCustomResolveInfoEXT* pBeginCustomResolveInfo); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBeginCustomResolveEXT( + VkCommandBuffer commandBuffer, + const VkBeginCustomResolveInfoEXT* pBeginCustomResolveInfo); +#endif +#endif + + +// VK_QCOM_data_graph_model is a preprocessor guard. Do not pass it to API calls. +#define VK_QCOM_data_graph_model 1 +#define VK_DATA_GRAPH_MODEL_TOOLCHAIN_VERSION_LENGTH_QCOM 3U +#define VK_QCOM_DATA_GRAPH_MODEL_SPEC_VERSION 1 +#define VK_QCOM_DATA_GRAPH_MODEL_EXTENSION_NAME "VK_QCOM_data_graph_model" + +typedef enum VkDataGraphModelCacheTypeQCOM { + VK_DATA_GRAPH_MODEL_CACHE_TYPE_GENERIC_BINARY_QCOM = 0, + VK_DATA_GRAPH_MODEL_CACHE_TYPE_MAX_ENUM_QCOM = 0x7FFFFFFF +} VkDataGraphModelCacheTypeQCOM; +typedef struct VkPipelineCacheHeaderVersionDataGraphQCOM { + uint32_t headerSize; + VkPipelineCacheHeaderVersion headerVersion; + VkDataGraphModelCacheTypeQCOM cacheType; + uint32_t cacheVersion; + uint32_t toolchainVersion[VK_DATA_GRAPH_MODEL_TOOLCHAIN_VERSION_LENGTH_QCOM]; +} VkPipelineCacheHeaderVersionDataGraphQCOM; + +typedef struct VkDataGraphPipelineBuiltinModelCreateInfoQCOM { + VkStructureType sType; + const void* pNext; + const VkPhysicalDeviceDataGraphOperationSupportARM* pOperation; +} VkDataGraphPipelineBuiltinModelCreateInfoQCOM; + +typedef struct VkPhysicalDeviceDataGraphModelFeaturesQCOM { + VkStructureType sType; + void* pNext; + VkBool32 dataGraphModel; +} VkPhysicalDeviceDataGraphModelFeaturesQCOM; + + + +// VK_EXT_shader_long_vector is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_shader_long_vector 1 +#define VK_EXT_SHADER_LONG_VECTOR_SPEC_VERSION 1 +#define VK_EXT_SHADER_LONG_VECTOR_EXTENSION_NAME "VK_EXT_shader_long_vector" +typedef struct VkPhysicalDeviceShaderLongVectorFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 longVector; +} VkPhysicalDeviceShaderLongVectorFeaturesEXT; + +typedef struct VkPhysicalDeviceShaderLongVectorPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxVectorComponents; +} VkPhysicalDeviceShaderLongVectorPropertiesEXT; + + + +// VK_SEC_pipeline_cache_incremental_mode is a preprocessor guard. Do not pass it to API calls. 
+#define VK_SEC_pipeline_cache_incremental_mode 1 +#define VK_SEC_PIPELINE_CACHE_INCREMENTAL_MODE_SPEC_VERSION 1 +#define VK_SEC_PIPELINE_CACHE_INCREMENTAL_MODE_EXTENSION_NAME "VK_SEC_pipeline_cache_incremental_mode" +typedef struct VkPhysicalDevicePipelineCacheIncrementalModeFeaturesSEC { + VkStructureType sType; + void* pNext; + VkBool32 pipelineCacheIncrementalMode; +} VkPhysicalDevicePipelineCacheIncrementalModeFeaturesSEC; + + + +// VK_EXT_shader_uniform_buffer_unsized_array is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_shader_uniform_buffer_unsized_array 1 +#define VK_EXT_SHADER_UNIFORM_BUFFER_UNSIZED_ARRAY_SPEC_VERSION 1 +#define VK_EXT_SHADER_UNIFORM_BUFFER_UNSIZED_ARRAY_EXTENSION_NAME "VK_EXT_shader_uniform_buffer_unsized_array" +typedef struct VkPhysicalDeviceShaderUniformBufferUnsizedArrayFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 shaderUniformBufferUnsizedArray; +} VkPhysicalDeviceShaderUniformBufferUnsizedArrayFeaturesEXT; + + + +// VK_NV_compute_occupancy_priority is a preprocessor guard. Do not pass it to API calls. +#define VK_NV_compute_occupancy_priority 1 +#define VK_NV_COMPUTE_OCCUPANCY_PRIORITY_SPEC_VERSION 1 +#define VK_NV_COMPUTE_OCCUPANCY_PRIORITY_EXTENSION_NAME "VK_NV_compute_occupancy_priority" +#define VK_COMPUTE_OCCUPANCY_PRIORITY_LOW_NV 0.25f +#define VK_COMPUTE_OCCUPANCY_PRIORITY_NORMAL_NV 0.50f +#define VK_COMPUTE_OCCUPANCY_PRIORITY_HIGH_NV 0.75f +typedef struct VkComputeOccupancyPriorityParametersNV { + VkStructureType sType; + const void* pNext; + float occupancyPriority; + float occupancyThrottling; +} VkComputeOccupancyPriorityParametersNV; + +typedef struct VkPhysicalDeviceComputeOccupancyPriorityFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 computeOccupancyPriority; +} VkPhysicalDeviceComputeOccupancyPriorityFeaturesNV; + +typedef void (VKAPI_PTR *PFN_vkCmdSetComputeOccupancyPriorityNV)(VkCommandBuffer commandBuffer, const VkComputeOccupancyPriorityParametersNV* pParameters); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetComputeOccupancyPriorityNV( + VkCommandBuffer commandBuffer, + const VkComputeOccupancyPriorityParametersNV* pParameters); +#endif +#endif + + +// VK_EXT_shader_subgroup_partitioned is a preprocessor guard. Do not pass it to API calls. +#define VK_EXT_shader_subgroup_partitioned 1 +#define VK_EXT_SHADER_SUBGROUP_PARTITIONED_SPEC_VERSION 1 +#define VK_EXT_SHADER_SUBGROUP_PARTITIONED_EXTENSION_NAME "VK_EXT_shader_subgroup_partitioned" +typedef struct VkPhysicalDeviceShaderSubgroupPartitionedFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 shaderSubgroupPartitioned; +} VkPhysicalDeviceShaderSubgroupPartitionedFeaturesEXT; + + + +// VK_KHR_acceleration_structure is a preprocessor guard. Do not pass it to API calls. 
+#define VK_KHR_acceleration_structure 1 +#define VK_KHR_ACCELERATION_STRUCTURE_SPEC_VERSION 13 +#define VK_KHR_ACCELERATION_STRUCTURE_EXTENSION_NAME "VK_KHR_acceleration_structure" + +typedef enum VkBuildAccelerationStructureModeKHR { + VK_BUILD_ACCELERATION_STRUCTURE_MODE_BUILD_KHR = 0, + VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR = 1, + VK_BUILD_ACCELERATION_STRUCTURE_MODE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkBuildAccelerationStructureModeKHR; + +typedef enum VkAccelerationStructureCreateFlagBitsKHR { + VK_ACCELERATION_STRUCTURE_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR = 0x00000001, + VK_ACCELERATION_STRUCTURE_CREATE_DESCRIPTOR_BUFFER_CAPTURE_REPLAY_BIT_EXT = 0x00000008, + VK_ACCELERATION_STRUCTURE_CREATE_MOTION_BIT_NV = 0x00000004, + VK_ACCELERATION_STRUCTURE_CREATE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkAccelerationStructureCreateFlagBitsKHR; +typedef VkFlags VkAccelerationStructureCreateFlagsKHR; +typedef struct VkAccelerationStructureBuildRangeInfoKHR { + uint32_t primitiveCount; + uint32_t primitiveOffset; + uint32_t firstVertex; + uint32_t transformOffset; +} VkAccelerationStructureBuildRangeInfoKHR; + +typedef struct VkAccelerationStructureGeometryTrianglesDataKHR { + VkStructureType sType; + const void* pNext; + VkFormat vertexFormat; + VkDeviceOrHostAddressConstKHR vertexData; + VkDeviceSize vertexStride; + uint32_t maxVertex; + VkIndexType indexType; + VkDeviceOrHostAddressConstKHR indexData; + VkDeviceOrHostAddressConstKHR transformData; +} VkAccelerationStructureGeometryTrianglesDataKHR; + +typedef struct VkAccelerationStructureGeometryAabbsDataKHR { + VkStructureType sType; + const void* pNext; + VkDeviceOrHostAddressConstKHR data; + VkDeviceSize stride; +} VkAccelerationStructureGeometryAabbsDataKHR; + +typedef struct VkAccelerationStructureGeometryInstancesDataKHR { + VkStructureType sType; + const void* pNext; + VkBool32 arrayOfPointers; + VkDeviceOrHostAddressConstKHR data; +} VkAccelerationStructureGeometryInstancesDataKHR; + +typedef union VkAccelerationStructureGeometryDataKHR { + VkAccelerationStructureGeometryTrianglesDataKHR triangles; + VkAccelerationStructureGeometryAabbsDataKHR aabbs; + VkAccelerationStructureGeometryInstancesDataKHR instances; +} VkAccelerationStructureGeometryDataKHR; + +typedef struct VkAccelerationStructureGeometryKHR { + VkStructureType sType; + const void* pNext; + VkGeometryTypeKHR geometryType; + VkAccelerationStructureGeometryDataKHR geometry; + VkGeometryFlagsKHR flags; +} VkAccelerationStructureGeometryKHR; + +typedef struct VkAccelerationStructureBuildGeometryInfoKHR { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureTypeKHR type; + VkBuildAccelerationStructureFlagsKHR flags; + VkBuildAccelerationStructureModeKHR mode; + VkAccelerationStructureKHR srcAccelerationStructure; + VkAccelerationStructureKHR dstAccelerationStructure; + uint32_t geometryCount; + const VkAccelerationStructureGeometryKHR* pGeometries; + const VkAccelerationStructureGeometryKHR* const* ppGeometries; + VkDeviceOrHostAddressKHR scratchData; +} VkAccelerationStructureBuildGeometryInfoKHR; + +typedef struct VkAccelerationStructureCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureCreateFlagsKHR createFlags; + VkBuffer buffer; + VkDeviceSize offset; + VkDeviceSize size; + VkAccelerationStructureTypeKHR type; + VkDeviceAddress deviceAddress; +} VkAccelerationStructureCreateInfoKHR; + +typedef struct VkWriteDescriptorSetAccelerationStructureKHR { + VkStructureType sType; + const void* pNext; + uint32_t 
accelerationStructureCount; + const VkAccelerationStructureKHR* pAccelerationStructures; +} VkWriteDescriptorSetAccelerationStructureKHR; + +typedef struct VkPhysicalDeviceAccelerationStructureFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 accelerationStructure; + VkBool32 accelerationStructureCaptureReplay; + VkBool32 accelerationStructureIndirectBuild; + VkBool32 accelerationStructureHostCommands; + VkBool32 descriptorBindingAccelerationStructureUpdateAfterBind; +} VkPhysicalDeviceAccelerationStructureFeaturesKHR; + +typedef struct VkPhysicalDeviceAccelerationStructurePropertiesKHR { + VkStructureType sType; + void* pNext; + uint64_t maxGeometryCount; + uint64_t maxInstanceCount; + uint64_t maxPrimitiveCount; + uint32_t maxPerStageDescriptorAccelerationStructures; + uint32_t maxPerStageDescriptorUpdateAfterBindAccelerationStructures; + uint32_t maxDescriptorSetAccelerationStructures; + uint32_t maxDescriptorSetUpdateAfterBindAccelerationStructures; + uint32_t minAccelerationStructureScratchOffsetAlignment; +} VkPhysicalDeviceAccelerationStructurePropertiesKHR; + +typedef struct VkAccelerationStructureDeviceAddressInfoKHR { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureKHR accelerationStructure; +} VkAccelerationStructureDeviceAddressInfoKHR; + +typedef struct VkAccelerationStructureVersionInfoKHR { + VkStructureType sType; + const void* pNext; + const uint8_t* pVersionData; +} VkAccelerationStructureVersionInfoKHR; + +typedef struct VkCopyAccelerationStructureToMemoryInfoKHR { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureKHR src; + VkDeviceOrHostAddressKHR dst; + VkCopyAccelerationStructureModeKHR mode; +} VkCopyAccelerationStructureToMemoryInfoKHR; + +typedef struct VkCopyMemoryToAccelerationStructureInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeviceOrHostAddressConstKHR src; + VkAccelerationStructureKHR dst; + VkCopyAccelerationStructureModeKHR mode; +} VkCopyMemoryToAccelerationStructureInfoKHR; + +typedef struct VkCopyAccelerationStructureInfoKHR { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureKHR src; + VkAccelerationStructureKHR dst; + VkCopyAccelerationStructureModeKHR mode; +} VkCopyAccelerationStructureInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateAccelerationStructureKHR)(VkDevice device, const VkAccelerationStructureCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureKHR* pAccelerationStructure); +typedef void (VKAPI_PTR *PFN_vkDestroyAccelerationStructureKHR)(VkDevice device, VkAccelerationStructureKHR accelerationStructure, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructuresKHR)(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos); +typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructuresIndirectKHR)(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkDeviceAddress* pIndirectDeviceAddresses, const uint32_t* pIndirectStrides, const uint32_t* const* ppMaxPrimitiveCounts); +typedef VkResult (VKAPI_PTR *PFN_vkBuildAccelerationStructuresKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos); +typedef VkResult 
(VKAPI_PTR *PFN_vkCopyAccelerationStructureKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureInfoKHR* pInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCopyAccelerationStructureToMemoryKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCopyMemoryToAccelerationStructureKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo); +typedef VkResult (VKAPI_PTR *PFN_vkWriteAccelerationStructuresPropertiesKHR)(VkDevice device, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, size_t dataSize, void* pData, size_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureKHR)(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureInfoKHR* pInfo); +typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureToMemoryKHR)(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo); +typedef void (VKAPI_PTR *PFN_vkCmdCopyMemoryToAccelerationStructureKHR)(VkCommandBuffer commandBuffer, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo); +typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetAccelerationStructureDeviceAddressKHR)(VkDevice device, const VkAccelerationStructureDeviceAddressInfoKHR* pInfo); +typedef void (VKAPI_PTR *PFN_vkCmdWriteAccelerationStructuresPropertiesKHR)(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery); +typedef void (VKAPI_PTR *PFN_vkGetDeviceAccelerationStructureCompatibilityKHR)(VkDevice device, const VkAccelerationStructureVersionInfoKHR* pVersionInfo, VkAccelerationStructureCompatibilityKHR* pCompatibility); +typedef void (VKAPI_PTR *PFN_vkGetAccelerationStructureBuildSizesKHR)(VkDevice device, VkAccelerationStructureBuildTypeKHR buildType, const VkAccelerationStructureBuildGeometryInfoKHR* pBuildInfo, const uint32_t* pMaxPrimitiveCounts, VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateAccelerationStructureKHR( + VkDevice device, + const VkAccelerationStructureCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkAccelerationStructureKHR* pAccelerationStructure); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkDestroyAccelerationStructureKHR( + VkDevice device, + VkAccelerationStructureKHR accelerationStructure, + const VkAllocationCallbacks* pAllocator); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructuresKHR( + VkCommandBuffer commandBuffer, + uint32_t infoCount, + const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, + const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructuresIndirectKHR( + VkCommandBuffer commandBuffer, + uint32_t infoCount, + const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, + const VkDeviceAddress* pIndirectDeviceAddresses, + const uint32_t* pIndirectStrides, + const uint32_t* const* ppMaxPrimitiveCounts); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkBuildAccelerationStructuresKHR( + VkDevice device, + 
VkDeferredOperationKHR deferredOperation, + uint32_t infoCount, + const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, + const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCopyAccelerationStructureKHR( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + const VkCopyAccelerationStructureInfoKHR* pInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCopyAccelerationStructureToMemoryKHR( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCopyMemoryToAccelerationStructureKHR( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkWriteAccelerationStructuresPropertiesKHR( + VkDevice device, + uint32_t accelerationStructureCount, + const VkAccelerationStructureKHR* pAccelerationStructures, + VkQueryType queryType, + size_t dataSize, + void* pData, + size_t stride); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureKHR( + VkCommandBuffer commandBuffer, + const VkCopyAccelerationStructureInfoKHR* pInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureToMemoryKHR( + VkCommandBuffer commandBuffer, + const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdCopyMemoryToAccelerationStructureKHR( + VkCommandBuffer commandBuffer, + const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetAccelerationStructureDeviceAddressKHR( + VkDevice device, + const VkAccelerationStructureDeviceAddressInfoKHR* pInfo); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdWriteAccelerationStructuresPropertiesKHR( + VkCommandBuffer commandBuffer, + uint32_t accelerationStructureCount, + const VkAccelerationStructureKHR* pAccelerationStructures, + VkQueryType queryType, + VkQueryPool queryPool, + uint32_t firstQuery); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetDeviceAccelerationStructureCompatibilityKHR( + VkDevice device, + const VkAccelerationStructureVersionInfoKHR* pVersionInfo, + VkAccelerationStructureCompatibilityKHR* pCompatibility); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetAccelerationStructureBuildSizesKHR( + VkDevice device, + VkAccelerationStructureBuildTypeKHR buildType, + const VkAccelerationStructureBuildGeometryInfoKHR* pBuildInfo, + const uint32_t* pMaxPrimitiveCounts, + VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo); +#endif +#endif + + +// VK_KHR_ray_tracing_pipeline is a preprocessor guard. Do not pass it to API calls. 
+#define VK_KHR_ray_tracing_pipeline 1 +#define VK_KHR_RAY_TRACING_PIPELINE_SPEC_VERSION 1 +#define VK_KHR_RAY_TRACING_PIPELINE_EXTENSION_NAME "VK_KHR_ray_tracing_pipeline" + +typedef enum VkShaderGroupShaderKHR { + VK_SHADER_GROUP_SHADER_GENERAL_KHR = 0, + VK_SHADER_GROUP_SHADER_CLOSEST_HIT_KHR = 1, + VK_SHADER_GROUP_SHADER_ANY_HIT_KHR = 2, + VK_SHADER_GROUP_SHADER_INTERSECTION_KHR = 3, + VK_SHADER_GROUP_SHADER_MAX_ENUM_KHR = 0x7FFFFFFF +} VkShaderGroupShaderKHR; +typedef struct VkRayTracingShaderGroupCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkRayTracingShaderGroupTypeKHR type; + uint32_t generalShader; + uint32_t closestHitShader; + uint32_t anyHitShader; + uint32_t intersectionShader; + const void* pShaderGroupCaptureReplayHandle; +} VkRayTracingShaderGroupCreateInfoKHR; + +typedef struct VkRayTracingPipelineInterfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t maxPipelineRayPayloadSize; + uint32_t maxPipelineRayHitAttributeSize; +} VkRayTracingPipelineInterfaceCreateInfoKHR; + +typedef struct VkRayTracingPipelineCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkPipelineCreateFlags flags; + uint32_t stageCount; + const VkPipelineShaderStageCreateInfo* pStages; + uint32_t groupCount; + const VkRayTracingShaderGroupCreateInfoKHR* pGroups; + uint32_t maxPipelineRayRecursionDepth; + const VkPipelineLibraryCreateInfoKHR* pLibraryInfo; + const VkRayTracingPipelineInterfaceCreateInfoKHR* pLibraryInterface; + const VkPipelineDynamicStateCreateInfo* pDynamicState; + VkPipelineLayout layout; + VkPipeline basePipelineHandle; + int32_t basePipelineIndex; +} VkRayTracingPipelineCreateInfoKHR; + +typedef struct VkPhysicalDeviceRayTracingPipelineFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 rayTracingPipeline; + VkBool32 rayTracingPipelineShaderGroupHandleCaptureReplay; + VkBool32 rayTracingPipelineShaderGroupHandleCaptureReplayMixed; + VkBool32 rayTracingPipelineTraceRaysIndirect; + VkBool32 rayTraversalPrimitiveCulling; +} VkPhysicalDeviceRayTracingPipelineFeaturesKHR; + +typedef struct VkPhysicalDeviceRayTracingPipelinePropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t shaderGroupHandleSize; + uint32_t maxRayRecursionDepth; + uint32_t maxShaderGroupStride; + uint32_t shaderGroupBaseAlignment; + uint32_t shaderGroupHandleCaptureReplaySize; + uint32_t maxRayDispatchInvocationCount; + uint32_t shaderGroupHandleAlignment; + uint32_t maxRayHitAttributeSize; +} VkPhysicalDeviceRayTracingPipelinePropertiesKHR; + +typedef struct VkTraceRaysIndirectCommandKHR { + uint32_t width; + uint32_t height; + uint32_t depth; +} VkTraceRaysIndirectCommandKHR; + +typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysKHR)(VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth); +typedef VkResult (VKAPI_PTR *PFN_vkCreateRayTracingPipelinesKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); +typedef VkResult (VKAPI_PTR *PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t 
dataSize, void* pData); +typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysIndirectKHR)(VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, VkDeviceAddress indirectDeviceAddress); +typedef VkDeviceSize (VKAPI_PTR *PFN_vkGetRayTracingShaderGroupStackSizeKHR)(VkDevice device, VkPipeline pipeline, uint32_t group, VkShaderGroupShaderKHR groupShader); +typedef void (VKAPI_PTR *PFN_vkCmdSetRayTracingPipelineStackSizeKHR)(VkCommandBuffer commandBuffer, uint32_t pipelineStackSize); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysKHR( + VkCommandBuffer commandBuffer, + const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, + uint32_t width, + uint32_t height, + uint32_t depth); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRayTracingPipelinesKHR( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( + VkDevice device, + VkPipeline pipeline, + uint32_t firstGroup, + uint32_t groupCount, + size_t dataSize, + void* pData); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysIndirectKHR( + VkCommandBuffer commandBuffer, + const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, + VkDeviceAddress indirectDeviceAddress); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR VkDeviceSize VKAPI_CALL vkGetRayTracingShaderGroupStackSizeKHR( + VkDevice device, + VkPipeline pipeline, + uint32_t group, + VkShaderGroupShaderKHR groupShader); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetRayTracingPipelineStackSizeKHR( + VkCommandBuffer commandBuffer, + uint32_t pipelineStackSize); +#endif +#endif + + +// VK_KHR_ray_query is a preprocessor guard. Do not pass it to API calls. +#define VK_KHR_ray_query 1 +#define VK_KHR_RAY_QUERY_SPEC_VERSION 1 +#define VK_KHR_RAY_QUERY_EXTENSION_NAME "VK_KHR_ray_query" +typedef struct VkPhysicalDeviceRayQueryFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 rayQuery; +} VkPhysicalDeviceRayQueryFeaturesKHR; + + + +// VK_EXT_mesh_shader is a preprocessor guard. Do not pass it to API calls. 
+#define VK_EXT_mesh_shader 1 +#define VK_EXT_MESH_SHADER_SPEC_VERSION 1 +#define VK_EXT_MESH_SHADER_EXTENSION_NAME "VK_EXT_mesh_shader" +typedef struct VkPhysicalDeviceMeshShaderFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 taskShader; + VkBool32 meshShader; + VkBool32 multiviewMeshShader; + VkBool32 primitiveFragmentShadingRateMeshShader; + VkBool32 meshShaderQueries; +} VkPhysicalDeviceMeshShaderFeaturesEXT; + +typedef struct VkPhysicalDeviceMeshShaderPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxTaskWorkGroupTotalCount; + uint32_t maxTaskWorkGroupCount[3]; + uint32_t maxTaskWorkGroupInvocations; + uint32_t maxTaskWorkGroupSize[3]; + uint32_t maxTaskPayloadSize; + uint32_t maxTaskSharedMemorySize; + uint32_t maxTaskPayloadAndSharedMemorySize; + uint32_t maxMeshWorkGroupTotalCount; + uint32_t maxMeshWorkGroupCount[3]; + uint32_t maxMeshWorkGroupInvocations; + uint32_t maxMeshWorkGroupSize[3]; + uint32_t maxMeshSharedMemorySize; + uint32_t maxMeshPayloadAndSharedMemorySize; + uint32_t maxMeshOutputMemorySize; + uint32_t maxMeshPayloadAndOutputMemorySize; + uint32_t maxMeshOutputComponents; + uint32_t maxMeshOutputVertices; + uint32_t maxMeshOutputPrimitives; + uint32_t maxMeshOutputLayers; + uint32_t maxMeshMultiviewViewCount; + uint32_t meshOutputPerVertexGranularity; + uint32_t meshOutputPerPrimitiveGranularity; + uint32_t maxPreferredTaskWorkGroupInvocations; + uint32_t maxPreferredMeshWorkGroupInvocations; + VkBool32 prefersLocalInvocationVertexOutput; + VkBool32 prefersLocalInvocationPrimitiveOutput; + VkBool32 prefersCompactVertexOutput; + VkBool32 prefersCompactPrimitiveOutput; +} VkPhysicalDeviceMeshShaderPropertiesEXT; + +typedef struct VkDrawMeshTasksIndirectCommandEXT { + uint32_t groupCountX; + uint32_t groupCountY; + uint32_t groupCountZ; +} VkDrawMeshTasksIndirectCommandEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdDrawMeshTasksEXT)(VkCommandBuffer commandBuffer, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ); +typedef void (VKAPI_PTR *PFN_vkCmdDrawMeshTasksIndirectEXT)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawMeshTasksIndirectCountEXT)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); + +#ifndef VK_NO_PROTOTYPES +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawMeshTasksEXT( + VkCommandBuffer commandBuffer, + uint32_t groupCountX, + uint32_t groupCountY, + uint32_t groupCountZ); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawMeshTasksIndirectEXT( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + uint32_t drawCount, + uint32_t stride); +#endif + +#ifndef VK_ONLY_EXPORTED_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawMeshTasksIndirectCountEXT( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); +#endif +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/docs/vulkan-readme.md b/docs/vulkan-readme.md new file mode 100644 index 0000000000..cc7121ab83 --- /dev/null +++ b/docs/vulkan-readme.md @@ -0,0 +1,337 @@ +# Vulkan Renderer + +The Vulkan renderer is an alternate renderer for ioquake3 based on +[vkQuake3](https://github.com/suijingfeng/vkQuake3) (Sui Jingfeng's fork), +which 
itself is derived from
+[Quake-III-Arena-Kenny-Edition](https://github.com/kennyalive/Quake-III-Arena-Kenny-Edition).
+It replaces the OpenGL fixed-function pipeline with Vulkan, providing a
+modern graphics API path while maintaining compatibility with existing
+Quake 3 content.
+
+
+-------------------------------------------------------------------------------
+ FEATURES
+-------------------------------------------------------------------------------
+
+ - Compatible with vanilla Quake 3 and most mods.
+ - Vulkan 1.0 rendering backend.
+ - Pre-compiled SPIR-V shaders (single-texture and multi-texture paths).
+ - Specialization constants for alpha testing, clipping planes, and texture
+   blend operations -- avoiding redundant shader permutations.
+ - Hardware clip distance support where available.
+ - Push constants for per-primitive MVP and clipping plane data.
+ - Single command buffer, single render pass architecture.
+ - Self-contained renderer with its own image loaders (BMP, TGA, JPG, PNG,
+   PCX) and font rendering.
+
+
+-------------------------------------------------------------------------------
+ BUILDING
+-------------------------------------------------------------------------------
+
+The Vulkan renderer is not built by default. To enable it, pass
+`-DBUILD_RENDERER_VULKAN=ON` to CMake.
+
+    mkdir build && cd build
+    cmake .. -DCMAKE_BUILD_TYPE=Release -DBUILD_RENDERER_VULKAN=ON
+    make renderer_vulkan
+
+This produces `renderer_vulkan_<arch>.so` on Linux or
+`renderer_vulkan_<arch>.dll` on Windows (e.g. `renderer_vulkan_x86_64.dll`),
+or a framework bundle on macOS.
+
+### Vulkan headers
+
+By default the renderer builds against the Vulkan headers bundled in
+`code/renderer_vulkan/vulkan/` (Khronos Vulkan 1.4, header version 341).
+No Vulkan SDK installation is required.
+
+To build against system-provided Vulkan headers instead (e.g. from the
+`vulkan-headers` package on Linux or the LunarG Vulkan SDK), pass:
+
+    cmake .. -DBUILD_RENDERER_VULKAN=ON -DUSE_INTERNAL_VULKAN_HEADERS=OFF
+
+CMake will then use `find_package(Vulkan)` to locate the system headers.
+This is useful when you want to build against newer headers from your
+distribution or SDK without updating the bundled copy.
+
+### Shader compilation
+
+The SPIR-V shaders are shipped pre-compiled as C byte arrays in
+`code/renderer_vulkan/shaders/Compiled/`, so the Vulkan SDK is not needed
+to build the renderer.
+
+To regenerate the shaders from GLSL source (requires `glslangValidator`):
+
+    cd code/renderer_vulkan/shaders
+    ./compile.sh
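+
+For illustration, here is a minimal C sketch of how one of those byte arrays
+might be wrapped in a `VkShaderModule`. The symbols `vert_spv` and
+`vert_spv_size` are hypothetical placeholders, not the renderer's actual
+names:
+
+    #include <vulkan/vulkan.h>
+
+    extern const unsigned char vert_spv[];   /* generated by compile.sh */
+    extern const unsigned int  vert_spv_size;
+
+    static VkShaderModule create_shader_module(VkDevice device)
+    {
+        VkShaderModuleCreateInfo info = {0};
+        VkShaderModule module = VK_NULL_HANDLE;
+
+        info.sType    = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
+        info.codeSize = vert_spv_size;              /* size in bytes */
+        info.pCode    = (const uint32_t *)vert_spv; /* SPIR-V words, 4-byte aligned */
+
+        if (vkCreateShaderModule(device, &info, NULL, &module) != VK_SUCCESS)
+            return VK_NULL_HANDLE;
+        return module;
+    }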
+
+-------------------------------------------------------------------------------
+ INSTALLATION
+-------------------------------------------------------------------------------
+
+For *nix:
+
+1. Build ioquake3 as usual. The Vulkan renderer shared library will be
+   produced alongside the OpenGL renderers.
+
+2. Copy the renderer library into your Quake 3 install directory alongside
+   the client binary.
+
+For Win32:
+
+1. Have a Quake 3 install, fully patched.
+
+2. Copy the following files into Quake 3's install directory:
+
+        ioquake3.x86_64.exe
+        renderer_vulkan_x86_64.dll
+
+   These can be found in the build output directory after compiling.
+
+For macOS:
+
+macOS does not provide a native Vulkan implementation. The Vulkan renderer
+requires MoltenVK, a Vulkan Portability library that translates Vulkan calls
+to Apple's Metal API.
+
+1. Install MoltenVK via Homebrew:
+
+        brew install molten-vk
+
+   This provides `libMoltenVK.dylib` and the Vulkan loader (`libvulkan`).
+
+2. SDL2 searches for the Vulkan library at runtime. It looks for (in order):
+   `vulkan.framework/vulkan`, `libvulkan.1.dylib`,
+   `MoltenVK.framework/MoltenVK`, and `libMoltenVK.dylib`.
+
+   If the library is not found in the default search paths, you can either:
+
+   a. Copy or symlink `libMoltenVK.dylib` into the app bundle:
+
+        cp $(brew --prefix molten-vk)/lib/libMoltenVK.dylib \
+            ioquake3.app/Contents/MacOS/
+
+   b. Or set the library path before launching:
+
+        export DYLD_LIBRARY_PATH=$(brew --prefix molten-vk)/lib
+        ./ioquake3.app/Contents/MacOS/ioquake3 +set cl_renderer vulkan
+
+   Without MoltenVK installed, the Vulkan renderer will fail with:
+   "Failed to load Vulkan Portability library".
+
+
+-------------------------------------------------------------------------------
+ RUNNING
+-------------------------------------------------------------------------------
+
+1. Start ioquake3.
+
+2. Open the console (the default key is tilde, ~) and enter:
+
+        /cl_renderer vulkan
+        /vid_restart
+
+3. To switch back to an OpenGL renderer, enter:
+
+        /cl_renderer opengl1
+        /vid_restart
+
+   (or `opengl2` for the GL2 renderer).
+
+
+-------------------------------------------------------------------------------
+ CONSOLE COMMANDS
+-------------------------------------------------------------------------------
+
+ * `pipelineList`    - List all created Vulkan pipelines.
+ * `gpuMem`          - Show GPU image memory allocation.
+ * `printOR`         - Print the value of backend.or.
+ * `displayResoList` - List display resolutions your monitor supports.
+
+
+-------------------------------------------------------------------------------
+ CVARS
+-------------------------------------------------------------------------------
+
+Cvars for display:
+
+ * `r_mode`           - Video mode selection.
+                        -2   - Use desktop resolution. (default)
+                        -1   - Use r_customwidth / r_customheight.
+                        0-28 - Predefined modes (e.g. 4 = 800x600,
+                               12 = 1280x720, 23 = 1920x1080).
+
+ * `r_fullscreen`     - Run in fullscreen mode.
+                        0 - Windowed.
+                        1 - Fullscreen. (default)
+
+ * `r_allowResize`    - Allow the window to be resized.
+                        0 - No. (default)
+                        1 - Yes.
+
+ * `r_displayRefresh` - Display refresh rate in Hz.
+                        60 - Default.
+
+ * `r_displayIndex`   - Select which display/monitor to use.
+                        0 - Primary display. (default)
+
+ * `r_customwidth`    - Custom resolution width (when r_mode -1).
+                        960 - Default.
+
+ * `r_customheight`   - Custom resolution height (when r_mode -1).
+                        540 - Default.
+
+Cvars for image quality:
+
+ * `r_picmip`         - Texture detail reduction level (0 = full quality).
+                        0   - Highest quality.
+                        1   - Default.
+                        2-8 - Progressively lower quality.
+
+ * `r_intensity`      - Linear intensity multiplier applied to textures.
+                        Requires vid_restart.
+                        1.0 - No change.
+                        1.5 - Default. Good starting point.
+                        2.0 - Brighter.
+
+ * `r_gamma`          - Non-linear gamma correction applied to textures
+                        before upload to the GPU. Requires vid_restart.
+                        1.0 - No correction. (default)
+                        1.5 - Brighter.
+
+ * `r_simpleMipMaps`  - Use a simple box filter for mipmap generation.
+                        0 - No.
+                        1 - Yes. (default)
+
+ * `r_subdivisions`   - Curve tessellation quality. Lower values produce
+                        smoother curves.
+                        4 - Default.
+
+ * `r_vertexLight`    - Use vertex lighting instead of lightmaps.
+                        Better performance, lower visual quality.
+                        Requires vid_restart.
+                        0 - No. (default)
+                        1 - Yes.
+
+ * `r_mapOverBrightBits` - Overbrightening of lightmap data.
+                        Requires vid_restart.
+                        1 - Default.
+
+Cvars for frame rate control:
+
+ * `r_swapInterval`   - VSync / swap interval control (the present-mode
+                        preference is sketched below).
+                        0 - VSync off; present mode preference is
+                            MAILBOX > IMMEDIATE > FIFO. (default)
+                        1 - VSync on; forces FIFO present mode,
+                            locking the frame rate to the display
+                            refresh rate.
+
+   NOTE: When VSync is off (`r_swapInterval 0`), the engine can produce very
+   high frame rates (1000+ fps) if `com_maxfps` is also set to 0. This may
+   cause erratic frame timing that triggers "Connection Interrupted" messages
+   during online play. To avoid this, set `com_maxfps` to a reasonable value
+   (e.g. 125 or 250) when playing with VSync disabled.
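+
+The renderer's actual code may differ, but the preference order above would
+look roughly like this minimal C sketch, assuming the surface's present
+modes have already been queried with
+`vkGetPhysicalDeviceSurfacePresentModesKHR()`:
+
+    #include <vulkan/vulkan.h>
+
+    static VkPresentModeKHR choose_present_mode(const VkPresentModeKHR *modes,
+                                                uint32_t count, int swapInterval)
+    {
+        uint32_t i;
+
+        if (swapInterval)
+            return VK_PRESENT_MODE_FIFO_KHR;  /* VSync on: force FIFO */
+
+        for (i = 0; i < count; i++)           /* prefer MAILBOX when VSync is off */
+            if (modes[i] == VK_PRESENT_MODE_MAILBOX_KHR)
+                return VK_PRESENT_MODE_MAILBOX_KHR;
+
+        for (i = 0; i < count; i++)           /* then IMMEDIATE */
+            if (modes[i] == VK_PRESENT_MODE_IMMEDIATE_KHR)
+                return VK_PRESENT_MODE_IMMEDIATE_KHR;
+
+        return VK_PRESENT_MODE_FIFO_KHR;      /* FIFO is always available */
+    }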
+
+Cvars for rendering features:
+
+ * `r_dynamiclight`   - Enable dynamic lights.
+                        0 - No.
+                        1 - Yes. (default)
+
+ * `r_flares`         - Enable light flares.
+                        0 - No. (default)
+                        1 - Yes.
+
+ * `r_facePlaneCull`  - Enable backface culling on planar surfaces.
+                        0 - No.
+                        1 - Yes. (default)
+
+ * `r_inGameVideo`    - Enable in-game video playback.
+                        0 - No.
+                        1 - Yes. (default)
+
+ * `cg_shadows`       - Shadow rendering mode.
+                        0 - None.
+                        1 - Blur shadows. (default)
+                        2 - Stencil volume shadows.
+                        3 - Black planar projection shadows.
+
+Cvars for railgun effects:
+
+ * `r_railWidth`         - Rail trail width. Default 16.
+ * `r_railCoreWidth`     - Rail trail core width. Default 6.
+ * `r_railSegmentLength` - Rail trail segment length. Default 32.
+
+Cvars for debugging (cheat-protected):
+
+ * `r_speeds`         - Display rendering performance stats.
+                        0 - Off. (default)
+                        1 - On.
+
+ * `r_showtris`       - Wireframe rendering.
+ * `r_shownormals`    - Draw surface normals.
+ * `r_showsky`        - Force sky in front of all surfaces.
+ * `r_nocull`         - Disable frustum culling.
+ * `r_novis`          - Disable PVS culling.
+ * `r_drawworld`      - Toggle world rendering.
+ * `r_drawentities`   - Toggle entity rendering.
+ * `r_lightmap`       - Render lightmaps only.
+ * `r_fullbright`     - Skip the lightmap pass entirely.
+ * `r_singleShader`   - Force the default shader on all world surfaces.
+ * `r_lockpvs`        - Lock the PVS to the current viewpoint.
+ * `r_noportals`      - Disable portal rendering.
+ * `r_verbose`        - Enable verbose renderer debug output.
+ * `r_znear`          - Near Z clip plane distance. Default 4.
+
+
+-------------------------------------------------------------------------------
+ ARCHITECTURE
+-------------------------------------------------------------------------------
+
+The Vulkan renderer is loaded as a shared library via `cl_renderer vulkan`.
+It exports the standard `GetRefAPI` entry point (REF_API_VERSION 8) and is
+ABI-compatible with the OpenGL renderers.
+
+### Geometry
+
+Quake 3's renderer prepares geometry in `tess.xyz` and `tess.indexes` arrays.
+The Vulkan backend appends this data to host-visible vertex and index buffers
+each frame. Typically up to 500 KB of vertex data and 100 KB of index data
+are written per frame.
+
+### Descriptor sets
+
+The renderer allocates one descriptor set per image, each containing a
+single combined image sampler. Descriptor sets are created and updated once
+during initialization; no descriptor set updates occur during frame
+rendering.
+
+### Push constants
+
+MVP transforms (64 bytes) are passed via push constants. For portal and
+mirror views, an additional 64 bytes are used for the eye transform and
+clipping plane (128 bytes total, the Vulkan guaranteed minimum).
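+
+As a rough C sketch (the struct layout and the vertex-stage flag here are
+illustrative assumptions, not the renderer's actual definitions), the
+per-view update amounts to a single `vkCmdPushConstants()` call:
+
+    #include <vulkan/vulkan.h>
+
+    typedef struct {
+        float mvp[16];        /* model-view-projection matrix, 64 bytes    */
+        float eyeAndClip[16]; /* eye transform + clip plane, portal/mirror */
+    } pushConstants_t;
+
+    static void push_transform(VkCommandBuffer cb, VkPipelineLayout layout,
+                               const pushConstants_t *pc, VkBool32 isPortal)
+    {
+        /* 64 bytes for a regular view, the full 128 for portal/mirror views */
+        vkCmdPushConstants(cb, layout, VK_SHADER_STAGE_VERTEX_BIT,
+                           0, isPortal ? 128U : 64U, pc);
+    }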
+
+### Pipelines
+
+Standard pipelines (skybox, fog, dynamic light, shadow volumes, debug
+overlays) are created at renderer startup. Map-specific pipelines are
+created during Q3 shader parsing at map load time. For each Q3 shader,
+three pipelines are created: regular view, portal view, and mirror view.
+
+### Shaders
+
+SPIR-V vertex and fragment shaders emulate the corresponding fixed-function
+OpenGL operations. Specialization constants are used for alpha test function
+selection, texture blend operations, and optional clipping plane support,
+avoiding the need for large numbers of shader permutations.
+
+
+-------------------------------------------------------------------------------
+ THANKS
+-------------------------------------------------------------------------------
+
+ - Id Software, for creating Quake 3 and releasing its source code under the
+   GPL.
+
+ - Sui Jingfeng, for the vkQuake3 Vulkan renderer implementation.
+
+ - Kenny Cason (kennyalive), for the original Quake-III-Arena-Kenny-Edition
+   Vulkan backend that vkQuake3 is based on.
+
+ - The ioquake3 team and contributors, for maintaining and improving the
+   engine.