Merge pull request #44540 from jacobcoughenour/vulkan-sdk-1.2.126.0

Vulkan: loader, headers, and glslang updated to sdk-1.2.162.0
Rémi Verschelde 2020-12-23 01:13:18 +01:00 committed by GitHub
commit 727faf9b48
54 changed files with 21227 additions and 16753 deletions


@ -146,7 +146,7 @@ Files extracted from upstream source:
## glslang
- Upstream: https://github.com/KhronosGroup/glslang
- Version: git (bacaef3237c515e40d1a24722be48c0a0b30f75f, 2020)
- Version: git (dd69df7f3dac26362e10b0f38efb9e47990f7537, 2020)
- License: glslang
Version should be kept in sync with the one of the used Vulkan SDK (see `vulkan`
@ -162,8 +162,6 @@ Files extracted from upstream source:
- `LICENSE.txt`
- Unnecessary files like `CMakeLists.txt` and `updateGrammar` removed.
Patches in the `patches` directory should be re-applied after updates.
## Graphite engine
- Upstream: https://github.com/silnrsi/graphite
@ -624,7 +622,7 @@ folder.
## vulkan
- Upstream: https://github.com/KhronosGroup/Vulkan-Loader
- Version: sdk-1.2.154.0 (2020)
- Version: sdk-1.2.162.0 (2020)
- License: Apache 2.0
Unless there is a specific reason to package a more recent version, please stick


@ -36,5 +36,6 @@ static const char* const E_SPV_EXT_fragment_fully_covered = "SPV_EXT_fragment_fu
static const char* const E_SPV_EXT_fragment_invocation_density = "SPV_EXT_fragment_invocation_density";
static const char* const E_SPV_EXT_demote_to_helper_invocation = "SPV_EXT_demote_to_helper_invocation";
static const char* const E_SPV_EXT_shader_atomic_float_add = "SPV_EXT_shader_atomic_float_add";
static const char* const E_SPV_EXT_shader_image_int64 = "SPV_EXT_shader_image_int64";
#endif // #ifndef GLSLextEXT_H


@ -48,4 +48,7 @@ static const char* const E_SPV_KHR_shader_clock = "SPV_KHR_shade
static const char* const E_SPV_KHR_non_semantic_info = "SPV_KHR_non_semantic_info";
static const char* const E_SPV_KHR_ray_tracing = "SPV_KHR_ray_tracing";
static const char* const E_SPV_KHR_ray_query = "SPV_KHR_ray_query";
static const char* const E_SPV_KHR_fragment_shading_rate = "SPV_KHR_fragment_shading_rate";
static const char* const E_SPV_KHR_terminate_invocation = "SPV_KHR_terminate_invocation";
#endif // #ifndef GLSLextKHR_H


@ -149,6 +149,7 @@ protected:
spv::Decoration TranslateInterpolationDecoration(const glslang::TQualifier& qualifier);
spv::Decoration TranslateAuxiliaryStorageDecoration(const glslang::TQualifier& qualifier);
spv::Decoration TranslateNonUniformDecoration(const glslang::TQualifier& qualifier);
spv::Decoration TranslateNonUniformDecoration(const spv::Builder::AccessChain::CoherentFlags& coherentFlags);
spv::Builder::AccessChain::CoherentFlags TranslateCoherent(const glslang::TType& type);
spv::MemoryAccessMask TranslateMemoryAccess(const spv::Builder::AccessChain::CoherentFlags &coherentFlags);
spv::ImageOperandsMask TranslateImageOperands(const spv::Builder::AccessChain::CoherentFlags &coherentFlags);
@ -189,6 +190,7 @@ protected:
bool originalParam(glslang::TStorageQualifier, const glslang::TType&, bool implicitThisParam);
void makeFunctions(const glslang::TIntermSequence&);
void makeGlobalInitializers(const glslang::TIntermSequence&);
void collectRayTracingLinkerObjects();
void visitFunctions(const glslang::TIntermSequence&);
void handleFunctionEntry(const glslang::TIntermAggregate* node);
void translateArguments(const glslang::TIntermAggregate& node, std::vector<spv::Id>& arguments,
@ -272,6 +274,9 @@ protected:
// requiring local translation to and from SPIR-V type on every access.
// Maps <builtin-variable-id -> AST-required-type-id>
std::unordered_map<spv::Id, spv::Id> forceType;
// Used later for generating OpTraceKHR/OpExecuteCallableKHR
std::unordered_map<unsigned int, glslang::TIntermSymbol *> locationToSymbol[2];
};
//
@ -539,6 +544,20 @@ spv::Decoration TGlslangToSpvTraverser::TranslateNonUniformDecoration(const glsl
return spv::DecorationMax;
}
// If lvalue flags contains nonUniform, return SPIR-V NonUniform decoration.
spv::Decoration TGlslangToSpvTraverser::TranslateNonUniformDecoration(
const spv::Builder::AccessChain::CoherentFlags& coherentFlags)
{
#ifndef GLSLANG_WEB
if (coherentFlags.isNonUniform()) {
builder.addIncorporatedExtension("SPV_EXT_descriptor_indexing", spv::Spv_1_5);
builder.addCapability(spv::CapabilityShaderNonUniformEXT);
return spv::DecorationNonUniformEXT;
} else
#endif
return spv::DecorationMax;
}
spv::MemoryAccessMask TGlslangToSpvTraverser::TranslateMemoryAccess(
const spv::Builder::AccessChain::CoherentFlags &coherentFlags)
{
@ -614,6 +633,7 @@ spv::Builder::AccessChain::CoherentFlags TGlslangToSpvTraverser::TranslateCohere
flags.volatil;
flags.isImage = type.getBasicType() == glslang::EbtSampler;
#endif
flags.nonUniform = type.getQualifier().nonUniform;
return flags;
}
@ -709,13 +729,20 @@ spv::BuiltIn TGlslangToSpvTraverser::TranslateBuiltInDecoration(glslang::TBuiltI
return spv::BuiltInCullDistance;
case glslang::EbvViewportIndex:
builder.addCapability(spv::CapabilityMultiViewport);
if (glslangIntermediate->getStage() == EShLangGeometry ||
glslangIntermediate->getStage() == EShLangFragment) {
builder.addCapability(spv::CapabilityMultiViewport);
}
if (glslangIntermediate->getStage() == EShLangVertex ||
glslangIntermediate->getStage() == EShLangTessControl ||
glslangIntermediate->getStage() == EShLangTessEvaluation) {
builder.addIncorporatedExtension(spv::E_SPV_EXT_shader_viewport_index_layer, spv::Spv_1_5);
builder.addCapability(spv::CapabilityShaderViewportIndexLayerEXT);
if (builder.getSpvVersion() < spv::Spv_1_5) {
builder.addIncorporatedExtension(spv::E_SPV_EXT_shader_viewport_index_layer, spv::Spv_1_5);
builder.addCapability(spv::CapabilityShaderViewportIndexLayerEXT);
}
else
builder.addCapability(spv::CapabilityShaderViewportIndex);
}
return spv::BuiltInViewportIndex;
@ -734,13 +761,19 @@ spv::BuiltIn TGlslangToSpvTraverser::TranslateBuiltInDecoration(glslang::TBuiltI
if (glslangIntermediate->getStage() == EShLangMeshNV) {
return spv::BuiltInLayer;
}
builder.addCapability(spv::CapabilityGeometry);
if (glslangIntermediate->getStage() == EShLangGeometry ||
glslangIntermediate->getStage() == EShLangFragment) {
builder.addCapability(spv::CapabilityGeometry);
}
if (glslangIntermediate->getStage() == EShLangVertex ||
glslangIntermediate->getStage() == EShLangTessControl ||
glslangIntermediate->getStage() == EShLangTessEvaluation) {
builder.addIncorporatedExtension(spv::E_SPV_EXT_shader_viewport_index_layer, spv::Spv_1_5);
builder.addCapability(spv::CapabilityShaderViewportIndexLayerEXT);
if (builder.getSpvVersion() < spv::Spv_1_5) {
builder.addIncorporatedExtension(spv::E_SPV_EXT_shader_viewport_index_layer, spv::Spv_1_5);
builder.addCapability(spv::CapabilityShaderViewportIndexLayerEXT);
} else
builder.addCapability(spv::CapabilityShaderLayer);
}
return spv::BuiltInLayer;
@ -769,6 +802,16 @@ spv::BuiltIn TGlslangToSpvTraverser::TranslateBuiltInDecoration(glslang::TBuiltI
builder.addCapability(spv::CapabilityStencilExportEXT);
return spv::BuiltInFragStencilRefEXT;
case glslang::EbvShadingRateKHR:
builder.addExtension(spv::E_SPV_KHR_fragment_shading_rate);
builder.addCapability(spv::CapabilityFragmentShadingRateKHR);
return spv::BuiltInShadingRateKHR;
case glslang::EbvPrimitiveShadingRateKHR:
builder.addExtension(spv::E_SPV_KHR_fragment_shading_rate);
builder.addCapability(spv::CapabilityFragmentShadingRateKHR);
return spv::BuiltInPrimitiveShadingRateKHR;
case glslang::EbvInvocationId: return spv::BuiltInInvocationId;
case glslang::EbvTessLevelInner: return spv::BuiltInTessLevelInner;
case glslang::EbvTessLevelOuter: return spv::BuiltInTessLevelOuter;
@ -963,7 +1006,17 @@ spv::BuiltIn TGlslangToSpvTraverser::TranslateBuiltInDecoration(glslang::TBuiltI
case glslang::EbvInstanceCustomIndex:
return spv::BuiltInInstanceCustomIndexKHR;
case glslang::EbvHitT:
return spv::BuiltInHitTKHR;
{
// this is a GLSL alias of RayTmax
// in SPV_NV_ray_tracing it has a dedicated builtin
// but in SPV_KHR_ray_tracing it gets mapped to RayTmax
auto& extensions = glslangIntermediate->getRequestedExtensions();
if (extensions.find("GL_NV_ray_tracing") != extensions.end()) {
return spv::BuiltInHitTNV;
} else {
return spv::BuiltInRayTmaxKHR;
}
}
case glslang::EbvHitKind:
return spv::BuiltInHitKindKHR;
case glslang::EbvObjectToWorld:
@ -1071,6 +1124,10 @@ spv::ImageFormat TGlslangToSpvTraverser::TranslateImageFormat(const glslang::TTy
builder.addCapability(spv::CapabilityStorageImageExtendedFormats);
break;
case glslang::ElfR64ui:
case glslang::ElfR64i:
builder.addExtension(spv::E_SPV_EXT_shader_image_int64);
builder.addCapability(spv::CapabilityInt64ImageEXT);
default:
break;
}
@ -1117,6 +1174,8 @@ spv::ImageFormat TGlslangToSpvTraverser::TranslateImageFormat(const glslang::TTy
case glslang::ElfRg8ui: return spv::ImageFormatRg8ui;
case glslang::ElfR16ui: return spv::ImageFormatR16ui;
case glslang::ElfR8ui: return spv::ImageFormatR8ui;
case glslang::ElfR64ui: return spv::ImageFormatR64ui;
case glslang::ElfR64i: return spv::ImageFormatR64i;
default: return spv::ImageFormatMax;
}
}
@ -1187,7 +1246,7 @@ spv::LoopControlMask TGlslangToSpvTraverser::TranslateLoopControl(const glslang:
spv::StorageClass TGlslangToSpvTraverser::TranslateStorageClass(const glslang::TType& type)
{
if (type.getBasicType() == glslang::EbtRayQuery)
return spv::StorageClassFunction;
return spv::StorageClassPrivate;
if (type.getQualifier().isPipeInput())
return spv::StorageClassInput;
if (type.getQualifier().isPipeOutput())
@ -1353,6 +1412,8 @@ void InheritQualifiers(glslang::TQualifier& child, const glslang::TQualifier& pa
if (parent.writeonly)
child.writeonly = true;
#endif
if (parent.nonUniform)
child.nonUniform = true;
}
bool HasNonLayoutQualifiers(const glslang::TType& type, const glslang::TQualifier& qualifier)
@ -1454,7 +1515,7 @@ TGlslangToSpvTraverser::TGlslangToSpvTraverser(unsigned int spvVersion,
}
if (glslangIntermediate->getLayoutPrimitiveCulling()) {
builder.addCapability(spv::CapabilityRayTraversalPrimitiveCullingProvisionalKHR);
builder.addCapability(spv::CapabilityRayTraversalPrimitiveCullingKHR);
}
unsigned int mode;
@ -1621,7 +1682,7 @@ TGlslangToSpvTraverser::TGlslangToSpvTraverser(unsigned int spvVersion,
{
auto& extensions = glslangIntermediate->getRequestedExtensions();
if (extensions.find("GL_NV_ray_tracing") == extensions.end()) {
builder.addCapability(spv::CapabilityRayTracingProvisionalKHR);
builder.addCapability(spv::CapabilityRayTracingKHR);
builder.addExtension("SPV_KHR_ray_tracing");
}
else {
@ -1710,6 +1771,12 @@ void TGlslangToSpvTraverser::visitSymbol(glslang::TIntermSymbol* symbol)
if (symbol->getType().getQualifier().isSpecConstant())
spec_constant_op_mode_setter.turnOnSpecConstantOpMode();
#ifdef ENABLE_HLSL
// Skip symbol handling if it is string-typed
if (symbol->getBasicType() == glslang::EbtString)
return;
#endif
// getSymbolId() will set up all the IO decorations on the first call.
// Formal function parameters were mapped during makeFunctions().
spv::Id id = getSymbolId(symbol);
@ -1852,9 +1919,11 @@ bool TGlslangToSpvTraverser::visitBinary(glslang::TVisit /* visit */, glslang::T
spv::Id leftRValue = accessChainLoad(node->getLeft()->getType());
// do the operation
spv::Builder::AccessChain::CoherentFlags coherentFlags = TranslateCoherent(node->getLeft()->getType());
coherentFlags |= TranslateCoherent(node->getRight()->getType());
OpDecorations decorations = { TranslatePrecisionDecoration(node->getOperationPrecision()),
TranslateNoContractionDecoration(node->getType().getQualifier()),
TranslateNonUniformDecoration(node->getType().getQualifier()) };
TranslateNonUniformDecoration(coherentFlags) };
rValue = createBinaryOperation(node->getOp(), decorations,
convertGlslangToSpvType(node->getType()), leftRValue, rValue,
node->getType().getBasicType());
@ -1885,13 +1954,16 @@ bool TGlslangToSpvTraverser::visitBinary(glslang::TVisit /* visit */, glslang::T
if (! node->getLeft()->getType().isArray() &&
node->getLeft()->getType().isVector() &&
node->getOp() == glslang::EOpIndexDirect) {
// Swizzle is uniform so propagate uniform into access chain
spv::Builder::AccessChain::CoherentFlags coherentFlags = TranslateCoherent(node->getLeft()->getType());
coherentFlags.nonUniform = 0;
// This is essentially a hard-coded vector swizzle of size 1,
// so short circuit the access-chain stuff with a swizzle.
std::vector<unsigned> swizzle;
swizzle.push_back(glslangIndex);
int dummySize;
builder.accessChainPushSwizzle(swizzle, convertGlslangToSpvType(node->getLeft()->getType()),
TranslateCoherent(node->getLeft()->getType()),
coherentFlags,
glslangIntermediate->getBaseAlignmentScalar(
node->getLeft()->getType(), dummySize));
} else {
@ -1922,9 +1994,14 @@ bool TGlslangToSpvTraverser::visitBinary(glslang::TVisit /* visit */, glslang::T
}
}
// Struct reference propagates uniform lvalue
spv::Builder::AccessChain::CoherentFlags coherentFlags =
TranslateCoherent(node->getLeft()->getType());
coherentFlags.nonUniform = 0;
// normal case for indexing array or structure or block
builder.accessChainPush(builder.makeIntConstant(spvIndex),
TranslateCoherent(node->getLeft()->getType()),
coherentFlags,
node->getLeft()->getType().getBufferReferenceAlignment());
// Add capabilities here for accessing PointSize and clip/cull distance.
@ -1958,15 +2035,20 @@ bool TGlslangToSpvTraverser::visitBinary(glslang::TVisit /* visit */, glslang::T
// restore the saved access chain
builder.setAccessChain(partial);
// Only if index is nonUniform should we propagate nonUniform into access chain
spv::Builder::AccessChain::CoherentFlags index_flags = TranslateCoherent(node->getRight()->getType());
spv::Builder::AccessChain::CoherentFlags coherent_flags = TranslateCoherent(node->getLeft()->getType());
coherent_flags.nonUniform = index_flags.nonUniform;
if (! node->getLeft()->getType().isArray() && node->getLeft()->getType().isVector()) {
int dummySize;
builder.accessChainPushComponent(index, convertGlslangToSpvType(node->getLeft()->getType()),
TranslateCoherent(node->getLeft()->getType()),
builder.accessChainPushComponent(
index, convertGlslangToSpvType(node->getLeft()->getType()), coherent_flags,
glslangIntermediate->getBaseAlignmentScalar(node->getLeft()->getType(),
dummySize));
} else
builder.accessChainPush(index, TranslateCoherent(node->getLeft()->getType()),
node->getLeft()->getType().getBufferReferenceAlignment());
builder.accessChainPush(index, coherent_flags,
node->getLeft()->getType().getBufferReferenceAlignment());
}
return false;
case glslang::EOpVectorSwizzle:
@ -2050,8 +2132,9 @@ std::pair<spv::Id, spv::Id> TGlslangToSpvTraverser::getForcedType(glslang::TBuil
// these require changing a 64-bit scalar -> a vector of 32-bit components
if (glslangType.isVector())
break;
std::pair<spv::Id, spv::Id> ret(builder.makeVectorType(builder.makeUintType(32), 4),
builder.makeUintType(64));
spv::Id ivec4_type = builder.makeVectorType(builder.makeUintType(32), 4);
spv::Id uint64_type = builder.makeUintType(64);
std::pair<spv::Id, spv::Id> ret(ivec4_type, uint64_type);
return ret;
}
// There are no SPIR-V builtins defined for these and map onto original non-transposed
@ -2090,7 +2173,7 @@ spv::Id TGlslangToSpvTraverser::translateForcedType(spv::Id object)
// handle 32-bit v.xy* -> 64-bit
builder.clearAccessChain();
builder.setAccessChainLValue(object);
object = builder.accessChainLoad(spv::NoPrecision, spv::DecorationMax, objectTypeId);
object = builder.accessChainLoad(spv::NoPrecision, spv::DecorationMax, spv::DecorationMax, objectTypeId);
std::vector<spv::Id> components;
components.push_back(builder.createCompositeExtract(object, builder.getContainedTypeId(objectTypeId), 0));
components.push_back(builder.createCompositeExtract(object, builder.getContainedTypeId(objectTypeId), 1));
@ -2106,7 +2189,7 @@ spv::Id TGlslangToSpvTraverser::translateForcedType(spv::Id object)
// and we insert a transpose after loading the original non-transposed builtins
builder.clearAccessChain();
builder.setAccessChainLValue(object);
object = builder.accessChainLoad(spv::NoPrecision, spv::DecorationMax, objectTypeId);
object = builder.accessChainLoad(spv::NoPrecision, spv::DecorationMax, spv::DecorationMax, objectTypeId);
return builder.createUnaryOp(spv::OpTranspose, desiredTypeId, object);
} else {
@ -2292,7 +2375,8 @@ bool TGlslangToSpvTraverser::visitUnary(glslang::TVisit /* visit */, glslang::TI
// The result of operation is always stored, but conditionally the
// consumed result. The consumed result is always an r-value.
builder.accessChainStore(result);
builder.accessChainStore(result,
TranslateNonUniformDecoration(builder.getAccessChain().coherentFlags));
builder.clearAccessChain();
if (node->getOp() == glslang::EOpPreIncrement ||
node->getOp() == glslang::EOpPreDecrement)
@ -2421,6 +2505,10 @@ bool TGlslangToSpvTraverser::visitAggregate(glslang::TVisit visit, glslang::TInt
// anything else gets there, so visit out of order, doing them all now.
makeGlobalInitializers(node->getAsAggregate()->getSequence());
// Pre-process linker objects for ray tracing stages
if (glslangIntermediate->isRayTracingStage())
collectRayTracingLinkerObjects();
// Initializers are done, don't want to visit again, but functions and link objects need to be processed,
// so do them manually.
visitFunctions(node->getAsAggregate()->getSequence());
@ -2611,6 +2699,10 @@ bool TGlslangToSpvTraverser::visitAggregate(glslang::TVisit visit, glslang::TInt
else
constructed = builder.createConstructor(precision, arguments, resultType());
if (node->getType().getQualifier().isNonUniform()) {
builder.addDecoration(constructed, spv::DecorationNonUniformEXT);
}
builder.clearAccessChain();
builder.setAccessChainRValue(constructed);
@ -2726,10 +2818,12 @@ bool TGlslangToSpvTraverser::visitAggregate(glslang::TVisit visit, glslang::TInt
binOp = node->getOp();
break;
case glslang::EOpIgnoreIntersection:
case glslang::EOpTerminateRay:
case glslang::EOpTrace:
case glslang::EOpExecuteCallable:
case glslang::EOpIgnoreIntersectionNV:
case glslang::EOpTerminateRayNV:
case glslang::EOpTraceNV:
case glslang::EOpTraceKHR:
case glslang::EOpExecuteCallableNV:
case glslang::EOpExecuteCallableKHR:
case glslang::EOpWritePackedPrimitiveIndices4x8NV:
noReturnValue = true;
break;
@ -2738,7 +2832,7 @@ bool TGlslangToSpvTraverser::visitAggregate(glslang::TVisit visit, glslang::TInt
case glslang::EOpRayQueryGenerateIntersection:
case glslang::EOpRayQueryConfirmIntersection:
builder.addExtension("SPV_KHR_ray_query");
builder.addCapability(spv::CapabilityRayQueryProvisionalKHR);
builder.addCapability(spv::CapabilityRayQueryKHR);
noReturnValue = true;
break;
case glslang::EOpRayQueryProceed:
@ -2761,7 +2855,7 @@ bool TGlslangToSpvTraverser::visitAggregate(glslang::TVisit visit, glslang::TInt
case glslang::EOpRayQueryGetIntersectionObjectToWorld:
case glslang::EOpRayQueryGetIntersectionWorldToObject:
builder.addExtension("SPV_KHR_ray_query");
builder.addCapability(spv::CapabilityRayQueryProvisionalKHR);
builder.addCapability(spv::CapabilityRayQueryKHR);
break;
case glslang::EOpCooperativeMatrixLoad:
case glslang::EOpCooperativeMatrixStore:
@ -3014,11 +3108,18 @@ bool TGlslangToSpvTraverser::visitAggregate(glslang::TVisit visit, glslang::TInt
)) {
bool cond = glslangOperands[arg]->getAsConstantUnion()->getConstArray()[0].getBConst();
operands.push_back(builder.makeIntConstant(cond ? 1 : 0));
}
else {
} else if ((arg == 10 && glslangOp == glslang::EOpTraceKHR) ||
(arg == 1 && glslangOp == glslang::EOpExecuteCallableKHR)) {
const int opdNum = glslangOp == glslang::EOpTraceKHR ? 10 : 1;
const int set = glslangOp == glslang::EOpTraceKHR ? 0 : 1;
const int location = glslangOperands[opdNum]->getAsConstantUnion()->getConstArray()[0].getUConst();
auto itNode = locationToSymbol[set].find(location);
visitSymbol(itNode->second);
spv::Id symId = getSymbolId(itNode->second);
operands.push_back(symId);
} else {
operands.push_back(accessChainLoad(glslangOperands[arg]->getAsTyped()->getType()));
}
}
}
}
@ -3089,7 +3190,8 @@ bool TGlslangToSpvTraverser::visitAggregate(glslang::TVisit visit, glslang::TInt
for (unsigned int i = 0; i < temporaryLvalues.size(); ++i) {
builder.setAccessChain(complexLvalues[i]);
builder.accessChainStore(builder.createLoad(temporaryLvalues[i], spv::NoPrecision));
builder.accessChainStore(builder.createLoad(temporaryLvalues[i], spv::NoPrecision),
TranslateNonUniformDecoration(complexLvalues[i].coherentFlags));
}
}
@ -3420,7 +3522,11 @@ bool TGlslangToSpvTraverser::visitBranch(glslang::TVisit /* visit */, glslang::T
switch (node->getFlowOp()) {
case glslang::EOpKill:
builder.makeDiscard();
builder.makeStatementTerminator(spv::OpKill, "post-discard");
break;
case glslang::EOpTerminateInvocation:
builder.addExtension(spv::E_SPV_KHR_terminate_invocation);
builder.makeStatementTerminator(spv::OpTerminateInvocation, "post-terminate-invocation");
break;
case glslang::EOpBreak:
if (breakForLoop.top())
@ -3457,6 +3563,12 @@ bool TGlslangToSpvTraverser::visitBranch(glslang::TVisit /* visit */, glslang::T
builder.addExtension(spv::E_SPV_EXT_demote_to_helper_invocation);
builder.addCapability(spv::CapabilityDemoteToHelperInvocationEXT);
break;
case glslang::EOpTerminateRayKHR:
builder.makeStatementTerminator(spv::OpTerminateRayKHR, "post-terminateRayKHR");
break;
case glslang::EOpIgnoreIntersectionKHR:
builder.makeStatementTerminator(spv::OpIgnoreIntersectionKHR, "post-ignoreIntersectionKHR");
break;
#endif
default:
@ -3564,6 +3676,12 @@ spv::Id TGlslangToSpvTraverser::getSampledType(const glslang::TSampler& sampler)
builder.addExtension(spv::E_SPV_AMD_gpu_shader_half_float_fetch);
builder.addCapability(spv::CapabilityFloat16ImageAMD);
return builder.makeFloatType(16);
case glslang::EbtInt64: return builder.makeIntType(64);
builder.addExtension(spv::E_SPV_EXT_shader_image_int64);
builder.addCapability(spv::CapabilityFloat16ImageAMD);
case glslang::EbtUint64: return builder.makeUintType(64);
builder.addExtension(spv::E_SPV_EXT_shader_image_int64);
builder.addCapability(spv::CapabilityFloat16ImageAMD);
#endif
default:
assert(0);
@ -3670,10 +3788,36 @@ spv::Id TGlslangToSpvTraverser::convertGlslangToSpvType(const glslang::TType& ty
spvType = builder.makeUintType(32);
break;
case glslang::EbtAccStruct:
switch (glslangIntermediate->getStage()) {
case EShLangRayGen:
case EShLangIntersect:
case EShLangAnyHit:
case EShLangClosestHit:
case EShLangMiss:
case EShLangCallable:
// these all should have the RayTracingNV/KHR capability already
break;
default:
{
auto& extensions = glslangIntermediate->getRequestedExtensions();
if (extensions.find("GL_EXT_ray_query") != extensions.end()) {
builder.addExtension(spv::E_SPV_KHR_ray_query);
builder.addCapability(spv::CapabilityRayQueryKHR);
}
}
break;
}
spvType = builder.makeAccelerationStructureType();
break;
case glslang::EbtRayQuery:
spvType = builder.makeRayQueryType();
{
auto& extensions = glslangIntermediate->getRequestedExtensions();
if (extensions.find("GL_EXT_ray_query") != extensions.end()) {
builder.addExtension(spv::E_SPV_KHR_ray_query);
builder.addCapability(spv::CapabilityRayQueryKHR);
}
spvType = builder.makeRayQueryType();
}
break;
case glslang::EbtReference:
{
@ -3929,6 +4073,8 @@ void TGlslangToSpvTraverser::decorateStructType(const glslang::TType& type,
// Name and decorate the non-hidden members
int offset = -1;
int locationOffset = 0; // for use within the members of this struct
bool memberLocationInvalid = type.isArrayOfArrays() ||
(type.isArray() && (type.getQualifier().isArrayedIo(glslangIntermediate->getStage()) == false));
for (int i = 0; i < (int)glslangMembers->size(); i++) {
glslang::TType& glslangMember = *(*glslangMembers)[i].type;
int member = i;
@ -3981,7 +4127,7 @@ void TGlslangToSpvTraverser::decorateStructType(const glslang::TType& type,
// just track whether a member needs to be decorated.
// Ignore member locations if the container is an array, as that's
// ill-specified and decisions have been made to not allow this.
if (! type.isArray() && memberQualifier.hasLocation())
if (!memberLocationInvalid && memberQualifier.hasLocation())
builder.addMemberDecoration(spvType, member, spv::DecorationLocation, memberQualifier.layoutLocation);
if (qualifier.hasLocation()) // track for upcoming inheritance
@ -4087,6 +4233,7 @@ spv::Id TGlslangToSpvTraverser::accessChainLoad(const glslang::TType& type)
alignment |= type.getBufferReferenceAlignment();
spv::Id loadedId = builder.accessChainLoad(TranslatePrecisionDecoration(type),
TranslateNonUniformDecoration(builder.getAccessChain().coherentFlags),
TranslateNonUniformDecoration(type.getQualifier()),
nominalTypeId,
spv::MemoryAccessMask(TranslateMemoryAccess(coherentFlags) & ~spv::MemoryAccessMakePointerAvailableKHRMask),
@ -4154,7 +4301,7 @@ void TGlslangToSpvTraverser::accessChainStore(const glslang::TType& type, spv::I
unsigned int alignment = builder.getAccessChain().alignment;
alignment |= type.getBufferReferenceAlignment();
builder.accessChainStore(rvalue,
builder.accessChainStore(rvalue, TranslateNonUniformDecoration(builder.getAccessChain().coherentFlags),
spv::MemoryAccessMask(TranslateMemoryAccess(coherentFlags) &
~spv::MemoryAccessMakePointerVisibleKHRMask),
TranslateMemoryScope(coherentFlags), alignment);
@ -4542,7 +4689,39 @@ void TGlslangToSpvTraverser::makeGlobalInitializers(const glslang::TIntermSequen
}
}
}
// Walk over all linker objects to create a map for payload and callable data linker objects
// and their location to be used during codegen for OpTraceKHR and OpExecuteCallableKHR
// This is done here since it is possible that these linker objects are not referenced in the AST
void TGlslangToSpvTraverser::collectRayTracingLinkerObjects()
{
glslang::TIntermAggregate* linkerObjects = glslangIntermediate->findLinkerObjects();
for (auto& objSeq : linkerObjects->getSequence()) {
auto objNode = objSeq->getAsSymbolNode();
if (objNode != nullptr) {
if (objNode->getQualifier().hasLocation()) {
unsigned int location = objNode->getQualifier().layoutLocation;
auto st = objNode->getQualifier().storage;
int set;
switch (st)
{
case glslang::EvqPayload:
case glslang::EvqPayloadIn:
set = 0;
break;
case glslang::EvqCallableData:
case glslang::EvqCallableDataIn:
set = 1;
break;
default:
set = -1;
}
if (set != -1)
locationToSymbol[set].insert(std::make_pair(location, objNode));
}
}
}
}
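
As a side note, here is a minimal standalone sketch of the lookup this map enables (a hypothetical `Symbol` stand-in for `TIntermSymbol`; the slot indices follow the switch above, 0 for payloads and 1 for callable data):

```cpp
#include <unordered_map>

struct Symbol { unsigned spvId = 0; };  // hypothetical stand-in for glslang::TIntermSymbol

// Mirrors locationToSymbol[2] above: slot 0 holds ray payloads, slot 1 callable data.
std::unordered_map<unsigned, Symbol*> locationToSymbol[2];

// The last operand of traceRayEXT()/executeCallableEXT() is only a constant location,
// so codegen has to recover the variable from this map even when the AST never
// references it by name.
Symbol* findLinkerObject(bool callable, unsigned location) {
    auto& slot = locationToSymbol[callable ? 1 : 0];
    auto it = slot.find(location);
    return it == slot.end() ? nullptr : it->second;
}
```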
// Process all the functions, while skipping initializers.
void TGlslangToSpvTraverser::visitFunctions(const glslang::TIntermSequence& glslFunctions)
{
@ -4686,8 +4865,10 @@ void TGlslangToSpvTraverser::translateArguments(const glslang::TIntermAggregate&
}
if (lvalue) {
arguments.push_back(builder.accessChainGetLValue());
spv::Id lvalue_id = builder.accessChainGetLValue();
arguments.push_back(lvalue_id);
lvalueCoherentFlags = builder.getAccessChain().coherentFlags;
builder.addDecoration(lvalue_id, TranslateNonUniformDecoration(lvalueCoherentFlags));
lvalueCoherentFlags |= TranslateCoherent(glslangArguments[i]->getAsTyped()->getType());
} else
#endif
@ -4750,12 +4931,15 @@ spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermO
const bool isUnsignedResult = node->getType().getBasicType() == glslang::EbtUint;
if (builder.isSampledImage(params.sampler) &&
((cracked.query && node->getOp() != glslang::EOpTextureQueryLod) || cracked.fragMask || cracked.fetch)) {
params.sampler = builder.createUnaryOp(spv::OpImage, builder.getImageType(params.sampler), params.sampler);
if (imageType.getQualifier().isNonUniform()) {
builder.addDecoration(params.sampler, spv::DecorationNonUniformEXT);
}
}
// Check for queries
if (cracked.query) {
// OpImageQueryLod works on a sampled image, for other queries the image has to be extracted first
if (node->getOp() != glslang::EOpTextureQueryLod && builder.isSampledImage(params.sampler))
params.sampler = builder.createUnaryOp(spv::OpImage, builder.getImageType(params.sampler), params.sampler);
switch (node->getOp()) {
case glslang::EOpImageQuerySize:
case glslang::EOpTextureQuerySize:
@ -5009,10 +5193,6 @@ spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermO
auto opIt = arguments.begin();
std::vector<spv::Id> operands;
// Extract the image if necessary
if (builder.isSampledImage(params.sampler))
params.sampler = builder.createUnaryOp(spv::OpImage, builder.getImageType(params.sampler), params.sampler);
operands.push_back(params.sampler);
++opIt;
@ -5073,13 +5253,6 @@ spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermO
bias = true;
}
// See if the sampler param should really be just the SPV image part
if (cracked.fetch) {
// a fetch needs to have the image extracted first
if (builder.isSampledImage(params.sampler))
params.sampler = builder.createUnaryOp(spv::OpImage, builder.getImageType(params.sampler), params.sampler);
}
#ifndef GLSLANG_WEB
if (cracked.gather) {
const auto& sourceExtensions = glslangIntermediate->getRequestedExtensions();
@ -5239,7 +5412,7 @@ spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermO
builder.accessChainPush(builder.makeIntConstant(i), flags, 0);
builder.accessChainStore(builder.createCompositeExtract(res, builder.getContainedTypeId(resType, i+1),
i+1));
i+1), TranslateNonUniformDecoration(imageType.getQualifier()));
}
return builder.createCompositeExtract(res, resultType(), 0);
}
@ -5375,6 +5548,7 @@ spv::Id TGlslangToSpvTraverser::handleUserFunctionCall(const glslang::TIntermAgg
// 3. Make the call.
spv::Id result = builder.createFunctionCall(function, spvArgs);
builder.setPrecision(result, TranslatePrecisionDecoration(node->getType()));
builder.addDecoration(result, TranslateNonUniformDecoration(node->getType().getQualifier()));
// 4. Copy back any "out" arguments.
lValueCount = 0;
@ -5384,6 +5558,7 @@ spv::Id TGlslangToSpvTraverser::handleUserFunctionCall(const glslang::TIntermAgg
else if (writableParam(qualifiers[a])) {
if (qualifiers[a] == glslang::EvqOut || qualifiers[a] == glslang::EvqInOut) {
spv::Id copy = builder.createLoad(spvArgs[a], spv::NoPrecision);
builder.addDecoration(copy, TranslateNonUniformDecoration(argTypes[a]->getQualifier()));
builder.setAccessChain(lValues[lValueCount]);
multiTypeStore(*argTypes[a], copy);
}
@ -6171,6 +6346,11 @@ spv::Id TGlslangToSpvTraverser::createUnaryOperation(glslang::TOperator op, OpDe
case glslang::EOpConstructReference:
unaryOp = spv::OpBitcast;
break;
case glslang::EOpConvUint64ToAccStruct:
case glslang::EOpConvUvec2ToAccStruct:
unaryOp = spv::OpConvertUToAccelerationStructureKHR;
break;
#endif
case glslang::EOpCopyObject:
@ -7757,10 +7937,16 @@ spv::Id TGlslangToSpvTraverser::createMiscOperation(glslang::TOperator op, spv::
typeId = builder.makeBoolType();
opCode = spv::OpReportIntersectionKHR;
break;
case glslang::EOpTrace:
case glslang::EOpTraceNV:
builder.createNoResultOp(spv::OpTraceNV, operands);
return 0;
case glslang::EOpTraceKHR:
builder.createNoResultOp(spv::OpTraceRayKHR, operands);
return 0;
case glslang::EOpExecuteCallable:
case glslang::EOpExecuteCallableNV:
builder.createNoResultOp(spv::OpExecuteCallableNV, operands);
return 0;
case glslang::EOpExecuteCallableKHR:
builder.createNoResultOp(spv::OpExecuteCallableKHR, operands);
return 0;
@ -8048,11 +8234,11 @@ spv::Id TGlslangToSpvTraverser::createNoArgOperation(glslang::TOperator op, spv:
spv::Id id = builder.createBuiltinCall(typeId, getExtBuiltins(spv::E_SPV_AMD_gcn_shader), spv::TimeAMD, args);
return builder.setPrecision(id, precision);
}
case glslang::EOpIgnoreIntersection:
builder.createNoResultOp(spv::OpIgnoreIntersectionKHR);
case glslang::EOpIgnoreIntersectionNV:
builder.createNoResultOp(spv::OpIgnoreIntersectionNV);
return 0;
case glslang::EOpTerminateRay:
builder.createNoResultOp(spv::OpTerminateRayKHR);
case glslang::EOpTerminateRayNV:
builder.createNoResultOp(spv::OpTerminateRayNV);
return 0;
case glslang::EOpRayQueryInitialize:
builder.createNoResultOp(spv::OpRayQueryInitializeKHR);
@ -8180,7 +8366,8 @@ spv::Id TGlslangToSpvTraverser::getSymbolId(const glslang::TIntermSymbol* symbol
}
#ifndef GLSLANG_WEB
if (symbol->getType().isImage()) {
// Subgroup builtins which have input storage class are volatile for ray tracing stages.
if (symbol->getType().isImage() || symbol->getQualifier().isPipeInput()) {
std::vector<spv::Decoration> memory;
TranslateMemoryDecoration(symbol->getType().getQualifier(), memory,
glslangIntermediate->usingVulkanMemoryModel());
@ -8188,9 +8375,6 @@ spv::Id TGlslangToSpvTraverser::getSymbolId(const glslang::TIntermSymbol* symbol
builder.addDecoration(id, memory[i]);
}
// nonuniform
builder.addDecoration(id, TranslateNonUniformDecoration(symbol->getType().getQualifier()));
if (builtIn == spv::BuiltInSampleMask) {
spv::Decoration decoration;
// GL_NV_sample_mask_override_coverage extension


@ -830,7 +830,15 @@ namespace spv {
[&](spv::Id& id) {
if (thisOpCode != spv::OpNop) {
++idCounter;
const std::uint32_t hashval = opCounter[thisOpCode] * thisOpCode * 50047 + idCounter + fnId * 117;
const std::uint32_t hashval =
// Explicitly cast operands to unsigned int to avoid integer
// promotion to signed int followed by integer overflow,
// which would result in undefined behavior.
static_cast<unsigned int>(opCounter[thisOpCode])
* thisOpCode
* 50047
+ idCounter
+ static_cast<unsigned int>(fnId) * 117;
if (isOldIdUnmapped(id))
localId(id, nextUnusedId(hashval % softTypeIdLimit + firstMappedID));
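
A hedged standalone illustration of the signedness issue the casts above address (not part of the remapper itself):

```cpp
#include <cstdint>

// Multiplying two signed ints past INT_MAX is undefined behavior; the same arithmetic
// carried out in unsigned int wraps modulo 2^32, which is well defined. Casting the
// first operand makes every subsequent operation in the expression unsigned.
std::uint32_t mixHash(int opCount, int opCode, int idCounter, int fnId) {
    return static_cast<unsigned int>(opCount)
               * static_cast<unsigned int>(opCode)
               * 50047u
           + static_cast<unsigned int>(idCounter)
           + static_cast<unsigned int>(fnId) * 117u;
}
```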


@ -621,13 +621,13 @@ Id Builder::makeAccelerationStructureType()
Id Builder::makeRayQueryType()
{
Instruction *type;
if (groupedTypes[OpTypeRayQueryProvisionalKHR].size() == 0) {
type = new Instruction(getUniqueId(), NoType, OpTypeRayQueryProvisionalKHR);
groupedTypes[OpTypeRayQueryProvisionalKHR].push_back(type);
if (groupedTypes[OpTypeRayQueryKHR].size() == 0) {
type = new Instruction(getUniqueId(), NoType, OpTypeRayQueryKHR);
groupedTypes[OpTypeRayQueryKHR].push_back(type);
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
module.mapInstruction(type);
} else {
type = groupedTypes[OpTypeRayQueryProvisionalKHR].back();
type = groupedTypes[OpTypeRayQueryKHR].back();
}
return type->getResultId();
@ -1447,10 +1447,10 @@ void Builder::leaveFunction()
}
// Comments in header
void Builder::makeDiscard()
void Builder::makeStatementTerminator(spv::Op opcode, const char *name)
{
buildPoint->addInstruction(std::unique_ptr<Instruction>(new Instruction(OpKill)));
createAndSetNoPredecessorBlock("post-discard");
buildPoint->addInstruction(std::unique_ptr<Instruction>(new Instruction(opcode)));
createAndSetNoPredecessorBlock(name);
}
// Comments in header
@ -2761,12 +2761,14 @@ void Builder::accessChainPushSwizzle(std::vector<unsigned>& swizzle, Id preSwizz
}
// Comments in header
void Builder::accessChainStore(Id rvalue, spv::MemoryAccessMask memoryAccess, spv::Scope scope, unsigned int alignment)
void Builder::accessChainStore(Id rvalue, Decoration nonUniform, spv::MemoryAccessMask memoryAccess, spv::Scope scope, unsigned int alignment)
{
assert(accessChain.isRValue == false);
transferAccessChainSwizzle(true);
Id base = collapseAccessChain();
addDecoration(base, nonUniform);
Id source = rvalue;
// dynamic component should be gone
@ -2789,8 +2791,9 @@ void Builder::accessChainStore(Id rvalue, spv::MemoryAccessMask memoryAccess, sp
}
// Comments in header
Id Builder::accessChainLoad(Decoration precision, Decoration nonUniform, Id resultType,
spv::MemoryAccessMask memoryAccess, spv::Scope scope, unsigned int alignment)
Id Builder::accessChainLoad(Decoration precision, Decoration l_nonUniform,
Decoration r_nonUniform, Id resultType, spv::MemoryAccessMask memoryAccess,
spv::Scope scope, unsigned int alignment)
{
Id id;
@ -2854,9 +2857,9 @@ Id Builder::accessChainLoad(Decoration precision, Decoration nonUniform, Id resu
// Buffer accesses need the access chain decorated, and this is where
// loaded image types get decorated. TODO: This should maybe move to
// createImageTextureFunctionCall.
addDecoration(id, nonUniform);
addDecoration(id, l_nonUniform);
id = createLoad(id, precision, memoryAccess, scope, alignment);
addDecoration(id, nonUniform);
addDecoration(id, r_nonUniform);
}
// Done, unless there are swizzles to do
@ -2877,7 +2880,7 @@ Id Builder::accessChainLoad(Decoration precision, Decoration nonUniform, Id resu
if (accessChain.component != NoResult)
id = setPrecision(createVectorExtractDynamic(id, resultType, accessChain.component), precision);
addDecoration(id, nonUniform);
addDecoration(id, r_nonUniform);
return id;
}


@ -357,8 +357,9 @@ public:
// Generate all the code needed to finish up a function.
void leaveFunction();
// Create a discard.
void makeDiscard();
// Create block terminator instruction for certain statements like
// discard, terminate-invocation, terminateRayEXT, or ignoreIntersectionEXT
void makeStatementTerminator(spv::Op opcode, const char *name);
// Create a global or function local or IO variable.
Id createVariable(Decoration precision, StorageClass, Id type, const char* name = nullptr,
@ -624,6 +625,7 @@ public:
CoherentFlags operator |=(const CoherentFlags &other) { return *this; }
#else
bool isVolatile() const { return volatil; }
bool isNonUniform() const { return nonUniform; }
bool anyCoherent() const {
return coherent || devicecoherent || queuefamilycoherent || workgroupcoherent ||
subgroupcoherent || shadercallcoherent;
@ -638,6 +640,7 @@ public:
unsigned nonprivate : 1;
unsigned volatil : 1;
unsigned isImage : 1;
unsigned nonUniform : 1;
void clear() {
coherent = 0;
@ -649,6 +652,7 @@ public:
nonprivate = 0;
volatil = 0;
isImage = 0;
nonUniform = 0;
}
CoherentFlags operator |=(const CoherentFlags &other) {
@ -661,6 +665,7 @@ public:
nonprivate |= other.nonprivate;
volatil |= other.volatil;
isImage |= other.isImage;
nonUniform |= other.nonUniform;
return *this;
}
#endif
@ -721,11 +726,12 @@ public:
}
// use accessChain and swizzle to store value
void accessChainStore(Id rvalue, spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone,
void accessChainStore(Id rvalue, Decoration nonUniform,
spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone,
spv::Scope scope = spv::ScopeMax, unsigned int alignment = 0);
// use accessChain and swizzle to load an r-value
Id accessChainLoad(Decoration precision, Decoration nonUniform, Id ResultType,
Id accessChainLoad(Decoration precision, Decoration l_nonUniform, Decoration r_nonUniform, Id ResultType,
spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, spv::Scope scope = spv::ScopeMax,
unsigned int alignment = 0);


@ -44,7 +44,6 @@
#include "SpvTools.h"
#include "spirv-tools/optimizer.hpp"
#include "spirv-tools/libspirv.h"
namespace glslang {
@ -114,11 +113,18 @@ void OptimizerMesssageConsumer(spv_message_level_t level, const char *source,
out << std::endl;
}
// Use the SPIRV-Tools disassembler to print SPIR-V.
// Use the SPIRV-Tools disassembler to print SPIR-V using a SPV_ENV_UNIVERSAL_1_3 environment.
void SpirvToolsDisassemble(std::ostream& out, const std::vector<unsigned int>& spirv)
{
SpirvToolsDisassemble(out, spirv, spv_target_env::SPV_ENV_UNIVERSAL_1_3);
}
// Use the SPIRV-Tools disassembler to print SPIR-V with a provided SPIR-V environment.
void SpirvToolsDisassemble(std::ostream& out, const std::vector<unsigned int>& spirv,
spv_target_env requested_context)
{
// disassemble
spv_context context = spvContextCreate(SPV_ENV_UNIVERSAL_1_3);
spv_context context = spvContextCreate(requested_context);
spv_text text;
spv_diagnostic diagnostic = nullptr;
spvBinaryToText(context, spirv.data(), spirv.size(),
@ -174,10 +180,7 @@ void SpirvToolsTransform(const glslang::TIntermediate& intermediate, std::vector
// line information into all SPIR-V instructions. This avoids loss of
// information when instructions are deleted or moved. Later, remove
// redundant information to minimize final SPIR-V size.
if (options->generateDebugInfo) {
optimizer.RegisterPass(spvtools::CreatePropagateLineInfoPass());
}
else if (options->stripDebugInfo) {
if (options->stripDebugInfo) {
optimizer.RegisterPass(spvtools::CreateStripDebugInfoPass());
}
optimizer.RegisterPass(spvtools::CreateWrapOpKillPass());
@ -207,9 +210,6 @@ void SpirvToolsTransform(const glslang::TIntermediate& intermediate, std::vector
}
optimizer.RegisterPass(spvtools::CreateAggressiveDCEPass());
optimizer.RegisterPass(spvtools::CreateCFGCleanupPass());
if (options->generateDebugInfo) {
optimizer.RegisterPass(spvtools::CreateRedundantLineInfoElimPass());
}
spvtools::OptimizerOptions spvOptOptions;
optimizer.SetTargetEnv(MapToSpirvToolsEnv(intermediate.getSpv(), logger));


@ -41,9 +41,10 @@
#ifndef GLSLANG_SPV_TOOLS_H
#define GLSLANG_SPV_TOOLS_H
#ifdef ENABLE_OPT
#if ENABLE_OPT
#include <vector>
#include <ostream>
#include "spirv-tools/libspirv.h"
#endif
#include "glslang/MachineIndependent/localintermediate.h"
@ -62,11 +63,15 @@ struct SpvOptions {
bool validate;
};
#ifdef ENABLE_OPT
#if ENABLE_OPT
// Use the SPIRV-Tools disassembler to print SPIR-V.
// Use the SPIRV-Tools disassembler to print SPIR-V using a SPV_ENV_UNIVERSAL_1_3 environment.
void SpirvToolsDisassemble(std::ostream& out, const std::vector<unsigned int>& spirv);
// Use the SPIRV-Tools disassembler to print SPIR-V with a provided SPIR-V environment.
void SpirvToolsDisassemble(std::ostream& out, const std::vector<unsigned int>& spirv,
spv_target_env requested_context);
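
A hedged usage sketch for the new overload (assumes an ENABLE_OPT build; the include path and the caller are illustrative, and the target-environment constant comes from SPIRV-Tools' `libspirv.h`):

```cpp
#include <iostream>
#include <vector>

#include "SPIRV/SpvTools.h"  // assumed include path for this header

// Disassemble a module against SPIR-V 1.5 instead of the default 1.3 environment.
void dumpSpirv15(const std::vector<unsigned int>& spirv) {
    glslang::SpirvToolsDisassemble(std::cout, spirv, spv_target_env::SPV_ENV_UNIVERSAL_1_5);
}
```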
// Apply the SPIRV-Tools validator to generated SPIR-V.
void SpirvToolsValidate(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
spv::SpvBuildLogger*, bool prelegalization);


@ -46,7 +46,6 @@
#include "disassemble.h"
#include "doc.h"
#include "SpvTools.h"
namespace spv {
extern "C" {


@ -372,6 +372,8 @@ const char* BuiltInString(int builtIn)
case 4424: return "BaseVertex";
case 4425: return "BaseInstance";
case 4426: return "DrawIndex";
case 4432: return "PrimitiveShadingRateKHR";
case 4444: return "ShadingRateKHR";
case 5014: return "FragStencilRefEXT";
case 4992: return "BaryCoordNoPerspAMD";
@ -393,7 +395,7 @@ const char* BuiltInString(int builtIn)
case BuiltInRayGeometryIndexKHR: return "RayGeometryIndexKHR";
case BuiltInObjectToWorldKHR: return "ObjectToWorldKHR";
case BuiltInWorldToObjectKHR: return "WorldToObjectKHR";
case BuiltInHitTKHR: return "HitTKHR";
case BuiltInHitTNV: return "HitTNV";
case BuiltInHitKindKHR: return "HitKindKHR";
case BuiltInIncomingRayFlagsKHR: return "IncomingRayFlagsKHR";
case BuiltInViewportMaskNV: return "ViewportMaskNV";
@ -521,6 +523,8 @@ const char* ImageFormatString(int format)
case 37: return "Rg8ui";
case 38: return "R16ui";
case 39: return "R8ui";
case 40: return "R64ui";
case 41: return "R64i";
default:
return "Bad";
@ -911,9 +915,9 @@ const char* CapabilityString(int info)
case CapabilityPerViewAttributesNV: return "PerViewAttributesNV";
case CapabilityGroupNonUniformPartitionedNV: return "GroupNonUniformPartitionedNV";
case CapabilityRayTracingNV: return "RayTracingNV";
case CapabilityRayTracingProvisionalKHR: return "RayTracingProvisionalKHR";
case CapabilityRayQueryProvisionalKHR: return "RayQueryProvisionalKHR";
case CapabilityRayTraversalPrimitiveCullingProvisionalKHR: return "RayTraversalPrimitiveCullingProvisionalKHR";
case CapabilityRayTracingKHR: return "RayTracingKHR";
case CapabilityRayQueryKHR: return "RayQueryKHR";
case CapabilityRayTraversalPrimitiveCullingKHR: return "RayTraversalPrimitiveCullingKHR";
case CapabilityComputeDerivativeGroupQuadsNV: return "ComputeDerivativeGroupQuadsNV";
case CapabilityComputeDerivativeGroupLinearNV: return "ComputeDerivativeGroupLinearNV";
case CapabilityFragmentBarycentricNV: return "FragmentBarycentricNV";
@ -952,8 +956,11 @@ const char* CapabilityString(int info)
case CapabilityFragmentShaderPixelInterlockEXT: return "CapabilityFragmentShaderPixelInterlockEXT";
case CapabilityFragmentShaderShadingRateInterlockEXT: return "CapabilityFragmentShaderShadingRateInterlockEXT";
case CapabilityFragmentShadingRateKHR: return "FragmentShadingRateKHR";
case CapabilityDemoteToHelperInvocationEXT: return "DemoteToHelperInvocationEXT";
case CapabilityShaderClockKHR: return "ShaderClockKHR";
case CapabilityInt64ImageEXT: return "Int64ImageEXT";
case CapabilityIntegerFunctions2INTEL: return "CapabilityIntegerFunctions2INTEL";
@ -1329,6 +1336,8 @@ const char* OpcodeString(int op)
case 365: return "OpGroupNonUniformQuadBroadcast";
case 366: return "OpGroupNonUniformQuadSwap";
case OpTerminateInvocation: return "OpTerminateInvocation";
case 4421: return "OpSubgroupBallotKHR";
case 4422: return "OpSubgroupFirstInvocationKHR";
case 4428: return "OpSubgroupAllKHR";
@ -1355,17 +1364,23 @@ const char* OpcodeString(int op)
case OpDecorateStringGOOGLE: return "OpDecorateStringGOOGLE";
case OpMemberDecorateStringGOOGLE: return "OpMemberDecorateStringGOOGLE";
case OpReportIntersectionKHR: return "OpReportIntersectionKHR";
case OpIgnoreIntersectionNV: return "OpIgnoreIntersectionNV";
case OpIgnoreIntersectionKHR: return "OpIgnoreIntersectionKHR";
case OpTerminateRayNV: return "OpTerminateRayNV";
case OpTerminateRayKHR: return "OpTerminateRayKHR";
case OpTraceNV: return "OpTraceNV";
case OpTraceRayKHR: return "OpTraceRayKHR";
case OpTypeAccelerationStructureKHR: return "OpTypeAccelerationStructureKHR";
case OpExecuteCallableNV: return "OpExecuteCallableNV";
case OpExecuteCallableKHR: return "OpExecuteCallableKHR";
case OpConvertUToAccelerationStructureKHR: return "OpConvertUToAccelerationStructureKHR";
case OpGroupNonUniformPartitionNV: return "OpGroupNonUniformPartitionNV";
case OpReportIntersectionKHR: return "OpReportIntersectionKHR";
case OpIgnoreIntersectionKHR: return "OpIgnoreIntersectionKHR";
case OpTerminateRayKHR: return "OpTerminateRayKHR";
case OpTraceRayKHR: return "OpTraceRayKHR";
case OpTypeAccelerationStructureKHR: return "OpTypeAccelerationStructureKHR";
case OpExecuteCallableKHR: return "OpExecuteCallableKHR";
case OpImageSampleFootprintNV: return "OpImageSampleFootprintNV";
case OpWritePackedPrimitiveIndices4x8NV: return "OpWritePackedPrimitiveIndices4x8NV";
case OpTypeRayQueryProvisionalKHR: return "OpTypeRayQueryProvisionalKHR";
case OpTypeRayQueryKHR: return "OpTypeRayQueryKHR";
case OpRayQueryInitializeKHR: return "OpRayQueryInitializeKHR";
case OpRayQueryTerminateKHR: return "OpRayQueryTerminateKHR";
case OpRayQueryGenerateIntersectionKHR: return "OpRayQueryGenerateIntersectionKHR";
@ -1497,6 +1512,7 @@ void Parameterize()
InstructionDesc[OpBranchConditional].setResultAndType(false, false);
InstructionDesc[OpSwitch].setResultAndType(false, false);
InstructionDesc[OpKill].setResultAndType(false, false);
InstructionDesc[OpTerminateInvocation].setResultAndType(false, false);
InstructionDesc[OpReturn].setResultAndType(false, false);
InstructionDesc[OpReturnValue].setResultAndType(false, false);
InstructionDesc[OpUnreachable].setResultAndType(false, false);
@ -2761,7 +2777,20 @@ void Parameterize()
InstructionDesc[OpTypeAccelerationStructureKHR].setResultAndType(true, false);
InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'NV Acceleration Structure'");
InstructionDesc[OpTraceNV].operands.push(OperandId, "'Acceleration Structure'");
InstructionDesc[OpTraceNV].operands.push(OperandId, "'Ray Flags'");
InstructionDesc[OpTraceNV].operands.push(OperandId, "'Cull Mask'");
InstructionDesc[OpTraceNV].operands.push(OperandId, "'SBT Record Offset'");
InstructionDesc[OpTraceNV].operands.push(OperandId, "'SBT Record Stride'");
InstructionDesc[OpTraceNV].operands.push(OperandId, "'Miss Index'");
InstructionDesc[OpTraceNV].operands.push(OperandId, "'Ray Origin'");
InstructionDesc[OpTraceNV].operands.push(OperandId, "'TMin'");
InstructionDesc[OpTraceNV].operands.push(OperandId, "'Ray Direction'");
InstructionDesc[OpTraceNV].operands.push(OperandId, "'TMax'");
InstructionDesc[OpTraceNV].operands.push(OperandId, "'Payload'");
InstructionDesc[OpTraceNV].setResultAndType(false, false);
InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'Acceleration Structure'");
InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'Ray Flags'");
InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'Cull Mask'");
InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'SBT Record Offset'");
@ -2777,17 +2806,28 @@ void Parameterize()
InstructionDesc[OpReportIntersectionKHR].operands.push(OperandId, "'Hit Parameter'");
InstructionDesc[OpReportIntersectionKHR].operands.push(OperandId, "'Hit Kind'");
InstructionDesc[OpIgnoreIntersectionNV].setResultAndType(false, false);
InstructionDesc[OpIgnoreIntersectionKHR].setResultAndType(false, false);
InstructionDesc[OpTerminateRayNV].setResultAndType(false, false);
InstructionDesc[OpTerminateRayKHR].setResultAndType(false, false);
InstructionDesc[OpExecuteCallableNV].operands.push(OperandId, "SBT Record Index");
InstructionDesc[OpExecuteCallableNV].operands.push(OperandId, "CallableData ID");
InstructionDesc[OpExecuteCallableNV].setResultAndType(false, false);
InstructionDesc[OpExecuteCallableKHR].operands.push(OperandId, "SBT Record Index");
InstructionDesc[OpExecuteCallableKHR].operands.push(OperandId, "CallableData ID");
InstructionDesc[OpExecuteCallableKHR].operands.push(OperandId, "CallableData");
InstructionDesc[OpExecuteCallableKHR].setResultAndType(false, false);
InstructionDesc[OpConvertUToAccelerationStructureKHR].operands.push(OperandId, "Value");
InstructionDesc[OpConvertUToAccelerationStructureKHR].setResultAndType(true, true);
// Ray Query
InstructionDesc[OpTypeAccelerationStructureKHR].setResultAndType(true, false);
InstructionDesc[OpTypeRayQueryProvisionalKHR].setResultAndType(true, false);
InstructionDesc[OpTypeRayQueryKHR].setResultAndType(true, false);
InstructionDesc[OpRayQueryInitializeKHR].operands.push(OperandId, "'RayQuery'");
InstructionDesc[OpRayQueryInitializeKHR].operands.push(OperandId, "'AccelerationS'");


@ -50,11 +50,11 @@ namespace spv {
typedef unsigned int Id;
#define SPV_VERSION 0x10500
#define SPV_REVISION 3
#define SPV_REVISION 4
static const unsigned int MagicNumber = 0x07230203;
static const unsigned int Version = 0x00010500;
static const unsigned int Revision = 3;
static const unsigned int Revision = 4;
static const unsigned int OpCodeMask = 0xffff;
static const unsigned int WordCountShift = 16;
@ -274,6 +274,8 @@ enum ImageFormat {
ImageFormatRg8ui = 37,
ImageFormatR16ui = 38,
ImageFormatR8ui = 39,
ImageFormatR64ui = 40,
ImageFormatR64i = 41,
ImageFormatMax = 0x7fffffff,
};
@ -558,8 +560,10 @@ enum BuiltIn {
BuiltInBaseVertex = 4424,
BuiltInBaseInstance = 4425,
BuiltInDrawIndex = 4426,
BuiltInPrimitiveShadingRateKHR = 4432,
BuiltInDeviceIndex = 4438,
BuiltInViewIndex = 4440,
BuiltInShadingRateKHR = 4444,
BuiltInBaryCoordNoPerspAMD = 4992,
BuiltInBaryCoordNoPerspCentroidAMD = 4993,
BuiltInBaryCoordNoPerspSampleAMD = 4994,
@ -610,7 +614,6 @@ enum BuiltIn {
BuiltInObjectToWorldNV = 5330,
BuiltInWorldToObjectKHR = 5331,
BuiltInWorldToObjectNV = 5331,
BuiltInHitTKHR = 5332,
BuiltInHitTNV = 5332,
BuiltInHitKindKHR = 5333,
BuiltInHitKindNV = 5333,
@ -870,6 +873,7 @@ enum Capability {
CapabilityGroupNonUniformQuad = 68,
CapabilityShaderLayer = 69,
CapabilityShaderViewportIndex = 70,
CapabilityFragmentShadingRateKHR = 4422,
CapabilitySubgroupBallotKHR = 4423,
CapabilityDrawParameters = 4427,
CapabilitySubgroupVoteKHR = 4431,
@ -894,12 +898,15 @@ enum Capability {
CapabilityRoundingModeRTE = 4467,
CapabilityRoundingModeRTZ = 4468,
CapabilityRayQueryProvisionalKHR = 4471,
CapabilityRayTraversalPrimitiveCullingProvisionalKHR = 4478,
CapabilityRayQueryKHR = 4472,
CapabilityRayTraversalPrimitiveCullingKHR = 4478,
CapabilityRayTracingKHR = 4479,
CapabilityFloat16ImageAMD = 5008,
CapabilityImageGatherBiasLodAMD = 5009,
CapabilityFragmentMaskAMD = 5010,
CapabilityStencilExportEXT = 5013,
CapabilityImageReadWriteLodAMD = 5015,
CapabilityInt64ImageEXT = 5016,
CapabilityShaderClockKHR = 5055,
CapabilitySampleMaskOverrideCoverageNV = 5249,
CapabilityGeometryShaderPassthroughNV = 5251,
@ -1024,6 +1031,22 @@ enum RayQueryCandidateIntersectionType {
RayQueryCandidateIntersectionTypeMax = 0x7fffffff,
};
enum FragmentShadingRateShift {
FragmentShadingRateVertical2PixelsShift = 0,
FragmentShadingRateVertical4PixelsShift = 1,
FragmentShadingRateHorizontal2PixelsShift = 2,
FragmentShadingRateHorizontal4PixelsShift = 3,
FragmentShadingRateMax = 0x7fffffff,
};
enum FragmentShadingRateMask {
FragmentShadingRateMaskNone = 0,
FragmentShadingRateVertical2PixelsMask = 0x00000001,
FragmentShadingRateVertical4PixelsMask = 0x00000002,
FragmentShadingRateHorizontal2PixelsMask = 0x00000004,
FragmentShadingRateHorizontal4PixelsMask = 0x00000008,
};
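
As a hedged sketch of how these bits compose (using the `FragmentShadingRateMask` `operator|` overload added near the end of this header; the include name is assumed), a fragment 4 pixels wide and 2 pixels tall combines one horizontal and one vertical flag:

```cpp
#include "spirv.hpp"  // assumed include name for this header

// Encode a 4 (horizontal) x 2 (vertical) pixel fragment shading rate.
spv::FragmentShadingRateMask rate4x2() {
    return spv::FragmentShadingRateHorizontal4PixelsMask |
           spv::FragmentShadingRateVertical2PixelsMask;
}
```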
enum Op {
OpNop = 0,
OpUndef = 1,
@ -1369,13 +1392,19 @@ enum Op {
OpPtrEqual = 401,
OpPtrNotEqual = 402,
OpPtrDiff = 403,
OpTerminateInvocation = 4416,
OpSubgroupBallotKHR = 4421,
OpSubgroupFirstInvocationKHR = 4422,
OpSubgroupAllKHR = 4428,
OpSubgroupAnyKHR = 4429,
OpSubgroupAllEqualKHR = 4430,
OpSubgroupReadInvocationKHR = 4432,
OpTypeRayQueryProvisionalKHR = 4472,
OpTraceRayKHR = 4445,
OpExecuteCallableKHR = 4446,
OpConvertUToAccelerationStructureKHR = 4447,
OpIgnoreIntersectionKHR = 4448,
OpTerminateRayKHR = 4449,
OpTypeRayQueryKHR = 4472,
OpRayQueryInitializeKHR = 4473,
OpRayQueryTerminateKHR = 4474,
OpRayQueryGenerateIntersectionKHR = 4475,
@ -1398,15 +1427,11 @@ enum Op {
OpWritePackedPrimitiveIndices4x8NV = 5299,
OpReportIntersectionKHR = 5334,
OpReportIntersectionNV = 5334,
OpIgnoreIntersectionKHR = 5335,
OpIgnoreIntersectionNV = 5335,
OpTerminateRayKHR = 5336,
OpTerminateRayNV = 5336,
OpTraceNV = 5337,
OpTraceRayKHR = 5337,
OpTypeAccelerationStructureKHR = 5341,
OpTypeAccelerationStructureNV = 5341,
OpExecuteCallableKHR = 5344,
OpExecuteCallableNV = 5344,
OpTypeCooperativeMatrixNV = 5358,
OpCooperativeMatrixLoadNV = 5359,
@ -1939,13 +1964,19 @@ inline void HasResultAndType(Op opcode, bool *hasResult, bool *hasResultType) {
case OpPtrEqual: *hasResult = true; *hasResultType = true; break;
case OpPtrNotEqual: *hasResult = true; *hasResultType = true; break;
case OpPtrDiff: *hasResult = true; *hasResultType = true; break;
case OpTerminateInvocation: *hasResult = false; *hasResultType = false; break;
case OpSubgroupBallotKHR: *hasResult = true; *hasResultType = true; break;
case OpSubgroupFirstInvocationKHR: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAllKHR: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAnyKHR: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAllEqualKHR: *hasResult = true; *hasResultType = true; break;
case OpSubgroupReadInvocationKHR: *hasResult = true; *hasResultType = true; break;
case OpTypeRayQueryProvisionalKHR: *hasResult = true; *hasResultType = false; break;
case OpTraceRayKHR: *hasResult = false; *hasResultType = false; break;
case OpExecuteCallableKHR: *hasResult = false; *hasResultType = false; break;
case OpConvertUToAccelerationStructureKHR: *hasResult = true; *hasResultType = true; break;
case OpIgnoreIntersectionKHR: *hasResult = false; *hasResultType = false; break;
case OpTerminateRayKHR: *hasResult = false; *hasResultType = false; break;
case OpTypeRayQueryKHR: *hasResult = true; *hasResultType = false; break;
case OpRayQueryInitializeKHR: *hasResult = false; *hasResultType = false; break;
case OpRayQueryTerminateKHR: *hasResult = false; *hasResultType = false; break;
case OpRayQueryGenerateIntersectionKHR: *hasResult = false; *hasResultType = false; break;
@ -2164,6 +2195,7 @@ inline MemorySemanticsMask operator|(MemorySemanticsMask a, MemorySemanticsMask
inline MemoryAccessMask operator|(MemoryAccessMask a, MemoryAccessMask b) { return MemoryAccessMask(unsigned(a) | unsigned(b)); }
inline KernelProfilingInfoMask operator|(KernelProfilingInfoMask a, KernelProfilingInfoMask b) { return KernelProfilingInfoMask(unsigned(a) | unsigned(b)); }
inline RayFlagsMask operator|(RayFlagsMask a, RayFlagsMask b) { return RayFlagsMask(unsigned(a) | unsigned(b)); }
inline FragmentShadingRateMask operator|(FragmentShadingRateMask a, FragmentShadingRateMask b) { return FragmentShadingRateMask(unsigned(a) | unsigned(b)); }
} // end namespace spv


@ -263,6 +263,7 @@ public:
case OpBranchConditional:
case OpSwitch:
case OpKill:
case OpTerminateInvocation:
case OpReturn:
case OpReturnValue:
case OpUnreachable:


@ -228,6 +228,9 @@ enum TBuiltInVariable {
EbvViewIndex,
EbvDeviceIndex,
EbvShadingRateKHR,
EbvPrimitiveShadingRateKHR,
EbvFragSizeEXT,
EbvFragInvocationCountEXT,
@ -480,6 +483,9 @@ __inline const char* GetBuiltInVariableString(TBuiltInVariable v)
case EbvWarpID: return "WarpIDNV";
case EbvSMID: return "SMIDNV";
case EbvShadingRateKHR: return "ShadingRateKHR";
case EbvPrimitiveShadingRateKHR: return "PrimitiveShadingRateKHR";
default: return "unknown built-in variable";
}
}


@ -406,6 +406,7 @@ enum TLayoutFormat {
ElfRg8i,
ElfR16i,
ElfR8i,
ElfR64i,
ElfIntGuard, // to help with comparisons
@ -423,6 +424,7 @@ enum TLayoutFormat {
ElfRg8ui,
ElfR16ui,
ElfR8ui,
ElfR64ui,
ElfCount
};
@ -755,6 +757,12 @@ public:
bool isPerPrimitive() const { return perPrimitiveNV; }
bool isPerView() const { return perViewNV; }
bool isTaskMemory() const { return perTaskNV; }
bool isAnyPayload() const {
return storage == EvqPayload || storage == EvqPayloadIn;
}
bool isAnyCallable() const {
return storage == EvqCallableData || storage == EvqCallableDataIn;
}
// True if this type of IO is supposed to be arrayed with extra level for per-vertex data
bool isArrayedIo(EShLanguage language) const
@ -1117,6 +1125,8 @@ public:
case ElfR32ui: return "r32ui";
case ElfR16ui: return "r16ui";
case ElfR8ui: return "r8ui";
case ElfR64ui: return "r64ui";
case ElfR64i: return "r64i";
default: return "none";
}
}
@ -1986,6 +1996,7 @@ public:
case EbtAccStruct: return "accelerationStructureNV";
case EbtRayQuery: return "rayQueryEXT";
case EbtReference: return "reference";
case EbtString: return "string";
#endif
default: return "unknown type";
}
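
A minimal compute-shader sketch of how the new r64ui layout format is meant to be used, assuming GL_EXT_shader_image_int64 and 64-bit integer types are available; the binding and variable names are hypothetical:

#version 450
#extension GL_EXT_shader_image_int64 : require
#extension GL_ARB_gpu_shader_int64 : enable   // provides the uint64_t scalar type

// 64-bit unsigned integer images must be declared with the new r64ui format.
layout(binding = 0, r64ui) uniform u64image2D counters;

layout(local_size_x = 8, local_size_y = 8) in;
void main()
{
    // 64-bit image atomics are only accepted on r64i / r64ui images.
    imageAtomicAdd(counters, ivec2(gl_GlobalInvocationID.xy), uint64_t(1));
}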

View file

@ -280,6 +280,12 @@ enum TOperator {
EOpConvUvec2ToPtr,
EOpConvPtrToUvec2,
// uint64_t -> accelerationStructureEXT
EOpConvUint64ToAccStruct,
// uvec2 -> accelerationStructureEXT
EOpConvUvec2ToAccStruct,
//
// binary operations
//
@ -628,13 +634,16 @@ enum TOperator {
// Branch
//
EOpKill, // Fragment only
EOpTerminateInvocation, // Fragment only
EOpDemote, // Fragment only
EOpTerminateRayKHR, // Any-hit only
EOpIgnoreIntersectionKHR, // Any-hit only
EOpReturn,
EOpBreak,
EOpContinue,
EOpCase,
EOpDefault,
//
// Constructors
@ -751,6 +760,7 @@ enum TOperator {
EOpConstructNonuniform, // expected to be transformed away, not present in final AST
EOpConstructReference,
EOpConstructCooperativeMatrix,
EOpConstructAccStruct,
EOpConstructGuardEnd,
//
@ -911,11 +921,13 @@ enum TOperator {
EOpAverageRounded,
EOpMul32x16,
EOpTrace,
EOpTraceNV,
EOpTraceKHR,
EOpReportIntersection,
EOpIgnoreIntersection,
EOpTerminateRay,
EOpExecuteCallable,
EOpIgnoreIntersectionNV,
EOpTerminateRayNV,
EOpExecuteCallableNV,
EOpExecuteCallableKHR,
EOpWritePackedPrimitiveIndices4x8NV,
//
@ -1282,6 +1294,8 @@ public:
TIntermTyped* getConstSubtree() const { return constSubtree; }
#ifndef GLSLANG_WEB
void setFlattenSubset(int subset) { flattenSubset = subset; }
virtual const TString& getAccessName() const;
int getFlattenSubset() const { return flattenSubset; } // -1 means full object
#endif

View file

@ -509,6 +509,8 @@ TBuiltIns::TBuiltIns()
prefixes[EbtUint8] = "u8";
prefixes[EbtInt16] = "i16";
prefixes[EbtUint16] = "u16";
prefixes[EbtInt64] = "i64";
prefixes[EbtUint64] = "u64";
#endif
postfixes[2] = "2";
@ -4419,9 +4421,7 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV
"\n");
stageBuiltins[EShLangAnyHit].append(
"void ignoreIntersectionNV();"
"void ignoreIntersectionEXT();"
"void terminateRayNV();"
"void terminateRayEXT();"
"\n");
stageBuiltins[EShLangClosestHit].append(
"void traceNV(accelerationStructureNV,uint,uint,uint,uint,uint,vec3,float,vec3,float,int);"
@ -4928,6 +4928,11 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV
"\n");
}
if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 310)) {
stageBuiltins[EShLangVertex].append(
"out highp int gl_PrimitiveShadingRateEXT;" // GL_EXT_fragment_shading_rate
"\n");
}
//============================================================================
//
@ -5041,6 +5046,12 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV
"\n");
}
if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 310)) {
stageBuiltins[EShLangGeometry].append(
"out highp int gl_PrimitiveShadingRateEXT;" // GL_EXT_fragment_shading_rate
"\n");
}
//============================================================================
//
// Define the interface to the tessellation control shader.
@ -5338,6 +5349,11 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV
"in vec3 gl_BaryCoordNoPerspNV;"
);
if (version >= 450)
stageBuiltins[EShLangFragment].append(
"flat in int gl_ShadingRateEXT;" // GL_EXT_fragment_shading_rate
);
} else {
// ES profile
@ -5396,6 +5412,10 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV
"in vec3 gl_BaryCoordNV;"
"in vec3 gl_BaryCoordNoPerspNV;"
);
if (version >= 310)
stageBuiltins[EShLangFragment].append(
"flat in highp int gl_ShadingRateEXT;" // GL_EXT_fragment_shading_rate
);
}
#endif
@ -5414,6 +5434,12 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV
"\n");
}
if (version >= 300 /* both ES and non-ES */) {
stageBuiltins[EShLangFragment].append(
"flat in highp uint gl_ViewID_OVR;" // GL_OVR_multiview, GL_OVR_multiview2
"\n");
}
#ifndef GLSLANG_ANGLE
// GL_ARB_shader_ballot
if (profile != EEsProfile && version >= 450) {
@ -5426,6 +5452,15 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV
"in uint64_t gl_SubGroupLeMaskARB;"
"in uint64_t gl_SubGroupLtMaskARB;"
"\n";
const char* rtBallotDecls =
"uniform volatile uint gl_SubGroupSizeARB;"
"in volatile uint gl_SubGroupInvocationARB;"
"in volatile uint64_t gl_SubGroupEqMaskARB;"
"in volatile uint64_t gl_SubGroupGeMaskARB;"
"in volatile uint64_t gl_SubGroupGtMaskARB;"
"in volatile uint64_t gl_SubGroupLeMaskARB;"
"in volatile uint64_t gl_SubGroupLtMaskARB;"
"\n";
const char* fragmentBallotDecls =
"uniform uint gl_SubGroupSizeARB;"
"flat in uint gl_SubGroupInvocationARB;"
@ -5443,6 +5478,13 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV
stageBuiltins[EShLangFragment] .append(fragmentBallotDecls);
stageBuiltins[EShLangMeshNV] .append(ballotDecls);
stageBuiltins[EShLangTaskNV] .append(ballotDecls);
stageBuiltins[EShLangRayGen] .append(rtBallotDecls);
stageBuiltins[EShLangIntersect] .append(rtBallotDecls);
// No volatile qualifier on these builtins in any-hit
stageBuiltins[EShLangAnyHit] .append(ballotDecls);
stageBuiltins[EShLangClosestHit] .append(rtBallotDecls);
stageBuiltins[EShLangMiss] .append(rtBallotDecls);
stageBuiltins[EShLangCallable] .append(rtBallotDecls);
}
// GL_KHR_shader_subgroup
@ -5480,6 +5522,21 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV
"in highp uint gl_NumSubgroups;"
"in highp uint gl_SubgroupID;"
"\n";
// These builtins are volatile for RT stages
const char* rtSubgroupDecls =
"in mediump volatile uint gl_SubgroupSize;"
"in mediump volatile uint gl_SubgroupInvocationID;"
"in highp volatile uvec4 gl_SubgroupEqMask;"
"in highp volatile uvec4 gl_SubgroupGeMask;"
"in highp volatile uvec4 gl_SubgroupGtMask;"
"in highp volatile uvec4 gl_SubgroupLeMask;"
"in highp volatile uvec4 gl_SubgroupLtMask;"
// GL_NV_shader_sm_builtins
"in highp uint gl_WarpsPerSMNV;"
"in highp uint gl_SMCountNV;"
"in highp volatile uint gl_WarpIDNV;"
"in highp volatile uint gl_SMIDNV;"
"\n";
stageBuiltins[EShLangVertex] .append(subgroupDecls);
stageBuiltins[EShLangTessControl] .append(subgroupDecls);
@ -5492,12 +5549,13 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV
stageBuiltins[EShLangMeshNV] .append(computeSubgroupDecls);
stageBuiltins[EShLangTaskNV] .append(subgroupDecls);
stageBuiltins[EShLangTaskNV] .append(computeSubgroupDecls);
stageBuiltins[EShLangRayGen] .append(subgroupDecls);
stageBuiltins[EShLangIntersect] .append(subgroupDecls);
stageBuiltins[EShLangRayGen] .append(rtSubgroupDecls);
stageBuiltins[EShLangIntersect] .append(rtSubgroupDecls);
// No volatile qualifier on these builtins in any-hit
stageBuiltins[EShLangAnyHit] .append(subgroupDecls);
stageBuiltins[EShLangClosestHit] .append(subgroupDecls);
stageBuiltins[EShLangMiss] .append(subgroupDecls);
stageBuiltins[EShLangCallable] .append(subgroupDecls);
stageBuiltins[EShLangClosestHit] .append(rtSubgroupDecls);
stageBuiltins[EShLangMiss] .append(rtSubgroupDecls);
stageBuiltins[EShLangCallable] .append(rtSubgroupDecls);
}
// GL_NV_ray_tracing/GL_EXT_ray_tracing
@ -5565,7 +5623,7 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV
"in float gl_RayTminNV;"
"in float gl_RayTminEXT;"
"in float gl_RayTmaxNV;"
"in float gl_RayTmaxEXT;"
"in volatile float gl_RayTmaxEXT;"
"in mat4x3 gl_ObjectToWorldNV;"
"in mat4x3 gl_ObjectToWorldEXT;"
"in mat3x4 gl_ObjectToWorld3x4EXT;"
@ -5685,13 +5743,57 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV
commonBuiltins.append("const int gl_StorageSemanticsOutput = 0x1000;\n");
}
#endif // !GLSLANG_ANGLE
if (version >= 300 /* both ES and non-ES */) {
stageBuiltins[EShLangFragment].append(
"flat in highp uint gl_ViewID_OVR;" // GL_OVR_multiview, GL_OVR_multiview2
"\n");
// Adding these to common built-ins triggers an assert due to a memory corruption in related code when testing.
// So instead, add them to each stage individually, avoiding the GLSLang bug.
if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 310)) {
for (int stage=EShLangVertex; stage<EShLangCount; stage++)
{
stageBuiltins[static_cast<EShLanguage>(stage)].append("const highp int gl_ShadingRateFlag2VerticalPixelsEXT = 1;\n");
stageBuiltins[static_cast<EShLanguage>(stage)].append("const highp int gl_ShadingRateFlag4VerticalPixelsEXT = 2;\n");
stageBuiltins[static_cast<EShLanguage>(stage)].append("const highp int gl_ShadingRateFlag2HorizontalPixelsEXT = 4;\n");
stageBuiltins[static_cast<EShLanguage>(stage)].append("const highp int gl_ShadingRateFlag4HorizontalPixelsEXT = 8;\n");
}
}
// GL_EXT_shader_image_int64
if ((profile != EEsProfile && version >= 420) ||
(profile == EEsProfile && version >= 310)) {
const TBasicType bTypes[] = { EbtInt64, EbtUint64 };
for (int ms = 0; ms <= 1; ++ms) { // loop over "bool" multisample or not
for (int arrayed = 0; arrayed <= 1; ++arrayed) { // loop over "bool" arrayed or not
for (int dim = Esd1D; dim < EsdSubpass; ++dim) { // 1D, ..., buffer
if ((dim == Esd1D || dim == EsdRect) && profile == EEsProfile)
continue;
if ((dim == Esd3D || dim == EsdRect || dim == EsdBuffer) && arrayed)
continue;
if (dim != Esd2D && ms)
continue;
// Loop over the bTypes
for (size_t bType = 0; bType < sizeof(bTypes)/sizeof(TBasicType); ++bType) {
//
// Now, make all the function prototypes for the type we just built...
//
TSampler sampler;
sampler.setImage(bTypes[bType], (TSamplerDim)dim, arrayed ? true : false,
false,
ms ? true : false);
TString typeName = sampler.getString();
addQueryFunctions(sampler, typeName, version, profile);
addImageFunctions(sampler, typeName, version, profile);
}
}
}
}
}
#endif // !GLSLANG_ANGLE
#endif // !GLSLANG_WEB
// printf("%s\n", commonBuiltins.c_str());
@ -5788,7 +5890,6 @@ void TBuiltIns::add2ndGenerationSamplingImaging(int version, EProfile profile, c
#endif
if (shadow && (bTypes[bType] == EbtInt || bTypes[bType] == EbtUint))
continue;
//
// Now, make all the function prototypes for the type we just built...
//
@ -6013,8 +6114,16 @@ void TBuiltIns::addImageFunctions(TSampler sampler, const TString& typeName, int
if ( profile != EEsProfile ||
(profile == EEsProfile && version >= 310)) {
if (sampler.type == EbtInt || sampler.type == EbtUint) {
const char* dataType = sampler.type == EbtInt ? "highp int" : "highp uint";
if (sampler.type == EbtInt || sampler.type == EbtUint || sampler.type == EbtInt64 || sampler.type == EbtUint64 ) {
const char* dataType;
switch (sampler.type) {
case(EbtInt): dataType = "highp int"; break;
case(EbtUint): dataType = "highp uint"; break;
case(EbtInt64): dataType = "highp int64_t"; break;
case(EbtUint64): dataType = "highp uint64_t"; break;
default: dataType = "";
}
const int numBuiltins = 7;
@ -7650,6 +7759,20 @@ void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion
BuiltInVariable("gl_WarpIDNV", EbvWarpID, symbolTable);
BuiltInVariable("gl_SMIDNV", EbvSMID, symbolTable);
}
if (language == EShLangGeometry || language == EShLangVertex) {
if ((profile == EEsProfile && version >= 310) ||
(profile != EEsProfile && version >= 450)) {
symbolTable.setVariableExtensions("gl_PrimitiveShadingRateEXT", 1, &E_GL_EXT_fragment_shading_rate);
BuiltInVariable("gl_PrimitiveShadingRateEXT", EbvPrimitiveShadingRateKHR, symbolTable);
symbolTable.setVariableExtensions("gl_ShadingRateFlag2VerticalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate);
symbolTable.setVariableExtensions("gl_ShadingRateFlag4VerticalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate);
symbolTable.setVariableExtensions("gl_ShadingRateFlag2HorizontalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate);
symbolTable.setVariableExtensions("gl_ShadingRateFlag4HorizontalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate);
}
}
#endif // !GLSLANG_WEB
break;
@ -8156,6 +8279,17 @@ void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion
}
symbolTable.setFunctionExtensions("helperInvocationEXT", 1, &E_GL_EXT_demote_to_helper_invocation);
if ((profile == EEsProfile && version >= 310) ||
(profile != EEsProfile && version >= 450)) {
symbolTable.setVariableExtensions("gl_ShadingRateEXT", 1, &E_GL_EXT_fragment_shading_rate);
BuiltInVariable("gl_ShadingRateEXT", EbvShadingRateKHR, symbolTable);
symbolTable.setVariableExtensions("gl_ShadingRateFlag2VerticalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate);
symbolTable.setVariableExtensions("gl_ShadingRateFlag4VerticalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate);
symbolTable.setVariableExtensions("gl_ShadingRateFlag2HorizontalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate);
symbolTable.setVariableExtensions("gl_ShadingRateFlag4HorizontalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate);
}
#endif // !GLSLANG_WEB
break;
@ -8288,6 +8422,14 @@ void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion
symbolTable.setFunctionExtensions("dFdyCoarse", 1, &E_GL_NV_compute_shader_derivatives);
symbolTable.setFunctionExtensions("fwidthCoarse", 1, &E_GL_NV_compute_shader_derivatives);
}
if ((profile == EEsProfile && version >= 310) ||
(profile != EEsProfile && version >= 450)) {
symbolTable.setVariableExtensions("gl_ShadingRateFlag2VerticalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate);
symbolTable.setVariableExtensions("gl_ShadingRateFlag4VerticalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate);
symbolTable.setVariableExtensions("gl_ShadingRateFlag2HorizontalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate);
symbolTable.setVariableExtensions("gl_ShadingRateFlag4HorizontalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate);
}
#endif // !GLSLANG_WEB
break;
@ -8342,9 +8484,7 @@ void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion
symbolTable.setFunctionExtensions("reportIntersectionNV", 1, &E_GL_NV_ray_tracing);
symbolTable.setFunctionExtensions("reportIntersectionEXT", 1, &E_GL_EXT_ray_tracing);
symbolTable.setFunctionExtensions("ignoreIntersectionNV", 1, &E_GL_NV_ray_tracing);
symbolTable.setFunctionExtensions("ignoreIntersectionEXT", 1, &E_GL_EXT_ray_tracing);
symbolTable.setFunctionExtensions("terminateRayNV", 1, &E_GL_NV_ray_tracing);
symbolTable.setFunctionExtensions("terminateRayEXT", 1, &E_GL_EXT_ray_tracing);
symbolTable.setFunctionExtensions("executeCallableNV", 1, &E_GL_NV_ray_tracing);
symbolTable.setFunctionExtensions("executeCallableEXT", 1, &E_GL_EXT_ray_tracing);
@ -8437,6 +8577,13 @@ void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion
BuiltInVariable("gl_WarpIDNV", EbvWarpID, symbolTable);
BuiltInVariable("gl_SMIDNV", EbvSMID, symbolTable);
}
if ((profile == EEsProfile && version >= 310) ||
(profile != EEsProfile && version >= 450)) {
symbolTable.setVariableExtensions("gl_ShadingRateFlag2VerticalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate);
symbolTable.setVariableExtensions("gl_ShadingRateFlag4VerticalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate);
symbolTable.setVariableExtensions("gl_ShadingRateFlag2HorizontalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate);
symbolTable.setVariableExtensions("gl_ShadingRateFlag4HorizontalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate);
}
break;
case EShLangMeshNV:
@ -8581,6 +8728,14 @@ void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion
BuiltInVariable("gl_WarpIDNV", EbvWarpID, symbolTable);
BuiltInVariable("gl_SMIDNV", EbvSMID, symbolTable);
}
if ((profile == EEsProfile && version >= 310) ||
(profile != EEsProfile && version >= 450)) {
symbolTable.setVariableExtensions("gl_ShadingRateFlag2VerticalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate);
symbolTable.setVariableExtensions("gl_ShadingRateFlag4VerticalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate);
symbolTable.setVariableExtensions("gl_ShadingRateFlag2HorizontalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate);
symbolTable.setVariableExtensions("gl_ShadingRateFlag4HorizontalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate);
}
break;
case EShLangTaskNV:
@ -8681,6 +8836,13 @@ void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion
BuiltInVariable("gl_WarpIDNV", EbvWarpID, symbolTable);
BuiltInVariable("gl_SMIDNV", EbvSMID, symbolTable);
}
if ((profile == EEsProfile && version >= 310) ||
(profile != EEsProfile && version >= 450)) {
symbolTable.setVariableExtensions("gl_ShadingRateFlag2VerticalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate);
symbolTable.setVariableExtensions("gl_ShadingRateFlag4VerticalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate);
symbolTable.setVariableExtensions("gl_ShadingRateFlag2HorizontalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate);
symbolTable.setVariableExtensions("gl_ShadingRateFlag4HorizontalPixelsEXT", 1, &E_GL_EXT_fragment_shading_rate);
}
break;
#endif
@ -9152,10 +9314,10 @@ void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion
case EShLangClosestHit:
case EShLangMiss:
if (profile != EEsProfile && version >= 460) {
symbolTable.relateToOperator("traceNV", EOpTrace);
symbolTable.relateToOperator("traceRayEXT", EOpTrace);
symbolTable.relateToOperator("executeCallableNV", EOpExecuteCallable);
symbolTable.relateToOperator("executeCallableEXT", EOpExecuteCallable);
symbolTable.relateToOperator("traceNV", EOpTraceNV);
symbolTable.relateToOperator("traceRayEXT", EOpTraceKHR);
symbolTable.relateToOperator("executeCallableNV", EOpExecuteCallableNV);
symbolTable.relateToOperator("executeCallableEXT", EOpExecuteCallableKHR);
}
break;
case EShLangIntersect:
@ -9166,16 +9328,14 @@ void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion
break;
case EShLangAnyHit:
if (profile != EEsProfile && version >= 460) {
symbolTable.relateToOperator("ignoreIntersectionNV", EOpIgnoreIntersection);
symbolTable.relateToOperator("ignoreIntersectionEXT", EOpIgnoreIntersection);
symbolTable.relateToOperator("terminateRayNV", EOpTerminateRay);
symbolTable.relateToOperator("terminateRayEXT", EOpTerminateRay);
symbolTable.relateToOperator("ignoreIntersectionNV", EOpIgnoreIntersectionNV);
symbolTable.relateToOperator("terminateRayNV", EOpTerminateRayNV);
}
break;
case EShLangCallable:
if (profile != EEsProfile && version >= 460) {
symbolTable.relateToOperator("executeCallableNV", EOpExecuteCallable);
symbolTable.relateToOperator("executeCallableEXT", EOpExecuteCallable);
symbolTable.relateToOperator("executeCallableNV", EOpExecuteCallableNV);
symbolTable.relateToOperator("executeCallableEXT", EOpExecuteCallableKHR);
}
break;
case EShLangMeshNV:

View file

@ -71,6 +71,13 @@ void TIntermConstantUnion::traverse(TIntermTraverser *it)
it->visitConstantUnion(this);
}
const TString& TIntermSymbol::getAccessName() const {
if (getBasicType() == EbtBlock)
return getType().getTypeName();
else
return getName();
}
//
// Traverse a binary node.
//

View file

@ -2298,6 +2298,10 @@ TOperator TIntermediate::mapTypeToConstructorOp(const TType& type) const
case EbtReference:
op = EOpConstructReference;
break;
case EbtAccStruct:
op = EOpConstructAccStruct;
break;
#endif
default:
break;
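
The new constructor operator corresponds to GLSL code along these lines (a sketch assuming GL_EXT_ray_tracing; the push-constant layout holding the device address is hypothetical):

#version 460
#extension GL_EXT_ray_tracing : require

// Hypothetical source of a top-level acceleration structure device address, split into a uvec2.
layout(push_constant) uniform Addr { uvec2 tlasAddress; } pc;
layout(location = 0) rayPayloadEXT vec4 payload;

void main()   // ray generation stage
{
    // New conversion path: uvec2 (or uint64_t) -> accelerationStructureEXT.
    traceRayEXT(accelerationStructureEXT(pc.tlasAddress),
                gl_RayFlagsOpaqueEXT, 0xFFu, 0u, 0u, 0u,
                vec3(0.0), 0.001, vec3(0.0, 0.0, 1.0), 100.0, 0);
}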

View file

@ -127,22 +127,6 @@ bool TParseContextBase::lValueErrorCheck(const TSourceLoc& loc, const char* op,
{
TIntermBinary* binaryNode = node->getAsBinaryNode();
if (binaryNode) {
switch(binaryNode->getOp()) {
case EOpIndexDirect:
case EOpIndexIndirect: // fall through
case EOpIndexDirectStruct: // fall through
case EOpVectorSwizzle:
case EOpMatrixSwizzle:
return lValueErrorCheck(loc, op, binaryNode->getLeft());
default:
break;
}
error(loc, " l-value required", op, "", "");
return true;
}
const char* symbol = nullptr;
TIntermSymbol* symNode = node->getAsSymbolNode();
if (symNode != nullptr)
@ -203,15 +187,40 @@ bool TParseContextBase::lValueErrorCheck(const TSourceLoc& loc, const char* op,
// Everything else is okay, no error.
//
if (message == nullptr)
{
if (binaryNode) {
switch (binaryNode->getOp()) {
case EOpIndexDirect:
case EOpIndexIndirect: // fall through
case EOpIndexDirectStruct: // fall through
case EOpVectorSwizzle:
case EOpMatrixSwizzle:
return lValueErrorCheck(loc, op, binaryNode->getLeft());
default:
break;
}
error(loc, " l-value required", op, "", "");
return true;
}
return false;
}
//
// If we get here, we have an error and a message.
//
const TIntermTyped* leftMostTypeNode = TIntermediate::findLValueBase(node, true);
if (symNode)
error(loc, " l-value required", op, "\"%s\" (%s)", symbol, message);
else
error(loc, " l-value required", op, "(%s)", message);
if (binaryNode && binaryNode->getAsOperator()->getOp() == EOpIndexDirectStruct)
if(IsAnonymous(leftMostTypeNode->getAsSymbolNode()->getName()))
error(loc, " l-value required", op, "\"%s\" (%s)", leftMostTypeNode->getAsSymbolNode()->getAccessName().c_str(), message);
else
error(loc, " l-value required", op, "\"%s\" (%s)", leftMostTypeNode->getAsSymbolNode()->getName().c_str(), message);
else
error(loc, " l-value required", op, "(%s)", message);
return true;
}
@ -219,28 +228,41 @@ bool TParseContextBase::lValueErrorCheck(const TSourceLoc& loc, const char* op,
// Test for and give an error if the node can't be read from.
void TParseContextBase::rValueErrorCheck(const TSourceLoc& loc, const char* op, TIntermTyped* node)
{
TIntermBinary* binaryNode = node->getAsBinaryNode();
const TIntermSymbol* symNode = node->getAsSymbolNode();
if (! node)
return;
TIntermBinary* binaryNode = node->getAsBinaryNode();
if (binaryNode) {
switch(binaryNode->getOp()) {
case EOpIndexDirect:
case EOpIndexIndirect:
case EOpIndexDirectStruct:
case EOpVectorSwizzle:
case EOpMatrixSwizzle:
rValueErrorCheck(loc, op, binaryNode->getLeft());
default:
break;
if (node->getQualifier().isWriteOnly()) {
const TIntermTyped* leftMostTypeNode = TIntermediate::findLValueBase(node, true);
if (symNode != nullptr)
error(loc, "can't read from writeonly object: ", op, symNode->getName().c_str());
else if (binaryNode &&
(binaryNode->getAsOperator()->getOp() == EOpIndexDirectStruct ||
binaryNode->getAsOperator()->getOp() == EOpIndexDirect))
if(IsAnonymous(leftMostTypeNode->getAsSymbolNode()->getName()))
error(loc, "can't read from writeonly object: ", op, leftMostTypeNode->getAsSymbolNode()->getAccessName().c_str());
else
error(loc, "can't read from writeonly object: ", op, leftMostTypeNode->getAsSymbolNode()->getName().c_str());
else
error(loc, "can't read from writeonly object: ", op, "");
} else {
if (binaryNode) {
switch (binaryNode->getOp()) {
case EOpIndexDirect:
case EOpIndexIndirect:
case EOpIndexDirectStruct:
case EOpVectorSwizzle:
case EOpMatrixSwizzle:
rValueErrorCheck(loc, op, binaryNode->getLeft());
default:
break;
}
}
return;
}
TIntermSymbol* symNode = node->getAsSymbolNode();
if (symNode && symNode->getQualifier().isWriteOnly())
error(loc, "can't read from writeonly object: ", op, symNode->getName().c_str());
}
// Add 'symbol' to the list of deferred linkage symbols, which

View file

@ -2076,14 +2076,32 @@ void TParseContext::builtInOpCheck(const TSourceLoc& loc, const TFunction& fnCan
}
#ifndef GLSLANG_WEB
case EOpTrace:
case EOpTraceNV:
if (!(*argp)[10]->getAsConstantUnion())
error(loc, "argument must be compile-time constant", "payload number", "a");
break;
case EOpExecuteCallable:
case EOpTraceKHR:
if (!(*argp)[10]->getAsConstantUnion())
error(loc, "argument must be compile-time constant", "payload number", "a");
else {
unsigned int location = (*argp)[10]->getAsConstantUnion()->getAsConstantUnion()->getConstArray()[0].getUConst();
if (intermediate.checkLocationRT(0, location) < 0)
error(loc, "with layout(location =", "no rayPayloadEXT/rayPayloadInEXT declared", "%d)", location);
}
break;
case EOpExecuteCallableNV:
if (!(*argp)[1]->getAsConstantUnion())
error(loc, "argument must be compile-time constant", "callable data number", "");
break;
case EOpExecuteCallableKHR:
if (!(*argp)[1]->getAsConstantUnion())
error(loc, "argument must be compile-time constant", "callable data number", "");
else {
unsigned int location = (*argp)[1]->getAsConstantUnion()->getAsConstantUnion()->getConstArray()[0].getUConst();
if (intermediate.checkLocationRT(1, location) < 0)
error(loc, "with layout(location =", "no callableDataEXT/callableDataInEXT declared", "%d)", location);
}
break;
case EOpRayQueryGetIntersectionType:
case EOpRayQueryGetIntersectionT:
@ -2121,9 +2139,15 @@ void TParseContext::builtInOpCheck(const TSourceLoc& loc, const TFunction& fnCan
{
// Make sure the image types have the correct layout() format and correct argument types
const TType& imageType = arg0->getType();
if (imageType.getSampler().type == EbtInt || imageType.getSampler().type == EbtUint) {
if (imageType.getQualifier().getFormat() != ElfR32i && imageType.getQualifier().getFormat() != ElfR32ui)
if (imageType.getSampler().type == EbtInt || imageType.getSampler().type == EbtUint ||
imageType.getSampler().type == EbtInt64 || imageType.getSampler().type == EbtUint64) {
if (imageType.getQualifier().getFormat() != ElfR32i && imageType.getQualifier().getFormat() != ElfR32ui &&
imageType.getQualifier().getFormat() != ElfR64i && imageType.getQualifier().getFormat() != ElfR64ui)
error(loc, "only supported on image with format r32i or r32ui", fnCandidate.getName().c_str(), "");
if (callNode.getType().getBasicType() == EbtInt64 && imageType.getQualifier().getFormat() != ElfR64i)
error(loc, "only supported on image with format r64i", fnCandidate.getName().c_str(), "");
else if (callNode.getType().getBasicType() == EbtUint64 && imageType.getQualifier().getFormat() != ElfR64ui)
error(loc, "only supported on image with format r64ui", fnCandidate.getName().c_str(), "");
} else {
bool isImageAtomicOnFloatAllowed = ((fnCandidate.getName().compare(0, 14, "imageAtomicAdd") == 0) ||
(fnCandidate.getName().compare(0, 15, "imageAtomicLoad") == 0) ||
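
The new checkLocationRT() validation expects the trailing constant of traceRayEXT / executeCallableEXT to match a declared rayPayloadEXT / callableDataEXT location, roughly as in this sketch (GL_EXT_ray_tracing assumed; bindings and names are hypothetical):

#version 460
#extension GL_EXT_ray_tracing : require

layout(binding = 0, set = 0) uniform accelerationStructureEXT topLevelAS;
layout(location = 0) rayPayloadEXT vec4 payload;        // matches the final argument of traceRayEXT
layout(location = 2) callableDataEXT vec4 callableData; // matches the final argument of executeCallableEXT

void main()   // ray generation stage
{
    traceRayEXT(topLevelAS, gl_RayFlagsOpaqueEXT, 0xFFu, 0u, 0u, 0u,
                vec3(0.0), 0.001, vec3(0.0, 0.0, 1.0), 100.0, 0);
    executeCallableEXT(0u, 2);
}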
@ -3368,7 +3392,7 @@ void TParseContext::transparentOpaqueCheck(const TSourceLoc& loc, const TType& t
//
void TParseContext::memberQualifierCheck(glslang::TPublicType& publicType)
{
globalQualifierFixCheck(publicType.loc, publicType.qualifier);
globalQualifierFixCheck(publicType.loc, publicType.qualifier, true);
checkNoShaderLayouts(publicType.loc, publicType.shaderQualifiers);
if (publicType.qualifier.isNonUniform()) {
error(publicType.loc, "not allowed on block or structure members", "nonuniformEXT", "");
@ -3379,7 +3403,7 @@ void TParseContext::memberQualifierCheck(glslang::TPublicType& publicType)
//
// Check/fix just a full qualifier (no variables or types yet, but qualifier is complete) at global level.
//
void TParseContext::globalQualifierFixCheck(const TSourceLoc& loc, TQualifier& qualifier)
void TParseContext::globalQualifierFixCheck(const TSourceLoc& loc, TQualifier& qualifier, bool isMemberCheck)
{
bool nonuniformOkay = false;
@ -3404,6 +3428,16 @@ void TParseContext::globalQualifierFixCheck(const TSourceLoc& loc, TQualifier& q
case EvqTemporary:
nonuniformOkay = true;
break;
case EvqUniform:
// According to GLSL spec: The std430 qualifier is supported only for shader storage blocks; a shader using
// the std430 qualifier on a uniform block will fail to compile.
// Only check the global declaration: layout(std430) uniform;
if (blockName == nullptr &&
qualifier.layoutPacking == ElpStd430)
{
error(loc, "it is invalid to declare std430 qualifier on uniform", "", "");
}
break;
default:
break;
}
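
In shader terms, the new check rejects the global default declaration below while leaving block declarations alone (a sketch; the block names are hypothetical):

#version 450

// layout(std430) uniform;                                   // now rejected: std430 on the uniform default
layout(std140, binding = 0) uniform Params { vec4 tint; };   // still fine: std140 uniform block
layout(std430, binding = 1) buffer Data { float values[]; }; // still fine: std430 storage block

void main() {}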
@ -3411,7 +3445,9 @@ void TParseContext::globalQualifierFixCheck(const TSourceLoc& loc, TQualifier& q
if (!nonuniformOkay && qualifier.isNonUniform())
error(loc, "for non-parameter, can only apply to 'in' or no storage qualifier", "nonuniformEXT", "");
invariantCheck(loc, qualifier);
// Storage qualifier isn't ready for memberQualifierCheck, we should skip invariantCheck for it.
if (!isMemberCheck || structNestingLevel > 0)
invariantCheck(loc, qualifier);
}
//
@ -3422,7 +3458,7 @@ void TParseContext::globalQualifierTypeCheck(const TSourceLoc& loc, const TQuali
if (! symbolTable.atGlobalLevel())
return;
if (!(publicType.userDef && publicType.userDef->isReference())) {
if (!(publicType.userDef && publicType.userDef->isReference()) && !parsingBuiltins) {
if (qualifier.isMemoryQualifierImageAndSSBOOnly() && ! publicType.isImage() && publicType.qualifier.storage != EvqBuffer) {
error(loc, "memory qualifiers cannot be used on this type", "", "");
} else if (qualifier.isMemory() && (publicType.basicType != EbtSampler) && !publicType.qualifier.isUniformOrBuffer()) {
@ -4083,6 +4119,9 @@ void TParseContext::checkRuntimeSizable(const TSourceLoc& loc, const TIntermType
if (isRuntimeLength(base))
return;
if (base.getType().getQualifier().builtIn == EbvSampleMask)
return;
// Check for the last member of a buffer reference type, which is runtime sizeable
// but doesn't support runtime length
if (base.getType().getQualifier().storage == EvqBuffer) {
@ -4226,6 +4265,8 @@ TSymbol* TParseContext::redeclareBuiltinVariable(const TSourceLoc& loc, const TS
(identifier == "gl_FragCoord" && ((nonEsRedecls && version >= 150) || esRedecls)) ||
identifier == "gl_ClipDistance" ||
identifier == "gl_CullDistance" ||
identifier == "gl_ShadingRateEXT" ||
identifier == "gl_PrimitiveShadingRateEXT" ||
identifier == "gl_FrontColor" ||
identifier == "gl_BackColor" ||
identifier == "gl_FrontSecondaryColor" ||
@ -4632,14 +4673,14 @@ void TParseContext::paramCheckFix(const TSourceLoc& loc, const TQualifier& quali
void TParseContext::nestedBlockCheck(const TSourceLoc& loc)
{
if (structNestingLevel > 0)
if (structNestingLevel > 0 || blockNestingLevel > 0)
error(loc, "cannot nest a block definition inside a structure or block", "", "");
++structNestingLevel;
++blockNestingLevel;
}
void TParseContext::nestedStructCheck(const TSourceLoc& loc)
{
if (structNestingLevel > 0)
if (structNestingLevel > 0 || blockNestingLevel > 0)
error(loc, "cannot nest a structure definition inside a structure or block", "", "");
++structNestingLevel;
}
@ -6535,13 +6576,15 @@ void TParseContext::declareTypeDefaults(const TSourceLoc& loc, const TPublicType
error(loc, "atomic_uint binding is too large", "binding", "");
return;
}
if(publicType.qualifier.hasOffset()) {
if (publicType.qualifier.hasOffset())
atomicUintOffsets[publicType.qualifier.layoutBinding] = publicType.qualifier.layoutOffset;
}
return;
}
if (publicType.arraySizes) {
error(loc, "expect an array name", "", "");
}
if (publicType.qualifier.hasLayout() && !publicType.qualifier.hasBufferReference())
warn(loc, "useless application of layout qualifier", "layout", "");
#endif
@ -6632,6 +6675,22 @@ TIntermNode* TParseContext::declareVariable(const TSourceLoc& loc, TString& iden
if (type.getQualifier().storage == EvqShared && type.containsCoopMat())
error(loc, "qualifier", "Cooperative matrix types must not be used in shared memory", "");
if (profile == EEsProfile) {
if (type.getQualifier().isPipeInput() && type.getBasicType() == EbtStruct) {
if (type.getQualifier().isArrayedIo(language)) {
TType perVertexType(type, 0);
if (perVertexType.containsArray() && perVertexType.containsBuiltIn() == false) {
error(loc, "A per vertex structure containing an array is not allowed as input in ES", type.getTypeName().c_str(), "");
}
}
else if (type.containsArray() && type.containsBuiltIn() == false) {
error(loc, "A structure containing an array is not allowed as input in ES", type.getTypeName().c_str(), "");
}
if (type.containsStructure())
error(loc, "A structure containing an struct is not allowed as input in ES", type.getTypeName().c_str(), "");
}
}
if (identifier != "gl_FragCoord" && (publicType.shaderQualifiers.originUpperLeft || publicType.shaderQualifiers.pixelCenterInteger))
error(loc, "can only apply origin_upper_left and pixel_center_origin to gl_FragCoord", "layout qualifier", "");
if (identifier != "gl_FragDepth" && publicType.shaderQualifiers.getDepth() != EldNone)
@ -6954,6 +7013,15 @@ TIntermTyped* TParseContext::convertInitializerList(const TSourceLoc& loc, const
error(loc, "wrong vector size (or rows in a matrix column):", "initializer list", type.getCompleteString().c_str());
return nullptr;
}
TBasicType destType = type.getBasicType();
for (int i = 0; i < type.getVectorSize(); ++i) {
TBasicType initType = initList->getSequence()[i]->getAsTyped()->getBasicType();
if (destType != initType && !intermediate.canImplicitlyPromote(initType, destType)) {
error(loc, "type mismatch in initializer list", "initializer list", type.getCompleteString().c_str());
return nullptr;
}
}
} else {
error(loc, "unexpected initializer-list type:", "initializer list", type.getCompleteString().c_str());
return nullptr;
@ -7410,6 +7478,19 @@ TIntermTyped* TParseContext::constructBuiltIn(const TType& type, TOperator op, T
return node;
case EOpConstructAccStruct:
if ((node->getType().isScalar() && node->getType().getBasicType() == EbtUint64)) {
// construct acceleration structure from uint64
requireExtensions(loc, 1, &E_GL_EXT_ray_tracing, "uint64_t conversion to acclerationStructureEXT");
return intermediate.addBuiltInFunctionCall(node->getLoc(), EOpConvUint64ToAccStruct, true, node,
type);
} else if (node->getType().isVector() && node->getType().getBasicType() == EbtUint && node->getVectorSize() == 2) {
// construct acceleration structure from uvec2
requireExtensions(loc, 1, &E_GL_EXT_ray_tracing, "uvec2 conversion to accelerationStructureEXT");
return intermediate.addBuiltInFunctionCall(node->getLoc(), EOpConvUvec2ToAccStruct, true, node,
type);
} else
return nullptr;
#endif // GLSLANG_WEB
default:
@ -7490,10 +7571,10 @@ void TParseContext::declareBlock(const TSourceLoc& loc, TTypeList& typeList, con
TType& memberType = *typeList[member].type;
TQualifier& memberQualifier = memberType.getQualifier();
const TSourceLoc& memberLoc = typeList[member].loc;
globalQualifierFixCheck(memberLoc, memberQualifier);
if (memberQualifier.storage != EvqTemporary && memberQualifier.storage != EvqGlobal && memberQualifier.storage != currentBlockQualifier.storage)
error(memberLoc, "member storage qualifier cannot contradict block storage qualifier", memberType.getFieldName().c_str(), "");
memberQualifier.storage = currentBlockQualifier.storage;
globalQualifierFixCheck(memberLoc, memberQualifier);
#ifndef GLSLANG_WEB
inheritMemoryQualifiers(currentBlockQualifier, memberQualifier);
if (currentBlockQualifier.perPrimitiveNV)
@ -8191,7 +8272,7 @@ void TParseContext::invariantCheck(const TSourceLoc& loc, const TQualifier& qual
bool pipeOut = qualifier.isPipeOutput();
bool pipeIn = qualifier.isPipeInput();
if (version >= 300 || (!isEsProfile() && version >= 420)) {
if ((version >= 300 && isEsProfile()) || (!isEsProfile() && version >= 420)) {
if (! pipeOut)
error(loc, "can only apply to an output", "invariant", "");
} else {
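
The corrected condition means the output-only restriction on `invariant` applies to ES 3.00+ (and desktop 4.20+), roughly as in this ES vertex-shader sketch:

#version 300 es

invariant gl_Position;            // accepted: gl_Position is an output
out highp vec4 vColor;
invariant vColor;                 // accepted: user-declared output
// invariant in highp vec4 aPos;  // would be rejected: 'invariant' can only apply to an output

void main()
{
    gl_Position = vec4(0.0, 0.0, 0.0, 1.0);
    vColor = vec4(1.0);
}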

View file

@ -83,7 +83,7 @@ public:
: TParseVersions(interm, version, profile, spvVersion, language, infoSink, forwardCompatible, messages),
scopeMangler("::"),
symbolTable(symbolTable),
statementNestingLevel(0), loopNestingLevel(0), structNestingLevel(0), controlFlowNestingLevel(0),
statementNestingLevel(0), loopNestingLevel(0), structNestingLevel(0), blockNestingLevel(0), controlFlowNestingLevel(0),
currentFunctionType(nullptr),
postEntryPointReturn(false),
contextPragma(true, false),
@ -178,7 +178,8 @@ public:
TSymbolTable& symbolTable; // symbol table that goes with the current language, version, and profile
int statementNestingLevel; // 0 if outside all flow control or compound statements
int loopNestingLevel; // 0 if outside all loops
int structNestingLevel; // 0 if outside blocks and structures
int structNestingLevel; // 0 if outside structures
int blockNestingLevel; // 0 if outside blocks
int controlFlowNestingLevel; // 0 if outside all flow control
const TType* currentFunctionType; // the return type of the function that's currently being parsed
bool functionReturnsValue; // true if a non-void function has a return
@ -365,7 +366,7 @@ public:
void accStructCheck(const TSourceLoc & loc, const TType & type, const TString & identifier);
void transparentOpaqueCheck(const TSourceLoc&, const TType&, const TString& identifier);
void memberQualifierCheck(glslang::TPublicType&);
void globalQualifierFixCheck(const TSourceLoc&, TQualifier&);
void globalQualifierFixCheck(const TSourceLoc&, TQualifier&, bool isMemberCheck = false);
void globalQualifierTypeCheck(const TSourceLoc&, const TQualifier&, const TPublicType&);
bool structQualifierErrorCheck(const TSourceLoc&, const TPublicType& pType);
void mergeQualifiers(const TSourceLoc&, TQualifier& dst, const TQualifier& src, bool force);

View file

@ -365,6 +365,9 @@ void TScanContext::fillInKeywordMap()
(*KeywordMap)["if"] = IF;
(*KeywordMap)["else"] = ELSE;
(*KeywordMap)["discard"] = DISCARD;
(*KeywordMap)["terminateInvocation"] = TERMINATE_INVOCATION;
(*KeywordMap)["terminateRayEXT"] = TERMINATE_RAY;
(*KeywordMap)["ignoreIntersectionEXT"] = IGNORE_INTERSECTION;
(*KeywordMap)["return"] = RETURN;
(*KeywordMap)["void"] = VOID;
(*KeywordMap)["bool"] = BOOL;
@ -471,6 +474,28 @@ void TScanContext::fillInKeywordMap()
(*KeywordMap)["image2DMSArray"] = IMAGE2DMSARRAY;
(*KeywordMap)["iimage2DMSArray"] = IIMAGE2DMSARRAY;
(*KeywordMap)["uimage2DMSArray"] = UIMAGE2DMSARRAY;
(*KeywordMap)["i64image1D"] = I64IMAGE1D;
(*KeywordMap)["u64image1D"] = U64IMAGE1D;
(*KeywordMap)["i64image2D"] = I64IMAGE2D;
(*KeywordMap)["u64image2D"] = U64IMAGE2D;
(*KeywordMap)["i64image3D"] = I64IMAGE3D;
(*KeywordMap)["u64image3D"] = U64IMAGE3D;
(*KeywordMap)["i64image2DRect"] = I64IMAGE2DRECT;
(*KeywordMap)["u64image2DRect"] = U64IMAGE2DRECT;
(*KeywordMap)["i64imageCube"] = I64IMAGECUBE;
(*KeywordMap)["u64imageCube"] = U64IMAGECUBE;
(*KeywordMap)["i64imageBuffer"] = I64IMAGEBUFFER;
(*KeywordMap)["u64imageBuffer"] = U64IMAGEBUFFER;
(*KeywordMap)["i64image1DArray"] = I64IMAGE1DARRAY;
(*KeywordMap)["u64image1DArray"] = U64IMAGE1DARRAY;
(*KeywordMap)["i64image2DArray"] = I64IMAGE2DARRAY;
(*KeywordMap)["u64image2DArray"] = U64IMAGE2DARRAY;
(*KeywordMap)["i64imageCubeArray"] = I64IMAGECUBEARRAY;
(*KeywordMap)["u64imageCubeArray"] = U64IMAGECUBEARRAY;
(*KeywordMap)["i64image2DMS"] = I64IMAGE2DMS;
(*KeywordMap)["u64image2DMS"] = U64IMAGE2DMS;
(*KeywordMap)["i64image2DMSArray"] = I64IMAGE2DMSARRAY;
(*KeywordMap)["u64image2DMSArray"] = U64IMAGE2DMSARRAY;
(*KeywordMap)["double"] = DOUBLE;
(*KeywordMap)["dvec2"] = DVEC2;
(*KeywordMap)["dvec3"] = DVEC3;
@ -914,6 +939,17 @@ int TScanContext::tokenizeIdentifier()
case CASE:
return keyword;
case TERMINATE_INVOCATION:
if (!parseContext.extensionTurnedOn(E_GL_EXT_terminate_invocation))
return identifierOrType();
return keyword;
case TERMINATE_RAY:
case IGNORE_INTERSECTION:
if (!parseContext.extensionTurnedOn(E_GL_EXT_ray_tracing))
return identifierOrType();
return keyword;
case BUFFER:
afterBuffer = true;
if ((parseContext.isEsProfile() && parseContext.version < 310) ||
@ -982,7 +1018,7 @@ int TScanContext::tokenizeIdentifier()
return keyword;
case PACKED:
if ((parseContext.isEsProfile() && parseContext.version < 300) ||
(!parseContext.isEsProfile() && parseContext.version < 330))
(!parseContext.isEsProfile() && parseContext.version < 140))
return reservedWord();
return identifierOrType();
@ -1147,6 +1183,19 @@ int TScanContext::tokenizeIdentifier()
afterType = true;
return firstGenerationImage(false);
case I64IMAGE1D:
case U64IMAGE1D:
case I64IMAGE1DARRAY:
case U64IMAGE1DARRAY:
case I64IMAGE2DRECT:
case U64IMAGE2DRECT:
afterType = true;
if (parseContext.symbolTable.atBuiltInLevel() ||
parseContext.extensionTurnedOn(E_GL_EXT_shader_image_int64)) {
return firstGenerationImage(false);
}
return identifierOrType();
case IMAGEBUFFER:
case IIMAGEBUFFER:
case UIMAGEBUFFER:
@ -1155,6 +1204,18 @@ int TScanContext::tokenizeIdentifier()
parseContext.extensionsTurnedOn(Num_AEP_texture_buffer, AEP_texture_buffer))
return keyword;
return firstGenerationImage(false);
case I64IMAGEBUFFER:
case U64IMAGEBUFFER:
afterType = true;
if (parseContext.symbolTable.atBuiltInLevel() ||
parseContext.extensionTurnedOn(E_GL_EXT_shader_image_int64)) {
if ((parseContext.isEsProfile() && parseContext.version >= 320) ||
parseContext.extensionsTurnedOn(Num_AEP_texture_buffer, AEP_texture_buffer))
return keyword;
return firstGenerationImage(false);
}
return identifierOrType();
case IMAGE2D:
case IIMAGE2D:
@ -1171,6 +1232,20 @@ int TScanContext::tokenizeIdentifier()
afterType = true;
return firstGenerationImage(true);
case I64IMAGE2D:
case U64IMAGE2D:
case I64IMAGE3D:
case U64IMAGE3D:
case I64IMAGECUBE:
case U64IMAGECUBE:
case I64IMAGE2DARRAY:
case U64IMAGE2DARRAY:
afterType = true;
if (parseContext.symbolTable.atBuiltInLevel() ||
parseContext.extensionTurnedOn(E_GL_EXT_shader_image_int64))
return firstGenerationImage(true);
return identifierOrType();
case IMAGECUBEARRAY:
case IIMAGECUBEARRAY:
case UIMAGECUBEARRAY:
@ -1179,6 +1254,18 @@ int TScanContext::tokenizeIdentifier()
parseContext.extensionsTurnedOn(Num_AEP_texture_cube_map_array, AEP_texture_cube_map_array))
return keyword;
return secondGenerationImage();
case I64IMAGECUBEARRAY:
case U64IMAGECUBEARRAY:
afterType = true;
if (parseContext.symbolTable.atBuiltInLevel() ||
parseContext.extensionTurnedOn(E_GL_EXT_shader_image_int64)) {
if ((parseContext.isEsProfile() && parseContext.version >= 320) ||
parseContext.extensionsTurnedOn(Num_AEP_texture_cube_map_array, AEP_texture_cube_map_array))
return keyword;
return secondGenerationImage();
}
return identifierOrType();
case IMAGE2DMS:
case IIMAGE2DMS:
@ -1188,6 +1275,17 @@ int TScanContext::tokenizeIdentifier()
case UIMAGE2DMSARRAY:
afterType = true;
return secondGenerationImage();
case I64IMAGE2DMS:
case U64IMAGE2DMS:
case I64IMAGE2DMSARRAY:
case U64IMAGE2DMSARRAY:
afterType = true;
if (parseContext.symbolTable.atBuiltInLevel() ||
parseContext.extensionTurnedOn(E_GL_EXT_shader_image_int64)) {
return secondGenerationImage();
}
return identifierOrType();
case DOUBLE:
case DVEC2:

View file

@ -2016,7 +2016,7 @@ bool TProgram::linkStage(EShLanguage stage, EShMessages messages)
intermediate[stage] = new TIntermediate(stage,
firstIntermediate->getVersion(),
firstIntermediate->getProfile());
intermediate[stage]->setLimits(firstIntermediate->getLimits());
// The new TIntermediate must use the same origin as the original TIntermediates.
// Otherwise linking will fail due to different coordinate systems.

View file

@ -85,6 +85,8 @@ void TType::buildMangledName(TString& mangledName) const
#endif
case EbtInt: mangledName += "i"; break;
case EbtUint: mangledName += "u"; break;
case EbtInt64: mangledName += "i64"; break;
case EbtUint64: mangledName += "u64"; break;
default: break; // some compilers want this
}
if (sampler.isImageClass())
@ -146,6 +148,8 @@ void TType::buildMangledName(TString& mangledName) const
if (typeName)
mangledName += *typeName;
for (unsigned int i = 0; i < structure->size(); ++i) {
if ((*structure)[i].type->getBasicType() == EbtVoid)
continue;
mangledName += '-';
(*structure)[i].type->buildMangledName(mangledName);
}

View file

@ -613,20 +613,24 @@ public:
//
protected:
static const int globalLevel = 3;
bool isSharedLevel(int level) { return level <= 1; } // exclude all per-compile levels
bool isBuiltInLevel(int level) { return level <= 2; } // exclude user globals
bool isGlobalLevel(int level) { return level <= globalLevel; } // include user globals
static bool isSharedLevel(int level) { return level <= 1; } // exclude all per-compile levels
static bool isBuiltInLevel(int level) { return level <= 2; } // exclude user globals
static bool isGlobalLevel(int level) { return level <= globalLevel; } // include user globals
public:
bool isEmpty() { return table.size() == 0; }
bool atBuiltInLevel() { return isBuiltInLevel(currentLevel()); }
bool atGlobalLevel() { return isGlobalLevel(currentLevel()); }
static bool isBuiltInSymbol(int uniqueId) {
int level = uniqueId >> LevelFlagBitOffset;
return isBuiltInLevel(level);
}
void setNoBuiltInRedeclarations() { noBuiltInRedeclarations = true; }
void setSeparateNameSpaces() { separateNameSpaces = true; }
void push()
{
table.push_back(new TSymbolTableLevel);
updateUniqueIdLevelFlag();
}
// Make a new symbol-table level to represent the scope introduced by a structure
@ -639,6 +643,7 @@ public:
{
assert(thisSymbol.getName().size() == 0);
table.push_back(new TSymbolTableLevel);
updateUniqueIdLevelFlag();
table.back()->setThisLevel();
insert(thisSymbol);
}
@ -648,6 +653,7 @@ public:
table[currentLevel()]->getPreviousDefaultPrecisions(p);
delete table.back();
table.pop_back();
updateUniqueIdLevelFlag();
}
//
@ -867,12 +873,20 @@ public:
table[level]->readOnly();
}
// Add current level in the high-bits of unique id
void updateUniqueIdLevelFlag() {
// clamp level to avoid overflow
uint32_t level = currentLevel() > 7 ? 7 : currentLevel();
uniqueId &= ((1 << LevelFlagBitOffset) - 1);
uniqueId |= (level << LevelFlagBitOffset);
}
protected:
TSymbolTable(TSymbolTable&);
TSymbolTable& operator=(TSymbolTableLevel&);
int currentLevel() const { return static_cast<int>(table.size()) - 1; }
static const uint32_t LevelFlagBitOffset = 28;
std::vector<TSymbolTableLevel*> table;
int uniqueId; // for unique identification in code generation
bool noBuiltInRedeclarations;

View file

@ -327,6 +327,9 @@ void TParseVersions::initializeExtensionBehavior()
extensionBehavior[E_GL_EXT_ray_flags_primitive_culling] = EBhDisable;
extensionBehavior[E_GL_EXT_blend_func_extended] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_implicit_conversions] = EBhDisable;
extensionBehavior[E_GL_EXT_fragment_shading_rate] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_image_int64] = EBhDisable;
extensionBehavior[E_GL_EXT_terminate_invocation] = EBhDisable;
// OVR extensions
extensionBehavior[E_GL_OVR_multiview] = EBhDisable;
@ -371,6 +374,7 @@ void TParseVersions::getPreamble(std::string& preamble)
"#define GL_EXT_YUV_target 1\n"
"#define GL_EXT_shader_texture_lod 1\n"
"#define GL_EXT_shadow_samplers 1\n"
"#define GL_EXT_fragment_shading_rate 1\n"
// AEP
"#define GL_ANDROID_extension_pack_es31a 1\n"
@ -408,7 +412,7 @@ void TParseVersions::getPreamble(std::string& preamble)
preamble += "#define GL_NV_shader_noperspective_interpolation 1\n";
}
} else {
} else { // !isEsProfile()
preamble =
"#define GL_FRAGMENT_PRECISION_HIGH 1\n"
"#define GL_ARB_texture_rectangle 1\n"
@ -463,6 +467,7 @@ void TParseVersions::getPreamble(std::string& preamble)
"#define GL_EXT_buffer_reference_uvec2 1\n"
"#define GL_EXT_demote_to_helper_invocation 1\n"
"#define GL_EXT_debug_printf 1\n"
"#define GL_EXT_fragment_shading_rate 1\n"
// GL_KHR_shader_subgroup
"#define GL_KHR_shader_subgroup_basic 1\n"
@ -474,6 +479,7 @@ void TParseVersions::getPreamble(std::string& preamble)
"#define GL_KHR_shader_subgroup_clustered 1\n"
"#define GL_KHR_shader_subgroup_quad 1\n"
"#define GL_EXT_shader_image_int64 1\n"
"#define GL_EXT_shader_atomic_int64 1\n"
"#define GL_EXT_shader_realtime_clock 1\n"
"#define GL_EXT_ray_tracing 1\n"
@ -558,6 +564,11 @@ void TParseVersions::getPreamble(std::string& preamble)
"#define GL_GOOGLE_include_directive 1\n"
"#define GL_KHR_blend_equation_advanced 1\n"
;
// other general extensions
preamble +=
"#define GL_EXT_terminate_invocation 1\n"
;
#endif
// #define VULKAN XXXX

View file

@ -199,6 +199,8 @@ const char* const E_GL_EXT_ray_query = "GL_EXT_ray_query"
const char* const E_GL_EXT_ray_flags_primitive_culling = "GL_EXT_ray_flags_primitive_culling";
const char* const E_GL_EXT_blend_func_extended = "GL_EXT_blend_func_extended";
const char* const E_GL_EXT_shader_implicit_conversions = "GL_EXT_shader_implicit_conversions";
const char* const E_GL_EXT_fragment_shading_rate = "GL_EXT_fragment_shading_rate";
const char* const E_GL_EXT_shader_image_int64 = "GL_EXT_shader_image_int64";
// Arrays of extensions for the above viewportEXTs duplications
@ -297,6 +299,7 @@ const char* const E_GL_EXT_shader_subgroup_extended_types_int8 = "GL_EXT_shad
const char* const E_GL_EXT_shader_subgroup_extended_types_int16 = "GL_EXT_shader_subgroup_extended_types_int16";
const char* const E_GL_EXT_shader_subgroup_extended_types_int64 = "GL_EXT_shader_subgroup_extended_types_int64";
const char* const E_GL_EXT_shader_subgroup_extended_types_float16 = "GL_EXT_shader_subgroup_extended_types_float16";
const char* const E_GL_EXT_terminate_invocation = "GL_EXT_terminate_invocation";
const char* const E_GL_EXT_shader_atomic_float = "GL_EXT_shader_atomic_float";

View file

@ -242,6 +242,18 @@ extern int yylex(YYSTYPE*, TParseContext&);
%token <lex> F16IMAGECUBE F16IMAGE1DARRAY F16IMAGE2DARRAY F16IMAGECUBEARRAY
%token <lex> F16IMAGEBUFFER F16IMAGE2DMS F16IMAGE2DMSARRAY
%token <lex> I64IMAGE1D U64IMAGE1D
%token <lex> I64IMAGE2D U64IMAGE2D
%token <lex> I64IMAGE3D U64IMAGE3D
%token <lex> I64IMAGE2DRECT U64IMAGE2DRECT
%token <lex> I64IMAGECUBE U64IMAGECUBE
%token <lex> I64IMAGEBUFFER U64IMAGEBUFFER
%token <lex> I64IMAGE1DARRAY U64IMAGE1DARRAY
%token <lex> I64IMAGE2DARRAY U64IMAGE2DARRAY
%token <lex> I64IMAGECUBEARRAY U64IMAGECUBEARRAY
%token <lex> I64IMAGE2DMS U64IMAGE2DMS
%token <lex> I64IMAGE2DMSARRAY U64IMAGE2DMSARRAY
// texture without sampler
%token <lex> TEXTURECUBEARRAY ITEXTURECUBEARRAY UTEXTURECUBEARRAY
%token <lex> TEXTURE1D ITEXTURE1D UTEXTURE1D
@ -281,6 +293,8 @@ extern int yylex(YYSTYPE*, TParseContext&);
%token <lex> CENTROID IN OUT INOUT
%token <lex> STRUCT VOID WHILE
%token <lex> BREAK CONTINUE DO ELSE FOR IF DISCARD RETURN SWITCH CASE DEFAULT
%token <lex> TERMINATE_INVOCATION
%token <lex> TERMINATE_RAY IGNORE_INTERSECTION
%token <lex> UNIFORM SHARED BUFFER
%token <lex> FLAT SMOOTH LAYOUT
@ -905,7 +919,7 @@ declaration
block_structure
: type_qualifier IDENTIFIER LEFT_BRACE { parseContext.nestedBlockCheck($1.loc); } struct_declaration_list RIGHT_BRACE {
--parseContext.structNestingLevel;
--parseContext.blockNestingLevel;
parseContext.blockName = $2.string;
parseContext.globalQualifierFixCheck($1.loc, $1.qualifier);
parseContext.checkNoShaderLayouts($1.loc, $1.shaderQualifiers);
@ -3203,6 +3217,116 @@ type_specifier_nonarray
$$.basicType = EbtSampler;
$$.sampler.setImage(EbtUint, Esd2D, true, false, true);
}
| I64IMAGE1D {
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtSampler;
$$.sampler.setImage(EbtInt64, Esd1D);
}
| U64IMAGE1D {
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtSampler;
$$.sampler.setImage(EbtUint64, Esd1D);
}
| I64IMAGE2D {
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtSampler;
$$.sampler.setImage(EbtInt64, Esd2D);
}
| U64IMAGE2D {
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtSampler;
$$.sampler.setImage(EbtUint64, Esd2D);
}
| I64IMAGE3D {
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtSampler;
$$.sampler.setImage(EbtInt64, Esd3D);
}
| U64IMAGE3D {
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtSampler;
$$.sampler.setImage(EbtUint64, Esd3D);
}
| I64IMAGE2DRECT {
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtSampler;
$$.sampler.setImage(EbtInt64, EsdRect);
}
| U64IMAGE2DRECT {
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtSampler;
$$.sampler.setImage(EbtUint64, EsdRect);
}
| I64IMAGECUBE {
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtSampler;
$$.sampler.setImage(EbtInt64, EsdCube);
}
| U64IMAGECUBE {
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtSampler;
$$.sampler.setImage(EbtUint64, EsdCube);
}
| I64IMAGEBUFFER {
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtSampler;
$$.sampler.setImage(EbtInt64, EsdBuffer);
}
| U64IMAGEBUFFER {
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtSampler;
$$.sampler.setImage(EbtUint64, EsdBuffer);
}
| I64IMAGE1DARRAY {
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtSampler;
$$.sampler.setImage(EbtInt64, Esd1D, true);
}
| U64IMAGE1DARRAY {
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtSampler;
$$.sampler.setImage(EbtUint64, Esd1D, true);
}
| I64IMAGE2DARRAY {
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtSampler;
$$.sampler.setImage(EbtInt64, Esd2D, true);
}
| U64IMAGE2DARRAY {
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtSampler;
$$.sampler.setImage(EbtUint64, Esd2D, true);
}
| I64IMAGECUBEARRAY {
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtSampler;
$$.sampler.setImage(EbtInt64, EsdCube, true);
}
| U64IMAGECUBEARRAY {
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtSampler;
$$.sampler.setImage(EbtUint64, EsdCube, true);
}
| I64IMAGE2DMS {
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtSampler;
$$.sampler.setImage(EbtInt64, Esd2D, false, false, true);
}
| U64IMAGE2DMS {
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtSampler;
$$.sampler.setImage(EbtUint64, Esd2D, false, false, true);
}
| I64IMAGE2DMSARRAY {
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtSampler;
$$.sampler.setImage(EbtInt64, Esd2D, true, false, true);
}
| U64IMAGE2DMSARRAY {
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtSampler;
$$.sampler.setImage(EbtUint64, Esd2D, true, false, true);
}
| SAMPLEREXTERNALOES { // GL_OES_EGL_image_external
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtSampler;
@ -3805,6 +3929,20 @@ jump_statement
parseContext.requireStage($1.loc, EShLangFragment, "discard");
$$ = parseContext.intermediate.addBranch(EOpKill, $1.loc);
}
| TERMINATE_INVOCATION SEMICOLON {
parseContext.requireStage($1.loc, EShLangFragment, "terminateInvocation");
$$ = parseContext.intermediate.addBranch(EOpTerminateInvocation, $1.loc);
}
| TERMINATE_RAY SEMICOLON {
parseContext.requireStage($1.loc, EShLangAnyHit, "terminateRayEXT");
$$ = parseContext.intermediate.addBranch(EOpTerminateRayKHR, $1.loc);
}
| IGNORE_INTERSECTION SEMICOLON {
parseContext.requireStage($1.loc, EShLangAnyHit, "ignoreIntersectionEXT");
$$ = parseContext.intermediate.addBranch(EOpIgnoreIntersectionKHR, $1.loc);
}
;
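
For illustration (not part of the patch), the three new jump statements look like this in shader code; the stage assignment and attribute names are assumptions:

// Any-hit stage, assuming GL_EXT_ray_tracing:
#version 460
#extension GL_EXT_ray_tracing : require

hitAttributeEXT vec2 attribs;                      // hypothetical hit attribute
layout(location = 0) rayPayloadInEXT vec4 payload;

void main()
{
    if (attribs.x < 0.5)
        ignoreIntersectionEXT;   // keyword statement, replaces the earlier ignoreIntersectionEXT() built-in
    else
        terminateRayEXT;         // keyword statement, replaces the earlier terminateRayEXT() built-in
}

// Fragment stage, assuming GL_EXT_terminate_invocation:
//     terminateInvocation;      // like 'discard', terminates the invocation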
// Grammar Note: No 'goto'. Gotos are not supported.

File diff suppressed because it is too large

View file

@ -1,8 +1,9 @@
/* A Bison parser, made by GNU Bison 3.0.4. */
/* A Bison parser, made by GNU Bison 3.7.4. */
/* Bison interface for Yacc-like parsers in C
Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc.
Copyright (C) 1984, 1989-1990, 2000-2015, 2018-2020 Free Software Foundation,
Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -30,6 +31,10 @@
This special exception was added by the Free Software Foundation in
version 2.2 of Bison. */
/* DO NOT RELY ON FEATURES THAT ARE NOT DOCUMENTED in the manual,
especially those whose name start with YY_ or yy_. They are
private implementation details that can be changed or removed. */
#ifndef YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED
# define YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED
/* Debug traces. */
@ -40,437 +45,466 @@
extern int yydebug;
#endif
/* Token type. */
/* Token kinds. */
#ifndef YYTOKENTYPE
# define YYTOKENTYPE
enum yytokentype
{
CONST = 258,
BOOL = 259,
INT = 260,
UINT = 261,
FLOAT = 262,
BVEC2 = 263,
BVEC3 = 264,
BVEC4 = 265,
IVEC2 = 266,
IVEC3 = 267,
IVEC4 = 268,
UVEC2 = 269,
UVEC3 = 270,
UVEC4 = 271,
VEC2 = 272,
VEC3 = 273,
VEC4 = 274,
MAT2 = 275,
MAT3 = 276,
MAT4 = 277,
MAT2X2 = 278,
MAT2X3 = 279,
MAT2X4 = 280,
MAT3X2 = 281,
MAT3X3 = 282,
MAT3X4 = 283,
MAT4X2 = 284,
MAT4X3 = 285,
MAT4X4 = 286,
SAMPLER2D = 287,
SAMPLER3D = 288,
SAMPLERCUBE = 289,
SAMPLER2DSHADOW = 290,
SAMPLERCUBESHADOW = 291,
SAMPLER2DARRAY = 292,
SAMPLER2DARRAYSHADOW = 293,
ISAMPLER2D = 294,
ISAMPLER3D = 295,
ISAMPLERCUBE = 296,
ISAMPLER2DARRAY = 297,
USAMPLER2D = 298,
USAMPLER3D = 299,
USAMPLERCUBE = 300,
USAMPLER2DARRAY = 301,
SAMPLER = 302,
SAMPLERSHADOW = 303,
TEXTURE2D = 304,
TEXTURE3D = 305,
TEXTURECUBE = 306,
TEXTURE2DARRAY = 307,
ITEXTURE2D = 308,
ITEXTURE3D = 309,
ITEXTURECUBE = 310,
ITEXTURE2DARRAY = 311,
UTEXTURE2D = 312,
UTEXTURE3D = 313,
UTEXTURECUBE = 314,
UTEXTURE2DARRAY = 315,
ATTRIBUTE = 316,
VARYING = 317,
FLOAT16_T = 318,
FLOAT32_T = 319,
DOUBLE = 320,
FLOAT64_T = 321,
INT64_T = 322,
UINT64_T = 323,
INT32_T = 324,
UINT32_T = 325,
INT16_T = 326,
UINT16_T = 327,
INT8_T = 328,
UINT8_T = 329,
I64VEC2 = 330,
I64VEC3 = 331,
I64VEC4 = 332,
U64VEC2 = 333,
U64VEC3 = 334,
U64VEC4 = 335,
I32VEC2 = 336,
I32VEC3 = 337,
I32VEC4 = 338,
U32VEC2 = 339,
U32VEC3 = 340,
U32VEC4 = 341,
I16VEC2 = 342,
I16VEC3 = 343,
I16VEC4 = 344,
U16VEC2 = 345,
U16VEC3 = 346,
U16VEC4 = 347,
I8VEC2 = 348,
I8VEC3 = 349,
I8VEC4 = 350,
U8VEC2 = 351,
U8VEC3 = 352,
U8VEC4 = 353,
DVEC2 = 354,
DVEC3 = 355,
DVEC4 = 356,
DMAT2 = 357,
DMAT3 = 358,
DMAT4 = 359,
F16VEC2 = 360,
F16VEC3 = 361,
F16VEC4 = 362,
F16MAT2 = 363,
F16MAT3 = 364,
F16MAT4 = 365,
F32VEC2 = 366,
F32VEC3 = 367,
F32VEC4 = 368,
F32MAT2 = 369,
F32MAT3 = 370,
F32MAT4 = 371,
F64VEC2 = 372,
F64VEC3 = 373,
F64VEC4 = 374,
F64MAT2 = 375,
F64MAT3 = 376,
F64MAT4 = 377,
DMAT2X2 = 378,
DMAT2X3 = 379,
DMAT2X4 = 380,
DMAT3X2 = 381,
DMAT3X3 = 382,
DMAT3X4 = 383,
DMAT4X2 = 384,
DMAT4X3 = 385,
DMAT4X4 = 386,
F16MAT2X2 = 387,
F16MAT2X3 = 388,
F16MAT2X4 = 389,
F16MAT3X2 = 390,
F16MAT3X3 = 391,
F16MAT3X4 = 392,
F16MAT4X2 = 393,
F16MAT4X3 = 394,
F16MAT4X4 = 395,
F32MAT2X2 = 396,
F32MAT2X3 = 397,
F32MAT2X4 = 398,
F32MAT3X2 = 399,
F32MAT3X3 = 400,
F32MAT3X4 = 401,
F32MAT4X2 = 402,
F32MAT4X3 = 403,
F32MAT4X4 = 404,
F64MAT2X2 = 405,
F64MAT2X3 = 406,
F64MAT2X4 = 407,
F64MAT3X2 = 408,
F64MAT3X3 = 409,
F64MAT3X4 = 410,
F64MAT4X2 = 411,
F64MAT4X3 = 412,
F64MAT4X4 = 413,
ATOMIC_UINT = 414,
ACCSTRUCTNV = 415,
ACCSTRUCTEXT = 416,
RAYQUERYEXT = 417,
FCOOPMATNV = 418,
ICOOPMATNV = 419,
UCOOPMATNV = 420,
SAMPLERCUBEARRAY = 421,
SAMPLERCUBEARRAYSHADOW = 422,
ISAMPLERCUBEARRAY = 423,
USAMPLERCUBEARRAY = 424,
SAMPLER1D = 425,
SAMPLER1DARRAY = 426,
SAMPLER1DARRAYSHADOW = 427,
ISAMPLER1D = 428,
SAMPLER1DSHADOW = 429,
SAMPLER2DRECT = 430,
SAMPLER2DRECTSHADOW = 431,
ISAMPLER2DRECT = 432,
USAMPLER2DRECT = 433,
SAMPLERBUFFER = 434,
ISAMPLERBUFFER = 435,
USAMPLERBUFFER = 436,
SAMPLER2DMS = 437,
ISAMPLER2DMS = 438,
USAMPLER2DMS = 439,
SAMPLER2DMSARRAY = 440,
ISAMPLER2DMSARRAY = 441,
USAMPLER2DMSARRAY = 442,
SAMPLEREXTERNALOES = 443,
SAMPLEREXTERNAL2DY2YEXT = 444,
ISAMPLER1DARRAY = 445,
USAMPLER1D = 446,
USAMPLER1DARRAY = 447,
F16SAMPLER1D = 448,
F16SAMPLER2D = 449,
F16SAMPLER3D = 450,
F16SAMPLER2DRECT = 451,
F16SAMPLERCUBE = 452,
F16SAMPLER1DARRAY = 453,
F16SAMPLER2DARRAY = 454,
F16SAMPLERCUBEARRAY = 455,
F16SAMPLERBUFFER = 456,
F16SAMPLER2DMS = 457,
F16SAMPLER2DMSARRAY = 458,
F16SAMPLER1DSHADOW = 459,
F16SAMPLER2DSHADOW = 460,
F16SAMPLER1DARRAYSHADOW = 461,
F16SAMPLER2DARRAYSHADOW = 462,
F16SAMPLER2DRECTSHADOW = 463,
F16SAMPLERCUBESHADOW = 464,
F16SAMPLERCUBEARRAYSHADOW = 465,
IMAGE1D = 466,
IIMAGE1D = 467,
UIMAGE1D = 468,
IMAGE2D = 469,
IIMAGE2D = 470,
UIMAGE2D = 471,
IMAGE3D = 472,
IIMAGE3D = 473,
UIMAGE3D = 474,
IMAGE2DRECT = 475,
IIMAGE2DRECT = 476,
UIMAGE2DRECT = 477,
IMAGECUBE = 478,
IIMAGECUBE = 479,
UIMAGECUBE = 480,
IMAGEBUFFER = 481,
IIMAGEBUFFER = 482,
UIMAGEBUFFER = 483,
IMAGE1DARRAY = 484,
IIMAGE1DARRAY = 485,
UIMAGE1DARRAY = 486,
IMAGE2DARRAY = 487,
IIMAGE2DARRAY = 488,
UIMAGE2DARRAY = 489,
IMAGECUBEARRAY = 490,
IIMAGECUBEARRAY = 491,
UIMAGECUBEARRAY = 492,
IMAGE2DMS = 493,
IIMAGE2DMS = 494,
UIMAGE2DMS = 495,
IMAGE2DMSARRAY = 496,
IIMAGE2DMSARRAY = 497,
UIMAGE2DMSARRAY = 498,
F16IMAGE1D = 499,
F16IMAGE2D = 500,
F16IMAGE3D = 501,
F16IMAGE2DRECT = 502,
F16IMAGECUBE = 503,
F16IMAGE1DARRAY = 504,
F16IMAGE2DARRAY = 505,
F16IMAGECUBEARRAY = 506,
F16IMAGEBUFFER = 507,
F16IMAGE2DMS = 508,
F16IMAGE2DMSARRAY = 509,
TEXTURECUBEARRAY = 510,
ITEXTURECUBEARRAY = 511,
UTEXTURECUBEARRAY = 512,
TEXTURE1D = 513,
ITEXTURE1D = 514,
UTEXTURE1D = 515,
TEXTURE1DARRAY = 516,
ITEXTURE1DARRAY = 517,
UTEXTURE1DARRAY = 518,
TEXTURE2DRECT = 519,
ITEXTURE2DRECT = 520,
UTEXTURE2DRECT = 521,
TEXTUREBUFFER = 522,
ITEXTUREBUFFER = 523,
UTEXTUREBUFFER = 524,
TEXTURE2DMS = 525,
ITEXTURE2DMS = 526,
UTEXTURE2DMS = 527,
TEXTURE2DMSARRAY = 528,
ITEXTURE2DMSARRAY = 529,
UTEXTURE2DMSARRAY = 530,
F16TEXTURE1D = 531,
F16TEXTURE2D = 532,
F16TEXTURE3D = 533,
F16TEXTURE2DRECT = 534,
F16TEXTURECUBE = 535,
F16TEXTURE1DARRAY = 536,
F16TEXTURE2DARRAY = 537,
F16TEXTURECUBEARRAY = 538,
F16TEXTUREBUFFER = 539,
F16TEXTURE2DMS = 540,
F16TEXTURE2DMSARRAY = 541,
SUBPASSINPUT = 542,
SUBPASSINPUTMS = 543,
ISUBPASSINPUT = 544,
ISUBPASSINPUTMS = 545,
USUBPASSINPUT = 546,
USUBPASSINPUTMS = 547,
F16SUBPASSINPUT = 548,
F16SUBPASSINPUTMS = 549,
LEFT_OP = 550,
RIGHT_OP = 551,
INC_OP = 552,
DEC_OP = 553,
LE_OP = 554,
GE_OP = 555,
EQ_OP = 556,
NE_OP = 557,
AND_OP = 558,
OR_OP = 559,
XOR_OP = 560,
MUL_ASSIGN = 561,
DIV_ASSIGN = 562,
ADD_ASSIGN = 563,
MOD_ASSIGN = 564,
LEFT_ASSIGN = 565,
RIGHT_ASSIGN = 566,
AND_ASSIGN = 567,
XOR_ASSIGN = 568,
OR_ASSIGN = 569,
SUB_ASSIGN = 570,
STRING_LITERAL = 571,
LEFT_PAREN = 572,
RIGHT_PAREN = 573,
LEFT_BRACKET = 574,
RIGHT_BRACKET = 575,
LEFT_BRACE = 576,
RIGHT_BRACE = 577,
DOT = 578,
COMMA = 579,
COLON = 580,
EQUAL = 581,
SEMICOLON = 582,
BANG = 583,
DASH = 584,
TILDE = 585,
PLUS = 586,
STAR = 587,
SLASH = 588,
PERCENT = 589,
LEFT_ANGLE = 590,
RIGHT_ANGLE = 591,
VERTICAL_BAR = 592,
CARET = 593,
AMPERSAND = 594,
QUESTION = 595,
INVARIANT = 596,
HIGH_PRECISION = 597,
MEDIUM_PRECISION = 598,
LOW_PRECISION = 599,
PRECISION = 600,
PACKED = 601,
RESOURCE = 602,
SUPERP = 603,
FLOATCONSTANT = 604,
INTCONSTANT = 605,
UINTCONSTANT = 606,
BOOLCONSTANT = 607,
IDENTIFIER = 608,
TYPE_NAME = 609,
CENTROID = 610,
IN = 611,
OUT = 612,
INOUT = 613,
STRUCT = 614,
VOID = 615,
WHILE = 616,
BREAK = 617,
CONTINUE = 618,
DO = 619,
ELSE = 620,
FOR = 621,
IF = 622,
DISCARD = 623,
RETURN = 624,
SWITCH = 625,
CASE = 626,
DEFAULT = 627,
UNIFORM = 628,
SHARED = 629,
BUFFER = 630,
FLAT = 631,
SMOOTH = 632,
LAYOUT = 633,
DOUBLECONSTANT = 634,
INT16CONSTANT = 635,
UINT16CONSTANT = 636,
FLOAT16CONSTANT = 637,
INT32CONSTANT = 638,
UINT32CONSTANT = 639,
INT64CONSTANT = 640,
UINT64CONSTANT = 641,
SUBROUTINE = 642,
DEMOTE = 643,
PAYLOADNV = 644,
PAYLOADINNV = 645,
HITATTRNV = 646,
CALLDATANV = 647,
CALLDATAINNV = 648,
PAYLOADEXT = 649,
PAYLOADINEXT = 650,
HITATTREXT = 651,
CALLDATAEXT = 652,
CALLDATAINEXT = 653,
PATCH = 654,
SAMPLE = 655,
NONUNIFORM = 656,
COHERENT = 657,
VOLATILE = 658,
RESTRICT = 659,
READONLY = 660,
WRITEONLY = 661,
DEVICECOHERENT = 662,
QUEUEFAMILYCOHERENT = 663,
WORKGROUPCOHERENT = 664,
SUBGROUPCOHERENT = 665,
NONPRIVATE = 666,
SHADERCALLCOHERENT = 667,
NOPERSPECTIVE = 668,
EXPLICITINTERPAMD = 669,
PERVERTEXNV = 670,
PERPRIMITIVENV = 671,
PERVIEWNV = 672,
PERTASKNV = 673,
PRECISE = 674
YYEMPTY = -2,
YYEOF = 0, /* "end of file" */
YYerror = 256, /* error */
YYUNDEF = 257, /* "invalid token" */
CONST = 258, /* CONST */
BOOL = 259, /* BOOL */
INT = 260, /* INT */
UINT = 261, /* UINT */
FLOAT = 262, /* FLOAT */
BVEC2 = 263, /* BVEC2 */
BVEC3 = 264, /* BVEC3 */
BVEC4 = 265, /* BVEC4 */
IVEC2 = 266, /* IVEC2 */
IVEC3 = 267, /* IVEC3 */
IVEC4 = 268, /* IVEC4 */
UVEC2 = 269, /* UVEC2 */
UVEC3 = 270, /* UVEC3 */
UVEC4 = 271, /* UVEC4 */
VEC2 = 272, /* VEC2 */
VEC3 = 273, /* VEC3 */
VEC4 = 274, /* VEC4 */
MAT2 = 275, /* MAT2 */
MAT3 = 276, /* MAT3 */
MAT4 = 277, /* MAT4 */
MAT2X2 = 278, /* MAT2X2 */
MAT2X3 = 279, /* MAT2X3 */
MAT2X4 = 280, /* MAT2X4 */
MAT3X2 = 281, /* MAT3X2 */
MAT3X3 = 282, /* MAT3X3 */
MAT3X4 = 283, /* MAT3X4 */
MAT4X2 = 284, /* MAT4X2 */
MAT4X3 = 285, /* MAT4X3 */
MAT4X4 = 286, /* MAT4X4 */
SAMPLER2D = 287, /* SAMPLER2D */
SAMPLER3D = 288, /* SAMPLER3D */
SAMPLERCUBE = 289, /* SAMPLERCUBE */
SAMPLER2DSHADOW = 290, /* SAMPLER2DSHADOW */
SAMPLERCUBESHADOW = 291, /* SAMPLERCUBESHADOW */
SAMPLER2DARRAY = 292, /* SAMPLER2DARRAY */
SAMPLER2DARRAYSHADOW = 293, /* SAMPLER2DARRAYSHADOW */
ISAMPLER2D = 294, /* ISAMPLER2D */
ISAMPLER3D = 295, /* ISAMPLER3D */
ISAMPLERCUBE = 296, /* ISAMPLERCUBE */
ISAMPLER2DARRAY = 297, /* ISAMPLER2DARRAY */
USAMPLER2D = 298, /* USAMPLER2D */
USAMPLER3D = 299, /* USAMPLER3D */
USAMPLERCUBE = 300, /* USAMPLERCUBE */
USAMPLER2DARRAY = 301, /* USAMPLER2DARRAY */
SAMPLER = 302, /* SAMPLER */
SAMPLERSHADOW = 303, /* SAMPLERSHADOW */
TEXTURE2D = 304, /* TEXTURE2D */
TEXTURE3D = 305, /* TEXTURE3D */
TEXTURECUBE = 306, /* TEXTURECUBE */
TEXTURE2DARRAY = 307, /* TEXTURE2DARRAY */
ITEXTURE2D = 308, /* ITEXTURE2D */
ITEXTURE3D = 309, /* ITEXTURE3D */
ITEXTURECUBE = 310, /* ITEXTURECUBE */
ITEXTURE2DARRAY = 311, /* ITEXTURE2DARRAY */
UTEXTURE2D = 312, /* UTEXTURE2D */
UTEXTURE3D = 313, /* UTEXTURE3D */
UTEXTURECUBE = 314, /* UTEXTURECUBE */
UTEXTURE2DARRAY = 315, /* UTEXTURE2DARRAY */
ATTRIBUTE = 316, /* ATTRIBUTE */
VARYING = 317, /* VARYING */
FLOAT16_T = 318, /* FLOAT16_T */
FLOAT32_T = 319, /* FLOAT32_T */
DOUBLE = 320, /* DOUBLE */
FLOAT64_T = 321, /* FLOAT64_T */
INT64_T = 322, /* INT64_T */
UINT64_T = 323, /* UINT64_T */
INT32_T = 324, /* INT32_T */
UINT32_T = 325, /* UINT32_T */
INT16_T = 326, /* INT16_T */
UINT16_T = 327, /* UINT16_T */
INT8_T = 328, /* INT8_T */
UINT8_T = 329, /* UINT8_T */
I64VEC2 = 330, /* I64VEC2 */
I64VEC3 = 331, /* I64VEC3 */
I64VEC4 = 332, /* I64VEC4 */
U64VEC2 = 333, /* U64VEC2 */
U64VEC3 = 334, /* U64VEC3 */
U64VEC4 = 335, /* U64VEC4 */
I32VEC2 = 336, /* I32VEC2 */
I32VEC3 = 337, /* I32VEC3 */
I32VEC4 = 338, /* I32VEC4 */
U32VEC2 = 339, /* U32VEC2 */
U32VEC3 = 340, /* U32VEC3 */
U32VEC4 = 341, /* U32VEC4 */
I16VEC2 = 342, /* I16VEC2 */
I16VEC3 = 343, /* I16VEC3 */
I16VEC4 = 344, /* I16VEC4 */
U16VEC2 = 345, /* U16VEC2 */
U16VEC3 = 346, /* U16VEC3 */
U16VEC4 = 347, /* U16VEC4 */
I8VEC2 = 348, /* I8VEC2 */
I8VEC3 = 349, /* I8VEC3 */
I8VEC4 = 350, /* I8VEC4 */
U8VEC2 = 351, /* U8VEC2 */
U8VEC3 = 352, /* U8VEC3 */
U8VEC4 = 353, /* U8VEC4 */
DVEC2 = 354, /* DVEC2 */
DVEC3 = 355, /* DVEC3 */
DVEC4 = 356, /* DVEC4 */
DMAT2 = 357, /* DMAT2 */
DMAT3 = 358, /* DMAT3 */
DMAT4 = 359, /* DMAT4 */
F16VEC2 = 360, /* F16VEC2 */
F16VEC3 = 361, /* F16VEC3 */
F16VEC4 = 362, /* F16VEC4 */
F16MAT2 = 363, /* F16MAT2 */
F16MAT3 = 364, /* F16MAT3 */
F16MAT4 = 365, /* F16MAT4 */
F32VEC2 = 366, /* F32VEC2 */
F32VEC3 = 367, /* F32VEC3 */
F32VEC4 = 368, /* F32VEC4 */
F32MAT2 = 369, /* F32MAT2 */
F32MAT3 = 370, /* F32MAT3 */
F32MAT4 = 371, /* F32MAT4 */
F64VEC2 = 372, /* F64VEC2 */
F64VEC3 = 373, /* F64VEC3 */
F64VEC4 = 374, /* F64VEC4 */
F64MAT2 = 375, /* F64MAT2 */
F64MAT3 = 376, /* F64MAT3 */
F64MAT4 = 377, /* F64MAT4 */
DMAT2X2 = 378, /* DMAT2X2 */
DMAT2X3 = 379, /* DMAT2X3 */
DMAT2X4 = 380, /* DMAT2X4 */
DMAT3X2 = 381, /* DMAT3X2 */
DMAT3X3 = 382, /* DMAT3X3 */
DMAT3X4 = 383, /* DMAT3X4 */
DMAT4X2 = 384, /* DMAT4X2 */
DMAT4X3 = 385, /* DMAT4X3 */
DMAT4X4 = 386, /* DMAT4X4 */
F16MAT2X2 = 387, /* F16MAT2X2 */
F16MAT2X3 = 388, /* F16MAT2X3 */
F16MAT2X4 = 389, /* F16MAT2X4 */
F16MAT3X2 = 390, /* F16MAT3X2 */
F16MAT3X3 = 391, /* F16MAT3X3 */
F16MAT3X4 = 392, /* F16MAT3X4 */
F16MAT4X2 = 393, /* F16MAT4X2 */
F16MAT4X3 = 394, /* F16MAT4X3 */
F16MAT4X4 = 395, /* F16MAT4X4 */
F32MAT2X2 = 396, /* F32MAT2X2 */
F32MAT2X3 = 397, /* F32MAT2X3 */
F32MAT2X4 = 398, /* F32MAT2X4 */
F32MAT3X2 = 399, /* F32MAT3X2 */
F32MAT3X3 = 400, /* F32MAT3X3 */
F32MAT3X4 = 401, /* F32MAT3X4 */
F32MAT4X2 = 402, /* F32MAT4X2 */
F32MAT4X3 = 403, /* F32MAT4X3 */
F32MAT4X4 = 404, /* F32MAT4X4 */
F64MAT2X2 = 405, /* F64MAT2X2 */
F64MAT2X3 = 406, /* F64MAT2X3 */
F64MAT2X4 = 407, /* F64MAT2X4 */
F64MAT3X2 = 408, /* F64MAT3X2 */
F64MAT3X3 = 409, /* F64MAT3X3 */
F64MAT3X4 = 410, /* F64MAT3X4 */
F64MAT4X2 = 411, /* F64MAT4X2 */
F64MAT4X3 = 412, /* F64MAT4X3 */
F64MAT4X4 = 413, /* F64MAT4X4 */
ATOMIC_UINT = 414, /* ATOMIC_UINT */
ACCSTRUCTNV = 415, /* ACCSTRUCTNV */
ACCSTRUCTEXT = 416, /* ACCSTRUCTEXT */
RAYQUERYEXT = 417, /* RAYQUERYEXT */
FCOOPMATNV = 418, /* FCOOPMATNV */
ICOOPMATNV = 419, /* ICOOPMATNV */
UCOOPMATNV = 420, /* UCOOPMATNV */
SAMPLERCUBEARRAY = 421, /* SAMPLERCUBEARRAY */
SAMPLERCUBEARRAYSHADOW = 422, /* SAMPLERCUBEARRAYSHADOW */
ISAMPLERCUBEARRAY = 423, /* ISAMPLERCUBEARRAY */
USAMPLERCUBEARRAY = 424, /* USAMPLERCUBEARRAY */
SAMPLER1D = 425, /* SAMPLER1D */
SAMPLER1DARRAY = 426, /* SAMPLER1DARRAY */
SAMPLER1DARRAYSHADOW = 427, /* SAMPLER1DARRAYSHADOW */
ISAMPLER1D = 428, /* ISAMPLER1D */
SAMPLER1DSHADOW = 429, /* SAMPLER1DSHADOW */
SAMPLER2DRECT = 430, /* SAMPLER2DRECT */
SAMPLER2DRECTSHADOW = 431, /* SAMPLER2DRECTSHADOW */
ISAMPLER2DRECT = 432, /* ISAMPLER2DRECT */
USAMPLER2DRECT = 433, /* USAMPLER2DRECT */
SAMPLERBUFFER = 434, /* SAMPLERBUFFER */
ISAMPLERBUFFER = 435, /* ISAMPLERBUFFER */
USAMPLERBUFFER = 436, /* USAMPLERBUFFER */
SAMPLER2DMS = 437, /* SAMPLER2DMS */
ISAMPLER2DMS = 438, /* ISAMPLER2DMS */
USAMPLER2DMS = 439, /* USAMPLER2DMS */
SAMPLER2DMSARRAY = 440, /* SAMPLER2DMSARRAY */
ISAMPLER2DMSARRAY = 441, /* ISAMPLER2DMSARRAY */
USAMPLER2DMSARRAY = 442, /* USAMPLER2DMSARRAY */
SAMPLEREXTERNALOES = 443, /* SAMPLEREXTERNALOES */
SAMPLEREXTERNAL2DY2YEXT = 444, /* SAMPLEREXTERNAL2DY2YEXT */
ISAMPLER1DARRAY = 445, /* ISAMPLER1DARRAY */
USAMPLER1D = 446, /* USAMPLER1D */
USAMPLER1DARRAY = 447, /* USAMPLER1DARRAY */
F16SAMPLER1D = 448, /* F16SAMPLER1D */
F16SAMPLER2D = 449, /* F16SAMPLER2D */
F16SAMPLER3D = 450, /* F16SAMPLER3D */
F16SAMPLER2DRECT = 451, /* F16SAMPLER2DRECT */
F16SAMPLERCUBE = 452, /* F16SAMPLERCUBE */
F16SAMPLER1DARRAY = 453, /* F16SAMPLER1DARRAY */
F16SAMPLER2DARRAY = 454, /* F16SAMPLER2DARRAY */
F16SAMPLERCUBEARRAY = 455, /* F16SAMPLERCUBEARRAY */
F16SAMPLERBUFFER = 456, /* F16SAMPLERBUFFER */
F16SAMPLER2DMS = 457, /* F16SAMPLER2DMS */
F16SAMPLER2DMSARRAY = 458, /* F16SAMPLER2DMSARRAY */
F16SAMPLER1DSHADOW = 459, /* F16SAMPLER1DSHADOW */
F16SAMPLER2DSHADOW = 460, /* F16SAMPLER2DSHADOW */
F16SAMPLER1DARRAYSHADOW = 461, /* F16SAMPLER1DARRAYSHADOW */
F16SAMPLER2DARRAYSHADOW = 462, /* F16SAMPLER2DARRAYSHADOW */
F16SAMPLER2DRECTSHADOW = 463, /* F16SAMPLER2DRECTSHADOW */
F16SAMPLERCUBESHADOW = 464, /* F16SAMPLERCUBESHADOW */
F16SAMPLERCUBEARRAYSHADOW = 465, /* F16SAMPLERCUBEARRAYSHADOW */
IMAGE1D = 466, /* IMAGE1D */
IIMAGE1D = 467, /* IIMAGE1D */
UIMAGE1D = 468, /* UIMAGE1D */
IMAGE2D = 469, /* IMAGE2D */
IIMAGE2D = 470, /* IIMAGE2D */
UIMAGE2D = 471, /* UIMAGE2D */
IMAGE3D = 472, /* IMAGE3D */
IIMAGE3D = 473, /* IIMAGE3D */
UIMAGE3D = 474, /* UIMAGE3D */
IMAGE2DRECT = 475, /* IMAGE2DRECT */
IIMAGE2DRECT = 476, /* IIMAGE2DRECT */
UIMAGE2DRECT = 477, /* UIMAGE2DRECT */
IMAGECUBE = 478, /* IMAGECUBE */
IIMAGECUBE = 479, /* IIMAGECUBE */
UIMAGECUBE = 480, /* UIMAGECUBE */
IMAGEBUFFER = 481, /* IMAGEBUFFER */
IIMAGEBUFFER = 482, /* IIMAGEBUFFER */
UIMAGEBUFFER = 483, /* UIMAGEBUFFER */
IMAGE1DARRAY = 484, /* IMAGE1DARRAY */
IIMAGE1DARRAY = 485, /* IIMAGE1DARRAY */
UIMAGE1DARRAY = 486, /* UIMAGE1DARRAY */
IMAGE2DARRAY = 487, /* IMAGE2DARRAY */
IIMAGE2DARRAY = 488, /* IIMAGE2DARRAY */
UIMAGE2DARRAY = 489, /* UIMAGE2DARRAY */
IMAGECUBEARRAY = 490, /* IMAGECUBEARRAY */
IIMAGECUBEARRAY = 491, /* IIMAGECUBEARRAY */
UIMAGECUBEARRAY = 492, /* UIMAGECUBEARRAY */
IMAGE2DMS = 493, /* IMAGE2DMS */
IIMAGE2DMS = 494, /* IIMAGE2DMS */
UIMAGE2DMS = 495, /* UIMAGE2DMS */
IMAGE2DMSARRAY = 496, /* IMAGE2DMSARRAY */
IIMAGE2DMSARRAY = 497, /* IIMAGE2DMSARRAY */
UIMAGE2DMSARRAY = 498, /* UIMAGE2DMSARRAY */
F16IMAGE1D = 499, /* F16IMAGE1D */
F16IMAGE2D = 500, /* F16IMAGE2D */
F16IMAGE3D = 501, /* F16IMAGE3D */
F16IMAGE2DRECT = 502, /* F16IMAGE2DRECT */
F16IMAGECUBE = 503, /* F16IMAGECUBE */
F16IMAGE1DARRAY = 504, /* F16IMAGE1DARRAY */
F16IMAGE2DARRAY = 505, /* F16IMAGE2DARRAY */
F16IMAGECUBEARRAY = 506, /* F16IMAGECUBEARRAY */
F16IMAGEBUFFER = 507, /* F16IMAGEBUFFER */
F16IMAGE2DMS = 508, /* F16IMAGE2DMS */
F16IMAGE2DMSARRAY = 509, /* F16IMAGE2DMSARRAY */
I64IMAGE1D = 510, /* I64IMAGE1D */
U64IMAGE1D = 511, /* U64IMAGE1D */
I64IMAGE2D = 512, /* I64IMAGE2D */
U64IMAGE2D = 513, /* U64IMAGE2D */
I64IMAGE3D = 514, /* I64IMAGE3D */
U64IMAGE3D = 515, /* U64IMAGE3D */
I64IMAGE2DRECT = 516, /* I64IMAGE2DRECT */
U64IMAGE2DRECT = 517, /* U64IMAGE2DRECT */
I64IMAGECUBE = 518, /* I64IMAGECUBE */
U64IMAGECUBE = 519, /* U64IMAGECUBE */
I64IMAGEBUFFER = 520, /* I64IMAGEBUFFER */
U64IMAGEBUFFER = 521, /* U64IMAGEBUFFER */
I64IMAGE1DARRAY = 522, /* I64IMAGE1DARRAY */
U64IMAGE1DARRAY = 523, /* U64IMAGE1DARRAY */
I64IMAGE2DARRAY = 524, /* I64IMAGE2DARRAY */
U64IMAGE2DARRAY = 525, /* U64IMAGE2DARRAY */
I64IMAGECUBEARRAY = 526, /* I64IMAGECUBEARRAY */
U64IMAGECUBEARRAY = 527, /* U64IMAGECUBEARRAY */
I64IMAGE2DMS = 528, /* I64IMAGE2DMS */
U64IMAGE2DMS = 529, /* U64IMAGE2DMS */
I64IMAGE2DMSARRAY = 530, /* I64IMAGE2DMSARRAY */
U64IMAGE2DMSARRAY = 531, /* U64IMAGE2DMSARRAY */
TEXTURECUBEARRAY = 532, /* TEXTURECUBEARRAY */
ITEXTURECUBEARRAY = 533, /* ITEXTURECUBEARRAY */
UTEXTURECUBEARRAY = 534, /* UTEXTURECUBEARRAY */
TEXTURE1D = 535, /* TEXTURE1D */
ITEXTURE1D = 536, /* ITEXTURE1D */
UTEXTURE1D = 537, /* UTEXTURE1D */
TEXTURE1DARRAY = 538, /* TEXTURE1DARRAY */
ITEXTURE1DARRAY = 539, /* ITEXTURE1DARRAY */
UTEXTURE1DARRAY = 540, /* UTEXTURE1DARRAY */
TEXTURE2DRECT = 541, /* TEXTURE2DRECT */
ITEXTURE2DRECT = 542, /* ITEXTURE2DRECT */
UTEXTURE2DRECT = 543, /* UTEXTURE2DRECT */
TEXTUREBUFFER = 544, /* TEXTUREBUFFER */
ITEXTUREBUFFER = 545, /* ITEXTUREBUFFER */
UTEXTUREBUFFER = 546, /* UTEXTUREBUFFER */
TEXTURE2DMS = 547, /* TEXTURE2DMS */
ITEXTURE2DMS = 548, /* ITEXTURE2DMS */
UTEXTURE2DMS = 549, /* UTEXTURE2DMS */
TEXTURE2DMSARRAY = 550, /* TEXTURE2DMSARRAY */
ITEXTURE2DMSARRAY = 551, /* ITEXTURE2DMSARRAY */
UTEXTURE2DMSARRAY = 552, /* UTEXTURE2DMSARRAY */
F16TEXTURE1D = 553, /* F16TEXTURE1D */
F16TEXTURE2D = 554, /* F16TEXTURE2D */
F16TEXTURE3D = 555, /* F16TEXTURE3D */
F16TEXTURE2DRECT = 556, /* F16TEXTURE2DRECT */
F16TEXTURECUBE = 557, /* F16TEXTURECUBE */
F16TEXTURE1DARRAY = 558, /* F16TEXTURE1DARRAY */
F16TEXTURE2DARRAY = 559, /* F16TEXTURE2DARRAY */
F16TEXTURECUBEARRAY = 560, /* F16TEXTURECUBEARRAY */
F16TEXTUREBUFFER = 561, /* F16TEXTUREBUFFER */
F16TEXTURE2DMS = 562, /* F16TEXTURE2DMS */
F16TEXTURE2DMSARRAY = 563, /* F16TEXTURE2DMSARRAY */
SUBPASSINPUT = 564, /* SUBPASSINPUT */
SUBPASSINPUTMS = 565, /* SUBPASSINPUTMS */
ISUBPASSINPUT = 566, /* ISUBPASSINPUT */
ISUBPASSINPUTMS = 567, /* ISUBPASSINPUTMS */
USUBPASSINPUT = 568, /* USUBPASSINPUT */
USUBPASSINPUTMS = 569, /* USUBPASSINPUTMS */
F16SUBPASSINPUT = 570, /* F16SUBPASSINPUT */
F16SUBPASSINPUTMS = 571, /* F16SUBPASSINPUTMS */
LEFT_OP = 572, /* LEFT_OP */
RIGHT_OP = 573, /* RIGHT_OP */
INC_OP = 574, /* INC_OP */
DEC_OP = 575, /* DEC_OP */
LE_OP = 576, /* LE_OP */
GE_OP = 577, /* GE_OP */
EQ_OP = 578, /* EQ_OP */
NE_OP = 579, /* NE_OP */
AND_OP = 580, /* AND_OP */
OR_OP = 581, /* OR_OP */
XOR_OP = 582, /* XOR_OP */
MUL_ASSIGN = 583, /* MUL_ASSIGN */
DIV_ASSIGN = 584, /* DIV_ASSIGN */
ADD_ASSIGN = 585, /* ADD_ASSIGN */
MOD_ASSIGN = 586, /* MOD_ASSIGN */
LEFT_ASSIGN = 587, /* LEFT_ASSIGN */
RIGHT_ASSIGN = 588, /* RIGHT_ASSIGN */
AND_ASSIGN = 589, /* AND_ASSIGN */
XOR_ASSIGN = 590, /* XOR_ASSIGN */
OR_ASSIGN = 591, /* OR_ASSIGN */
SUB_ASSIGN = 592, /* SUB_ASSIGN */
STRING_LITERAL = 593, /* STRING_LITERAL */
LEFT_PAREN = 594, /* LEFT_PAREN */
RIGHT_PAREN = 595, /* RIGHT_PAREN */
LEFT_BRACKET = 596, /* LEFT_BRACKET */
RIGHT_BRACKET = 597, /* RIGHT_BRACKET */
LEFT_BRACE = 598, /* LEFT_BRACE */
RIGHT_BRACE = 599, /* RIGHT_BRACE */
DOT = 600, /* DOT */
COMMA = 601, /* COMMA */
COLON = 602, /* COLON */
EQUAL = 603, /* EQUAL */
SEMICOLON = 604, /* SEMICOLON */
BANG = 605, /* BANG */
DASH = 606, /* DASH */
TILDE = 607, /* TILDE */
PLUS = 608, /* PLUS */
STAR = 609, /* STAR */
SLASH = 610, /* SLASH */
PERCENT = 611, /* PERCENT */
LEFT_ANGLE = 612, /* LEFT_ANGLE */
RIGHT_ANGLE = 613, /* RIGHT_ANGLE */
VERTICAL_BAR = 614, /* VERTICAL_BAR */
CARET = 615, /* CARET */
AMPERSAND = 616, /* AMPERSAND */
QUESTION = 617, /* QUESTION */
INVARIANT = 618, /* INVARIANT */
HIGH_PRECISION = 619, /* HIGH_PRECISION */
MEDIUM_PRECISION = 620, /* MEDIUM_PRECISION */
LOW_PRECISION = 621, /* LOW_PRECISION */
PRECISION = 622, /* PRECISION */
PACKED = 623, /* PACKED */
RESOURCE = 624, /* RESOURCE */
SUPERP = 625, /* SUPERP */
FLOATCONSTANT = 626, /* FLOATCONSTANT */
INTCONSTANT = 627, /* INTCONSTANT */
UINTCONSTANT = 628, /* UINTCONSTANT */
BOOLCONSTANT = 629, /* BOOLCONSTANT */
IDENTIFIER = 630, /* IDENTIFIER */
TYPE_NAME = 631, /* TYPE_NAME */
CENTROID = 632, /* CENTROID */
IN = 633, /* IN */
OUT = 634, /* OUT */
INOUT = 635, /* INOUT */
STRUCT = 636, /* STRUCT */
VOID = 637, /* VOID */
WHILE = 638, /* WHILE */
BREAK = 639, /* BREAK */
CONTINUE = 640, /* CONTINUE */
DO = 641, /* DO */
ELSE = 642, /* ELSE */
FOR = 643, /* FOR */
IF = 644, /* IF */
DISCARD = 645, /* DISCARD */
RETURN = 646, /* RETURN */
SWITCH = 647, /* SWITCH */
CASE = 648, /* CASE */
DEFAULT = 649, /* DEFAULT */
TERMINATE_INVOCATION = 650, /* TERMINATE_INVOCATION */
TERMINATE_RAY = 651, /* TERMINATE_RAY */
IGNORE_INTERSECTION = 652, /* IGNORE_INTERSECTION */
UNIFORM = 653, /* UNIFORM */
SHARED = 654, /* SHARED */
BUFFER = 655, /* BUFFER */
FLAT = 656, /* FLAT */
SMOOTH = 657, /* SMOOTH */
LAYOUT = 658, /* LAYOUT */
DOUBLECONSTANT = 659, /* DOUBLECONSTANT */
INT16CONSTANT = 660, /* INT16CONSTANT */
UINT16CONSTANT = 661, /* UINT16CONSTANT */
FLOAT16CONSTANT = 662, /* FLOAT16CONSTANT */
INT32CONSTANT = 663, /* INT32CONSTANT */
UINT32CONSTANT = 664, /* UINT32CONSTANT */
INT64CONSTANT = 665, /* INT64CONSTANT */
UINT64CONSTANT = 666, /* UINT64CONSTANT */
SUBROUTINE = 667, /* SUBROUTINE */
DEMOTE = 668, /* DEMOTE */
PAYLOADNV = 669, /* PAYLOADNV */
PAYLOADINNV = 670, /* PAYLOADINNV */
HITATTRNV = 671, /* HITATTRNV */
CALLDATANV = 672, /* CALLDATANV */
CALLDATAINNV = 673, /* CALLDATAINNV */
PAYLOADEXT = 674, /* PAYLOADEXT */
PAYLOADINEXT = 675, /* PAYLOADINEXT */
HITATTREXT = 676, /* HITATTREXT */
CALLDATAEXT = 677, /* CALLDATAEXT */
CALLDATAINEXT = 678, /* CALLDATAINEXT */
PATCH = 679, /* PATCH */
SAMPLE = 680, /* SAMPLE */
NONUNIFORM = 681, /* NONUNIFORM */
COHERENT = 682, /* COHERENT */
VOLATILE = 683, /* VOLATILE */
RESTRICT = 684, /* RESTRICT */
READONLY = 685, /* READONLY */
WRITEONLY = 686, /* WRITEONLY */
DEVICECOHERENT = 687, /* DEVICECOHERENT */
QUEUEFAMILYCOHERENT = 688, /* QUEUEFAMILYCOHERENT */
WORKGROUPCOHERENT = 689, /* WORKGROUPCOHERENT */
SUBGROUPCOHERENT = 690, /* SUBGROUPCOHERENT */
NONPRIVATE = 691, /* NONPRIVATE */
SHADERCALLCOHERENT = 692, /* SHADERCALLCOHERENT */
NOPERSPECTIVE = 693, /* NOPERSPECTIVE */
EXPLICITINTERPAMD = 694, /* EXPLICITINTERPAMD */
PERVERTEXNV = 695, /* PERVERTEXNV */
PERPRIMITIVENV = 696, /* PERPRIMITIVENV */
PERVIEWNV = 697, /* PERVIEWNV */
PERTASKNV = 698, /* PERTASKNV */
PRECISE = 699 /* PRECISE */
};
typedef enum yytokentype yytoken_kind_t;
#endif
/* Value type. */
#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
union YYSTYPE
{
#line 97 "MachineIndependent/glslang.y" /* yacc.c:1909 */
#line 97 "MachineIndependent/glslang.y"
struct {
glslang::TSourceLoc loc;
@@ -506,9 +540,9 @@ union YYSTYPE
glslang::TArraySizes* typeParameters;
} interm;
#line 510 "MachineIndependent/glslang_tab.cpp.h" /* yacc.c:1909 */
};
#line 544 "MachineIndependent/glslang_tab.cpp.h"
};
typedef union YYSTYPE YYSTYPE;
# define YYSTYPE_IS_TRIVIAL 1
# define YYSTYPE_IS_DECLARED 1
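The new I64IMAGE*/U64IMAGE* token kinds above back the GL_EXT_shader_image_int64 types. A minimal compute-shader sketch, assuming an r64ui image and 64-bit integer types from GL_EXT_shader_explicit_arithmetic_types_int64 (binding and sizes are illustrative):

    #version 460
    #extension GL_EXT_shader_image_int64 : require
    #extension GL_EXT_shader_explicit_arithmetic_types_int64 : require
    layout(local_size_x = 8, local_size_y = 8) in;
    layout(binding = 0, r64ui) uniform u64image2D counters;
    void main() {
        // one 64-bit atomic add per invocation
        imageAtomicAdd(counters, ivec2(gl_GlobalInvocationID.xy), uint64_t(1));
    }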

View file

@@ -438,6 +438,9 @@ bool TOutputTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node)
case EOpConvUint64ToPtr: out.debug << "Convert uint64_t to pointer"; break;
case EOpConvPtrToUint64: out.debug << "Convert pointer to uint64_t"; break;
case EOpConvUint64ToAccStruct: out.debug << "Convert uint64_t to acceleration structure"; break;
case EOpConvUvec2ToAccStruct: out.debug << "Convert uvec2 to acceleration structure"; break;
case EOpRadians: out.debug << "radians"; break;
case EOpDegrees: out.debug << "degrees"; break;
case EOpSin: out.debug << "sine"; break;
@@ -829,6 +832,7 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
case EOpConstructTextureSampler: out.debug << "Construct combined texture-sampler"; break;
case EOpConstructReference: out.debug << "Construct reference"; break;
case EOpConstructCooperativeMatrix: out.debug << "Construct cooperative matrix"; break;
case EOpConstructAccStruct: out.debug << "Construct acceleration structure"; break;
case EOpLessThan: out.debug << "Compare Less Than"; break;
case EOpGreaterThan: out.debug << "Compare Greater Than"; break;
@@ -1079,11 +1083,15 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
case EOpSubpassLoad: out.debug << "subpassLoad"; break;
case EOpSubpassLoadMS: out.debug << "subpassLoadMS"; break;
case EOpTrace: out.debug << "traceNV"; break;
case EOpTraceNV: out.debug << "traceNV"; break;
case EOpTraceKHR: out.debug << "traceRayKHR"; break;
case EOpReportIntersection: out.debug << "reportIntersectionNV"; break;
case EOpIgnoreIntersection: out.debug << "ignoreIntersectionNV"; break;
case EOpTerminateRay: out.debug << "terminateRayNV"; break;
case EOpExecuteCallable: out.debug << "executeCallableNV"; break;
case EOpIgnoreIntersectionNV: out.debug << "ignoreIntersectionNV"; break;
case EOpIgnoreIntersectionKHR: out.debug << "ignoreIntersectionKHR"; break;
case EOpTerminateRayNV: out.debug << "terminateRayNV"; break;
case EOpTerminateRayKHR: out.debug << "terminateRayKHR"; break;
case EOpExecuteCallableNV: out.debug << "executeCallableNV"; break;
case EOpExecuteCallableKHR: out.debug << "executeCallableKHR"; break;
case EOpWritePackedPrimitiveIndices4x8NV: out.debug << "writePackedPrimitiveIndices4x8NV"; break;
case EOpRayQueryInitialize: out.debug << "rayQueryInitializeEXT"; break;
@@ -1321,6 +1329,9 @@ static void OutputConstantUnion(TInfoSink& out, const TIntermTyped* node, const
out.debug << buf << "\n";
}
break;
case EbtString:
out.debug << "\"" << constUnion[i].getSConst()->c_str() << "\"\n";
break;
default:
out.info.message(EPrefixInternalError, "Unknown constant", node->getLoc());
break;
@@ -1406,14 +1417,17 @@ bool TOutputTraverser::visitBranch(TVisit /* visit*/, TIntermBranch* node)
OutputTreeText(out, node, depth);
switch (node->getFlowOp()) {
case EOpKill: out.debug << "Branch: Kill"; break;
case EOpBreak: out.debug << "Branch: Break"; break;
case EOpContinue: out.debug << "Branch: Continue"; break;
case EOpReturn: out.debug << "Branch: Return"; break;
case EOpCase: out.debug << "case: "; break;
case EOpDemote: out.debug << "Demote"; break;
case EOpDefault: out.debug << "default: "; break;
default: out.debug << "Branch: Unknown Branch"; break;
case EOpKill: out.debug << "Branch: Kill"; break;
case EOpTerminateInvocation: out.debug << "Branch: TerminateInvocation"; break;
case EOpIgnoreIntersectionKHR: out.debug << "Branch: IgnoreIntersectionKHR"; break;
case EOpTerminateRayKHR: out.debug << "Branch: TerminateRayKHR"; break;
case EOpBreak: out.debug << "Branch: Break"; break;
case EOpContinue: out.debug << "Branch: Continue"; break;
case EOpReturn: out.debug << "Branch: Return"; break;
case EOpCase: out.debug << "case: "; break;
case EOpDemote: out.debug << "Demote"; break;
case EOpDefault: out.debug << "default: "; break;
default: out.debug << "Branch: Unknown Branch"; break;
}
if (node->getExpression()) {

View file

@@ -37,9 +37,11 @@
#include "../Include/Common.h"
#include "../Include/InfoSink.h"
#include "../Include/Types.h"
#include "gl_types.h"
#include "iomapper.h"
#include "SymbolTable.h"
//
// Map IO bindings.
@@ -82,17 +84,17 @@ public:
// If a global is being visited, then we should also traverse it in case its evaluation
// ends up visiting inputs we want to tag as live
else if (base->getQualifier().storage == EvqGlobal)
addGlobalReference(base->getName());
addGlobalReference(base->getAccessName());
if (target) {
TVarEntryInfo ent = {base->getId(), base, ! traverseAll};
ent.stage = intermediate.getStage();
TVarLiveMap::iterator at = target->find(
ent.symbol->getName()); // std::lower_bound(target->begin(), target->end(), ent, TVarEntryInfo::TOrderById());
ent.symbol->getAccessName()); // std::lower_bound(target->begin(), target->end(), ent, TVarEntryInfo::TOrderById());
if (at != target->end() && at->second.id == ent.id)
at->second.live = at->second.live || ! traverseAll; // update live state
else
(*target)[ent.symbol->getName()] = ent;
(*target)[ent.symbol->getAccessName()] = ent;
}
}
@@ -125,7 +127,8 @@ public:
return;
TVarEntryInfo ent = { base->getId() };
TVarLiveMap::const_iterator at = source->find(base->getName());
// Fix a defect: when a block has no instance name, we need to look it up by its block name
TVarLiveMap::const_iterator at = source->find(base->getAccessName());
if (at == source->end())
return;
@@ -181,7 +184,7 @@ struct TNotifyInOutAdaptor
inline void operator()(std::pair<const TString, TVarEntryInfo>& entKey)
{
resolver.notifyInOut(stage, entKey.second);
resolver.notifyInOut(entKey.second.stage, entKey.second);
}
private:
@@ -189,12 +192,13 @@ private:
};
struct TResolverUniformAdaptor {
TResolverUniformAdaptor(EShLanguage s, TIoMapResolver& r, TInfoSink& i, bool& e)
TResolverUniformAdaptor(EShLanguage s, TIoMapResolver& r, TVarLiveMap* uniform[EShLangCount], TInfoSink& i, bool& e)
: stage(s)
, resolver(r)
, infoSink(i)
, error(e)
{
memcpy(uniformVarMap, uniform, EShLangCount * (sizeof(TVarLiveMap*)));
}
inline void operator()(std::pair<const TString, TVarEntryInfo>& entKey) {
@@ -206,9 +210,9 @@ struct TResolverUniformAdaptor {
ent.newIndex = -1;
const bool isValid = resolver.validateBinding(stage, ent);
if (isValid) {
resolver.resolveBinding(stage, ent);
resolver.resolveSet(stage, ent);
resolver.resolveUniformLocation(stage, ent);
resolver.resolveBinding(ent.stage, ent);
resolver.resolveSet(ent.stage, ent);
resolver.resolveUniformLocation(ent.stage, ent);
if (ent.newBinding != -1) {
if (ent.newBinding >= int(TQualifier::layoutBindingEnd)) {
@@ -217,6 +221,17 @@ struct TResolverUniformAdaptor {
infoSink.info.message(EPrefixInternalError, err.c_str());
error = true;
}
if (ent.symbol->getQualifier().hasBinding()) {
for (uint32_t idx = EShLangVertex; idx < EShLangCount; ++idx) {
if (idx == ent.stage || uniformVarMap[idx] == nullptr)
continue;
auto entKey2 = uniformVarMap[idx]->find(entKey.first);
if (entKey2 != uniformVarMap[idx]->end()) {
entKey2->second.newBinding = ent.newBinding;
}
}
}
}
if (ent.newSet != -1) {
if (ent.newSet >= int(TQualifier::layoutSetEnd)) {
@@ -225,6 +240,16 @@ struct TResolverUniformAdaptor {
infoSink.info.message(EPrefixInternalError, err.c_str());
error = true;
}
if (ent.symbol->getQualifier().hasSet()) {
for (uint32_t idx = EShLangVertex; idx < EShLangCount; ++idx) {
if ((idx == stage) || (uniformVarMap[idx] == nullptr))
continue;
auto entKey2 = uniformVarMap[idx]->find(entKey.first);
if (entKey2 != uniformVarMap[idx]->end()) {
entKey2->second.newSet = ent.newSet;
}
}
}
}
} else {
TString errorMsg = "Invalid binding: " + entKey.first;
@@ -239,7 +264,7 @@ struct TResolverUniformAdaptor {
TIoMapResolver& resolver;
TInfoSink& infoSink;
bool& error;
TVarLiveMap* uniformVarMap[EShLangCount];
private:
TResolverUniformAdaptor& operator=(TResolverUniformAdaptor&) = delete;
};
@@ -261,7 +286,7 @@ struct TResolverInOutAdaptor {
ent.newBinding = -1;
ent.newSet = -1;
ent.newIndex = -1;
const bool isValid = resolver.validateInOut(stage, ent);
const bool isValid = resolver.validateInOut(ent.stage, ent);
if (isValid) {
resolver.resolveInOutLocation(stage, ent);
resolver.resolveInOutComponent(stage, ent);
@@ -296,17 +321,116 @@ private:
struct TSymbolValidater
{
TSymbolValidater(TIoMapResolver& r, TInfoSink& i, TVarLiveMap* in[EShLangCount], TVarLiveMap* out[EShLangCount],
TVarLiveMap* uniform[EShLangCount], bool& hadError)
TVarLiveMap* uniform[EShLangCount], bool& hadError, EProfile profile, int version)
: preStage(EShLangCount)
, currentStage(EShLangCount)
, nextStage(EShLangCount)
, resolver(r)
, infoSink(i)
, hadError(hadError)
, profile(profile)
, version(version)
{
memcpy(inVarMaps, in, EShLangCount * (sizeof(TVarLiveMap*)));
memcpy(outVarMaps, out, EShLangCount * (sizeof(TVarLiveMap*)));
memcpy(uniformVarMap, uniform, EShLangCount * (sizeof(TVarLiveMap*)));
std::map<TString, TString> anonymousMemberMap;
std::vector<TRange> usedUniformLocation;
std::vector<TString> usedUniformName;
usedUniformLocation.clear();
usedUniformName.clear();
for (int i = 0; i < EShLangCount; i++) {
if (uniformVarMap[i]) {
for (auto uniformVar : *uniformVarMap[i])
{
TIntermSymbol* pSymbol = uniformVar.second.symbol;
TQualifier qualifier = uniformVar.second.symbol->getQualifier();
TString symbolName = pSymbol->getAccessName();
// All uniforms need the multi-stage location check (block/default)
int uniformLocation = qualifier.layoutLocation;
if (uniformLocation != TQualifier::layoutLocationEnd) {
// Total size of the current uniform; it could be a block, a struct, or another type.
int size = TIntermediate::computeTypeUniformLocationSize(pSymbol->getType());
TRange locationRange(uniformLocation, uniformLocation + size - 1);
// Combine location and component ranges
int overlapLocation = -1;
bool diffLocation = false;
// Check for collisions, except for vertex inputs on desktop targeting OpenGL
overlapLocation = checkLocationOverlap(locationRange, usedUniformLocation, symbolName, usedUniformName, diffLocation);
// Overlap locations of uniforms, regardless of components (multi stages)
if (overlapLocation == -1) {
usedUniformLocation.push_back(locationRange);
usedUniformName.push_back(symbolName);
}
else if (overlapLocation >= 0) {
if (diffLocation == true) {
TString err = ("Uniform location should be equal for same uniforms: " +std::to_string(overlapLocation)).c_str();
infoSink.info.message(EPrefixInternalError, err.c_str());
hadError = true;
break;
}
else {
TString err = ("Uniform location overlaps across stages: " + std::to_string(overlapLocation)).c_str();
infoSink.info.message(EPrefixInternalError, err.c_str());
hadError = true;
break;
}
}
}
if ((uniformVar.second.symbol->getBasicType() == EbtBlock) &&
IsAnonymous(uniformVar.second.symbol->getName()))
{
auto blockType = uniformVar.second.symbol->getType().getStruct();
for (size_t memberIdx = 0; memberIdx < blockType->size(); ++memberIdx) {
auto memberName = (*blockType)[memberIdx].type->getFieldName();
if (anonymousMemberMap.find(memberName) != anonymousMemberMap.end())
{
if (anonymousMemberMap[memberName] != uniformVar.second.symbol->getType().getTypeName())
{
TString err = "Invalid block member name: " + memberName;
infoSink.info.message(EPrefixInternalError, err.c_str());
hadError = true;
break;
}
}
else
{
anonymousMemberMap[memberName] = uniformVar.second.symbol->getType().getTypeName();
}
}
}
if (hadError)
break;
}
}
}
}
// Kept as a helper here so we do not need to allocate a new intermediate, which would cost too much
int checkLocationOverlap(const TRange& locationRange, std::vector<TRange>& usedUniformLocation, const TString symbolName, std::vector<TString>& usedUniformName, bool& diffLocation)
{
for (size_t r = 0; r < usedUniformLocation.size(); ++r) {
if (usedUniformName[r] == symbolName) {
diffLocation = true;
return (usedUniformLocation[r].start == locationRange.start &&
usedUniformLocation[r].last == locationRange.last)
? -2 : std::max(locationRange.start, usedUniformLocation[r].start);
}
if (locationRange.overlap(usedUniformLocation[r])) {
// there is a collision; pick one
return std::max(locationRange.start, usedUniformLocation[r].start);
}
}
return -1; // no collision
}
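As an illustration of what checkLocationOverlap() above rejects when GL-style explicit uniform locations are compared across stages (locations and names below are made up):

    // vertex stage
    layout(location = 2) uniform mat4 u_mvp;
    // fragment stage, variant A
    layout(location = 2) uniform vec4 u_tint;   // rejected: location 2 overlaps across stages
    // fragment stage, variant B
    layout(location = 5) uniform mat4 u_mvp;    // rejected: same uniform declared at a different location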
inline void operator()(std::pair<const TString, TVarEntryInfo>& entKey) {
@@ -339,11 +463,24 @@ struct TSymbolValidater
// validate stage in;
if (preStage == EShLangCount)
return;
if (name == "gl_PerVertex")
if (TSymbolTable::isBuiltInSymbol(base->getId()))
return;
if (outVarMaps[preStage] != nullptr) {
auto ent2 = outVarMaps[preStage]->find(name);
uint32_t location = base->getType().getQualifier().layoutLocation;
if (ent2 == outVarMaps[preStage]->end() &&
location != glslang::TQualifier::layoutLocationEnd) {
for (auto var = outVarMaps[preStage]->begin(); var != ent2; var++) {
if (var->second.symbol->getType().getQualifier().layoutLocation == location) {
ent2 = var;
break;
}
}
}
if (ent2 != outVarMaps[preStage]->end()) {
auto& type1 = base->getType();
auto& type2 = ent2->second.symbol->getType();
hadError = hadError || typeCheck(&type1, &type2, name.c_str(), false);
if (ent2->second.symbol->getType().getQualifier().isArrayedIo(preStage)) {
TType subType(ent2->second.symbol->getType(), 0);
subType.appendMangledName(mangleName2);
@@ -351,23 +488,49 @@ struct TSymbolValidater
else {
ent2->second.symbol->getType().appendMangledName(mangleName2);
}
if (mangleName1 == mangleName2)
if (mangleName1 == mangleName2) {
// For ES 3.0 only, other versions have no such restrictions
// According to ES 3.0 spec: The type and presence of the interpolation qualifiers and
// storage qualifiers of variables with the same name declared in all linked shaders must
// match, otherwise the link command will fail.
if (profile == EEsProfile && version == 300) {
// Don't need to check smooth qualifier, as it uses the default interpolation mode
if (ent1.stage == EShLangFragment && type1.isBuiltIn() == false) {
if (type1.getQualifier().flat != type2.getQualifier().flat ||
type1.getQualifier().nopersp != type2.getQualifier().nopersp) {
TString err = "Interpolation qualifier mismatch : " + entKey.first;
infoSink.info.message(EPrefixInternalError, err.c_str());
hadError = true;
}
}
}
return;
}
else {
TString err = "Invalid In/Out variable type : " + entKey.first;
infoSink.info.message(EPrefixInternalError, err.c_str());
hadError = true;
}
}
else if (!base->getType().isBuiltIn()) {
// According to spec: A link error is generated if any statically referenced input variable
// or block does not have a matching output
if (profile == EEsProfile && ent1.live) {
hadError = true;
TString errorStr = name + ": not declared as an output variable in the previous shader stage.";
infoSink.info.message(EPrefixError, errorStr.c_str());
}
}
return;
}
} else if (base->getQualifier().storage == EvqVaryingOut) {
// validate stage out;
if (nextStage == EShLangCount)
return;
if (name == "gl_PerVertex")
if (TSymbolTable::isBuiltInSymbol(base->getId()))
return;
if (outVarMaps[nextStage] != nullptr) {
if (inVarMaps[nextStage] != nullptr) {
auto ent2 = inVarMaps[nextStage]->find(name);
if (ent2 != inVarMaps[nextStage]->end()) {
if (ent2->second.symbol->getType().getQualifier().isArrayedIo(nextStage)) {
@@ -400,11 +563,50 @@ struct TSymbolValidater
hadError = true;
}
mangleName2.clear();
// validate instance name of blocks
if (hadError == false &&
base->getType().getBasicType() == EbtBlock &&
IsAnonymous(base->getName()) != IsAnonymous(ent2->second.symbol->getName())) {
TString err = "Matched uniform block names must also either all be lacking "
"an instance name or all having an instance name: " + entKey.first;
infoSink.info.message(EPrefixInternalError, err.c_str());
hadError = true;
}
// validate uniform block member qualifier and member names
auto& type1 = base->getType();
auto& type2 = ent2->second.symbol->getType();
if (hadError == false && base->getType().getBasicType() == EbtBlock) {
hadError = hadError || typeCheck(&type1, &type2, name.c_str(), true);
}
else {
hadError = hadError || typeCheck(&type1, &type2, name.c_str(), false);
}
}
else if (base->getBasicType() == EbtBlock)
{
if (IsAnonymous(base->getName()))
{
// The name of an anonymous block member can't be the same as a default uniform variable's name.
auto blockType1 = base->getType().getStruct();
for (size_t memberIdx = 0; memberIdx < blockType1->size(); ++memberIdx) {
auto memberName = (*blockType1)[memberIdx].type->getFieldName();
if (uniformVarMap[i]->find(memberName) != uniformVarMap[i]->end())
{
TString err = "Invalid Uniform variable name : " + memberName;
infoSink.info.message(EPrefixInternalError, err.c_str());
hadError = true;
break;
}
}
}
}
}
}
}
}
TVarLiveMap *inVarMaps[EShLangCount], *outVarMaps[EShLangCount], *uniformVarMap[EShLangCount];
// Used to mark the previous stage, to get more interface symbol information.
EShLanguage preStage, currentStage, nextStage;
@@ -412,9 +614,118 @@ struct TSymbolValidater
TIoMapResolver& resolver;
TInfoSink& infoSink;
bool& hadError;
EProfile profile;
int version;
private:
TSymbolValidater& operator=(TSymbolValidater&) = delete;
bool qualifierCheck(const TType* const type1, const TType* const type2, const std::string& name, bool isBlock)
{
bool hasError = false;
const TQualifier& qualifier1 = type1->getQualifier();
const TQualifier& qualifier2 = type2->getQualifier();
if (((isBlock == false) &&
(type1->getQualifier().storage == EvqUniform && type2->getQualifier().storage == EvqUniform)) ||
(type1->getQualifier().storage == EvqGlobal && type2->getQualifier().storage == EvqGlobal)) {
if (qualifier1.precision != qualifier2.precision) {
hasError = true;
std::string errorStr = name + ": has a precision conflict across stages.";
infoSink.info.message(EPrefixError, errorStr.c_str());
}
if (qualifier1.hasFormat() && qualifier2.hasFormat()) {
if (qualifier1.layoutFormat != qualifier2.layoutFormat) {
hasError = true;
std::string errorStr = name + ": has a layout format conflict across stages.";
infoSink.info.message(EPrefixError, errorStr.c_str());
}
}
}
if (isBlock == true) {
if (qualifier1.layoutPacking != qualifier2.layoutPacking) {
hasError = true;
std::string errorStr = name + ": has a layoutPacking conflict across stages.";
infoSink.info.message(EPrefixError, errorStr.c_str());
}
if (qualifier1.layoutMatrix != qualifier2.layoutMatrix) {
hasError = true;
std::string errorStr = name + ": has a layoutMatrix conflict across stages.";
infoSink.info.message(EPrefixError, errorStr.c_str());
}
if (qualifier1.layoutOffset != qualifier2.layoutOffset) {
hasError = true;
std::string errorStr = name + ": has a layoutOffset conflict across stages.";
infoSink.info.message(EPrefixError, errorStr.c_str());
}
if (qualifier1.layoutAlign != qualifier2.layoutAlign) {
hasError = true;
std::string errorStr = name + ": has a layoutAlign conflict across stages.";
infoSink.info.message(EPrefixError, errorStr.c_str());
}
}
return hasError;
}
bool typeCheck(const TType* const type1, const TType* const type2, const std::string& name, bool isBlock)
{
bool hasError = false;
if (!(type1->isStruct() && type2->isStruct())) {
hasError = hasError || qualifierCheck(type1, type2, name, isBlock);
}
else {
if (type1->getBasicType() == EbtBlock && type2->getBasicType() == EbtBlock)
isBlock = true;
const TTypeList* typeList1 = type1->getStruct();
const TTypeList* typeList2 = type2->getStruct();
std::string newName = name;
size_t memberCount = typeList1->size();
size_t index2 = 0;
for (size_t index = 0; index < memberCount; index++, index2++) {
// Skip inactive member
if (typeList1->at(index).type->getBasicType() == EbtVoid)
continue;
while (index2 < typeList2->size() && typeList2->at(index2).type->getBasicType() == EbtVoid) {
++index2;
}
// TypeList1 has more members in list
if (index2 == typeList2->size()) {
std::string errorStr = name + ": struct mismatch.";
infoSink.info.message(EPrefixError, errorStr.c_str());
hasError = true;
break;
}
if (typeList1->at(index).type->getFieldName() != typeList2->at(index2).type->getFieldName()) {
std::string errorStr = name + ": member name mismatch.";
infoSink.info.message(EPrefixError, errorStr.c_str());
hasError = true;
}
else {
newName = typeList1->at(index).type->getFieldName().c_str();
}
hasError = hasError || typeCheck(typeList1->at(index).type, typeList2->at(index2).type, newName, isBlock);
}
while (index2 < typeList2->size())
{
// TypeList2 has more members
if (typeList2->at(index2).type->getBasicType() != EbtVoid) {
std::string errorStr = name + ": struct mismatch.";
infoSink.info.message(EPrefixError, errorStr.c_str());
hasError = true;
break;
}
++index2;
}
}
return hasError;
}
};
struct TSlotCollector {
@@ -500,7 +811,7 @@ int TDefaultIoResolverBase::resolveSet(EShLanguage /*stage*/, TVarEntryInfo& ent
int TDefaultIoResolverBase::resolveUniformLocation(EShLanguage /*stage*/, TVarEntryInfo& ent) {
const TType& type = ent.symbol->getType();
const char* name = ent.symbol->getName().c_str();
const char* name = ent.symbol->getAccessName().c_str();
// kick out of not doing this
if (! doAutoLocationMapping()) {
return ent.newLocation = -1;
@@ -609,7 +920,7 @@ TDefaultGlslIoResolver::TDefaultGlslIoResolver(const TIntermediate& intermediate
int TDefaultGlslIoResolver::resolveInOutLocation(EShLanguage stage, TVarEntryInfo& ent) {
const TType& type = ent.symbol->getType();
const TString& name = getAccessName(ent.symbol);
const TString& name = ent.symbol->getAccessName();
if (currentStage != stage) {
preStage = currentStage;
currentStage = stage;
@@ -693,7 +1004,7 @@ int TDefaultGlslIoResolver::resolveInOutLocation(EShLanguage stage, TVarEntryInf
int TDefaultGlslIoResolver::resolveUniformLocation(EShLanguage /*stage*/, TVarEntryInfo& ent) {
const TType& type = ent.symbol->getType();
const TString& name = getAccessName(ent.symbol);
const TString& name = ent.symbol->getAccessName();
// kick out of not doing this
if (! doAutoLocationMapping()) {
return ent.newLocation = -1;
@@ -764,7 +1075,7 @@ int TDefaultGlslIoResolver::resolveUniformLocation(EShLanguage /*stage*/, TVarEn
int TDefaultGlslIoResolver::resolveBinding(EShLanguage /*stage*/, TVarEntryInfo& ent) {
const TType& type = ent.symbol->getType();
const TString& name = getAccessName(ent.symbol);
const TString& name = ent.symbol->getAccessName();
// On OpenGL arrays of opaque types take a separate binding for each element
int numBindings = intermediate.getSpv().openGl != 0 && type.isSizedArray() ? type.getCumulativeArraySize() : 1;
TResourceType resource = getResourceType(type);
@@ -839,7 +1150,7 @@ void TDefaultGlslIoResolver::endCollect(EShLanguage /*stage*/) {
void TDefaultGlslIoResolver::reserverStorageSlot(TVarEntryInfo& ent, TInfoSink& infoSink) {
const TType& type = ent.symbol->getType();
const TString& name = getAccessName(ent.symbol);
const TString& name = ent.symbol->getAccessName();
TStorageQualifier storage = type.getQualifier().storage;
EShLanguage stage(EShLangCount);
switch (storage) {
@@ -899,7 +1210,7 @@ void TDefaultGlslIoResolver::reserverStorageSlot(TVarEntryInfo& ent, TInfoSink&
void TDefaultGlslIoResolver::reserverResourceSlot(TVarEntryInfo& ent, TInfoSink& infoSink) {
const TType& type = ent.symbol->getType();
const TString& name = getAccessName(ent.symbol);
const TString& name = ent.symbol->getAccessName();
int resource = getResourceType(type);
if (type.getQualifier().hasBinding()) {
TVarSlotMap& varSlotMap = resourceSlotMap[resource];
@@ -922,13 +1233,6 @@ void TDefaultGlslIoResolver::reserverResourceSlot(TVarEntryInfo& ent, TInfoSink&
}
}
const TString& TDefaultGlslIoResolver::getAccessName(const TIntermSymbol* symbol)
{
return symbol->getBasicType() == EbtBlock ?
symbol->getType().getTypeName() :
symbol->getName();
}
//TDefaultGlslIoResolver end
/*
@@ -1117,25 +1421,23 @@ bool TIoMapper::addStage(EShLanguage stage, TIntermediate& intermediate, TInfoSi
}
// sort entries by priority. see TVarEntryInfo::TOrderByPriority for info.
std::for_each(inVarMap.begin(), inVarMap.end(),
[&inVector](TVarLivePair p) { inVector.push_back(p); });
for (auto& var : inVarMap) { inVector.push_back(var); }
std::sort(inVector.begin(), inVector.end(), [](const TVarLivePair& p1, const TVarLivePair& p2) -> bool {
return TVarEntryInfo::TOrderByPriority()(p1.second, p2.second);
});
std::for_each(outVarMap.begin(), outVarMap.end(),
[&outVector](TVarLivePair p) { outVector.push_back(p); });
for (auto& var : outVarMap) { outVector.push_back(var); }
std::sort(outVector.begin(), outVector.end(), [](const TVarLivePair& p1, const TVarLivePair& p2) -> bool {
return TVarEntryInfo::TOrderByPriority()(p1.second, p2.second);
});
std::for_each(uniformVarMap.begin(), uniformVarMap.end(),
[&uniformVector](TVarLivePair p) { uniformVector.push_back(p); });
for (auto& var : uniformVarMap) { uniformVector.push_back(var); }
std::sort(uniformVector.begin(), uniformVector.end(), [](const TVarLivePair& p1, const TVarLivePair& p2) -> bool {
return TVarEntryInfo::TOrderByPriority()(p1.second, p2.second);
});
bool hadError = false;
TVarLiveMap* dummyUniformVarMap[EShLangCount] = {};
TNotifyInOutAdaptor inOutNotify(stage, *resolver);
TNotifyUniformAdaptor uniformNotify(stage, *resolver);
TResolverUniformAdaptor uniformResolve(stage, *resolver, infoSink, hadError);
TResolverUniformAdaptor uniformResolve(stage, *resolver, dummyUniformVarMap, infoSink, hadError);
TResolverInOutAdaptor inOutResolve(stage, *resolver, infoSink, hadError);
resolver->beginNotifications(stage);
std::for_each(inVector.begin(), inVector.end(), inOutNotify);
@@ -1143,22 +1445,22 @@ bool TIoMapper::addStage(EShLanguage stage, TIntermediate& intermediate, TInfoSi
std::for_each(uniformVector.begin(), uniformVector.end(), uniformNotify);
resolver->endNotifications(stage);
resolver->beginResolve(stage);
std::for_each(inVector.begin(), inVector.end(), inOutResolve);
for (auto& var : inVector) { inOutResolve(var); }
std::for_each(inVector.begin(), inVector.end(), [&inVarMap](TVarLivePair p) {
auto at = inVarMap.find(p.second.symbol->getName());
if (at != inVarMap.end())
auto at = inVarMap.find(p.second.symbol->getAccessName());
if (at != inVarMap.end() && p.second.id == at->second.id)
at->second = p.second;
});
std::for_each(outVector.begin(), outVector.end(), inOutResolve);
for (auto& var : outVector) { inOutResolve(var); }
std::for_each(outVector.begin(), outVector.end(), [&outVarMap](TVarLivePair p) {
auto at = outVarMap.find(p.second.symbol->getName());
if (at != outVarMap.end())
auto at = outVarMap.find(p.second.symbol->getAccessName());
if (at != outVarMap.end() && p.second.id == at->second.id)
at->second = p.second;
});
std::for_each(uniformVector.begin(), uniformVector.end(), uniformResolve);
std::for_each(uniformVector.begin(), uniformVector.end(), [&uniformVarMap](TVarLivePair p) {
auto at = uniformVarMap.find(p.second.symbol->getName());
if (at != uniformVarMap.end())
auto at = uniformVarMap.find(p.second.symbol->getAccessName());
if (at != uniformVarMap.end() && p.second.id == at->second.id)
at->second = p.second;
});
resolver->endResolve(stage);
@@ -1174,9 +1476,14 @@ bool TIoMapper::addStage(EShLanguage stage, TIntermediate& intermediate, TInfoSi
//
// Returns false if the input is too malformed to do this.
bool TGlslIoMapper::addStage(EShLanguage stage, TIntermediate& intermediate, TInfoSink& infoSink, TIoMapResolver* resolver) {
bool somethingToDo = !intermediate.getResourceSetBinding().empty() ||
intermediate.getAutoMapBindings() ||
intermediate.getAutoMapLocations();
// Profile and version are used for symbol validation.
profile = intermediate.getProfile();
version = intermediate.getVersion();
bool somethingToDo = ! intermediate.getResourceSetBinding().empty() || intermediate.getAutoMapBindings() ||
intermediate.getAutoMapLocations();
// Only run the stricter per-resource check when 'somethingToDo' has not already been set, to avoid
// unnecessary for-loop work once 'somethingToDo' is true.
for (int res = 0; (res < EResCount && !somethingToDo); ++res) {
@@ -1236,31 +1543,30 @@ bool TGlslIoMapper::doMap(TIoMapResolver* resolver, TInfoSink& infoSink) {
resolver->endResolve(EShLangCount);
if (!hadError) {
//Resolve uniform location, ubo/ssbo/opaque bindings across stages
TResolverUniformAdaptor uniformResolve(EShLangCount, *resolver, infoSink, hadError);
TResolverUniformAdaptor uniformResolve(EShLangCount, *resolver, uniformVarMap, infoSink, hadError);
TResolverInOutAdaptor inOutResolve(EShLangCount, *resolver, infoSink, hadError);
TSymbolValidater symbolValidater(*resolver, infoSink, inVarMaps, outVarMaps, uniformVarMap, hadError);
TSymbolValidater symbolValidater(*resolver, infoSink, inVarMaps,
outVarMaps, uniformVarMap, hadError, profile, version);
TVarLiveVector uniformVector;
resolver->beginResolve(EShLangCount);
for (int stage = EShLangVertex; stage < EShLangCount; stage++) {
if (inVarMaps[stage] != nullptr) {
inOutResolve.setStage(EShLanguage(stage));
std::for_each(inVarMaps[stage]->begin(), inVarMaps[stage]->end(), symbolValidater);
std::for_each(inVarMaps[stage]->begin(), inVarMaps[stage]->end(), inOutResolve);
std::for_each(outVarMaps[stage]->begin(), outVarMaps[stage]->end(), symbolValidater);
std::for_each(outVarMaps[stage]->begin(), outVarMaps[stage]->end(), inOutResolve);
for (auto& var : *(inVarMaps[stage])) { symbolValidater(var); }
for (auto& var : *(inVarMaps[stage])) { inOutResolve(var); }
for (auto& var : *(outVarMaps[stage])) { symbolValidater(var); }
for (auto& var : *(outVarMaps[stage])) { inOutResolve(var); }
}
if (uniformVarMap[stage] != nullptr) {
uniformResolve.setStage(EShLanguage(stage));
// sort entries by priority. see TVarEntryInfo::TOrderByPriority for info.
std::for_each(uniformVarMap[stage]->begin(), uniformVarMap[stage]->end(),
[&uniformVector](TVarLivePair p) { uniformVector.push_back(p); });
for (auto& var : *(uniformVarMap[stage])) { uniformVector.push_back(var); }
}
}
std::sort(uniformVector.begin(), uniformVector.end(), [](const TVarLivePair& p1, const TVarLivePair& p2) -> bool {
return TVarEntryInfo::TOrderByPriority()(p1.second, p2.second);
});
std::for_each(uniformVector.begin(), uniformVector.end(), symbolValidater);
std::for_each(uniformVector.begin(), uniformVector.end(), uniformResolve);
for (auto& var : uniformVector) { symbolValidater(var); }
for (auto& var : uniformVector) { uniformResolve(var); }
std::sort(uniformVector.begin(), uniformVector.end(), [](const TVarLivePair& p1, const TVarLivePair& p2) -> bool {
return TVarEntryInfo::TOrderByPriority()(p1.second, p2.second);
});
@@ -1269,14 +1575,18 @@ bool TGlslIoMapper::doMap(TIoMapResolver* resolver, TInfoSink& infoSink) {
if (intermediates[stage] != nullptr) {
// traverse each stage, set the new location on each input/output and uniform symbol, and set the new binding on
// ubo, ssbo and opaque symbols
TVarLiveMap** pUniformVarMap = uniformVarMap;
TVarLiveMap** pUniformVarMap = uniformResolve.uniformVarMap;
std::for_each(uniformVector.begin(), uniformVector.end(), [pUniformVarMap, stage](TVarLivePair p) {
auto at = pUniformVarMap[stage]->find(p.second.symbol->getName());
if (at != pUniformVarMap[stage]->end())
auto at = pUniformVarMap[stage]->find(p.second.symbol->getAccessName());
if (at != pUniformVarMap[stage]->end() && at->second.id == p.second.id){
int resolvedBinding = at->second.newBinding;
at->second = p.second;
if (resolvedBinding > 0)
at->second.newBinding = resolvedBinding;
}
});
TVarSetTraverser iter_iomap(*intermediates[stage], *inVarMaps[stage], *outVarMaps[stage],
*uniformVarMap[stage]);
*uniformResolve.uniformVarMap[stage]);
intermediates[stage]->getTreeRoot()->traverse(&iter_iomap);
}
}
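For the ES 3.0 interpolation-qualifier check added to TSymbolValidater earlier in this file, a sketch of the kind of mismatch it reports (variable names are illustrative):

    // vertex shader, #version 300 es
    flat out mediump float v_weight;
    // fragment shader, #version 300 es
    in mediump float v_weight;   // reported: 'flat' missing, interpolation qualifiers must match at link time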

View file

@@ -203,7 +203,6 @@ public:
void endCollect(EShLanguage) override;
void reserverStorageSlot(TVarEntryInfo& ent, TInfoSink& infoSink) override;
void reserverResourceSlot(TVarEntryInfo& ent, TInfoSink& infoSink) override;
const TString& getAccessName(const TIntermSymbol*);
// in/out symbol and uniform symbol are stored in the same resourceSlotMap, the storage key is used to identify each type of symbol.
// We use stage and storage qualifier to construct a storage key. it can help us identify the same storage resource used in different stage.
// if a resource is a program resource and we don't need know it usage stage, we can use same stage to build storage key.
@@ -263,10 +262,12 @@ public:
class TGlslIoMapper : public TIoMapper {
public:
TGlslIoMapper() {
memset(inVarMaps, 0, sizeof(TVarLiveMap*) * EShLangCount);
memset(outVarMaps, 0, sizeof(TVarLiveMap*) * EShLangCount);
memset(uniformVarMap, 0, sizeof(TVarLiveMap*) * EShLangCount);
memset(intermediates, 0, sizeof(TIntermediate*) * EShLangCount);
memset(inVarMaps, 0, sizeof(TVarLiveMap*) * (EShLangCount + 1));
memset(outVarMaps, 0, sizeof(TVarLiveMap*) * (EShLangCount + 1));
memset(uniformVarMap, 0, sizeof(TVarLiveMap*) * (EShLangCount + 1));
memset(intermediates, 0, sizeof(TIntermediate*) * (EShLangCount + 1));
profile = ENoProfile;
version = 0;
}
virtual ~TGlslIoMapper() {
for (size_t stage = 0; stage < EShLangCount; stage++) {
@@ -293,6 +294,8 @@ public:
*uniformVarMap[EShLangCount];
TIntermediate* intermediates[EShLangCount];
bool hadError = false;
EProfile profile;
int version;
};
} // end namespace glslang

View file

@@ -196,12 +196,14 @@ void TIntermediate::mergeModes(TInfoSink& infoSink, TIntermediate& unit)
MERGE_TRUE(pointMode);
for (int i = 0; i < 3; ++i) {
if (!localSizeNotDefault[i] && unit.localSizeNotDefault[i]) {
localSize[i] = unit.localSize[i];
localSizeNotDefault[i] = true;
if (unit.localSizeNotDefault[i]) {
if (!localSizeNotDefault[i]) {
localSize[i] = unit.localSize[i];
localSizeNotDefault[i] = true;
}
else if (localSize[i] != unit.localSize[i])
error(infoSink, "Contradictory local size");
}
else if (localSize[i] != unit.localSize[i])
error(infoSink, "Contradictory local size");
if (localSizeSpecId[i] == TQualifier::layoutNotSet)
localSizeSpecId[i] = unit.localSizeSpecId[i];
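With the reworked merge above, only two explicitly declared, differing sizes conflict; a unit that leaves the local size at its default now simply inherits the other unit's value. A sketch (sizes are arbitrary):

    // compute unit A
    layout(local_size_x = 64) in;
    // compute unit B, merged into the same stage
    layout(local_size_x = 32) in;   // error: Contradictory local size
    // a unit C with no local_size declaration would inherit 64 without error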
@@ -736,10 +738,10 @@ void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
// "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the
// implementation-dependent constant gl_MaxTransformFeedbackInterleavedComponents."
if (xfbBuffers[b].stride > (unsigned int)(4 * resources.maxTransformFeedbackInterleavedComponents)) {
if (xfbBuffers[b].stride > (unsigned int)(4 * resources->maxTransformFeedbackInterleavedComponents)) {
error(infoSink, "xfb_stride is too large:");
infoSink.info.prefix(EPrefixError);
infoSink.info << " xfb_buffer " << (unsigned int)b << ", components (1/4 stride) needed are " << xfbBuffers[b].stride/4 << ", gl_MaxTransformFeedbackInterleavedComponents is " << resources.maxTransformFeedbackInterleavedComponents << "\n";
infoSink.info << " xfb_buffer " << (unsigned int)b << ", components (1/4 stride) needed are " << xfbBuffers[b].stride/4 << ", gl_MaxTransformFeedbackInterleavedComponents is " << resources->maxTransformFeedbackInterleavedComponents << "\n";
}
}
@ -1055,8 +1057,8 @@ bool TIntermediate::userOutputUsed() const
return found;
}
// Accumulate locations used for inputs, outputs, and uniforms, and check for collisions
// as the accumulation is done.
// Accumulate locations used for inputs, outputs, and uniforms, payload and callable data
// and check for collisions as the accumulation is done.
//
// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
//
@ -1068,6 +1070,7 @@ int TIntermediate::addUsedLocation(const TQualifier& qualifier, const TType& typ
typeCollision = false;
int set;
int setRT;
if (qualifier.isPipeInput())
set = 0;
else if (qualifier.isPipeOutput())
@ -1076,11 +1079,17 @@ int TIntermediate::addUsedLocation(const TQualifier& qualifier, const TType& typ
set = 2;
else if (qualifier.storage == EvqBuffer)
set = 3;
else if (qualifier.isAnyPayload())
setRT = 0;
else if (qualifier.isAnyCallable())
setRT = 1;
else
return -1;
int size;
if (qualifier.isUniformOrBuffer() || qualifier.isTaskMemory()) {
if (qualifier.isAnyPayload() || qualifier.isAnyCallable()) {
size = 1;
} else if (qualifier.isUniformOrBuffer() || qualifier.isTaskMemory()) {
if (type.isSizedArray())
size = type.getCumulativeArraySize();
else
@ -1108,10 +1117,17 @@ int TIntermediate::addUsedLocation(const TQualifier& qualifier, const TType& typ
// (A vertex shader input will show using only one location, even for a dvec3/4.)
//
// So, for the case of dvec3, we need two independent ioRanges.
//
// For raytracing IO (payloads and callabledata) each declaration occupies a single
// slot irrespective of type.
int collision = -1; // no collision
#ifndef GLSLANG_WEB
if (size == 2 && type.getBasicType() == EbtDouble && type.getVectorSize() == 3 &&
if (qualifier.isAnyPayload() || qualifier.isAnyCallable()) {
TRange range(qualifier.layoutLocation, qualifier.layoutLocation);
collision = checkLocationRT(setRT, qualifier.layoutLocation);
if (collision < 0)
usedIoRT[setRT].push_back(range);
} else if (size == 2 && type.getBasicType() == EbtDouble && type.getVectorSize() == 3 &&
(qualifier.isPipeInput() || qualifier.isPipeOutput())) {
// Dealing with dvec3 in/out split across two locations.
// Need two io-ranges.
@ -1187,6 +1203,16 @@ int TIntermediate::checkLocationRange(int set, const TIoRange& range, const TTyp
return -1; // no collision
}
int TIntermediate::checkLocationRT(int set, int location) {
TRange range(location, location);
for (size_t r = 0; r < usedIoRT[set].size(); ++r) {
if (range.overlap(usedIoRT[set][r])) {
return range.start;
}
}
return -1; // no collision
}
// Accumulate bindings and offsets, and check for collisions
// as the accumulation is done.
//
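Two of the hunks above tighten link-time checks: compute local_size values are only taken from another compilation unit when this unit left them at the default, and otherwise must agree, and ray-tracing payload/callable locations get their own per-set overlap test (checkLocationRT). A rough sketch of the local-size merge rule, using a hypothetical Unit struct instead of TIntermediate:

```cpp
#include <cstdio>

// Hypothetical stand-in for the pieces of TIntermediate this rule touches.
struct Unit {
    int  localSize[3]           = {1, 1, 1};
    bool localSizeNotDefault[3] = {false, false, false};
};

// Adopt the other unit's non-default local_size when ours is still the
// default; otherwise the two units must agree.
bool mergeLocalSize(Unit& dst, const Unit& src) {
    bool ok = true;
    for (int i = 0; i < 3; ++i) {
        if (!src.localSizeNotDefault[i])
            continue;                            // other unit kept the default
        if (!dst.localSizeNotDefault[i]) {
            dst.localSize[i] = src.localSize[i];
            dst.localSizeNotDefault[i] = true;
        } else if (dst.localSize[i] != src.localSize[i]) {
            std::fprintf(stderr, "Contradictory local size\n");
            ok = false;
        }
    }
    return ok;
}
```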


@ -259,6 +259,23 @@ private:
unsigned int features;
};
// MustBeAssigned wraps a T, asserting that it has been assigned with
// operator =() before attempting to read with operator T() or operator ->().
// Used to catch cases where fields are read before they have been assigned.
template<typename T>
class MustBeAssigned
{
public:
MustBeAssigned() = default;
MustBeAssigned(const T& v) : value(v) {}
operator const T&() const { assert(isSet); return value; }
const T* operator ->() const { assert(isSet); return &value; }
MustBeAssigned& operator = (const T& v) { value = v; isSet = true; return *this; }
private:
T value;
bool isSet = false;
};
//
// Set of helper functions to help parse and build the tree.
//
@ -270,6 +287,7 @@ public:
profile(p), version(v),
#endif
treeRoot(0),
resources(TBuiltInResource{}),
numEntryPoints(0), numErrors(0), numPushConstants(0), recursive(false),
invertY(false),
useStorageBuffer(false),
@ -398,6 +416,9 @@ public:
EShLanguage getStage() const { return language; }
void addRequestedExtension(const char* extension) { requestedExtensions.insert(extension); }
const std::set<std::string>& getRequestedExtensions() const { return requestedExtensions; }
bool isRayTracingStage() const {
return language >= EShLangRayGen && language <= EShLangCallableNV;
}
void setTreeRoot(TIntermNode* r) { treeRoot = r; }
TIntermNode* getTreeRoot() const { return treeRoot; }
@ -406,6 +427,7 @@ public:
int getNumErrors() const { return numErrors; }
void addPushConstantCount() { ++numPushConstants; }
void setLimits(const TBuiltInResource& r) { resources = r; }
const TBuiltInResource& getLimits() const { return resources; }
bool postProcess(TIntermNode*, EShLanguage);
void removeTree();
@ -512,6 +534,7 @@ public:
// Linkage related
void addSymbolLinkageNodes(TIntermAggregate*& linkage, EShLanguage, TSymbolTable&);
void addSymbolLinkageNode(TIntermAggregate*& linkage, const TSymbol&);
TIntermAggregate* findLinkerObjects() const;
void setUseStorageBuffer() { useStorageBuffer = true; }
bool usingStorageBuffer() const { return useStorageBuffer; }
@ -847,6 +870,7 @@ public:
int addUsedLocation(const TQualifier&, const TType&, bool& typeCollision);
int checkLocationRange(int set, const TIoRange& range, const TType&, bool& typeCollision);
int checkLocationRT(int set, int location);
int addUsedOffsets(int binding, int offset, int numOffsets);
bool addUsedConstantId(int id);
static int computeTypeLocationSize(const TType&, EShLanguage);
@ -922,7 +946,6 @@ protected:
void checkCallGraphCycles(TInfoSink&);
void checkCallGraphBodies(TInfoSink&, bool keepUncalled);
void inOutLocationCheck(TInfoSink&);
TIntermAggregate* findLinkerObjects() const;
bool userOutputUsed() const;
bool isSpecializationOperation(const TIntermOperator&) const;
bool isNonuniformPropagating(TOperator) const;
@ -955,7 +978,7 @@ protected:
SpvVersion spvVersion;
TIntermNode* treeRoot;
std::set<std::string> requestedExtensions; // cumulation of all enabled or required extensions; not connected to what subset of the shader used them
TBuiltInResource resources;
MustBeAssigned<TBuiltInResource> resources;
int numEntryPoints;
int numErrors;
int numPushConstants;
@ -1031,6 +1054,8 @@ protected:
std::unordered_set<int> usedConstantId; // specialization constant ids used
std::vector<TOffsetRange> usedAtomics; // sets of bindings used by atomic counters
std::vector<TIoRange> usedIo[4]; // sets of used locations, one for each of in, out, uniform, and buffers
std::vector<TRange> usedIoRT[2]; // sets of used locations, one for rayPayload/rayPayloadIn and the other
// for callableData/callableDataIn
// set of names of statically read/written I/O that might need extra checking
std::set<TString> ioAccessed;
// source code of shader, useful as part of debug information
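The MustBeAssigned wrapper added above is a small debug guard: reading through operator const T&() or operator->() asserts unless operator=() has run, which is how the resources member now catches uses of the built-in limits before setLimits(). A minimal usage sketch follows; the Limits struct is a made-up stand-in for TBuiltInResource.

```cpp
#include <cassert>

// The wrapper as added above: reads assert unless an assignment happened first.
template<typename T>
class MustBeAssigned {
public:
    MustBeAssigned() = default;
    MustBeAssigned(const T& v) : value(v) {}
    operator const T&() const { assert(isSet); return value; }
    const T* operator->() const { assert(isSet); return &value; }
    MustBeAssigned& operator=(const T& v) { value = v; isSet = true; return *this; }
private:
    T value;
    bool isSet = false;
};

// Made-up stand-in for TBuiltInResource.
struct Limits { int maxCombinedTextureImageUnits = 0; };

int main() {
    MustBeAssigned<Limits> resources;
    // int n = resources->maxCombinedTextureImageUnits;  // would assert: read before assignment
    Limits limits;
    limits.maxCombinedTextureImageUnits = 80;
    resources = limits;                                   // the setLimits() path
    return resources->maxCombinedTextureImageUnits == 80 ? 0 : 1;
}
```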


@ -455,6 +455,7 @@ int TPpContext::eval(int token, int precedence, bool shortCircuit, int& res, boo
token = scanToken(ppToken);
}
} else {
token = tokenPaste(token, *ppToken);
token = evalToToken(token, shortCircuit, res, err, ppToken);
return eval(token, precedence, shortCircuit, res, err, ppToken);
}
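The added tokenPaste() call above applies the ## operator before a token is converted for #if expression evaluation. A tiny illustration (macro names invented) of the kind of conditional this affects:

```cpp
// The pasted identifier FOO_BAR has to be formed by ## first, then macro-expanded,
// before the #if expression is evaluated; otherwise the condition misparses.
#define PASTE(a, b) a ## b
#define FOO_BAR 1

#if PASTE(FOO_, BAR)
static const int pastedBranchTaken = 1;
#else
static const int pastedBranchTaken = 0;
#endif
```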


@ -658,14 +658,17 @@ public:
blocks.back().numMembers = countAggregateMembers(type);
EShLanguageMask& stages = blocks.back().stages;
stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
if (updateStageMasks) {
EShLanguageMask& stages = blocks.back().stages;
stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
}
}
else {
blockIndex = it->second;
EShLanguageMask& stages = blocks[blockIndex].stages;
stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
if (updateStageMasks) {
EShLanguageMask& stages = blocks[blockIndex].stages;
stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
}
}
}


@ -1,15 +0,0 @@
diff --git a/thirdparty/glslang/glslang/Include/Common.h b/thirdparty/glslang/glslang/Include/Common.h
index 733a790cfd..2c511bc1c5 100644
--- a/thirdparty/glslang/glslang/Include/Common.h
+++ b/thirdparty/glslang/glslang/Include/Common.h
@@ -50,7 +50,9 @@ std::string to_string(const T& val) {
}
#endif
-#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/) || defined MINGW_HAS_SECURE_API
+// -- GODOT start --
+#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/) /* || defined MINGW_HAS_SECURE_API */
+// -- GODOT end --
#include <basetsd.h>
#ifndef snprintf
#define snprintf sprintf_s


@ -121,6 +121,7 @@ typedef enum {
VK_ICD_WSI_PLATFORM_METAL,
VK_ICD_WSI_PLATFORM_DIRECTFB,
VK_ICD_WSI_PLATFORM_VI,
VK_ICD_WSI_PLATFORM_GGP,
} VkIcdWsiPlatform;
typedef struct {
@ -196,6 +197,13 @@ typedef struct {
} VkIcdSurfaceIOS;
#endif // VK_USE_PLATFORM_IOS_MVK
#ifdef VK_USE_PLATFORM_GGP
typedef struct {
VkIcdSurfaceBase base;
GgpStreamDescriptor streamDescriptor;
} VkIcdSurfaceGgp;
#endif // VK_USE_PLATFORM_GGP
typedef struct {
VkIcdSurfaceBase base;
VkDisplayModeKHR displayMode;

File diff suppressed because it is too large


@ -49,409 +49,6 @@ typedef struct VkPhysicalDevicePortabilitySubsetPropertiesKHR {
} VkPhysicalDevicePortabilitySubsetPropertiesKHR;
#define VK_KHR_deferred_host_operations 1
VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDeferredOperationKHR)
#define VK_KHR_DEFERRED_HOST_OPERATIONS_SPEC_VERSION 3
#define VK_KHR_DEFERRED_HOST_OPERATIONS_EXTENSION_NAME "VK_KHR_deferred_host_operations"
typedef struct VkDeferredOperationInfoKHR {
VkStructureType sType;
const void* pNext;
VkDeferredOperationKHR operationHandle;
} VkDeferredOperationInfoKHR;
typedef VkResult (VKAPI_PTR *PFN_vkCreateDeferredOperationKHR)(VkDevice device, const VkAllocationCallbacks* pAllocator, VkDeferredOperationKHR* pDeferredOperation);
typedef void (VKAPI_PTR *PFN_vkDestroyDeferredOperationKHR)(VkDevice device, VkDeferredOperationKHR operation, const VkAllocationCallbacks* pAllocator);
typedef uint32_t (VKAPI_PTR *PFN_vkGetDeferredOperationMaxConcurrencyKHR)(VkDevice device, VkDeferredOperationKHR operation);
typedef VkResult (VKAPI_PTR *PFN_vkGetDeferredOperationResultKHR)(VkDevice device, VkDeferredOperationKHR operation);
typedef VkResult (VKAPI_PTR *PFN_vkDeferredOperationJoinKHR)(VkDevice device, VkDeferredOperationKHR operation);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateDeferredOperationKHR(
VkDevice device,
const VkAllocationCallbacks* pAllocator,
VkDeferredOperationKHR* pDeferredOperation);
VKAPI_ATTR void VKAPI_CALL vkDestroyDeferredOperationKHR(
VkDevice device,
VkDeferredOperationKHR operation,
const VkAllocationCallbacks* pAllocator);
VKAPI_ATTR uint32_t VKAPI_CALL vkGetDeferredOperationMaxConcurrencyKHR(
VkDevice device,
VkDeferredOperationKHR operation);
VKAPI_ATTR VkResult VKAPI_CALL vkGetDeferredOperationResultKHR(
VkDevice device,
VkDeferredOperationKHR operation);
VKAPI_ATTR VkResult VKAPI_CALL vkDeferredOperationJoinKHR(
VkDevice device,
VkDeferredOperationKHR operation);
#endif
#define VK_KHR_pipeline_library 1
#define VK_KHR_PIPELINE_LIBRARY_SPEC_VERSION 1
#define VK_KHR_PIPELINE_LIBRARY_EXTENSION_NAME "VK_KHR_pipeline_library"
typedef struct VkPipelineLibraryCreateInfoKHR {
VkStructureType sType;
const void* pNext;
uint32_t libraryCount;
const VkPipeline* pLibraries;
} VkPipelineLibraryCreateInfoKHR;
#define VK_KHR_ray_tracing 1
#define VK_KHR_RAY_TRACING_SPEC_VERSION 8
#define VK_KHR_RAY_TRACING_EXTENSION_NAME "VK_KHR_ray_tracing"
typedef enum VkAccelerationStructureBuildTypeKHR {
VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_KHR = 0,
VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR = 1,
VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_OR_DEVICE_KHR = 2,
VK_ACCELERATION_STRUCTURE_BUILD_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF
} VkAccelerationStructureBuildTypeKHR;
typedef union VkDeviceOrHostAddressKHR {
VkDeviceAddress deviceAddress;
void* hostAddress;
} VkDeviceOrHostAddressKHR;
typedef union VkDeviceOrHostAddressConstKHR {
VkDeviceAddress deviceAddress;
const void* hostAddress;
} VkDeviceOrHostAddressConstKHR;
typedef struct VkAccelerationStructureBuildOffsetInfoKHR {
uint32_t primitiveCount;
uint32_t primitiveOffset;
uint32_t firstVertex;
uint32_t transformOffset;
} VkAccelerationStructureBuildOffsetInfoKHR;
typedef struct VkRayTracingShaderGroupCreateInfoKHR {
VkStructureType sType;
const void* pNext;
VkRayTracingShaderGroupTypeKHR type;
uint32_t generalShader;
uint32_t closestHitShader;
uint32_t anyHitShader;
uint32_t intersectionShader;
const void* pShaderGroupCaptureReplayHandle;
} VkRayTracingShaderGroupCreateInfoKHR;
typedef struct VkRayTracingPipelineInterfaceCreateInfoKHR {
VkStructureType sType;
const void* pNext;
uint32_t maxPayloadSize;
uint32_t maxAttributeSize;
uint32_t maxCallableSize;
} VkRayTracingPipelineInterfaceCreateInfoKHR;
typedef struct VkRayTracingPipelineCreateInfoKHR {
VkStructureType sType;
const void* pNext;
VkPipelineCreateFlags flags;
uint32_t stageCount;
const VkPipelineShaderStageCreateInfo* pStages;
uint32_t groupCount;
const VkRayTracingShaderGroupCreateInfoKHR* pGroups;
uint32_t maxRecursionDepth;
VkPipelineLibraryCreateInfoKHR libraries;
const VkRayTracingPipelineInterfaceCreateInfoKHR* pLibraryInterface;
VkPipelineLayout layout;
VkPipeline basePipelineHandle;
int32_t basePipelineIndex;
} VkRayTracingPipelineCreateInfoKHR;
typedef struct VkAccelerationStructureGeometryTrianglesDataKHR {
VkStructureType sType;
const void* pNext;
VkFormat vertexFormat;
VkDeviceOrHostAddressConstKHR vertexData;
VkDeviceSize vertexStride;
VkIndexType indexType;
VkDeviceOrHostAddressConstKHR indexData;
VkDeviceOrHostAddressConstKHR transformData;
} VkAccelerationStructureGeometryTrianglesDataKHR;
typedef struct VkAccelerationStructureGeometryAabbsDataKHR {
VkStructureType sType;
const void* pNext;
VkDeviceOrHostAddressConstKHR data;
VkDeviceSize stride;
} VkAccelerationStructureGeometryAabbsDataKHR;
typedef struct VkAccelerationStructureGeometryInstancesDataKHR {
VkStructureType sType;
const void* pNext;
VkBool32 arrayOfPointers;
VkDeviceOrHostAddressConstKHR data;
} VkAccelerationStructureGeometryInstancesDataKHR;
typedef union VkAccelerationStructureGeometryDataKHR {
VkAccelerationStructureGeometryTrianglesDataKHR triangles;
VkAccelerationStructureGeometryAabbsDataKHR aabbs;
VkAccelerationStructureGeometryInstancesDataKHR instances;
} VkAccelerationStructureGeometryDataKHR;
typedef struct VkAccelerationStructureGeometryKHR {
VkStructureType sType;
const void* pNext;
VkGeometryTypeKHR geometryType;
VkAccelerationStructureGeometryDataKHR geometry;
VkGeometryFlagsKHR flags;
} VkAccelerationStructureGeometryKHR;
typedef struct VkAccelerationStructureBuildGeometryInfoKHR {
VkStructureType sType;
const void* pNext;
VkAccelerationStructureTypeKHR type;
VkBuildAccelerationStructureFlagsKHR flags;
VkBool32 update;
VkAccelerationStructureKHR srcAccelerationStructure;
VkAccelerationStructureKHR dstAccelerationStructure;
VkBool32 geometryArrayOfPointers;
uint32_t geometryCount;
const VkAccelerationStructureGeometryKHR* const* ppGeometries;
VkDeviceOrHostAddressKHR scratchData;
} VkAccelerationStructureBuildGeometryInfoKHR;
typedef struct VkAccelerationStructureCreateGeometryTypeInfoKHR {
VkStructureType sType;
const void* pNext;
VkGeometryTypeKHR geometryType;
uint32_t maxPrimitiveCount;
VkIndexType indexType;
uint32_t maxVertexCount;
VkFormat vertexFormat;
VkBool32 allowsTransforms;
} VkAccelerationStructureCreateGeometryTypeInfoKHR;
typedef struct VkAccelerationStructureCreateInfoKHR {
VkStructureType sType;
const void* pNext;
VkDeviceSize compactedSize;
VkAccelerationStructureTypeKHR type;
VkBuildAccelerationStructureFlagsKHR flags;
uint32_t maxGeometryCount;
const VkAccelerationStructureCreateGeometryTypeInfoKHR* pGeometryInfos;
VkDeviceAddress deviceAddress;
} VkAccelerationStructureCreateInfoKHR;
typedef struct VkAccelerationStructureMemoryRequirementsInfoKHR {
VkStructureType sType;
const void* pNext;
VkAccelerationStructureMemoryRequirementsTypeKHR type;
VkAccelerationStructureBuildTypeKHR buildType;
VkAccelerationStructureKHR accelerationStructure;
} VkAccelerationStructureMemoryRequirementsInfoKHR;
typedef struct VkPhysicalDeviceRayTracingFeaturesKHR {
VkStructureType sType;
void* pNext;
VkBool32 rayTracing;
VkBool32 rayTracingShaderGroupHandleCaptureReplay;
VkBool32 rayTracingShaderGroupHandleCaptureReplayMixed;
VkBool32 rayTracingAccelerationStructureCaptureReplay;
VkBool32 rayTracingIndirectTraceRays;
VkBool32 rayTracingIndirectAccelerationStructureBuild;
VkBool32 rayTracingHostAccelerationStructureCommands;
VkBool32 rayQuery;
VkBool32 rayTracingPrimitiveCulling;
} VkPhysicalDeviceRayTracingFeaturesKHR;
typedef struct VkPhysicalDeviceRayTracingPropertiesKHR {
VkStructureType sType;
void* pNext;
uint32_t shaderGroupHandleSize;
uint32_t maxRecursionDepth;
uint32_t maxShaderGroupStride;
uint32_t shaderGroupBaseAlignment;
uint64_t maxGeometryCount;
uint64_t maxInstanceCount;
uint64_t maxPrimitiveCount;
uint32_t maxDescriptorSetAccelerationStructures;
uint32_t shaderGroupHandleCaptureReplaySize;
} VkPhysicalDeviceRayTracingPropertiesKHR;
typedef struct VkAccelerationStructureDeviceAddressInfoKHR {
VkStructureType sType;
const void* pNext;
VkAccelerationStructureKHR accelerationStructure;
} VkAccelerationStructureDeviceAddressInfoKHR;
typedef struct VkAccelerationStructureVersionKHR {
VkStructureType sType;
const void* pNext;
const uint8_t* versionData;
} VkAccelerationStructureVersionKHR;
typedef struct VkStridedBufferRegionKHR {
VkBuffer buffer;
VkDeviceSize offset;
VkDeviceSize stride;
VkDeviceSize size;
} VkStridedBufferRegionKHR;
typedef struct VkTraceRaysIndirectCommandKHR {
uint32_t width;
uint32_t height;
uint32_t depth;
} VkTraceRaysIndirectCommandKHR;
typedef struct VkCopyAccelerationStructureToMemoryInfoKHR {
VkStructureType sType;
const void* pNext;
VkAccelerationStructureKHR src;
VkDeviceOrHostAddressKHR dst;
VkCopyAccelerationStructureModeKHR mode;
} VkCopyAccelerationStructureToMemoryInfoKHR;
typedef struct VkCopyMemoryToAccelerationStructureInfoKHR {
VkStructureType sType;
const void* pNext;
VkDeviceOrHostAddressConstKHR src;
VkAccelerationStructureKHR dst;
VkCopyAccelerationStructureModeKHR mode;
} VkCopyMemoryToAccelerationStructureInfoKHR;
typedef struct VkCopyAccelerationStructureInfoKHR {
VkStructureType sType;
const void* pNext;
VkAccelerationStructureKHR src;
VkAccelerationStructureKHR dst;
VkCopyAccelerationStructureModeKHR mode;
} VkCopyAccelerationStructureInfoKHR;
typedef VkResult (VKAPI_PTR *PFN_vkCreateAccelerationStructureKHR)(VkDevice device, const VkAccelerationStructureCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureKHR* pAccelerationStructure);
typedef void (VKAPI_PTR *PFN_vkGetAccelerationStructureMemoryRequirementsKHR)(VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoKHR* pInfo, VkMemoryRequirements2* pMemoryRequirements);
typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructureKHR)(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildOffsetInfoKHR* const* ppOffsetInfos);
typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructureIndirectKHR)(VkCommandBuffer commandBuffer, const VkAccelerationStructureBuildGeometryInfoKHR* pInfo, VkBuffer indirectBuffer, VkDeviceSize indirectOffset, uint32_t indirectStride);
typedef VkResult (VKAPI_PTR *PFN_vkBuildAccelerationStructureKHR)(VkDevice device, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildOffsetInfoKHR* const* ppOffsetInfos);
typedef VkResult (VKAPI_PTR *PFN_vkCopyAccelerationStructureKHR)(VkDevice device, const VkCopyAccelerationStructureInfoKHR* pInfo);
typedef VkResult (VKAPI_PTR *PFN_vkCopyAccelerationStructureToMemoryKHR)(VkDevice device, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo);
typedef VkResult (VKAPI_PTR *PFN_vkCopyMemoryToAccelerationStructureKHR)(VkDevice device, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo);
typedef VkResult (VKAPI_PTR *PFN_vkWriteAccelerationStructuresPropertiesKHR)(VkDevice device, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, size_t dataSize, void* pData, size_t stride);
typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureKHR)(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureInfoKHR* pInfo);
typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureToMemoryKHR)(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo);
typedef void (VKAPI_PTR *PFN_vkCmdCopyMemoryToAccelerationStructureKHR)(VkCommandBuffer commandBuffer, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo);
typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysKHR)(VkCommandBuffer commandBuffer, const VkStridedBufferRegionKHR* pRaygenShaderBindingTable, const VkStridedBufferRegionKHR* pMissShaderBindingTable, const VkStridedBufferRegionKHR* pHitShaderBindingTable, const VkStridedBufferRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth);
typedef VkResult (VKAPI_PTR *PFN_vkCreateRayTracingPipelinesKHR)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines);
typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetAccelerationStructureDeviceAddressKHR)(VkDevice device, const VkAccelerationStructureDeviceAddressInfoKHR* pInfo);
typedef VkResult (VKAPI_PTR *PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData);
typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysIndirectKHR)(VkCommandBuffer commandBuffer, const VkStridedBufferRegionKHR* pRaygenShaderBindingTable, const VkStridedBufferRegionKHR* pMissShaderBindingTable, const VkStridedBufferRegionKHR* pHitShaderBindingTable, const VkStridedBufferRegionKHR* pCallableShaderBindingTable, VkBuffer buffer, VkDeviceSize offset);
typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceAccelerationStructureCompatibilityKHR)(VkDevice device, const VkAccelerationStructureVersionKHR* version);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateAccelerationStructureKHR(
VkDevice device,
const VkAccelerationStructureCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkAccelerationStructureKHR* pAccelerationStructure);
VKAPI_ATTR void VKAPI_CALL vkGetAccelerationStructureMemoryRequirementsKHR(
VkDevice device,
const VkAccelerationStructureMemoryRequirementsInfoKHR* pInfo,
VkMemoryRequirements2* pMemoryRequirements);
VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructureKHR(
VkCommandBuffer commandBuffer,
uint32_t infoCount,
const VkAccelerationStructureBuildGeometryInfoKHR* pInfos,
const VkAccelerationStructureBuildOffsetInfoKHR* const* ppOffsetInfos);
VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructureIndirectKHR(
VkCommandBuffer commandBuffer,
const VkAccelerationStructureBuildGeometryInfoKHR* pInfo,
VkBuffer indirectBuffer,
VkDeviceSize indirectOffset,
uint32_t indirectStride);
VKAPI_ATTR VkResult VKAPI_CALL vkBuildAccelerationStructureKHR(
VkDevice device,
uint32_t infoCount,
const VkAccelerationStructureBuildGeometryInfoKHR* pInfos,
const VkAccelerationStructureBuildOffsetInfoKHR* const* ppOffsetInfos);
VKAPI_ATTR VkResult VKAPI_CALL vkCopyAccelerationStructureKHR(
VkDevice device,
const VkCopyAccelerationStructureInfoKHR* pInfo);
VKAPI_ATTR VkResult VKAPI_CALL vkCopyAccelerationStructureToMemoryKHR(
VkDevice device,
const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo);
VKAPI_ATTR VkResult VKAPI_CALL vkCopyMemoryToAccelerationStructureKHR(
VkDevice device,
const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo);
VKAPI_ATTR VkResult VKAPI_CALL vkWriteAccelerationStructuresPropertiesKHR(
VkDevice device,
uint32_t accelerationStructureCount,
const VkAccelerationStructureKHR* pAccelerationStructures,
VkQueryType queryType,
size_t dataSize,
void* pData,
size_t stride);
VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureKHR(
VkCommandBuffer commandBuffer,
const VkCopyAccelerationStructureInfoKHR* pInfo);
VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureToMemoryKHR(
VkCommandBuffer commandBuffer,
const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo);
VKAPI_ATTR void VKAPI_CALL vkCmdCopyMemoryToAccelerationStructureKHR(
VkCommandBuffer commandBuffer,
const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo);
VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysKHR(
VkCommandBuffer commandBuffer,
const VkStridedBufferRegionKHR* pRaygenShaderBindingTable,
const VkStridedBufferRegionKHR* pMissShaderBindingTable,
const VkStridedBufferRegionKHR* pHitShaderBindingTable,
const VkStridedBufferRegionKHR* pCallableShaderBindingTable,
uint32_t width,
uint32_t height,
uint32_t depth);
VKAPI_ATTR VkResult VKAPI_CALL vkCreateRayTracingPipelinesKHR(
VkDevice device,
VkPipelineCache pipelineCache,
uint32_t createInfoCount,
const VkRayTracingPipelineCreateInfoKHR* pCreateInfos,
const VkAllocationCallbacks* pAllocator,
VkPipeline* pPipelines);
VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetAccelerationStructureDeviceAddressKHR(
VkDevice device,
const VkAccelerationStructureDeviceAddressInfoKHR* pInfo);
VKAPI_ATTR VkResult VKAPI_CALL vkGetRayTracingCaptureReplayShaderGroupHandlesKHR(
VkDevice device,
VkPipeline pipeline,
uint32_t firstGroup,
uint32_t groupCount,
size_t dataSize,
void* pData);
VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysIndirectKHR(
VkCommandBuffer commandBuffer,
const VkStridedBufferRegionKHR* pRaygenShaderBindingTable,
const VkStridedBufferRegionKHR* pMissShaderBindingTable,
const VkStridedBufferRegionKHR* pHitShaderBindingTable,
const VkStridedBufferRegionKHR* pCallableShaderBindingTable,
VkBuffer buffer,
VkDeviceSize offset);
VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceAccelerationStructureCompatibilityKHR(
VkDevice device,
const VkAccelerationStructureVersionKHR* version);
#endif
#ifdef __cplusplus
}
#endif
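The block removed above is the provisional VK_KHR_ray_tracing interface from the beta header (VkStridedBufferRegionKHR, the combined pipeline and acceleration-structure create infos, and the deferred-operation prototypes); the updated dispatch stubs further down in this diff show the finalized replacement, where shader binding tables are passed as VkStridedDeviceAddressRegionKHR device-address regions. A hedged sketch of recording a trace through the finalized entry point; the addresses, strides, and group layout are placeholders, not a working setup:

```cpp
#include <vulkan/vulkan.h>

// Only the call shape of the finalized API is shown; the device, command buffer,
// pipeline, and SBT device addresses are assumed to have been set up elsewhere.
void record_trace(VkDevice device, VkCommandBuffer cmd,
                  VkDeviceAddress sbtBase, VkDeviceSize handleStride,
                  uint32_t width, uint32_t height) {
    auto traceRays = reinterpret_cast<PFN_vkCmdTraceRaysKHR>(
        vkGetDeviceProcAddr(device, "vkCmdTraceRaysKHR"));
    if (traceRays == nullptr)
        return;  // extension not available on this device

    VkStridedDeviceAddressRegionKHR raygen{}, miss{}, hit{}, callable{};
    raygen.deviceAddress = sbtBase;                  // one raygen group
    raygen.stride = raygen.size = handleStride;
    miss.deviceAddress = sbtBase + handleStride;     // miss group(s)
    miss.stride = miss.size = handleStride;
    hit.deviceAddress = sbtBase + 2 * handleStride;  // hit group(s)
    hit.stride = hit.size = handleStride;
    // callable stays all-zero when no callable shaders are used

    traceRays(cmd, &raygen, &miss, &hit, &callable, width, height, 1);
}
```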

File diff suppressed because it is too large


@ -253,7 +253,7 @@ void *loader_device_heap_realloc(const struct loader_device *device, void *pMemo
}
// Environment variables
#if defined(__linux__) || defined(__APPLE__)
#if defined(__linux__) || defined(__APPLE__) || defined(__Fuchsia__)
static inline bool IsHighIntegrity() {
return geteuid() != getuid() || getegid() != getgid();
@ -277,6 +277,8 @@ static inline char *loader_secure_getenv(const char *name, const struct loader_i
// This algorithm is derived from glibc code that sets an internal
// variable (__libc_enable_secure) if the process is running under setuid or setgid.
return IsHighIntegrity() ? NULL : loader_getenv(name, inst);
#elif defined(__Fuchsia__)
return loader_getenv(name, inst);
#else
// Linux
#if defined(HAVE_SECURE_GETENV) && !defined(USE_UNSAFE_FILE_SEARCH)
@ -287,13 +289,12 @@ static inline char *loader_secure_getenv(const char *name, const struct loader_i
out = __secure_getenv(name);
#else
out = loader_getenv(name, inst);
#if !defined(USE_UNSAFE_FILE_SEARCH)
loader_log(inst, LOADER_INFO_BIT, 0, "Loader is using non-secure environment variable lookup for %s", name);
#endif
#endif
if (out == NULL) {
loader_log(inst, LOADER_INFO_BIT, 0,
"Loader is running with elevated permissions. Environment variable %s will be ignored.", name);
}
return out;
#endif
}
static inline void loader_free_getenv(char *val, const struct loader_instance *inst) {
@ -355,8 +356,8 @@ static inline char *loader_getenv(const char *name, const struct loader_instance
static inline char *loader_secure_getenv(const char *name, const struct loader_instance *inst) {
#if !defined(USE_UNSAFE_FILE_SEARCH)
if (IsHighIntegrity()) {
loader_log(inst, LOADER_INFO_BIT, 0,
"Loader is running with elevated permissions. Environment variable %s will be ignored.", name);
loader_log(inst, LOADER_INFO_BIT, 0, "Loader is running with elevated permissions. Environment variable %s will be ignored",
name);
return NULL;
}
#endif
@ -2317,7 +2318,11 @@ static VkResult loader_scanned_icd_add(const struct loader_instance *inst, struc
// TODO implement smarter opening/closing of libraries. For now this
// function leaves libraries open and the scanned_icd_clear closes them
#if defined(__Fuchsia__)
handle = loader_platform_open_driver(filename);
#else
handle = loader_platform_open_library(filename);
#endif
if (NULL == handle) {
loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, loader_platform_open_library_error(filename));
goto out;
@ -2647,7 +2652,12 @@ static VkResult loader_get_json(const struct loader_instance *inst, const char *
res = VK_ERROR_INITIALIZATION_FAILED;
goto out;
}
fseek(file, 0, SEEK_END);
// NOTE: We can't just use fseek(file, 0, SEEK_END) because that isn't guaranteed to be supported on all systems
do {
// We're just seeking the end of the file, so this buffer is never used
char buffer[256];
fread(buffer, 1, sizeof(buffer), file);
} while (!feof(file));
len = ftell(file);
fseek(file, 0, SEEK_SET);
json_buf = (char *)loader_stack_alloc(len + 1);
@ -3189,30 +3199,31 @@ static VkResult loaderReadLayerJson(const struct loader_instance *inst, struct l
name);
} else {
props->num_blacklist_layers = cJSON_GetArraySize(blacklisted_layers);
// Allocate the blacklist array
props->blacklist_layer_names = loader_instance_heap_alloc(
inst, sizeof(char[MAX_STRING_SIZE]) * props->num_blacklist_layers, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (props->blacklist_layer_names == NULL) {
result = VK_ERROR_OUT_OF_HOST_MEMORY;
goto out;
}
// Copy the blacklisted layers into the array
for (i = 0; i < (int)props->num_blacklist_layers; ++i) {
cJSON *black_layer = cJSON_GetArrayItem(blacklisted_layers, i);
if (black_layer == NULL) {
continue;
}
temp = cJSON_Print(black_layer);
if (temp == NULL) {
if (props->num_blacklist_layers > 0) {
// Allocate the blacklist array
props->blacklist_layer_names = loader_instance_heap_alloc(
inst, sizeof(char[MAX_STRING_SIZE]) * props->num_blacklist_layers, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (props->blacklist_layer_names == NULL) {
result = VK_ERROR_OUT_OF_HOST_MEMORY;
goto out;
}
temp[strlen(temp) - 1] = '\0';
strncpy(props->blacklist_layer_names[i], temp + 1, MAX_STRING_SIZE - 1);
props->blacklist_layer_names[i][MAX_STRING_SIZE - 1] = '\0';
cJSON_Free(temp);
// Copy the blacklisted layers into the array
for (i = 0; i < (int)props->num_blacklist_layers; ++i) {
cJSON *black_layer = cJSON_GetArrayItem(blacklisted_layers, i);
if (black_layer == NULL) {
continue;
}
temp = cJSON_Print(black_layer);
if (temp == NULL) {
result = VK_ERROR_OUT_OF_HOST_MEMORY;
goto out;
}
temp[strlen(temp) - 1] = '\0';
strncpy(props->blacklist_layer_names[i], temp + 1, MAX_STRING_SIZE - 1);
props->blacklist_layer_names[i][MAX_STRING_SIZE - 1] = '\0';
cJSON_Free(temp);
}
}
}
}
@ -3226,28 +3237,29 @@ static VkResult loaderReadLayerJson(const struct loader_instance *inst, struct l
}
int count = cJSON_GetArraySize(override_paths);
props->num_override_paths = count;
if (count > 0) {
// Allocate buffer for override paths
props->override_paths =
loader_instance_heap_alloc(inst, sizeof(char[MAX_STRING_SIZE]) * count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (NULL == props->override_paths) {
result = VK_ERROR_OUT_OF_HOST_MEMORY;
goto out;
}
// Allocate buffer for override paths
props->override_paths =
loader_instance_heap_alloc(inst, sizeof(char[MAX_STRING_SIZE]) * count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (NULL == props->override_paths) {
result = VK_ERROR_OUT_OF_HOST_MEMORY;
goto out;
}
// Copy the override paths into the array
for (i = 0; i < count; i++) {
cJSON *override_path = cJSON_GetArrayItem(override_paths, i);
if (NULL != override_path) {
temp = cJSON_Print(override_path);
if (NULL == temp) {
result = VK_ERROR_OUT_OF_HOST_MEMORY;
goto out;
// Copy the override paths into the array
for (i = 0; i < count; i++) {
cJSON *override_path = cJSON_GetArrayItem(override_paths, i);
if (NULL != override_path) {
temp = cJSON_Print(override_path);
if (NULL == temp) {
result = VK_ERROR_OUT_OF_HOST_MEMORY;
goto out;
}
temp[strlen(temp) - 1] = '\0';
strncpy(props->override_paths[i], temp + 1, MAX_STRING_SIZE - 1);
props->override_paths[i][MAX_STRING_SIZE - 1] = '\0';
cJSON_Free(temp);
}
temp[strlen(temp) - 1] = '\0';
strncpy(props->override_paths[i], temp + 1, MAX_STRING_SIZE - 1);
props->override_paths[i][MAX_STRING_SIZE - 1] = '\0';
cJSON_Free(temp);
}
}
}
@ -3953,12 +3965,14 @@ static VkResult ReadDataFilesInSearchPaths(const struct loader_instance *inst, e
if (xdgdatadirs == NULL) {
xdgdata_alloc = false;
}
#if !defined(__Fuchsia__)
if (xdgconfdirs == NULL || xdgconfdirs[0] == '\0') {
xdgconfdirs = FALLBACK_CONFIG_DIRS;
}
if (xdgdatadirs == NULL || xdgdatadirs[0] == '\0') {
xdgdatadirs = FALLBACK_DATA_DIRS;
}
#endif
// Only use HOME if XDG_DATA_HOME is not present on the system
if (NULL == xdgdatahome) {
@ -4306,7 +4320,6 @@ out:
static VkResult ReadDataFilesInRegistry(const struct loader_instance *inst, enum loader_data_files_type data_file_type,
bool warn_if_not_present, char *registry_location, struct loader_data_files *out_files) {
VkResult vk_result = VK_SUCCESS;
bool is_icd = (data_file_type == LOADER_DATA_FILE_MANIFEST_ICD);
char *search_path = NULL;
// These calls look at the PNP/Device section of the registry.
@ -6044,7 +6057,7 @@ VkResult loader_create_instance_chain(const VkInstanceCreateInfo *pCreateInfo, c
VkLoaderFeatureFlags feature_flags = 0;
#if defined(_WIN32)
IDXGIFactory6* dxgi_factory = NULL;
HRESULT hres = fpCreateDXGIFactory1(&IID_IDXGIFactory6, &dxgi_factory);
HRESULT hres = fpCreateDXGIFactory1(&IID_IDXGIFactory6, (void **)&dxgi_factory);
if (hres == S_OK) {
feature_flags |= VK_LOADER_FEATURE_PHYSICAL_DEVICE_SORTING;
dxgi_factory->lpVtbl->Release(dxgi_factory);
@ -7073,7 +7086,7 @@ VkResult ReadSortedPhysicalDevices(struct loader_instance *inst, struct LoaderSo
uint32_t sorted_alloc = 0;
struct loader_icd_term *icd_term = NULL;
IDXGIFactory6* dxgi_factory = NULL;
HRESULT hres = fpCreateDXGIFactory1(&IID_IDXGIFactory6, &dxgi_factory);
HRESULT hres = fpCreateDXGIFactory1(&IID_IDXGIFactory6, (void **)&dxgi_factory);
if (hres != S_OK) {
loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, "Failed to create DXGI factory 6. Physical devices will not be sorted");
}
@ -7090,7 +7103,7 @@ VkResult ReadSortedPhysicalDevices(struct loader_instance *inst, struct LoaderSo
*sorted_count = 0;
for (uint32_t i = 0; ; ++i) {
IDXGIAdapter1* adapter;
hres = dxgi_factory->lpVtbl->EnumAdapterByGpuPreference(dxgi_factory, i, DXGI_GPU_PREFERENCE_UNSPECIFIED, &IID_IDXGIAdapter1, &adapter);
hres = dxgi_factory->lpVtbl->EnumAdapterByGpuPreference(dxgi_factory, i, DXGI_GPU_PREFERENCE_UNSPECIFIED, &IID_IDXGIAdapter1, (void **)&adapter);
if (hres == DXGI_ERROR_NOT_FOUND) {
break; // No more adapters
}
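Among the loader changes above, the JSON manifest reader no longer relies on fseek(file, 0, SEEK_END) to size the file; it reads fixed-size chunks until feof() and then takes ftell(). A stand-alone sketch of that pattern (the function name and surrounding error handling are illustrative, not the loader's):

```cpp
#include <cstdio>

// Determine a file's length by consuming it in fixed chunks until EOF instead
// of fseek(file, 0, SEEK_END), which, as the loader's comment notes, is not
// guaranteed to be supported on all systems.
long file_length_by_reading(std::FILE* file) {
    char buffer[256];
    do {
        // The bytes themselves are discarded; this only advances the position.
        std::fread(buffer, 1, sizeof(buffer), file);
    } while (!std::feof(file));
    long len = std::ftell(file);
    std::fseek(file, 0, SEEK_SET);   // rewind before the real read, as the loader does
    return len;
}
```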


@ -337,10 +337,16 @@ struct loader_instance {
#endif
#ifdef VK_USE_PLATFORM_IOS_MVK
bool wsi_ios_surface_enabled;
#endif
#ifdef VK_USE_PLATFORM_GGP
bool wsi_ggp_surface_enabled;
#endif
bool wsi_headless_surface_enabled;
#if defined(VK_USE_PLATFORM_METAL_EXT)
bool wsi_metal_surface_enabled;
#endif
#ifdef VK_USE_PLATFORM_FUCHSIA
bool wsi_imagepipe_surface_enabled;
#endif
bool wsi_display_enabled;
bool wsi_display_props2_enabled;


@ -91,24 +91,15 @@ static VKAPI_ATTR void VKAPI_CALL StubCmdDrawIndexedIndirectCountKHR(VkCommandBu
static VKAPI_ATTR VkResult VKAPI_CALL StubGetSemaphoreCounterValueKHR(VkDevice device, VkSemaphore semaphore, uint64_t* pValue) { return VK_SUCCESS; };
static VKAPI_ATTR VkResult VKAPI_CALL StubWaitSemaphoresKHR(VkDevice device, const VkSemaphoreWaitInfo* pWaitInfo, uint64_t timeout) { return VK_SUCCESS; };
static VKAPI_ATTR VkResult VKAPI_CALL StubSignalSemaphoreKHR(VkDevice device, const VkSemaphoreSignalInfo* pSignalInfo) { return VK_SUCCESS; };
static VKAPI_ATTR void VKAPI_CALL StubCmdSetFragmentShadingRateKHR(VkCommandBuffer commandBuffer, const VkExtent2D* pFragmentSize, const VkFragmentShadingRateCombinerOpKHR combinerOps[2]) { };
static VKAPI_ATTR VkDeviceAddress VKAPI_CALL StubGetBufferDeviceAddressKHR(VkDevice device, const VkBufferDeviceAddressInfo* pInfo) { return 0L; };
static VKAPI_ATTR uint64_t VKAPI_CALL StubGetBufferOpaqueCaptureAddressKHR(VkDevice device, const VkBufferDeviceAddressInfo* pInfo) { return 0L; };
static VKAPI_ATTR uint64_t VKAPI_CALL StubGetDeviceMemoryOpaqueCaptureAddressKHR(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo) { return 0L; };
#ifdef VK_ENABLE_BETA_EXTENSIONS
static VKAPI_ATTR VkResult VKAPI_CALL StubCreateDeferredOperationKHR(VkDevice device, const VkAllocationCallbacks* pAllocator, VkDeferredOperationKHR* pDeferredOperation) { return VK_SUCCESS; };
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
static VKAPI_ATTR void VKAPI_CALL StubDestroyDeferredOperationKHR(VkDevice device, VkDeferredOperationKHR operation, const VkAllocationCallbacks* pAllocator) { };
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
static VKAPI_ATTR uint32_t VKAPI_CALL StubGetDeferredOperationMaxConcurrencyKHR(VkDevice device, VkDeferredOperationKHR operation) { return 0; };
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
static VKAPI_ATTR VkResult VKAPI_CALL StubGetDeferredOperationResultKHR(VkDevice device, VkDeferredOperationKHR operation) { return VK_SUCCESS; };
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
static VKAPI_ATTR VkResult VKAPI_CALL StubDeferredOperationJoinKHR(VkDevice device, VkDeferredOperationKHR operation) { return VK_SUCCESS; };
#endif // VK_ENABLE_BETA_EXTENSIONS
static VKAPI_ATTR VkResult VKAPI_CALL StubGetPipelineExecutablePropertiesKHR(VkDevice device, const VkPipelineInfoKHR* pPipelineInfo, uint32_t* pExecutableCount, VkPipelineExecutablePropertiesKHR* pProperties) { return VK_SUCCESS; };
static VKAPI_ATTR VkResult VKAPI_CALL StubGetPipelineExecutableStatisticsKHR(VkDevice device, const VkPipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pStatisticCount, VkPipelineExecutableStatisticKHR* pStatistics) { return VK_SUCCESS; };
static VKAPI_ATTR VkResult VKAPI_CALL StubGetPipelineExecutableInternalRepresentationsKHR(VkDevice device, const VkPipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pInternalRepresentationCount, VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations) { return VK_SUCCESS; };
@ -164,20 +155,17 @@ static VKAPI_ATTR void VKAPI_CALL StubCmdBindShadingRateImageNV(VkCommandBuffer
static VKAPI_ATTR void VKAPI_CALL StubCmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkShadingRatePaletteNV* pShadingRatePalettes) { };
static VKAPI_ATTR void VKAPI_CALL StubCmdSetCoarseSampleOrderNV(VkCommandBuffer commandBuffer, VkCoarseSampleOrderTypeNV sampleOrderType, uint32_t customSampleOrderCount, const VkCoarseSampleOrderCustomNV* pCustomSampleOrders) { };
static VKAPI_ATTR VkResult VKAPI_CALL StubCreateAccelerationStructureNV(VkDevice device, const VkAccelerationStructureCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureNV* pAccelerationStructure) { return VK_SUCCESS; };
static VKAPI_ATTR void VKAPI_CALL StubDestroyAccelerationStructureKHR(VkDevice device, VkAccelerationStructureKHR accelerationStructure, const VkAllocationCallbacks* pAllocator) { };
static VKAPI_ATTR void VKAPI_CALL StubDestroyAccelerationStructureNV(VkDevice device, VkAccelerationStructureKHR accelerationStructure, const VkAllocationCallbacks* pAllocator) { };
static VKAPI_ATTR void VKAPI_CALL StubDestroyAccelerationStructureNV(VkDevice device, VkAccelerationStructureNV accelerationStructure, const VkAllocationCallbacks* pAllocator) { };
static VKAPI_ATTR void VKAPI_CALL StubGetAccelerationStructureMemoryRequirementsNV(VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements) { };
static VKAPI_ATTR VkResult VKAPI_CALL StubBindAccelerationStructureMemoryKHR(VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoKHR* pBindInfos) { return VK_SUCCESS; };
static VKAPI_ATTR VkResult VKAPI_CALL StubBindAccelerationStructureMemoryNV(VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoKHR* pBindInfos) { return VK_SUCCESS; };
static VKAPI_ATTR void VKAPI_CALL StubCmdBuildAccelerationStructureNV(VkCommandBuffer commandBuffer, const VkAccelerationStructureInfoNV* pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset, VkBool32 update, VkAccelerationStructureKHR dst, VkAccelerationStructureKHR src, VkBuffer scratch, VkDeviceSize scratchOffset) { };
static VKAPI_ATTR void VKAPI_CALL StubCmdCopyAccelerationStructureNV(VkCommandBuffer commandBuffer, VkAccelerationStructureKHR dst, VkAccelerationStructureKHR src, VkCopyAccelerationStructureModeKHR mode) { };
static VKAPI_ATTR VkResult VKAPI_CALL StubBindAccelerationStructureMemoryNV(VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoNV* pBindInfos) { return VK_SUCCESS; };
static VKAPI_ATTR void VKAPI_CALL StubCmdBuildAccelerationStructureNV(VkCommandBuffer commandBuffer, const VkAccelerationStructureInfoNV* pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset, VkBool32 update, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkBuffer scratch, VkDeviceSize scratchOffset) { };
static VKAPI_ATTR void VKAPI_CALL StubCmdCopyAccelerationStructureNV(VkCommandBuffer commandBuffer, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkCopyAccelerationStructureModeKHR mode) { };
static VKAPI_ATTR void VKAPI_CALL StubCmdTraceRaysNV(VkCommandBuffer commandBuffer, VkBuffer raygenShaderBindingTableBuffer, VkDeviceSize raygenShaderBindingOffset, VkBuffer missShaderBindingTableBuffer, VkDeviceSize missShaderBindingOffset, VkDeviceSize missShaderBindingStride, VkBuffer hitShaderBindingTableBuffer, VkDeviceSize hitShaderBindingOffset, VkDeviceSize hitShaderBindingStride, VkBuffer callableShaderBindingTableBuffer, VkDeviceSize callableShaderBindingOffset, VkDeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth) { };
static VKAPI_ATTR VkResult VKAPI_CALL StubCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines) { return VK_SUCCESS; };
static VKAPI_ATTR VkResult VKAPI_CALL StubGetRayTracingShaderGroupHandlesKHR(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData) { return VK_SUCCESS; };
static VKAPI_ATTR VkResult VKAPI_CALL StubGetRayTracingShaderGroupHandlesNV(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData) { return VK_SUCCESS; };
static VKAPI_ATTR VkResult VKAPI_CALL StubGetAccelerationStructureHandleNV(VkDevice device, VkAccelerationStructureKHR accelerationStructure, size_t dataSize, void* pData) { return VK_SUCCESS; };
static VKAPI_ATTR void VKAPI_CALL StubCmdWriteAccelerationStructuresPropertiesKHR(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery) { };
static VKAPI_ATTR void VKAPI_CALL StubCmdWriteAccelerationStructuresPropertiesNV(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery) { };
static VKAPI_ATTR VkResult VKAPI_CALL StubGetAccelerationStructureHandleNV(VkDevice device, VkAccelerationStructureNV accelerationStructure, size_t dataSize, void* pData) { return VK_SUCCESS; };
static VKAPI_ATTR void VKAPI_CALL StubCmdWriteAccelerationStructuresPropertiesNV(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureNV* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery) { };
static VKAPI_ATTR VkResult VKAPI_CALL StubCompileDeferredNV(VkDevice device, VkPipeline pipeline, uint32_t shader) { return VK_SUCCESS; };
static VKAPI_ATTR VkResult VKAPI_CALL StubGetMemoryHostPointerPropertiesEXT(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties) { return VK_SUCCESS; };
static VKAPI_ATTR void VKAPI_CALL StubCmdWriteBufferMarkerAMD(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) { };
@ -232,60 +220,29 @@ static VKAPI_ATTR VkResult VKAPI_CALL StubCreatePrivateDataSlotEXT(VkDevice devi
static VKAPI_ATTR void VKAPI_CALL StubDestroyPrivateDataSlotEXT(VkDevice device, VkPrivateDataSlotEXT privateDataSlot, const VkAllocationCallbacks* pAllocator) { };
static VKAPI_ATTR VkResult VKAPI_CALL StubSetPrivateDataEXT(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlotEXT privateDataSlot, uint64_t data) { return VK_SUCCESS; };
static VKAPI_ATTR void VKAPI_CALL StubGetPrivateDataEXT(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlotEXT privateDataSlot, uint64_t* pData) { };
#ifdef VK_ENABLE_BETA_EXTENSIONS
static VKAPI_ATTR void VKAPI_CALL StubCmdSetFragmentShadingRateEnumNV(VkCommandBuffer commandBuffer, VkFragmentShadingRateNV shadingRate, const VkFragmentShadingRateCombinerOpKHR combinerOps[2]) { };
static VKAPI_ATTR VkResult VKAPI_CALL StubCreateAccelerationStructureKHR(VkDevice device, const VkAccelerationStructureCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureKHR* pAccelerationStructure) { return VK_SUCCESS; };
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
static VKAPI_ATTR void VKAPI_CALL StubGetAccelerationStructureMemoryRequirementsKHR(VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoKHR* pInfo, VkMemoryRequirements2* pMemoryRequirements) { };
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
static VKAPI_ATTR void VKAPI_CALL StubCmdBuildAccelerationStructureKHR(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildOffsetInfoKHR* const* ppOffsetInfos) { };
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
static VKAPI_ATTR void VKAPI_CALL StubCmdBuildAccelerationStructureIndirectKHR(VkCommandBuffer commandBuffer, const VkAccelerationStructureBuildGeometryInfoKHR* pInfo, VkBuffer indirectBuffer, VkDeviceSize indirectOffset, uint32_t indirectStride) { };
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
static VKAPI_ATTR VkResult VKAPI_CALL StubBuildAccelerationStructureKHR(VkDevice device, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildOffsetInfoKHR* const* ppOffsetInfos) { return VK_SUCCESS; };
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
static VKAPI_ATTR VkResult VKAPI_CALL StubCopyAccelerationStructureKHR(VkDevice device, const VkCopyAccelerationStructureInfoKHR* pInfo) { return VK_SUCCESS; };
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
static VKAPI_ATTR VkResult VKAPI_CALL StubCopyAccelerationStructureToMemoryKHR(VkDevice device, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo) { return VK_SUCCESS; };
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
static VKAPI_ATTR VkResult VKAPI_CALL StubCopyMemoryToAccelerationStructureKHR(VkDevice device, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo) { return VK_SUCCESS; };
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
static VKAPI_ATTR void VKAPI_CALL StubDestroyAccelerationStructureKHR(VkDevice device, VkAccelerationStructureKHR accelerationStructure, const VkAllocationCallbacks* pAllocator) { };
static VKAPI_ATTR void VKAPI_CALL StubCmdBuildAccelerationStructuresKHR(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos) { };
static VKAPI_ATTR void VKAPI_CALL StubCmdBuildAccelerationStructuresIndirectKHR(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkDeviceAddress* pIndirectDeviceAddresses, const uint32_t* pIndirectStrides, const uint32_t* const* ppMaxPrimitiveCounts) { };
static VKAPI_ATTR VkResult VKAPI_CALL StubBuildAccelerationStructuresKHR(VkDevice device, VkDeferredOperationKHR deferredOperation, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos) { return VK_SUCCESS; };
static VKAPI_ATTR VkResult VKAPI_CALL StubCopyAccelerationStructureKHR(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureInfoKHR* pInfo) { return VK_SUCCESS; };
static VKAPI_ATTR VkResult VKAPI_CALL StubCopyAccelerationStructureToMemoryKHR(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo) { return VK_SUCCESS; };
static VKAPI_ATTR VkResult VKAPI_CALL StubCopyMemoryToAccelerationStructureKHR(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo) { return VK_SUCCESS; };
static VKAPI_ATTR VkResult VKAPI_CALL StubWriteAccelerationStructuresPropertiesKHR(VkDevice device, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, size_t dataSize, void* pData, size_t stride) { return VK_SUCCESS; };
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
static VKAPI_ATTR void VKAPI_CALL StubCmdCopyAccelerationStructureKHR(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureInfoKHR* pInfo) { };
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
static VKAPI_ATTR void VKAPI_CALL StubCmdCopyAccelerationStructureToMemoryKHR(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo) { };
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
static VKAPI_ATTR void VKAPI_CALL StubCmdCopyMemoryToAccelerationStructureKHR(VkCommandBuffer commandBuffer, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo) { };
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
static VKAPI_ATTR void VKAPI_CALL StubCmdTraceRaysKHR(VkCommandBuffer commandBuffer, const VkStridedBufferRegionKHR* pRaygenShaderBindingTable, const VkStridedBufferRegionKHR* pMissShaderBindingTable, const VkStridedBufferRegionKHR* pHitShaderBindingTable, const VkStridedBufferRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth) { };
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
static VKAPI_ATTR VkResult VKAPI_CALL StubCreateRayTracingPipelinesKHR(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines) { return VK_SUCCESS; };
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
static VKAPI_ATTR VkDeviceAddress VKAPI_CALL StubGetAccelerationStructureDeviceAddressKHR(VkDevice device, const VkAccelerationStructureDeviceAddressInfoKHR* pInfo) { return 0L; };
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
static VKAPI_ATTR void VKAPI_CALL StubCmdWriteAccelerationStructuresPropertiesKHR(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery) { };
static VKAPI_ATTR void VKAPI_CALL StubGetDeviceAccelerationStructureCompatibilityKHR(VkDevice device, const VkAccelerationStructureVersionInfoKHR* pVersionInfo, VkAccelerationStructureCompatibilityKHR* pCompatibility) { };
static VKAPI_ATTR void VKAPI_CALL StubGetAccelerationStructureBuildSizesKHR(VkDevice device, VkAccelerationStructureBuildTypeKHR buildType, const VkAccelerationStructureBuildGeometryInfoKHR* pBuildInfo, const uint32_t* pMaxPrimitiveCounts, VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo) { };
static VKAPI_ATTR void VKAPI_CALL StubCmdTraceRaysKHR(VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth) { };
static VKAPI_ATTR VkResult VKAPI_CALL StubCreateRayTracingPipelinesKHR(VkDevice device, VkDeferredOperationKHR deferredOperation, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines) { return VK_SUCCESS; };
static VKAPI_ATTR VkResult VKAPI_CALL StubGetRayTracingCaptureReplayShaderGroupHandlesKHR(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData) { return VK_SUCCESS; };
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
static VKAPI_ATTR void VKAPI_CALL StubCmdTraceRaysIndirectKHR(VkCommandBuffer commandBuffer, const VkStridedBufferRegionKHR* pRaygenShaderBindingTable, const VkStridedBufferRegionKHR* pMissShaderBindingTable, const VkStridedBufferRegionKHR* pHitShaderBindingTable, const VkStridedBufferRegionKHR* pCallableShaderBindingTable, VkBuffer buffer, VkDeviceSize offset) { };
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
static VKAPI_ATTR VkResult VKAPI_CALL StubGetDeviceAccelerationStructureCompatibilityKHR(VkDevice device, const VkAccelerationStructureVersionKHR* version) { return VK_SUCCESS; };
#endif // VK_ENABLE_BETA_EXTENSIONS
static VKAPI_ATTR void VKAPI_CALL StubCmdTraceRaysIndirectKHR(VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, VkDeviceAddress indirectDeviceAddress) { };
static VKAPI_ATTR VkDeviceSize VKAPI_CALL StubGetRayTracingShaderGroupStackSizeKHR(VkDevice device, VkPipeline pipeline, uint32_t group, VkShaderGroupShaderKHR groupShader) { return 0L; };
static VKAPI_ATTR void VKAPI_CALL StubCmdSetRayTracingPipelineStackSizeKHR(VkCommandBuffer commandBuffer, uint32_t pipelineStackSize) { };
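The stub functions above exist so that the dispatch-table initialization in the next hunk can always install a callable pointer: each entry is looked up with the device's GetDeviceProcAddr and falls back to a matching no-op stub when the driver or layer chain does not provide it. A condensed sketch of that pattern, using a hypothetical one-entry table rather than the real layer dispatch table:

```cpp
#include <vulkan/vulkan.h>

// Hypothetical one-entry dispatch table; the real table has one slot per
// device entry point.
struct MiniDispatchTable {
    PFN_vkCmdSetRayTracingPipelineStackSizeKHR CmdSetRayTracingPipelineStackSizeKHR = nullptr;
};

// No-op fallback with the same signature, so callers never hit a null pointer.
static VKAPI_ATTR void VKAPI_CALL FallbackCmdSetRayTracingPipelineStackSizeKHR(
    VkCommandBuffer, uint32_t) {}

void init_table(VkDevice device, PFN_vkGetDeviceProcAddr gpa, MiniDispatchTable* table) {
    table->CmdSetRayTracingPipelineStackSizeKHR =
        reinterpret_cast<PFN_vkCmdSetRayTracingPipelineStackSizeKHR>(
            gpa(device, "vkCmdSetRayTracingPipelineStackSizeKHR"));
    if (table->CmdSetRayTracingPipelineStackSizeKHR == nullptr)
        table->CmdSetRayTracingPipelineStackSizeKHR = FallbackCmdSetRayTracingPipelineStackSizeKHR;
}
```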
@ -554,32 +511,24 @@ static inline void layer_init_device_dispatch_table(VkDevice device, VkLayerDisp
if (table->WaitSemaphoresKHR == nullptr) { table->WaitSemaphoresKHR = (PFN_vkWaitSemaphoresKHR)StubWaitSemaphoresKHR; }
table->SignalSemaphoreKHR = (PFN_vkSignalSemaphoreKHR) gpa(device, "vkSignalSemaphoreKHR");
if (table->SignalSemaphoreKHR == nullptr) { table->SignalSemaphoreKHR = (PFN_vkSignalSemaphoreKHR)StubSignalSemaphoreKHR; }
table->CmdSetFragmentShadingRateKHR = (PFN_vkCmdSetFragmentShadingRateKHR) gpa(device, "vkCmdSetFragmentShadingRateKHR");
if (table->CmdSetFragmentShadingRateKHR == nullptr) { table->CmdSetFragmentShadingRateKHR = (PFN_vkCmdSetFragmentShadingRateKHR)StubCmdSetFragmentShadingRateKHR; }
table->GetBufferDeviceAddressKHR = (PFN_vkGetBufferDeviceAddressKHR) gpa(device, "vkGetBufferDeviceAddressKHR");
if (table->GetBufferDeviceAddressKHR == nullptr) { table->GetBufferDeviceAddressKHR = (PFN_vkGetBufferDeviceAddressKHR)StubGetBufferDeviceAddressKHR; }
table->GetBufferOpaqueCaptureAddressKHR = (PFN_vkGetBufferOpaqueCaptureAddressKHR) gpa(device, "vkGetBufferOpaqueCaptureAddressKHR");
if (table->GetBufferOpaqueCaptureAddressKHR == nullptr) { table->GetBufferOpaqueCaptureAddressKHR = (PFN_vkGetBufferOpaqueCaptureAddressKHR)StubGetBufferOpaqueCaptureAddressKHR; }
table->GetDeviceMemoryOpaqueCaptureAddressKHR = (PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR) gpa(device, "vkGetDeviceMemoryOpaqueCaptureAddressKHR");
if (table->GetDeviceMemoryOpaqueCaptureAddressKHR == nullptr) { table->GetDeviceMemoryOpaqueCaptureAddressKHR = (PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR)StubGetDeviceMemoryOpaqueCaptureAddressKHR; }
#ifdef VK_ENABLE_BETA_EXTENSIONS
table->CreateDeferredOperationKHR = (PFN_vkCreateDeferredOperationKHR) gpa(device, "vkCreateDeferredOperationKHR");
if (table->CreateDeferredOperationKHR == nullptr) { table->CreateDeferredOperationKHR = (PFN_vkCreateDeferredOperationKHR)StubCreateDeferredOperationKHR; }
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
table->DestroyDeferredOperationKHR = (PFN_vkDestroyDeferredOperationKHR) gpa(device, "vkDestroyDeferredOperationKHR");
if (table->DestroyDeferredOperationKHR == nullptr) { table->DestroyDeferredOperationKHR = (PFN_vkDestroyDeferredOperationKHR)StubDestroyDeferredOperationKHR; }
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
table->GetDeferredOperationMaxConcurrencyKHR = (PFN_vkGetDeferredOperationMaxConcurrencyKHR) gpa(device, "vkGetDeferredOperationMaxConcurrencyKHR");
if (table->GetDeferredOperationMaxConcurrencyKHR == nullptr) { table->GetDeferredOperationMaxConcurrencyKHR = (PFN_vkGetDeferredOperationMaxConcurrencyKHR)StubGetDeferredOperationMaxConcurrencyKHR; }
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
table->GetDeferredOperationResultKHR = (PFN_vkGetDeferredOperationResultKHR) gpa(device, "vkGetDeferredOperationResultKHR");
if (table->GetDeferredOperationResultKHR == nullptr) { table->GetDeferredOperationResultKHR = (PFN_vkGetDeferredOperationResultKHR)StubGetDeferredOperationResultKHR; }
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
table->DeferredOperationJoinKHR = (PFN_vkDeferredOperationJoinKHR) gpa(device, "vkDeferredOperationJoinKHR");
if (table->DeferredOperationJoinKHR == nullptr) { table->DeferredOperationJoinKHR = (PFN_vkDeferredOperationJoinKHR)StubDeferredOperationJoinKHR; }
#endif // VK_ENABLE_BETA_EXTENSIONS
table->GetPipelineExecutablePropertiesKHR = (PFN_vkGetPipelineExecutablePropertiesKHR) gpa(device, "vkGetPipelineExecutablePropertiesKHR");
if (table->GetPipelineExecutablePropertiesKHR == nullptr) { table->GetPipelineExecutablePropertiesKHR = (PFN_vkGetPipelineExecutablePropertiesKHR)StubGetPipelineExecutablePropertiesKHR; }
table->GetPipelineExecutableStatisticsKHR = (PFN_vkGetPipelineExecutableStatisticsKHR) gpa(device, "vkGetPipelineExecutableStatisticsKHR");
@@ -692,14 +641,10 @@ static inline void layer_init_device_dispatch_table(VkDevice device, VkLayerDisp
if (table->CmdSetCoarseSampleOrderNV == nullptr) { table->CmdSetCoarseSampleOrderNV = (PFN_vkCmdSetCoarseSampleOrderNV)StubCmdSetCoarseSampleOrderNV; }
table->CreateAccelerationStructureNV = (PFN_vkCreateAccelerationStructureNV) gpa(device, "vkCreateAccelerationStructureNV");
if (table->CreateAccelerationStructureNV == nullptr) { table->CreateAccelerationStructureNV = (PFN_vkCreateAccelerationStructureNV)StubCreateAccelerationStructureNV; }
table->DestroyAccelerationStructureKHR = (PFN_vkDestroyAccelerationStructureKHR) gpa(device, "vkDestroyAccelerationStructureKHR");
if (table->DestroyAccelerationStructureKHR == nullptr) { table->DestroyAccelerationStructureKHR = (PFN_vkDestroyAccelerationStructureKHR)StubDestroyAccelerationStructureKHR; }
table->DestroyAccelerationStructureNV = (PFN_vkDestroyAccelerationStructureNV) gpa(device, "vkDestroyAccelerationStructureNV");
if (table->DestroyAccelerationStructureNV == nullptr) { table->DestroyAccelerationStructureNV = (PFN_vkDestroyAccelerationStructureNV)StubDestroyAccelerationStructureNV; }
table->GetAccelerationStructureMemoryRequirementsNV = (PFN_vkGetAccelerationStructureMemoryRequirementsNV) gpa(device, "vkGetAccelerationStructureMemoryRequirementsNV");
if (table->GetAccelerationStructureMemoryRequirementsNV == nullptr) { table->GetAccelerationStructureMemoryRequirementsNV = (PFN_vkGetAccelerationStructureMemoryRequirementsNV)StubGetAccelerationStructureMemoryRequirementsNV; }
table->BindAccelerationStructureMemoryKHR = (PFN_vkBindAccelerationStructureMemoryKHR) gpa(device, "vkBindAccelerationStructureMemoryKHR");
if (table->BindAccelerationStructureMemoryKHR == nullptr) { table->BindAccelerationStructureMemoryKHR = (PFN_vkBindAccelerationStructureMemoryKHR)StubBindAccelerationStructureMemoryKHR; }
table->BindAccelerationStructureMemoryNV = (PFN_vkBindAccelerationStructureMemoryNV) gpa(device, "vkBindAccelerationStructureMemoryNV");
if (table->BindAccelerationStructureMemoryNV == nullptr) { table->BindAccelerationStructureMemoryNV = (PFN_vkBindAccelerationStructureMemoryNV)StubBindAccelerationStructureMemoryNV; }
table->CmdBuildAccelerationStructureNV = (PFN_vkCmdBuildAccelerationStructureNV) gpa(device, "vkCmdBuildAccelerationStructureNV");
@@ -716,8 +661,6 @@ static inline void layer_init_device_dispatch_table(VkDevice device, VkLayerDisp
if (table->GetRayTracingShaderGroupHandlesNV == nullptr) { table->GetRayTracingShaderGroupHandlesNV = (PFN_vkGetRayTracingShaderGroupHandlesNV)StubGetRayTracingShaderGroupHandlesNV; }
table->GetAccelerationStructureHandleNV = (PFN_vkGetAccelerationStructureHandleNV) gpa(device, "vkGetAccelerationStructureHandleNV");
if (table->GetAccelerationStructureHandleNV == nullptr) { table->GetAccelerationStructureHandleNV = (PFN_vkGetAccelerationStructureHandleNV)StubGetAccelerationStructureHandleNV; }
table->CmdWriteAccelerationStructuresPropertiesKHR = (PFN_vkCmdWriteAccelerationStructuresPropertiesKHR) gpa(device, "vkCmdWriteAccelerationStructuresPropertiesKHR");
if (table->CmdWriteAccelerationStructuresPropertiesKHR == nullptr) { table->CmdWriteAccelerationStructuresPropertiesKHR = (PFN_vkCmdWriteAccelerationStructuresPropertiesKHR)StubCmdWriteAccelerationStructuresPropertiesKHR; }
table->CmdWriteAccelerationStructuresPropertiesNV = (PFN_vkCmdWriteAccelerationStructuresPropertiesNV) gpa(device, "vkCmdWriteAccelerationStructuresPropertiesNV");
if (table->CmdWriteAccelerationStructuresPropertiesNV == nullptr) { table->CmdWriteAccelerationStructuresPropertiesNV = (PFN_vkCmdWriteAccelerationStructuresPropertiesNV)StubCmdWriteAccelerationStructuresPropertiesNV; }
table->CompileDeferredNV = (PFN_vkCompileDeferredNV) gpa(device, "vkCompileDeferredNV");
@@ -822,78 +765,52 @@ static inline void layer_init_device_dispatch_table(VkDevice device, VkLayerDisp
if (table->SetPrivateDataEXT == nullptr) { table->SetPrivateDataEXT = (PFN_vkSetPrivateDataEXT)StubSetPrivateDataEXT; }
table->GetPrivateDataEXT = (PFN_vkGetPrivateDataEXT) gpa(device, "vkGetPrivateDataEXT");
if (table->GetPrivateDataEXT == nullptr) { table->GetPrivateDataEXT = (PFN_vkGetPrivateDataEXT)StubGetPrivateDataEXT; }
#ifdef VK_ENABLE_BETA_EXTENSIONS
table->CmdSetFragmentShadingRateEnumNV = (PFN_vkCmdSetFragmentShadingRateEnumNV) gpa(device, "vkCmdSetFragmentShadingRateEnumNV");
if (table->CmdSetFragmentShadingRateEnumNV == nullptr) { table->CmdSetFragmentShadingRateEnumNV = (PFN_vkCmdSetFragmentShadingRateEnumNV)StubCmdSetFragmentShadingRateEnumNV; }
table->CreateAccelerationStructureKHR = (PFN_vkCreateAccelerationStructureKHR) gpa(device, "vkCreateAccelerationStructureKHR");
if (table->CreateAccelerationStructureKHR == nullptr) { table->CreateAccelerationStructureKHR = (PFN_vkCreateAccelerationStructureKHR)StubCreateAccelerationStructureKHR; }
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
table->GetAccelerationStructureMemoryRequirementsKHR = (PFN_vkGetAccelerationStructureMemoryRequirementsKHR) gpa(device, "vkGetAccelerationStructureMemoryRequirementsKHR");
if (table->GetAccelerationStructureMemoryRequirementsKHR == nullptr) { table->GetAccelerationStructureMemoryRequirementsKHR = (PFN_vkGetAccelerationStructureMemoryRequirementsKHR)StubGetAccelerationStructureMemoryRequirementsKHR; }
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
table->CmdBuildAccelerationStructureKHR = (PFN_vkCmdBuildAccelerationStructureKHR) gpa(device, "vkCmdBuildAccelerationStructureKHR");
if (table->CmdBuildAccelerationStructureKHR == nullptr) { table->CmdBuildAccelerationStructureKHR = (PFN_vkCmdBuildAccelerationStructureKHR)StubCmdBuildAccelerationStructureKHR; }
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
table->CmdBuildAccelerationStructureIndirectKHR = (PFN_vkCmdBuildAccelerationStructureIndirectKHR) gpa(device, "vkCmdBuildAccelerationStructureIndirectKHR");
if (table->CmdBuildAccelerationStructureIndirectKHR == nullptr) { table->CmdBuildAccelerationStructureIndirectKHR = (PFN_vkCmdBuildAccelerationStructureIndirectKHR)StubCmdBuildAccelerationStructureIndirectKHR; }
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
table->BuildAccelerationStructureKHR = (PFN_vkBuildAccelerationStructureKHR) gpa(device, "vkBuildAccelerationStructureKHR");
if (table->BuildAccelerationStructureKHR == nullptr) { table->BuildAccelerationStructureKHR = (PFN_vkBuildAccelerationStructureKHR)StubBuildAccelerationStructureKHR; }
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
table->DestroyAccelerationStructureKHR = (PFN_vkDestroyAccelerationStructureKHR) gpa(device, "vkDestroyAccelerationStructureKHR");
if (table->DestroyAccelerationStructureKHR == nullptr) { table->DestroyAccelerationStructureKHR = (PFN_vkDestroyAccelerationStructureKHR)StubDestroyAccelerationStructureKHR; }
table->CmdBuildAccelerationStructuresKHR = (PFN_vkCmdBuildAccelerationStructuresKHR) gpa(device, "vkCmdBuildAccelerationStructuresKHR");
if (table->CmdBuildAccelerationStructuresKHR == nullptr) { table->CmdBuildAccelerationStructuresKHR = (PFN_vkCmdBuildAccelerationStructuresKHR)StubCmdBuildAccelerationStructuresKHR; }
table->CmdBuildAccelerationStructuresIndirectKHR = (PFN_vkCmdBuildAccelerationStructuresIndirectKHR) gpa(device, "vkCmdBuildAccelerationStructuresIndirectKHR");
if (table->CmdBuildAccelerationStructuresIndirectKHR == nullptr) { table->CmdBuildAccelerationStructuresIndirectKHR = (PFN_vkCmdBuildAccelerationStructuresIndirectKHR)StubCmdBuildAccelerationStructuresIndirectKHR; }
table->BuildAccelerationStructuresKHR = (PFN_vkBuildAccelerationStructuresKHR) gpa(device, "vkBuildAccelerationStructuresKHR");
if (table->BuildAccelerationStructuresKHR == nullptr) { table->BuildAccelerationStructuresKHR = (PFN_vkBuildAccelerationStructuresKHR)StubBuildAccelerationStructuresKHR; }
table->CopyAccelerationStructureKHR = (PFN_vkCopyAccelerationStructureKHR) gpa(device, "vkCopyAccelerationStructureKHR");
if (table->CopyAccelerationStructureKHR == nullptr) { table->CopyAccelerationStructureKHR = (PFN_vkCopyAccelerationStructureKHR)StubCopyAccelerationStructureKHR; }
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
table->CopyAccelerationStructureToMemoryKHR = (PFN_vkCopyAccelerationStructureToMemoryKHR) gpa(device, "vkCopyAccelerationStructureToMemoryKHR");
if (table->CopyAccelerationStructureToMemoryKHR == nullptr) { table->CopyAccelerationStructureToMemoryKHR = (PFN_vkCopyAccelerationStructureToMemoryKHR)StubCopyAccelerationStructureToMemoryKHR; }
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
table->CopyMemoryToAccelerationStructureKHR = (PFN_vkCopyMemoryToAccelerationStructureKHR) gpa(device, "vkCopyMemoryToAccelerationStructureKHR");
if (table->CopyMemoryToAccelerationStructureKHR == nullptr) { table->CopyMemoryToAccelerationStructureKHR = (PFN_vkCopyMemoryToAccelerationStructureKHR)StubCopyMemoryToAccelerationStructureKHR; }
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
table->WriteAccelerationStructuresPropertiesKHR = (PFN_vkWriteAccelerationStructuresPropertiesKHR) gpa(device, "vkWriteAccelerationStructuresPropertiesKHR");
if (table->WriteAccelerationStructuresPropertiesKHR == nullptr) { table->WriteAccelerationStructuresPropertiesKHR = (PFN_vkWriteAccelerationStructuresPropertiesKHR)StubWriteAccelerationStructuresPropertiesKHR; }
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
table->CmdCopyAccelerationStructureKHR = (PFN_vkCmdCopyAccelerationStructureKHR) gpa(device, "vkCmdCopyAccelerationStructureKHR");
if (table->CmdCopyAccelerationStructureKHR == nullptr) { table->CmdCopyAccelerationStructureKHR = (PFN_vkCmdCopyAccelerationStructureKHR)StubCmdCopyAccelerationStructureKHR; }
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
table->CmdCopyAccelerationStructureToMemoryKHR = (PFN_vkCmdCopyAccelerationStructureToMemoryKHR) gpa(device, "vkCmdCopyAccelerationStructureToMemoryKHR");
if (table->CmdCopyAccelerationStructureToMemoryKHR == nullptr) { table->CmdCopyAccelerationStructureToMemoryKHR = (PFN_vkCmdCopyAccelerationStructureToMemoryKHR)StubCmdCopyAccelerationStructureToMemoryKHR; }
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
table->CmdCopyMemoryToAccelerationStructureKHR = (PFN_vkCmdCopyMemoryToAccelerationStructureKHR) gpa(device, "vkCmdCopyMemoryToAccelerationStructureKHR");
if (table->CmdCopyMemoryToAccelerationStructureKHR == nullptr) { table->CmdCopyMemoryToAccelerationStructureKHR = (PFN_vkCmdCopyMemoryToAccelerationStructureKHR)StubCmdCopyMemoryToAccelerationStructureKHR; }
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
table->CmdTraceRaysKHR = (PFN_vkCmdTraceRaysKHR) gpa(device, "vkCmdTraceRaysKHR");
if (table->CmdTraceRaysKHR == nullptr) { table->CmdTraceRaysKHR = (PFN_vkCmdTraceRaysKHR)StubCmdTraceRaysKHR; }
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
table->CreateRayTracingPipelinesKHR = (PFN_vkCreateRayTracingPipelinesKHR) gpa(device, "vkCreateRayTracingPipelinesKHR");
if (table->CreateRayTracingPipelinesKHR == nullptr) { table->CreateRayTracingPipelinesKHR = (PFN_vkCreateRayTracingPipelinesKHR)StubCreateRayTracingPipelinesKHR; }
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
table->GetAccelerationStructureDeviceAddressKHR = (PFN_vkGetAccelerationStructureDeviceAddressKHR) gpa(device, "vkGetAccelerationStructureDeviceAddressKHR");
if (table->GetAccelerationStructureDeviceAddressKHR == nullptr) { table->GetAccelerationStructureDeviceAddressKHR = (PFN_vkGetAccelerationStructureDeviceAddressKHR)StubGetAccelerationStructureDeviceAddressKHR; }
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
table->GetRayTracingCaptureReplayShaderGroupHandlesKHR = (PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR) gpa(device, "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR");
if (table->GetRayTracingCaptureReplayShaderGroupHandlesKHR == nullptr) { table->GetRayTracingCaptureReplayShaderGroupHandlesKHR = (PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR)StubGetRayTracingCaptureReplayShaderGroupHandlesKHR; }
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
table->CmdTraceRaysIndirectKHR = (PFN_vkCmdTraceRaysIndirectKHR) gpa(device, "vkCmdTraceRaysIndirectKHR");
if (table->CmdTraceRaysIndirectKHR == nullptr) { table->CmdTraceRaysIndirectKHR = (PFN_vkCmdTraceRaysIndirectKHR)StubCmdTraceRaysIndirectKHR; }
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
table->CmdWriteAccelerationStructuresPropertiesKHR = (PFN_vkCmdWriteAccelerationStructuresPropertiesKHR) gpa(device, "vkCmdWriteAccelerationStructuresPropertiesKHR");
if (table->CmdWriteAccelerationStructuresPropertiesKHR == nullptr) { table->CmdWriteAccelerationStructuresPropertiesKHR = (PFN_vkCmdWriteAccelerationStructuresPropertiesKHR)StubCmdWriteAccelerationStructuresPropertiesKHR; }
table->GetDeviceAccelerationStructureCompatibilityKHR = (PFN_vkGetDeviceAccelerationStructureCompatibilityKHR) gpa(device, "vkGetDeviceAccelerationStructureCompatibilityKHR");
if (table->GetDeviceAccelerationStructureCompatibilityKHR == nullptr) { table->GetDeviceAccelerationStructureCompatibilityKHR = (PFN_vkGetDeviceAccelerationStructureCompatibilityKHR)StubGetDeviceAccelerationStructureCompatibilityKHR; }
#endif // VK_ENABLE_BETA_EXTENSIONS
table->GetAccelerationStructureBuildSizesKHR = (PFN_vkGetAccelerationStructureBuildSizesKHR) gpa(device, "vkGetAccelerationStructureBuildSizesKHR");
if (table->GetAccelerationStructureBuildSizesKHR == nullptr) { table->GetAccelerationStructureBuildSizesKHR = (PFN_vkGetAccelerationStructureBuildSizesKHR)StubGetAccelerationStructureBuildSizesKHR; }
table->CmdTraceRaysKHR = (PFN_vkCmdTraceRaysKHR) gpa(device, "vkCmdTraceRaysKHR");
if (table->CmdTraceRaysKHR == nullptr) { table->CmdTraceRaysKHR = (PFN_vkCmdTraceRaysKHR)StubCmdTraceRaysKHR; }
table->CreateRayTracingPipelinesKHR = (PFN_vkCreateRayTracingPipelinesKHR) gpa(device, "vkCreateRayTracingPipelinesKHR");
if (table->CreateRayTracingPipelinesKHR == nullptr) { table->CreateRayTracingPipelinesKHR = (PFN_vkCreateRayTracingPipelinesKHR)StubCreateRayTracingPipelinesKHR; }
table->GetRayTracingCaptureReplayShaderGroupHandlesKHR = (PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR) gpa(device, "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR");
if (table->GetRayTracingCaptureReplayShaderGroupHandlesKHR == nullptr) { table->GetRayTracingCaptureReplayShaderGroupHandlesKHR = (PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR)StubGetRayTracingCaptureReplayShaderGroupHandlesKHR; }
table->CmdTraceRaysIndirectKHR = (PFN_vkCmdTraceRaysIndirectKHR) gpa(device, "vkCmdTraceRaysIndirectKHR");
if (table->CmdTraceRaysIndirectKHR == nullptr) { table->CmdTraceRaysIndirectKHR = (PFN_vkCmdTraceRaysIndirectKHR)StubCmdTraceRaysIndirectKHR; }
table->GetRayTracingShaderGroupStackSizeKHR = (PFN_vkGetRayTracingShaderGroupStackSizeKHR) gpa(device, "vkGetRayTracingShaderGroupStackSizeKHR");
if (table->GetRayTracingShaderGroupStackSizeKHR == nullptr) { table->GetRayTracingShaderGroupStackSizeKHR = (PFN_vkGetRayTracingShaderGroupStackSizeKHR)StubGetRayTracingShaderGroupStackSizeKHR; }
table->CmdSetRayTracingPipelineStackSizeKHR = (PFN_vkCmdSetRayTracingPipelineStackSizeKHR) gpa(device, "vkCmdSetRayTracingPipelineStackSizeKHR");
if (table->CmdSetRayTracingPipelineStackSizeKHR == nullptr) { table->CmdSetRayTracingPipelineStackSizeKHR = (PFN_vkCmdSetRayTracingPipelineStackSizeKHR)StubCmdSetRayTracingPipelineStackSizeKHR; }
}
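A brief layer-side usage sketch (editorial addition, not part of the generated header; the wrapper name, header names, and the origin of next_gdpa are assumptions): a layer typically fills one VkLayerDispatchTable per device right after the next layer's vkCreateDevice succeeds and forwards every later call through it. Because unresolved commands fall back to the Stub* no-ops above, a known entry in the table is never NULL.

#include <vulkan/vulkan.h>
#include "vk_layer_dispatch_table.h"   // assumed generated header providing VkLayerDispatchTable
#include "vk_dispatch_table_helper.h"  // assumed generated header providing layer_init_device_dispatch_table

// Hypothetical layer code: next_gdpa is the next layer's vkGetDeviceProcAddr,
// taken from the VkLayerDeviceCreateInfo chain during vkCreateDevice.
static void example_init_device_dispatch(VkDevice device, PFN_vkGetDeviceProcAddr next_gdpa,
                                         VkLayerDispatchTable *out_table) {
    layer_init_device_dispatch_table(device, out_table, next_gdpa);
    // Even if the driver lacks ray tracing, dispatching CmdTraceRaysKHR through
    // out_table lands in StubCmdTraceRaysKHR instead of crashing on a NULL pointer.
}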
@@ -982,6 +899,7 @@ static inline void layer_init_instance_dispatch_table(VkInstance instance, VkLay
table->GetPhysicalDeviceDisplayPlaneProperties2KHR = (PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR) gpa(instance, "vkGetPhysicalDeviceDisplayPlaneProperties2KHR");
table->GetDisplayModeProperties2KHR = (PFN_vkGetDisplayModeProperties2KHR) gpa(instance, "vkGetDisplayModeProperties2KHR");
table->GetDisplayPlaneCapabilities2KHR = (PFN_vkGetDisplayPlaneCapabilities2KHR) gpa(instance, "vkGetDisplayPlaneCapabilities2KHR");
table->GetPhysicalDeviceFragmentShadingRatesKHR = (PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR) gpa(instance, "vkGetPhysicalDeviceFragmentShadingRatesKHR");
table->CreateDebugReportCallbackEXT = (PFN_vkCreateDebugReportCallbackEXT) gpa(instance, "vkCreateDebugReportCallbackEXT");
table->DestroyDebugReportCallbackEXT = (PFN_vkDestroyDebugReportCallbackEXT) gpa(instance, "vkDestroyDebugReportCallbackEXT");
table->DebugReportMessageEXT = (PFN_vkDebugReportMessageEXT) gpa(instance, "vkDebugReportMessageEXT");
@@ -154,6 +154,9 @@ typedef struct VkLayerInstanceDispatchTable_ {
PFN_vkGetDisplayModeProperties2KHR GetDisplayModeProperties2KHR;
PFN_vkGetDisplayPlaneCapabilities2KHR GetDisplayPlaneCapabilities2KHR;
// ---- VK_KHR_fragment_shading_rate extension commands
PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR GetPhysicalDeviceFragmentShadingRatesKHR;
// ---- VK_EXT_debug_report extension commands
PFN_vkCreateDebugReportCallbackEXT CreateDebugReportCallbackEXT;
PFN_vkDestroyDebugReportCallbackEXT DestroyDebugReportCallbackEXT;
@@ -506,27 +509,20 @@ typedef struct VkLayerDispatchTable_ {
PFN_vkWaitSemaphoresKHR WaitSemaphoresKHR;
PFN_vkSignalSemaphoreKHR SignalSemaphoreKHR;
// ---- VK_KHR_fragment_shading_rate extension commands
PFN_vkCmdSetFragmentShadingRateKHR CmdSetFragmentShadingRateKHR;
// ---- VK_KHR_buffer_device_address extension commands
PFN_vkGetBufferDeviceAddressKHR GetBufferDeviceAddressKHR;
PFN_vkGetBufferOpaqueCaptureAddressKHR GetBufferOpaqueCaptureAddressKHR;
PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR GetDeviceMemoryOpaqueCaptureAddressKHR;
// ---- VK_KHR_deferred_host_operations extension commands
#ifdef VK_ENABLE_BETA_EXTENSIONS
PFN_vkCreateDeferredOperationKHR CreateDeferredOperationKHR;
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
PFN_vkDestroyDeferredOperationKHR DestroyDeferredOperationKHR;
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
PFN_vkGetDeferredOperationMaxConcurrencyKHR GetDeferredOperationMaxConcurrencyKHR;
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
PFN_vkGetDeferredOperationResultKHR GetDeferredOperationResultKHR;
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
PFN_vkDeferredOperationJoinKHR DeferredOperationJoinKHR;
#endif // VK_ENABLE_BETA_EXTENSIONS
// ---- VK_KHR_pipeline_executable_properties extension commands
PFN_vkGetPipelineExecutablePropertiesKHR GetPipelineExecutablePropertiesKHR;
@@ -632,10 +628,8 @@ typedef struct VkLayerDispatchTable_ {
// ---- VK_NV_ray_tracing extension commands
PFN_vkCreateAccelerationStructureNV CreateAccelerationStructureNV;
PFN_vkDestroyAccelerationStructureKHR DestroyAccelerationStructureKHR;
PFN_vkDestroyAccelerationStructureNV DestroyAccelerationStructureNV;
PFN_vkGetAccelerationStructureMemoryRequirementsNV GetAccelerationStructureMemoryRequirementsNV;
PFN_vkBindAccelerationStructureMemoryKHR BindAccelerationStructureMemoryKHR;
PFN_vkBindAccelerationStructureMemoryNV BindAccelerationStructureMemoryNV;
PFN_vkCmdBuildAccelerationStructureNV CmdBuildAccelerationStructureNV;
PFN_vkCmdCopyAccelerationStructureNV CmdCopyAccelerationStructureNV;
@@ -644,7 +638,6 @@ typedef struct VkLayerDispatchTable_ {
PFN_vkGetRayTracingShaderGroupHandlesKHR GetRayTracingShaderGroupHandlesKHR;
PFN_vkGetRayTracingShaderGroupHandlesNV GetRayTracingShaderGroupHandlesNV;
PFN_vkGetAccelerationStructureHandleNV GetAccelerationStructureHandleNV;
PFN_vkCmdWriteAccelerationStructuresPropertiesKHR CmdWriteAccelerationStructuresPropertiesKHR;
PFN_vkCmdWriteAccelerationStructuresPropertiesNV CmdWriteAccelerationStructuresPropertiesNV;
PFN_vkCompileDeferredNV CompileDeferredNV;
@@ -731,61 +724,34 @@ typedef struct VkLayerDispatchTable_ {
PFN_vkSetPrivateDataEXT SetPrivateDataEXT;
PFN_vkGetPrivateDataEXT GetPrivateDataEXT;
// ---- VK_KHR_ray_tracing extension commands
#ifdef VK_ENABLE_BETA_EXTENSIONS
// ---- VK_NV_fragment_shading_rate_enums extension commands
PFN_vkCmdSetFragmentShadingRateEnumNV CmdSetFragmentShadingRateEnumNV;
// ---- VK_KHR_acceleration_structure extension commands
PFN_vkCreateAccelerationStructureKHR CreateAccelerationStructureKHR;
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
PFN_vkGetAccelerationStructureMemoryRequirementsKHR GetAccelerationStructureMemoryRequirementsKHR;
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
PFN_vkCmdBuildAccelerationStructureKHR CmdBuildAccelerationStructureKHR;
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
PFN_vkCmdBuildAccelerationStructureIndirectKHR CmdBuildAccelerationStructureIndirectKHR;
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
PFN_vkBuildAccelerationStructureKHR BuildAccelerationStructureKHR;
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
PFN_vkDestroyAccelerationStructureKHR DestroyAccelerationStructureKHR;
PFN_vkCmdBuildAccelerationStructuresKHR CmdBuildAccelerationStructuresKHR;
PFN_vkCmdBuildAccelerationStructuresIndirectKHR CmdBuildAccelerationStructuresIndirectKHR;
PFN_vkBuildAccelerationStructuresKHR BuildAccelerationStructuresKHR;
PFN_vkCopyAccelerationStructureKHR CopyAccelerationStructureKHR;
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
PFN_vkCopyAccelerationStructureToMemoryKHR CopyAccelerationStructureToMemoryKHR;
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
PFN_vkCopyMemoryToAccelerationStructureKHR CopyMemoryToAccelerationStructureKHR;
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
PFN_vkWriteAccelerationStructuresPropertiesKHR WriteAccelerationStructuresPropertiesKHR;
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
PFN_vkCmdCopyAccelerationStructureKHR CmdCopyAccelerationStructureKHR;
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
PFN_vkCmdCopyAccelerationStructureToMemoryKHR CmdCopyAccelerationStructureToMemoryKHR;
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
PFN_vkCmdCopyMemoryToAccelerationStructureKHR CmdCopyMemoryToAccelerationStructureKHR;
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
PFN_vkCmdTraceRaysKHR CmdTraceRaysKHR;
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
PFN_vkCreateRayTracingPipelinesKHR CreateRayTracingPipelinesKHR;
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
PFN_vkGetAccelerationStructureDeviceAddressKHR GetAccelerationStructureDeviceAddressKHR;
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR GetRayTracingCaptureReplayShaderGroupHandlesKHR;
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
PFN_vkCmdTraceRaysIndirectKHR CmdTraceRaysIndirectKHR;
#endif // VK_ENABLE_BETA_EXTENSIONS
#ifdef VK_ENABLE_BETA_EXTENSIONS
PFN_vkCmdWriteAccelerationStructuresPropertiesKHR CmdWriteAccelerationStructuresPropertiesKHR;
PFN_vkGetDeviceAccelerationStructureCompatibilityKHR GetDeviceAccelerationStructureCompatibilityKHR;
#endif // VK_ENABLE_BETA_EXTENSIONS
PFN_vkGetAccelerationStructureBuildSizesKHR GetAccelerationStructureBuildSizesKHR;
// ---- VK_KHR_ray_tracing_pipeline extension commands
PFN_vkCmdTraceRaysKHR CmdTraceRaysKHR;
PFN_vkCreateRayTracingPipelinesKHR CreateRayTracingPipelinesKHR;
PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR GetRayTracingCaptureReplayShaderGroupHandlesKHR;
PFN_vkCmdTraceRaysIndirectKHR CmdTraceRaysIndirectKHR;
PFN_vkGetRayTracingShaderGroupStackSizeKHR GetRayTracingShaderGroupStackSizeKHR;
PFN_vkCmdSetRayTracingPipelineStackSizeKHR CmdSetRayTracingPipelineStackSizeKHR;
} VkLayerDispatchTable;
File diff suppressed because it is too large.
@@ -334,6 +334,9 @@ struct loader_icd_term_dispatch {
PFN_vkGetDisplayModeProperties2KHR GetDisplayModeProperties2KHR;
PFN_vkGetDisplayPlaneCapabilities2KHR GetDisplayPlaneCapabilities2KHR;
// ---- VK_KHR_fragment_shading_rate extension commands
PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR GetPhysicalDeviceFragmentShadingRatesKHR;
// ---- VK_EXT_debug_report extension commands
PFN_vkCreateDebugReportCallbackEXT CreateDebugReportCallbackEXT;
PFN_vkDestroyDebugReportCallbackEXT DestroyDebugReportCallbackEXT;
@@ -446,14 +449,12 @@ union loader_instance_extension_enables {
uint8_t khr_external_semaphore_capabilities : 1;
uint8_t khr_external_fence_capabilities : 1;
uint8_t ext_debug_report : 1;
uint8_t ggp_stream_descriptor_surface : 1;
uint8_t nv_external_memory_capabilities : 1;
uint8_t nn_vi_surface : 1;
uint8_t ext_direct_mode_display : 1;
uint8_t ext_acquire_xlib_display : 1;
uint8_t ext_display_surface_counter : 1;
uint8_t ext_debug_utils : 1;
uint8_t fuchsia_imagepipe_surface : 1;
};
uint64_t padding[4];
};
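A short sketch of how these bits are typically consulted (the enclosing struct and field names below are assumptions, not shown in this hunk): each flag is set while vkCreateInstance walks ppEnabledExtensionNames, and is checked later before the matching trampoline is handed out.

// Hypothetical loader-side check: only expose the VK_EXT_debug_report entry
// points when the application enabled that instance extension.
if (ptr_instance->enabled_known_extensions.ext_debug_report) {
    /* return the vkCreateDebugReportCallbackEXT trampoline */
}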
@@ -28,10 +28,14 @@
#include <winsock2.h>
#endif // _WIN32
#if defined(__Fuchsia__)
#include "dlopen_fuchsia.h"
#endif // defined(__Fuchsia__)
#include "vulkan/vk_platform.h"
#include "vulkan/vk_sdk_platform.h"
#if defined(__linux__) || defined(__APPLE__)
#if defined(__linux__) || defined(__APPLE__) || defined(__Fuchsia__)
/* Linux-specific common code: */
// Headers:
@@ -118,6 +122,8 @@ static inline char *loader_platform_executable_path(char *buffer, size_t size) {
buffer[ret] = '\0';
return buffer;
}
#elif defined(__Fuchsia__)
static inline char *loader_platform_executable_path(char *buffer, size_t size) { return NULL; }
#endif // defined (__APPLE__)
// Compatibility with compilers that don't support __has_feature
@@ -131,14 +137,32 @@ static inline char *loader_platform_executable_path(char *buffer, size_t size) {
// Dynamic Loading of libraries:
typedef void *loader_platform_dl_handle;
static inline loader_platform_dl_handle loader_platform_open_library(const char *libPath) {
// When loading the library, we use RTLD_LAZY so that not all symbols have to be
// resolved at this time (which improves performance). Note that if not all symbols
// can be resolved, this could cause crashes later. Use the LD_BIND_NOW environment
// variable to force all symbols to be resolved here.
return dlopen(libPath, RTLD_LAZY | RTLD_LOCAL);
#define LOADER_DLOPEN_MODE (RTLD_LAZY | RTLD_LOCAL)
#if defined(__Fuchsia__)
static inline loader_platform_dl_handle loader_platform_open_driver(const char *libPath) {
return dlopen_fuchsia(libPath, LOADER_DLOPEN_MODE, true);
}
static inline loader_platform_dl_handle loader_platform_open_library(const char *libPath) {
return dlopen_fuchsia(libPath, LOADER_DLOPEN_MODE, false);
}
#else
static inline loader_platform_dl_handle loader_platform_open_library(const char *libPath) {
return dlopen(libPath, LOADER_DLOPEN_MODE);
}
#endif
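A condensed usage sketch of the helpers defined here and just below (the library path and symbol name are illustrative only): Fuchsia builds route through dlopen_fuchsia, everything else through plain dlopen, both with the same lazy, process-local mode.

// Hypothetical caller: open a library, report the platform-specific error on
// failure, resolve one exported symbol, then unload the library again.
loader_platform_dl_handle handle = loader_platform_open_library("libexample_icd.so");
if (handle == NULL) {
    const char *msg = loader_platform_open_library_error("libexample_icd.so");
    /* log msg and give up */
} else {
    void *gipa = loader_platform_get_proc_address(handle, "vk_icdGetInstanceProcAddr");
    /* hand gipa to the ICD bootstrap code ... */
    loader_platform_close_library(handle);
}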
static inline const char *loader_platform_open_library_error(const char *libPath) {
#ifdef __Fuchsia__
return dlerror_fuchsia();
#else
return dlerror();
#endif
}
static inline const char *loader_platform_open_library_error(const char *libPath) { return dlerror(); }
static inline void loader_platform_close_library(loader_platform_dl_handle library) { dlclose(library); }
static inline void *loader_platform_get_proc_address(loader_platform_dl_handle library, const char *name) {
assert(library);
@@ -72,15 +72,15 @@ typedef enum VulkanObjectType {
kVulkanObjectTypeDebugReportCallbackEXT = 33,
kVulkanObjectTypeDebugUtilsMessengerEXT = 34,
kVulkanObjectTypeValidationCacheEXT = 35,
kVulkanObjectTypeAccelerationStructureKHR = 36,
kVulkanObjectTypeAccelerationStructureNV = 36,
kVulkanObjectTypePerformanceConfigurationINTEL = 37,
kVulkanObjectTypeIndirectCommandsLayoutNV = 38,
kVulkanObjectTypePrivateDataSlotEXT = 39,
kVulkanObjectTypeMax = 40,
kVulkanObjectTypeAccelerationStructureKHR = 40,
kVulkanObjectTypeMax = 41,
// Aliases for backwards compatibility of "promoted" types
kVulkanObjectTypeDescriptorUpdateTemplateKHR = kVulkanObjectTypeDescriptorUpdateTemplate,
kVulkanObjectTypeSamplerYcbcrConversionKHR = kVulkanObjectTypeSamplerYcbcrConversion,
kVulkanObjectTypeAccelerationStructureNV = kVulkanObjectTypeAccelerationStructureKHR,
} VulkanObjectType;
// Array of object name strings for OBJECT_TYPE enum conversion
@@ -121,10 +121,11 @@ static const char * const object_string[kVulkanObjectTypeMax] = {
"DebugReportCallbackEXT",
"DebugUtilsMessengerEXT",
"ValidationCacheEXT",
"AccelerationStructureKHR",
"AccelerationStructureNV",
"PerformanceConfigurationINTEL",
"IndirectCommandsLayoutNV",
"PrivateDataSlotEXT",
"AccelerationStructureKHR",
};
// Helper array to get Vulkan VK_EXT_debug_report object type enum from the internal layers version
@@ -165,10 +166,11 @@ const VkDebugReportObjectTypeEXT get_debug_report_enum[] = {
VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT, // kVulkanObjectTypeDebugReportCallbackEXT
VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, // kVulkanObjectTypeDebugUtilsMessengerEXT
VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT, // kVulkanObjectTypeValidationCacheEXT
VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT, // kVulkanObjectTypeAccelerationStructureKHR
VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT, // kVulkanObjectTypeAccelerationStructureNV
VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, // kVulkanObjectTypePerformanceConfigurationINTEL
VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, // kVulkanObjectTypeIndirectCommandsLayoutNV
VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, // kVulkanObjectTypePrivateDataSlotEXT
VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT, // kVulkanObjectTypeAccelerationStructureKHR
};
// Helper array to get Official Vulkan VkObjectType enum from the internal layers version
@@ -209,10 +211,11 @@ const VkObjectType get_object_type_enum[] = {
VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT, // kVulkanObjectTypeDebugReportCallbackEXT
VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT, // kVulkanObjectTypeDebugUtilsMessengerEXT
VK_OBJECT_TYPE_VALIDATION_CACHE_EXT, // kVulkanObjectTypeValidationCacheEXT
VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR, // kVulkanObjectTypeAccelerationStructureKHR
VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV, // kVulkanObjectTypeAccelerationStructureNV
VK_OBJECT_TYPE_PERFORMANCE_CONFIGURATION_INTEL, // kVulkanObjectTypePerformanceConfigurationINTEL
VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NV, // kVulkanObjectTypeIndirectCommandsLayoutNV
VK_OBJECT_TYPE_PRIVATE_DATA_SLOT_EXT, // kVulkanObjectTypePrivateDataSlotEXT
VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR, // kVulkanObjectTypeAccelerationStructureKHR
};
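An illustrative lookup (assumed usage; the indices follow the renumbered enum above): with this change VkAccelerationStructureNV keeps slot 36 while the KHR type gets its own slot 40, so the two handle types no longer share entries in these parallel arrays.

// Both arrays are indexed by the internal VulkanObjectType value.
VkObjectType nv_type  = get_object_type_enum[kVulkanObjectTypeAccelerationStructureNV];  // VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV
const char  *khr_name = object_string[kVulkanObjectTypeAccelerationStructureKHR];        // "AccelerationStructureKHR"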
// Helper function to convert from VkDebugReportObjectTypeEXT to VkObjectType
@@ -64,6 +64,12 @@ void wsi_create_instance(struct loader_instance *ptr_instance, const VkInstanceC
#ifdef VK_USE_PLATFORM_IOS_MVK
ptr_instance->wsi_ios_surface_enabled = false;
#endif // VK_USE_PLATFORM_IOS_MVK
#ifdef VK_USE_PLATFORM_GGP
ptr_instance->wsi_ggp_surface_enabled = false;
#endif // VK_USE_PLATFORM_GGP
#ifdef VK_USE_PLATFORM_FUCHSIA
ptr_instance->wsi_imagepipe_surface_enabled = false;
#endif // VK_USE_PLATFORM_FUCHSIA
ptr_instance->wsi_display_enabled = false;
ptr_instance->wsi_display_props2_enabled = false;
#ifdef VK_USE_PLATFORM_METAL_EXT
@@ -123,6 +129,18 @@ void wsi_create_instance(struct loader_instance *ptr_instance, const VkInstanceC
continue;
}
#endif // VK_USE_PLATFORM_IOS_MVK
#ifdef VK_USE_PLATFORM_GGP
if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_GGP_STREAM_DESCRIPTOR_SURFACE_EXTENSION_NAME) == 0) {
ptr_instance->wsi_ggp_surface_enabled = true;
continue;
}
#endif // VK_USE_PLATFORM_GGP
#ifdef VK_USE_PLATFORM_FUCHSIA
if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_FUCHSIA_IMAGEPIPE_SURFACE_EXTENSION_NAME) == 0) {
ptr_instance->wsi_imagepipe_surface_enabled = true;
continue;
}
#endif // VK_USE_PLATFORM_FUCHSIA
if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_EXT_HEADLESS_SURFACE_EXTENSION_NAME) == 0) {
ptr_instance->wsi_headless_surface_enabled = true;
continue;
@@ -1308,6 +1326,83 @@ VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateIOSSurfaceMVK(VkInstance instanc
#endif // VK_USE_PLATFORM_IOS_MVK
#ifdef VK_USE_PLATFORM_GGP
// Functions for the VK_GGP_stream_descriptor_surface extension:
// This is the trampoline entrypoint for CreateStreamDescriptorSurfaceGGP
LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateStreamDescriptorSurfaceGGP(VkInstance instance, const VkStreamDescriptorSurfaceCreateInfoGGP *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
const VkLayerInstanceDispatchTable *disp;
disp = loader_get_instance_layer_dispatch(instance);
VkResult res;
res = disp->CreateStreamDescriptorSurfaceGGP(instance, pCreateInfo, pAllocator, pSurface);
return res;
}
// This is the instance chain terminator function for CreateStreamDescriptorSurfaceGGP
VKAPI_ATTR VkResult VKAPI_CALL
terminator_CreateStreamDescriptorSurfaceGGP(VkInstance instance, const VkStreamDescriptorSurfaceCreateInfoGGP *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
VkResult vkRes = VK_SUCCESS;
VkIcdSurface *pIcdSurface = NULL;
uint32_t i = 0;
// First, check to ensure the appropriate extension was enabled:
struct loader_instance *ptr_instance = loader_get_instance(instance);
if (!ptr_instance->wsi_ggp_surface_enabled) {
loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
"VK_GGP_stream_descriptor_surface extension not enabled. vkCreateStreamDescriptorSurfaceGGP not executed!\n");
vkRes = VK_ERROR_EXTENSION_NOT_PRESENT;
goto out;
}
// Next, if so, proceed with the implementation of this function:
pIcdSurface = AllocateIcdSurfaceStruct(ptr_instance, sizeof(pIcdSurface->ggp_surf.base), sizeof(pIcdSurface->ggp_surf));
if (pIcdSurface == NULL) {
vkRes = VK_ERROR_OUT_OF_HOST_MEMORY;
goto out;
}
pIcdSurface->ggp_surf.base.platform = VK_ICD_WSI_PLATFORM_GGP;
pIcdSurface->ggp_surf.streamDescriptor = pCreateInfo->streamDescriptor;
// Loop through each ICD and determine if they need to create a surface
for (struct loader_icd_term *icd_term = ptr_instance->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) {
if (icd_term->scanned_icd->interface_version >= ICD_VER_SUPPORTS_ICD_SURFACE_KHR) {
if (NULL != icd_term->dispatch.CreateStreamDescriptorSurfaceGGP) {
vkRes = icd_term->dispatch.CreateStreamDescriptorSurfaceGGP(icd_term->instance, pCreateInfo, pAllocator,
&pIcdSurface->real_icd_surfaces[i]);
if (VK_SUCCESS != vkRes) {
goto out;
}
}
}
}
*pSurface = (VkSurfaceKHR)pIcdSurface;
out:
if (VK_SUCCESS != vkRes && NULL != pIcdSurface) {
if (NULL != pIcdSurface->real_icd_surfaces) {
i = 0;
for (struct loader_icd_term *icd_term = ptr_instance->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) {
if ((VkSurfaceKHR)NULL != pIcdSurface->real_icd_surfaces[i] && NULL != icd_term->dispatch.DestroySurfaceKHR) {
icd_term->dispatch.DestroySurfaceKHR(icd_term->instance, pIcdSurface->real_icd_surfaces[i], pAllocator);
}
}
loader_instance_heap_free(ptr_instance, pIcdSurface->real_icd_surfaces);
}
loader_instance_heap_free(ptr_instance, pIcdSurface);
}
return vkRes;
}
#endif // VK_USE_PLATFORM_GGP
#if defined(VK_USE_PLATFORM_METAL_EXT)
LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateMetalSurfaceEXT(VkInstance instance,
@@ -1936,6 +2031,86 @@ VKAPI_ATTR VkResult VKAPI_CALL terminator_GetDisplayPlaneCapabilities2KHR(VkPhys
pDisplayPlaneInfo->planeIndex, &pCapabilities->capabilities);
}
#ifdef VK_USE_PLATFORM_FUCHSIA
// This is the trampoline entrypoint for CreateImagePipeSurfaceFUCHSIA
LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImagePipeSurfaceFUCHSIA(VkInstance instance,
const VkImagePipeSurfaceCreateInfoFUCHSIA *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSurfaceKHR *pSurface) {
const VkLayerInstanceDispatchTable *disp;
disp = loader_get_instance_layer_dispatch(instance);
VkResult res;
res = disp->CreateImagePipeSurfaceFUCHSIA(instance, pCreateInfo, pAllocator, pSurface);
return res;
}
// This is the instance chain terminator function for CreateImagePipeSurfaceFUCHSIA
VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateImagePipeSurfaceFUCHSIA(VkInstance instance,
const VkImagePipeSurfaceCreateInfoFUCHSIA *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSurfaceKHR *pSurface) {
VkResult vkRes = VK_SUCCESS;
VkIcdSurface *pIcdSurface = NULL;
uint32_t i = 0;
// Initialize pSurface to NULL just to be safe.
*pSurface = VK_NULL_HANDLE;
// First, check to ensure the appropriate extension was enabled:
struct loader_instance *ptr_instance = loader_get_instance(instance);
if (!ptr_instance->wsi_imagepipe_surface_enabled) {
loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
"VK_FUCHSIA_imagepipe_surface extension not enabled. "
"vkCreateImagePipeSurfaceFUCHSIA not executed!\n");
vkRes = VK_ERROR_EXTENSION_NOT_PRESENT;
goto out;
}
// Next, if so, proceed with the implementation of this function:
pIcdSurface =
AllocateIcdSurfaceStruct(ptr_instance, sizeof(pIcdSurface->imagepipe_surf.base), sizeof(pIcdSurface->imagepipe_surf));
if (pIcdSurface == NULL) {
vkRes = VK_ERROR_OUT_OF_HOST_MEMORY;
goto out;
}
pIcdSurface->imagepipe_surf.base.platform = VK_ICD_WSI_PLATFORM_FUCHSIA;
// Loop through each ICD and determine if they need to create a surface
for (struct loader_icd_term *icd_term = ptr_instance->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) {
if (icd_term->scanned_icd->interface_version >= ICD_VER_SUPPORTS_ICD_SURFACE_KHR) {
if (NULL != icd_term->dispatch.CreateImagePipeSurfaceFUCHSIA) {
vkRes = icd_term->dispatch.CreateImagePipeSurfaceFUCHSIA(icd_term->instance, pCreateInfo, pAllocator,
&pIcdSurface->real_icd_surfaces[i]);
if (VK_SUCCESS != vkRes) {
goto out;
}
}
}
}
*pSurface = (VkSurfaceKHR)(pIcdSurface);
out:
if (VK_SUCCESS != vkRes && NULL != pIcdSurface) {
if (NULL != pIcdSurface->real_icd_surfaces) {
i = 0;
for (struct loader_icd_term *icd_term = ptr_instance->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) {
if ((VkSurfaceKHR)NULL != pIcdSurface->real_icd_surfaces[i] && NULL != icd_term->dispatch.DestroySurfaceKHR) {
icd_term->dispatch.DestroySurfaceKHR(icd_term->instance, pIcdSurface->real_icd_surfaces[i], pAllocator);
}
}
loader_instance_heap_free(ptr_instance, pIcdSurface->real_icd_surfaces);
}
loader_instance_heap_free(ptr_instance, pIcdSurface);
}
return vkRes;
}
#endif // VK_USE_PLATFORM_FUCHSIA
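An application-side sketch (assumes a Fuchsia build, an instance created with VK_FUCHSIA_imagepipe_surface enabled, and image_pipe_handle being a valid handle owned by the application): the call enters the trampoline above, walks the instance dispatch chain, and ends in the terminator that fans out to each ICD.

#ifdef VK_USE_PLATFORM_FUCHSIA
VkImagePipeSurfaceCreateInfoFUCHSIA surface_info = {
    .sType = VK_STRUCTURE_TYPE_IMAGEPIPE_SURFACE_CREATE_INFO_FUCHSIA,
    .imagePipeHandle = image_pipe_handle,  // hypothetical zx_handle_t provided by the application
};
VkSurfaceKHR surface = VK_NULL_HANDLE;
VkResult err = vkCreateImagePipeSurfaceFUCHSIA(instance, &surface_info, NULL, &surface);
#endif // VK_USE_PLATFORM_FUCHSIA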
LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkGetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
VkSurfaceCapabilities2KHR *pSurfaceCapabilities) {
@@ -2261,6 +2436,23 @@ bool wsi_swapchain_instance_gpa(struct loader_instance *ptr_instance, const char
return true;
}
#endif // VK_USE_PLATFORM_IOS_MVK
#ifdef VK_USE_PLATFORM_GGP
// Functions for the VK_GGP_stream_descriptor_surface extension:
if (!strcmp("vkCreateStreamDescriptorSurfaceGGP", name)) {
*addr = ptr_instance->wsi_ggp_surface_enabled ? (void *)vkCreateStreamDescriptorSurfaceGGP : NULL;
return true;
}
#endif // VK_USE_PLATFORM_GGP
#ifdef VK_USE_PLATFORM_FUCHSIA
// Functions for the VK_FUCHSIA_imagepipe_surface extension:
if (!strcmp("vkCreateImagePipeSurfaceFUCHSIA", name)) {
*addr = ptr_instance->wsi_imagepipe_surface_enabled ? (void *)vkCreateImagePipeSurfaceFUCHSIA : NULL;
return true;
}
#endif // VK_USE_PLATFORM_FUCHSIA
// Functions for the VK_EXT_headless_surface extension:
if (!strcmp("vkCreateHeadlessSurfaceEXT", name)) {
@@ -45,6 +45,12 @@ typedef struct {
#ifdef VK_USE_PLATFORM_MACOS_MVK
VkIcdSurfaceMacOS macos_surf;
#endif // VK_USE_PLATFORM_MACOS_MVK
#ifdef VK_USE_PLATFORM_GGP
VkIcdSurfaceGgp ggp_surf;
#endif // VK_USE_PLATFORM_GGP
#ifdef VK_USE_PLATFORM_FUCHSIA
VkIcdSurfaceImagePipe imagepipe_surf;
#endif // VK_USE_PLATFORM_FUCHSIA
#ifdef VK_USE_PLATFORM_METAL_EXT
VkIcdSurfaceMetal metal_surf;
#endif // VK_USE_PLATFORM_METAL_EXT
@@ -140,6 +146,11 @@ VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateMacOSSurfaceMVK(VkInstance insta
VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateIOSSurfaceMVK(VkInstance instance, const VkIOSSurfaceCreateInfoMVK *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface);
#endif
#ifdef VK_USE_PLATFORM_GGP
VKAPI_ATTR VkResult VKAPI_CALL
terminator_CreateStreamDescriptorSurfaceGGP(VkInstance instance, const VkStreamDescriptorSurfaceCreateInfoGGP *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface);
#endif
#if defined(VK_USE_PLATFORM_METAL_EXT)
VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateMetalSurfaceEXT(VkInstance instance, const VkMetalSurfaceCreateInfoEXT *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface);
@@ -191,6 +202,10 @@ VKAPI_ATTR VkResult VKAPI_CALL terminator_GetDisplayModeProperties2KHR(VkPhysica
VKAPI_ATTR VkResult VKAPI_CALL terminator_GetDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice,
const VkDisplayPlaneInfo2KHR *pDisplayPlaneInfo,
VkDisplayPlaneCapabilities2KHR *pCapabilities);
#ifdef VK_USE_PLATFORM_FUCHSIA
VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateImagePipeSurfaceFUCHSIA(VkInstance instance, const VkImagePipeSurfaceCreateInfoFUCHSIA *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface);
#endif
VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceSurfaceCapabilities2KHR(
VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
@@ -61,8 +61,6 @@ static inline const char* string_VkResult(VkResult input_value)
return "VK_ERROR_INCOMPATIBLE_DISPLAY_KHR";
case VK_ERROR_INCOMPATIBLE_DRIVER:
return "VK_ERROR_INCOMPATIBLE_DRIVER";
case VK_ERROR_INCOMPATIBLE_VERSION_KHR:
return "VK_ERROR_INCOMPATIBLE_VERSION_KHR";
case VK_ERROR_INITIALIZATION_FAILED:
return "VK_ERROR_INITIALIZATION_FAILED";
case VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT:
@@ -132,8 +130,8 @@ static inline const char* string_VkStructureType(VkStructureType input_value)
{
case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR:
return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR";
case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_GEOMETRY_TYPE_INFO_KHR:
return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_GEOMETRY_TYPE_INFO_KHR";
case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_SIZES_INFO_KHR:
return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_SIZES_INFO_KHR";
case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_KHR:
return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_KHR";
case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV:
@@ -150,12 +148,10 @@ static inline const char* string_VkStructureType(VkStructureType input_value)
return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR";
case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV:
return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV";
case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_KHR:
return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_KHR";
case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV:
return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV";
case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_VERSION_KHR:
return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_VERSION_KHR";
case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_VERSION_INFO_KHR:
return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_VERSION_INFO_KHR";
case VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR:
return "VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR";
case VK_STRUCTURE_TYPE_ACQUIRE_PROFILING_LOCK_INFO_KHR:
@@ -178,8 +174,8 @@ static inline const char* string_VkStructureType(VkStructureType input_value)
return "VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2";
case VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT:
return "VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT";
case VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_KHR:
return "VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_KHR";
case VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV:
return "VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV";
case VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO:
return "VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO";
case VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO:
@@ -244,6 +240,8 @@ static inline const char* string_VkStructureType(VkStructureType input_value)
return "VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2_KHR";
case VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2_KHR:
return "VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2_KHR";
case VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM:
return "VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM";
case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
return "VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET";
case VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2_KHR:
@@ -278,8 +276,6 @@ static inline const char* string_VkStructureType(VkStructureType input_value)
return "VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV";
case VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV:
return "VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV";
case VK_STRUCTURE_TYPE_DEFERRED_OPERATION_INFO_KHR:
return "VK_STRUCTURE_TYPE_DEFERRED_OPERATION_INFO_KHR";
case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO:
return "VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO";
case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT:
@@ -300,6 +296,8 @@ static inline const char* string_VkStructureType(VkStructureType input_value)
return "VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO";
case VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO:
return "VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO";
case VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT:
return "VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT";
case VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV:
return "VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV";
case VK_STRUCTURE_TYPE_DEVICE_EVENT_INFO_EXT:
@@ -324,6 +322,8 @@ static inline const char* string_VkStructureType(VkStructureType input_value)
return "VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO";
case VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD:
return "VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD";
case VK_STRUCTURE_TYPE_DEVICE_MEMORY_REPORT_CALLBACK_DATA_EXT:
return "VK_STRUCTURE_TYPE_DEVICE_MEMORY_REPORT_CALLBACK_DATA_EXT";
case VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT:
return "VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT";
case VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO:
@@ -356,8 +356,6 @@ static inline const char* string_VkStructureType(VkStructureType input_value)
return "VK_STRUCTURE_TYPE_DISPLAY_PROPERTIES_2_KHR";
case VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR:
return "VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR";
case VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT:
return "VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT";
case VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT:
return "VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT";
case VK_STRUCTURE_TYPE_EVENT_CREATE_INFO:
@@ -404,6 +402,8 @@ static inline const char* string_VkStructureType(VkStructureType input_value)
return "VK_STRUCTURE_TYPE_FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT";
case VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2:
return "VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2";
case VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR:
return "VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR";
case VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO:
return "VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO";
case VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO:
@@ -562,6 +562,10 @@ static inline const char* string_VkStructureType(VkStructureType input_value)
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES:
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR:
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR:
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT:
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT:
@@ -606,6 +610,8 @@ static inline const char* string_VkStructureType(VkStructureType input_value)
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV:
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT:
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV:
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT:
@@ -642,6 +648,16 @@ static inline const char* string_VkStructureType(VkStructureType input_value)
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT:
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV:
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV:
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR:
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_KHR:
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_KHR";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR:
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES:
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES:
@@ -712,10 +728,12 @@ static inline const char* string_VkStructureType(VkStructureType input_value)
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR:
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_FEATURES_KHR:
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_FEATURES_KHR";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_KHR:
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_KHR";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR:
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR:
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR:
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV:
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV:
@@ -750,6 +768,8 @@ static inline const char* string_VkStructureType(VkStructureType input_value)
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES:
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT:
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV:
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL:
@@ -760,6 +780,8 @@ static inline const char* string_VkStructureType(VkStructureType input_value)
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES:
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR:
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV:
return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV:
@@ -840,6 +862,10 @@ static inline const char* string_VkStructureType(VkStructureType input_value)
return "VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_PROPERTIES_KHR";
case VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_STATISTIC_KHR:
return "VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_STATISTIC_KHR";
case VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV:
return "VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV";
case VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR:
return "VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR";
case VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR:
return "VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR";
case VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO:
@@ -1036,6 +1062,8 @@ static inline const char* string_VkStructureType(VkStructureType input_value)
return "VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET";
case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR:
return "VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR";
case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV:
return "VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV";
case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT:
return "VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT";
case VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR:
@@ -1228,6 +1256,8 @@ static inline const char* string_VkObjectType(VkObjectType input_value)
{
case VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR:
return "VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR";
case VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV:
return "VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV";
case VK_OBJECT_TYPE_BUFFER:
return "VK_OBJECT_TYPE_BUFFER";
case VK_OBJECT_TYPE_BUFFER_VIEW:
@@ -1888,6 +1918,8 @@ static inline const char* string_VkFormatFeatureFlagBits(VkFormatFeatureFlagBits
return "VK_FORMAT_FEATURE_DISJOINT_BIT";
case VK_FORMAT_FEATURE_FRAGMENT_DENSITY_MAP_BIT_EXT:
return "VK_FORMAT_FEATURE_FRAGMENT_DENSITY_MAP_BIT_EXT";
case VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR:
return "VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR";
case VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT:
return "VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT";
case VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT:
@@ -2478,6 +2510,8 @@ static inline const char* string_VkQueryType(VkQueryType input_value)
{
case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:
return "VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR";
case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV:
return "VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV";
case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR:
return "VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR";
case VK_QUERY_TYPE_OCCLUSION:
@@ -2569,14 +2603,18 @@ static inline const char* string_VkBufferUsageFlagBits(VkBufferUsageFlagBits inp
{
switch ((VkBufferUsageFlagBits)input_value)
{
case VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR:
return "VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR";
case VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR:
return "VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR";
case VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT:
return "VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT";
case VK_BUFFER_USAGE_INDEX_BUFFER_BIT:
return "VK_BUFFER_USAGE_INDEX_BUFFER_BIT";
case VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT:
return "VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT";
case VK_BUFFER_USAGE_RAY_TRACING_BIT_KHR:
return "VK_BUFFER_USAGE_RAY_TRACING_BIT_KHR";
case VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR:
return "VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR";
case VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT:
return "VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT";
case VK_BUFFER_USAGE_STORAGE_BUFFER_BIT:
@@ -2983,6 +3021,8 @@ static inline const char* string_VkPipelineCreateFlagBits(VkPipelineCreateFlagBi
return "VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR";
case VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR:
return "VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR";
case VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR:
return "VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR";
case VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR:
return "VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR";
case VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR:
@@ -3153,6 +3193,8 @@ static inline const char* string_VkDynamicState(VkDynamicState input_value)
return "VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT";
case VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV:
return "VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV";
case VK_DYNAMIC_STATE_FRAGMENT_SHADING_RATE_KHR:
return "VK_DYNAMIC_STATE_FRAGMENT_SHADING_RATE_KHR";
case VK_DYNAMIC_STATE_FRONT_FACE_EXT:
return "VK_DYNAMIC_STATE_FRONT_FACE_EXT";
case VK_DYNAMIC_STATE_LINE_STIPPLE_EXT:
@@ -3161,6 +3203,8 @@ static inline const char* string_VkDynamicState(VkDynamicState input_value)
return "VK_DYNAMIC_STATE_LINE_WIDTH";
case VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT:
return "VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT";
case VK_DYNAMIC_STATE_RAY_TRACING_PIPELINE_STACK_SIZE_KHR:
return "VK_DYNAMIC_STATE_RAY_TRACING_PIPELINE_STACK_SIZE_KHR";
case VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT:
return "VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT";
case VK_DYNAMIC_STATE_SCISSOR:
@@ -3470,6 +3514,8 @@ static inline const char* string_VkDescriptorType(VkDescriptorType input_value)
{
case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
return "VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR";
case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV:
return "VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV";
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
return "VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER";
case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
@@ -5200,10 +5246,10 @@ static inline const char* string_VkPerformanceCounterDescriptionFlagBitsKHR(VkPe
{
switch ((VkPerformanceCounterDescriptionFlagBitsKHR)input_value)
{
case VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_KHR:
return "VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_KHR";
case VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_KHR:
return "VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_KHR";
case VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_BIT_KHR:
return "VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_BIT_KHR";
case VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_BIT_KHR:
return "VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_BIT_KHR";
default:
return "Unhandled VkPerformanceCounterDescriptionFlagBitsKHR";
}
@@ -5423,6 +5469,25 @@ static inline std::string string_VkSemaphoreWaitFlagsKHR(VkSemaphoreWaitFlagsKHR
return ret;
}
static inline const char* string_VkFragmentShadingRateCombinerOpKHR(VkFragmentShadingRateCombinerOpKHR input_value)
{
switch ((VkFragmentShadingRateCombinerOpKHR)input_value)
{
case VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR:
return "VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR";
case VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MAX_KHR:
return "VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MAX_KHR";
case VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MIN_KHR:
return "VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MIN_KHR";
case VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MUL_KHR:
return "VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MUL_KHR";
case VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR:
return "VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR";
default:
return "Unhandled VkFragmentShadingRateCombinerOpKHR";
}
}
static inline const char* string_VkPipelineExecutableStatisticFormatKHR(VkPipelineExecutableStatisticFormatKHR input_value)
{
switch ((VkPipelineExecutableStatisticFormatKHR)input_value)
@@ -5481,6 +5546,8 @@ static inline const char* string_VkDebugReportObjectTypeEXT(VkDebugReportObjectT
{
case VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT:
return "VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT";
case VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT:
return "VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT";
case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
return "VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT";
case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT:
@@ -5690,8 +5757,8 @@ static inline const char* string_VkSurfaceCounterFlagBitsEXT(VkSurfaceCounterFla
{
switch ((VkSurfaceCounterFlagBitsEXT)input_value)
{
case VK_SURFACE_COUNTER_VBLANK_EXT:
return "VK_SURFACE_COUNTER_VBLANK_EXT";
case VK_SURFACE_COUNTER_VBLANK_BIT_EXT:
return "VK_SURFACE_COUNTER_VBLANK_BIT_EXT";
default:
return "Unhandled VkSurfaceCounterFlagBitsEXT";
}
@@ -6074,6 +6141,8 @@ static inline const char* string_VkAccelerationStructureTypeKHR(VkAccelerationSt
{
case VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR:
return "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR";
case VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR:
return "VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR";
case VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR:
return "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR";
default:
@@ -6087,6 +6156,8 @@ static inline const char* string_VkAccelerationStructureTypeNV(VkAccelerationStr
{
case VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR:
return "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR";
case VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR:
return "VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR";
case VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR:
return "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR";
default:
@@ -6322,31 +6393,16 @@ static inline const char* string_VkCopyAccelerationStructureModeNV(VkCopyAcceler
}
}
static inline const char* string_VkAccelerationStructureMemoryRequirementsTypeKHR(VkAccelerationStructureMemoryRequirementsTypeKHR input_value)
{
switch ((VkAccelerationStructureMemoryRequirementsTypeKHR)input_value)
{
case VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_KHR:
return "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_KHR";
case VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_KHR:
return "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_KHR";
case VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_KHR:
return "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_KHR";
default:
return "Unhandled VkAccelerationStructureMemoryRequirementsTypeKHR";
}
}
static inline const char* string_VkAccelerationStructureMemoryRequirementsTypeNV(VkAccelerationStructureMemoryRequirementsTypeNV input_value)
{
switch ((VkAccelerationStructureMemoryRequirementsTypeNV)input_value)
{
case VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_KHR:
return "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_KHR";
case VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_KHR:
return "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_KHR";
case VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_KHR:
return "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_KHR";
case VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV:
return "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV";
case VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV:
return "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV";
case VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV:
return "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV";
default:
return "Unhandled VkAccelerationStructureMemoryRequirementsTypeNV";
}
@@ -6762,6 +6818,25 @@ static inline std::string string_VkIndirectCommandsLayoutUsageFlagsNV(VkIndirect
return ret;
}
static inline const char* string_VkDeviceMemoryReportEventTypeEXT(VkDeviceMemoryReportEventTypeEXT input_value)
{
switch ((VkDeviceMemoryReportEventTypeEXT)input_value)
{
case VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT:
return "VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT";
case VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT:
return "VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT";
case VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT:
return "VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT";
case VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT:
return "VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT";
case VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT:
return "VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT";
default:
return "Unhandled VkDeviceMemoryReportEventTypeEXT";
}
}
static inline const char* string_VkDeviceDiagnosticsConfigFlagBitsNV(VkDeviceDiagnosticsConfigFlagBitsNV input_value)
{
switch ((VkDeviceDiagnosticsConfigFlagBitsNV)input_value)
@@ -6793,8 +6868,64 @@ static inline std::string string_VkDeviceDiagnosticsConfigFlagsNV(VkDeviceDiagno
return ret;
}
static inline const char* string_VkFragmentShadingRateTypeNV(VkFragmentShadingRateTypeNV input_value)
{
switch ((VkFragmentShadingRateTypeNV)input_value)
{
case VK_FRAGMENT_SHADING_RATE_TYPE_ENUMS_NV:
return "VK_FRAGMENT_SHADING_RATE_TYPE_ENUMS_NV";
case VK_FRAGMENT_SHADING_RATE_TYPE_FRAGMENT_SIZE_NV:
return "VK_FRAGMENT_SHADING_RATE_TYPE_FRAGMENT_SIZE_NV";
default:
return "Unhandled VkFragmentShadingRateTypeNV";
}
}
#ifdef VK_ENABLE_BETA_EXTENSIONS
static inline const char* string_VkFragmentShadingRateNV(VkFragmentShadingRateNV input_value)
{
switch ((VkFragmentShadingRateNV)input_value)
{
case VK_FRAGMENT_SHADING_RATE_16_INVOCATIONS_PER_PIXEL_NV:
return "VK_FRAGMENT_SHADING_RATE_16_INVOCATIONS_PER_PIXEL_NV";
case VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_1X2_PIXELS_NV:
return "VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_1X2_PIXELS_NV";
case VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X1_PIXELS_NV:
return "VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X1_PIXELS_NV";
case VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X2_PIXELS_NV:
return "VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X2_PIXELS_NV";
case VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X4_PIXELS_NV:
return "VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X4_PIXELS_NV";
case VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_4X2_PIXELS_NV:
return "VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_4X2_PIXELS_NV";
case VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_4X4_PIXELS_NV:
return "VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_4X4_PIXELS_NV";
case VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_PIXEL_NV:
return "VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_PIXEL_NV";
case VK_FRAGMENT_SHADING_RATE_2_INVOCATIONS_PER_PIXEL_NV:
return "VK_FRAGMENT_SHADING_RATE_2_INVOCATIONS_PER_PIXEL_NV";
case VK_FRAGMENT_SHADING_RATE_4_INVOCATIONS_PER_PIXEL_NV:
return "VK_FRAGMENT_SHADING_RATE_4_INVOCATIONS_PER_PIXEL_NV";
case VK_FRAGMENT_SHADING_RATE_8_INVOCATIONS_PER_PIXEL_NV:
return "VK_FRAGMENT_SHADING_RATE_8_INVOCATIONS_PER_PIXEL_NV";
case VK_FRAGMENT_SHADING_RATE_NO_INVOCATIONS_NV:
return "VK_FRAGMENT_SHADING_RATE_NO_INVOCATIONS_NV";
default:
return "Unhandled VkFragmentShadingRateNV";
}
}
static inline const char* string_VkBuildAccelerationStructureModeKHR(VkBuildAccelerationStructureModeKHR input_value)
{
switch ((VkBuildAccelerationStructureModeKHR)input_value)
{
case VK_BUILD_ACCELERATION_STRUCTURE_MODE_BUILD_KHR:
return "VK_BUILD_ACCELERATION_STRUCTURE_MODE_BUILD_KHR";
case VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR:
return "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR";
default:
return "Unhandled VkBuildAccelerationStructureModeKHR";
}
}
static inline const char* string_VkAccelerationStructureBuildTypeKHR(VkAccelerationStructureBuildTypeKHR input_value)
{
@@ -6810,7 +6941,63 @@ static inline const char* string_VkAccelerationStructureBuildTypeKHR(VkAccelerat
return "Unhandled VkAccelerationStructureBuildTypeKHR";
}
}
#endif // VK_ENABLE_BETA_EXTENSIONS
static inline const char* string_VkAccelerationStructureCreateFlagBitsKHR(VkAccelerationStructureCreateFlagBitsKHR input_value)
{
switch ((VkAccelerationStructureCreateFlagBitsKHR)input_value)
{
case VK_ACCELERATION_STRUCTURE_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR:
return "VK_ACCELERATION_STRUCTURE_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR";
default:
return "Unhandled VkAccelerationStructureCreateFlagBitsKHR";
}
}
static inline std::string string_VkAccelerationStructureCreateFlagsKHR(VkAccelerationStructureCreateFlagsKHR input_value)
{
std::string ret;
int index = 0;
while(input_value) {
if (input_value & 1) {
if( !ret.empty()) ret.append("|");
ret.append(string_VkAccelerationStructureCreateFlagBitsKHR(static_cast<VkAccelerationStructureCreateFlagBitsKHR>(1 << index)));
}
++index;
input_value >>= 1;
}
if( ret.empty()) ret.append(string_VkAccelerationStructureCreateFlagBitsKHR(static_cast<VkAccelerationStructureCreateFlagBitsKHR>(0)));
return ret;
}
static inline const char* string_VkAccelerationStructureCompatibilityKHR(VkAccelerationStructureCompatibilityKHR input_value)
{
switch ((VkAccelerationStructureCompatibilityKHR)input_value)
{
case VK_ACCELERATION_STRUCTURE_COMPATIBILITY_COMPATIBLE_KHR:
return "VK_ACCELERATION_STRUCTURE_COMPATIBILITY_COMPATIBLE_KHR";
case VK_ACCELERATION_STRUCTURE_COMPATIBILITY_INCOMPATIBLE_KHR:
return "VK_ACCELERATION_STRUCTURE_COMPATIBILITY_INCOMPATIBLE_KHR";
default:
return "Unhandled VkAccelerationStructureCompatibilityKHR";
}
}
static inline const char* string_VkShaderGroupShaderKHR(VkShaderGroupShaderKHR input_value)
{
switch ((VkShaderGroupShaderKHR)input_value)
{
case VK_SHADER_GROUP_SHADER_ANY_HIT_KHR:
return "VK_SHADER_GROUP_SHADER_ANY_HIT_KHR";
case VK_SHADER_GROUP_SHADER_CLOSEST_HIT_KHR:
return "VK_SHADER_GROUP_SHADER_CLOSEST_HIT_KHR";
case VK_SHADER_GROUP_SHADER_GENERAL_KHR:
return "VK_SHADER_GROUP_SHADER_GENERAL_KHR";
case VK_SHADER_GROUP_SHADER_INTERSECTION_KHR:
return "VK_SHADER_GROUP_SHADER_INTERSECTION_KHR";
default:
return "Unhandled VkShaderGroupShaderKHR";
}
}
static inline const char * GetPhysDevFeatureString(uint32_t index) {
const char * IndexToPhysDevFeatureString[] = {