Atomic ops for voxels

Lubos Lenco 2017-08-03 14:01:04 +02:00
parent dc36e14050
commit 4a992ebd19
12 changed files with 279 additions and 47 deletions

View file

@@ -83,8 +83,15 @@ void main() {
#endif
#ifdef _VoxelGI
vec3 wpos = p / voxelgiDimensions.x;
vec4 indirectDiffuse = indirectDiffuseLight(n, wpos);
#ifdef _VoxelGICam
const float step = voxelgiDimensions / voxelgiResolution;
vec3 eyeSnap = ivec3(eye / step) * step;
vec3 wpos = (p - eyeSnap) / voxelgiDimensions;
#else
vec3 wpos = p / voxelgiDimensions;
#endif
vec4 indirectDiffuse = indirectDiffuseLight(wpos, n);
// vec4 indirectDiffuse = traceDiffuse(wpos, n);
vec3 reflectWorld = reflect(-v, n);
vec3 indirectSpecular = traceSpecularVoxelCone(wpos, reflectWorld, n, metrough.y * 12.0);
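With _VoxelGICam enabled, the trace position is taken relative to an eye position snapped to whole voxels, so the voxel volume can follow the camera while staying aligned to the grid. A minimal sketch of the snap, assuming the defaults this commit sets elsewhere (voxelgiResolution = 128, voxelgiDimensions = 16):

// Sketch only; 128 and 16 are this commit's default resolution and dimensions
const float step = voxelgiDimensions / voxelgiResolution; // 16.0 / 128.0 = 0.125 world units per voxel
vec3 eyeSnap = ivec3(eye / step) * step;                   // eye truncated to a whole-voxel position
vec3 wpos = (p - eyeSnap) / voxelgiDimensions;             // trace position relative to the snapped volume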

View file

@@ -247,7 +247,7 @@ void main() {
#endif
// #ifdef _VoxelGI
// if (dotNL > 0.0) visibility *= traceShadowCone(p / voxelgiResolution.x, l, distance(p, lightPos) / voxelgiResolution.x, n);
// if (dotNL > 0.0) visibility *= traceShadowCone(p / voxelgiResolution, l, distance(p, lightPos) / voxelgiResolution, n);
// #endif
fragColor.rgb *= visibility;

View file

@@ -1,14 +1,37 @@
// https://github.com/Friduric/voxel-cone-tracing
// https://github.com/Cigg/Voxel-Cone-Tracing
// https://github.com/GreatBlambo/voxel_cone_tracing/
// http://simonstechblog.blogspot.com/2013/01/implementing-voxel-cone-tracing.html
// http://leifnode.com/2015/05/voxel-cone-traced-global-illumination/
// http://www.seas.upenn.edu/%7Epcozzi/OpenGLInsights/OpenGLInsights-SparseVoxelization.pdf
// https://research.nvidia.com/sites/default/files/publications/GIVoxels-pg2011-authors.pdf
uniform sampler3D voxels;
const float VOXEL_SIZE = 1.0 / voxelgiResolution.x;
const float VOXEL_SIZE = 1.0 / voxelgiResolution;
const float MAX_MIPMAP = 5.4;
const float VOXEL_RATIO = 128.0 / voxelgiResolution.x;
const float VOXEL_RATIO = 128.0 / voxelgiResolution;
// vec3 tangent(const vec3 n) {
// vec3 t1 = cross(n, vec3(0, 0, 1));
// vec3 t2 = cross(n, vec3(0, 1, 0));
// if (length(t1) > length(t2)) return normalize(t1);
// else return normalize(t2);
// }
// uvec3 face_indices(vec3 dir) {
// uvec3 ret;
// ret.x = (dir.x < 0.0) ? 0 : 1;
// ret.y = (dir.y < 0.0) ? 2 : 3;
// ret.z = (dir.z < 0.0) ? 4 : 5;
// return ret;
// }
// vec4 sample_voxel(vec3 pos, vec3 dir, uvec3 indices, float lod) {
// dir = abs(dir);
// return dir.x * textureLod(voxels[indices.x], pos, lod) +
// dir.y * textureLod(voxels[indices.y], pos, lod) +
// dir.z * textureLod(voxels[indices.z], pos, lod);
// }
vec3 orthogonal(const vec3 u) {
// Pass normalized u
@@ -16,6 +39,60 @@ vec3 orthogonal(const vec3 u) {
return abs(dot(u, v)) > 0.99999 ? cross(u, vec3(0.0, 1.0, 0.0)) : cross(u, v);
}
// vec4 trace_cone(vec3 origin, vec3 dir, float aperture, float max_dist) {
// dir = normalize(dir);
// // uvec3 indices = face_indices(dir);
// vec4 sample_color = vec4(0.0);
// float dist = 3 * VOXEL_SIZE;
// float diam = dist * aperture;
// vec3 sample_position = dir * dist + origin;
// // Step until alpha > 1 or out of bounds
// while (sample_color.a < 1.0 && dist < max_dist) {
// // Choose mip level based on the diameter of the cone
// float mip = max(log2(diam * voxelgiResolution), 0);
// // vec4 mip_sample = sample_voxel(sample_position, dir, indices, mip);
// vec4 mip_sample = textureLod(voxels, sample_position * 0.5 + vec3(0.5), mip);
// // Blend mip sample with current sample color
// sample_color += (1 - sample_color.a) * mip_sample;
// float step_size = max(diam / 2, VOXEL_SIZE);
// dist += step_size;
// diam = dist * aperture;
// sample_position = dir * dist + origin;
// }
// return sample_color;
// }
// vec4 traceDiffuse(vec3 origin, vec3 normal) {
// const float TAN_22_5 = 0.55785173935;
// const float MAX_DISTANCE = 1.73205080757;
// const float angle_mix = 0.5f;
// const float aperture = TAN_22_5;
// vec4 result_diffuse = vec4(0.0);
// const vec3 o1 = normalize(tangent(normal));
// const vec3 o2 = normalize(cross(o1, normal));
// const vec3 c1 = 0.5f * (o1 + o2);
// const vec3 c2 = 0.5f * (o1 - o2);
// // Normal direction
// result_diffuse += trace_cone(origin, normal, aperture, MAX_DISTANCE);
// // 4 side cones
// result_diffuse += trace_cone(origin, mix(normal, o1, angle_mix), aperture, MAX_DISTANCE);
// result_diffuse += trace_cone(origin, mix(normal, -o1, angle_mix), aperture, MAX_DISTANCE);
// result_diffuse += trace_cone(origin, mix(normal, o2, angle_mix), aperture, MAX_DISTANCE);
// result_diffuse += trace_cone(origin, mix(normal, -o2, angle_mix), aperture, MAX_DISTANCE);
// // 4 corners
// result_diffuse += trace_cone(origin, mix(normal, c1, angle_mix), aperture, MAX_DISTANCE);
// result_diffuse += trace_cone(origin, mix(normal, -c1, angle_mix), aperture, MAX_DISTANCE);
// result_diffuse += trace_cone(origin, mix(normal, c2, angle_mix), aperture, MAX_DISTANCE);
// result_diffuse += trace_cone(origin, mix(normal, -c2, angle_mix), aperture, MAX_DISTANCE);
// return vec4(result_diffuse.rgb / 9, 1.0);
// }
vec4 traceDiffuseVoxelCone(const vec3 from, vec3 direction) {
direction = normalize(direction);
const float CONE_SPREAD = 0.325;
@@ -30,18 +107,19 @@ vec4 traceDiffuseVoxelCone(const vec3 from, vec3 direction) {
float level = log2(l);
float ll = (level + 1.0) * (level + 1.0);
vec4 voxel = textureLod(voxels, c, min(MAX_MIPMAP, level));
#ifdef _Cycles
voxel.rgb = min(voxel.rgb * 0.9, vec3(0.9)) + max((voxel.rgb - 0.9) * 200.0, 0.0); // Higher range to allow emission
#endif
acc += 0.075 * ll * voxel * pow(1.0 - voxel.a, 2.0);
dist += ll * VOXEL_SIZE * 2.0;
}
return acc;
}
vec4 indirectDiffuseLight(const vec3 normal, const vec3 wpos) {
vec4 indirectDiffuseLight(const vec3 wpos, const vec3 normal) {
const float ANGLE_MIX = 0.5; // Angle mix (1.0f -> orthogonal direction, 0.0f -> direction of normal)
const float w[3] = { 1.0, 1.0, 1.0 }; // Cone weights
// Find a base for the side cones with the normal as one of its base vectors
const vec3 ortho = normalize(orthogonal(normal));
const vec3 ortho2 = normalize(cross(ortho, normal));
@@ -49,40 +127,35 @@ vec4 indirectDiffuseLight(const vec3 normal, const vec3 wpos) {
const vec3 corner = 0.5 * (ortho + ortho2);
const vec3 corner2 = 0.5 * (ortho - ortho2);
// Find start position of trace (start with a bit of offset)
const float ISQRT2 = 0.707106;
const vec3 N_OFFSET = normal * (1.0 + 4.0 * ISQRT2) * VOXEL_SIZE;
const vec3 C_ORIGIN = wpos + N_OFFSET;
// Accumulate indirect diffuse light
vec4 acc = vec4(0.0);
const vec3 origin = wpos + normal * 3.8 * VOXEL_SIZE;
// We offset forward in normal direction, and backward in cone direction
// Backward in cone direction improves GI, and forward direction removes artifacts
const float CONE_OFFSET = -0.01;
const float CONE_OFFSET = 0.01;
// Trace front cone
acc += w[0] * traceDiffuseVoxelCone(C_ORIGIN + CONE_OFFSET * normal, normal);
vec4 acc = traceDiffuseVoxelCone(origin + CONE_OFFSET * normal, normal);
// Trace 4 side cones
const vec3 s1 = mix(normal, ortho, ANGLE_MIX);
const vec3 s2 = mix(normal, -ortho, ANGLE_MIX);
const vec3 s3 = mix(normal, ortho2, ANGLE_MIX);
const vec3 s4 = mix(normal, -ortho2, ANGLE_MIX);
acc += w[1] * traceDiffuseVoxelCone(C_ORIGIN + CONE_OFFSET * ortho, s1);
acc += w[1] * traceDiffuseVoxelCone(C_ORIGIN - CONE_OFFSET * ortho, s2);
acc += w[1] * traceDiffuseVoxelCone(C_ORIGIN + CONE_OFFSET * ortho2, s3);
acc += w[1] * traceDiffuseVoxelCone(C_ORIGIN - CONE_OFFSET * ortho2, s4);
acc += traceDiffuseVoxelCone(origin + CONE_OFFSET * ortho, s1);
acc += traceDiffuseVoxelCone(origin - CONE_OFFSET * ortho, s2);
acc += traceDiffuseVoxelCone(origin + CONE_OFFSET * ortho2, s3);
acc += traceDiffuseVoxelCone(origin - CONE_OFFSET * ortho2, s4);
// Trace 4 corner cones
const vec3 c1 = mix(normal, corner, ANGLE_MIX);
const vec3 c2 = mix(normal, -corner, ANGLE_MIX);
const vec3 c3 = mix(normal, corner2, ANGLE_MIX);
const vec3 c4 = mix(normal, -corner2, ANGLE_MIX);
acc += w[2] * traceDiffuseVoxelCone(C_ORIGIN + CONE_OFFSET * corner, c1);
acc += w[2] * traceDiffuseVoxelCone(C_ORIGIN - CONE_OFFSET * corner, c2);
acc += w[2] * traceDiffuseVoxelCone(C_ORIGIN + CONE_OFFSET * corner2, c3);
acc += w[2] * traceDiffuseVoxelCone(C_ORIGIN - CONE_OFFSET * corner2, c4);
acc += traceDiffuseVoxelCone(origin + CONE_OFFSET * corner, c1);
acc += traceDiffuseVoxelCone(origin - CONE_OFFSET * corner, c2);
acc += traceDiffuseVoxelCone(origin + CONE_OFFSET * corner2, c3);
acc += traceDiffuseVoxelCone(origin - CONE_OFFSET * corner2, c4);
return acc + vec4(0.001);
return acc;
}
vec3 traceSpecularVoxelCone(vec3 from, vec3 direction, const vec3 normal, const float specularDiffusion) {

View file

@@ -0,0 +1,86 @@
// Courtesy of
// https://github.com/GreatBlambo/voxel_cone_tracing
// https://www.seas.upenn.edu/~pcozzi/OpenGLInsights/OpenGLInsights-SparseVoxelization.pdf
uint convVec4ToRGBA8(vec4 val) {
return (uint(val.w) & 0x000000FF) << 24U
| (uint(val.z) & 0x000000FF) << 16U
| (uint(val.y) & 0x000000FF) << 8U
| (uint(val.x) & 0x000000FF);
}
vec4 convRGBA8ToVec4(uint val) {
return vec4(float((val & 0x000000FF)),
float((val & 0x0000FF00) >> 8U),
float((val & 0x00FF0000) >> 16U),
float((val & 0xFF000000) >> 24U));
}
uint encUnsignedNibble(uint m, uint n) {
return (m & 0xFEFEFEFE)
| (n & 0x00000001)
| (n & 0x00000002) << 7U
| (n & 0x00000004) << 14U
| (n & 0x00000008) << 21U;
}
uint decUnsignedNibble(uint m) {
return (m & 0x00000001)
| (m & 0x00000100) >> 7U
| (m & 0x00010000) >> 14U
| (m & 0x01000000) >> 21U;
}
// void imageAtomicRGBA8Avg(layout(r32ui) uimage3D img, ivec3 coords, vec4 val) {
// // LSBs are used for the sample counter of the moving average.
// val *= 255.0;
// uint newVal = encUnsignedNibble(convVec4ToRGBA8(val), 1);
// uint prevStoredVal = 0;
// uint currStoredVal;
// int counter = 0;
// // Loop as long as destination value gets changed by other threads
// while ((currStoredVal = imageAtomicCompSwap(img, coords, prevStoredVal, newVal)) != prevStoredVal && counter < 16) {
// vec4 rval = convRGBA8ToVec4(currStoredVal & 0xFEFEFEFE);
// uint n = decUnsignedNibble(currStoredVal);
// rval = rval * n + val;
// rval /= ++n;
// rval = round(rval / 2) * 2;
// newVal = encUnsignedNibble(convVec4ToRGBA8(rval), n);
// prevStoredVal = currStoredVal;
// counter++;
// }
// }
// void imageAtomicFloatAdd(layout(r32ui) coherent volatile uimage3D imgUI, ivec3 coords, float val) {
// uint newVal = floatBitsToUint(val);
// uint prevVal = 0;
// uint curVal;
// // Loop as long as destination value gets changed by other threads
// while ((curVal = imageAtomicCompSwap(imgUI, coords, prevVal, newVal)) != prevVal) {
// prevVal = curVal;
// newVal = floatBitsToUint((val + uintBitsToFloat(curVal)));
// }
// }
// vec4 convRGBA8ToVec4(uint val) {
// return vec4(float((val & 0x000000FF)), float((val & 0x0000FF00)>>8U) , float((val & 0x00FF0000)>>16U) , float((val & 0xFF000000)>>24U));
// }
// uint convVec4ToRGBA8( vec4 val) {
// return (uint(val.w) & 0x000000FF) <<24U | (uint(val.z) & 0x000000FF)<<16U | (uint(val.y) & 0x000000FF)<<8U | (uint(val.x) & 0x000000FF);
// }
// void imageAtomicRGBA8Avg( layout ( r32ui ) coherent volatile uimage3D imgUI , ivec3 coords , vec4 val ) {
// val.rgb *= 255.0f; // Optimise following calculations
// uint newVal = convVec4ToRGBA8(val);
// uint prevStoredVal = 0;
// uint curStoredVal;
// // Loop as long as destination value gets changed by other threads
// while ((curStoredVal = imageAtomicCompSwap(imgUI, coords, prevStoredVal, newVal)) != prevStoredVal) {
// prevStoredVal = curStoredVal;
// vec4 rval = convRGBA8ToVec4(curStoredVal);
// rval.xyz = (rval.xyz * rval.w) ; // Denormalize
// vec4 curValF = rval + val; // Add new value
// curValF.xyz /= (curValF.w); // Renormalize
// newVal = convVec4ToRGBA8(curValF);
// }
// }
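The two active helpers pack an 8-bit-per-channel color into a single uint so a voxel write fits into one 32-bit atomic. A minimal round-trip sketch (values illustrative, not part of this file):

vec4 color = vec4(0.25, 0.5, 0.75, 1.0);
uint packed = convVec4ToRGBA8(color * 255.0); // alpha in the high byte, then B, G, R: 0xFFBF7F3F
vec4 back = convRGBA8ToVec4(packed) / 255.0;  // recovers the color, quantized to 8 bits per channel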

View file

@@ -174,7 +174,7 @@ def parse_shader(sres, c, con, defs, lines, parse_attributes):
cid = s[2][:-1]
found = False # Unique check
if ctype == 'sampler2D' or ctype == 'sampler2DShadow' or ctype == 'sampler3D' or ctype == 'samplerCube' or ctype == 'image2D' or ctype == 'image3D': # Texture unit
if ctype == 'sampler2D' or ctype == 'sampler2DShadow' or ctype == 'sampler3D' or ctype == 'samplerCube' or ctype == 'image2D' or ctype == 'uimage2D' or ctype == 'image3D' or ctype == 'uimage3D': # Texture unit
for tu in con['texture_units']: # Texture already present
if tu['name'] == cid:
found = True
@@ -183,7 +183,7 @@ def parse_shader(sres, c, con, defs, lines, parse_attributes):
tu = {}
tu['name'] = cid
# sampler2D / image2D
if ctype == 'image2D' or ctype == 'image3D':
if ctype == 'image2D' or ctype == 'uimage2D' or ctype == 'image3D' or ctype == 'uimage3D':
tu['is_image'] = True
# Check for link
for l in c['links']:
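With uimage2D and uimage3D added to both checks, a uniform such as the layout(r32ui) uimage3D voxels registered by the voxelization pass later in this commit is now bound as an image texture unit. As a GLSL declaration it would read roughly (keyword order illustrative):

layout(r32ui) uniform uimage3D voxels;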

View file

@@ -115,12 +115,13 @@ def make_deferred(cam):
# n.inputs[4].default_value = 'RGBA64'
links.new(nodes['Begin'].outputs[0], nodes['Branch Function Voxelize'].inputs[0])
links.new(nodes['Merge Stages Voxelize'].outputs[0], nodes['Set Target Mesh'].inputs[0])
n.inputs[1].default_value = cam.rp_voxelgi_resolution[0]
n.inputs[2].default_value = cam.rp_voxelgi_resolution[1]
n.inputs[3].default_value = cam.rp_voxelgi_resolution[2]
res = int(cam.rp_voxelgi_resolution)
n.inputs[1].default_value = res
n.inputs[2].default_value = res
n.inputs[3].default_value = res
n = nodes['Set Viewport Voxels']
n.inputs[1].default_value = cam.rp_voxelgi_resolution[0]
n.inputs[2].default_value = cam.rp_voxelgi_resolution[1]
n.inputs[1].default_value = res
n.inputs[2].default_value = res
links.new(nodes['Image 3D Voxels'].outputs[0], nodes['Deferred Indirect'].inputs[4])
if cam.rp_shadowmap != 'None':

View file

@@ -113,6 +113,8 @@ def build_node_tree(world):
assets.add_khafile_def('arm_voxelgi_revox')
if wrd.voxelgi_multibounce:
wrd.world_defs += '_VoxelGIMulti'
if wrd.voxelgi_camera:
wrd.world_defs += '_VoxelGICam'
wrd.world_defs += '_VoxelGI'
wrd.world_defs += '_Rad' # Always do radiance for voxels
wrd.world_defs += '_Irr'

View file

@@ -22,7 +22,7 @@ def make(context_id):
frag.ins = geom.outs
frag.write('vec3 lp = lightPos - wposition * voxelgiDimensions.x;')
frag.write('vec3 lp = lightPos - wposition * voxelgiDimensions;')
frag.write('vec3 l = normalize(lp);')
frag.write('float visibility = 1.0;')
frag.add_include('../../Shaders/compiled.glsl')
@@ -42,9 +42,15 @@ def make(context_id):
frag.write('int lightShadow = 0;')
frag.add_include('../../Shaders/std/math.glsl')
frag.add_include('../../Shaders/std/imageatomic.glsl')
frag.write_header('#extension GL_ARB_shader_image_load_store : enable')
frag.add_uniform('layout(RGBA8) image3D voxels')
# if bpy.data.cameras[0].rp_voxelgi_hdr:
# frag.add_uniform('layout(RGBA16) image3D voxels')
# else:
# frag.add_uniform('layout(RGBA8) image3D voxels')
frag.add_uniform('layout(r32ui) uimage3D voxels')
frag.add_uniform('vec3 lightPos', '_lampPosition')
frag.add_uniform('vec3 lightColor', '_lampColor')
@@ -63,7 +69,8 @@ def make(context_id):
cycles.parse(mat_state.nodes, con_voxel, vert, frag, geom, tesc, tese, parse_opacity=False, parse_displacement=False)
if wrd.voxelgi_camera:
vert.add_uniform('vec3 eye', '_cameraPosition')
vert.add_uniform('mat4 W', '_worldMatrix')
vert.add_uniform('mat3 N', '_normalMatrix')
@@ -76,7 +83,12 @@ def make(context_id):
vert.add_out('vec2 texCoordGeom')
vert.write('texCoordGeom = tex;')
vert.write('wpositionGeom = vec3(W * vec4(pos, 1.0)) / voxelgiDimensions.x;')
if wrd.voxelgi_camera:
vert.write('const float step = voxelgiDimensions / voxelgiResolution;') # TODO: Pass as uniform
vert.write('vec3 eyeSnap = ivec3(eye / step) * step;') # TODO: Pass as uniform
vert.write('wpositionGeom = (vec3(W * vec4(pos, 1.0)) - eyeSnap) / voxelgiDimensions;')
else:
vert.write('wpositionGeom = vec3(W * vec4(pos, 1.0)) / voxelgiDimensions;')
vert.write('wnormalGeom = normalize(N * nor);')
vert.write('gl_Position = vec4(0.0, 0.0, 0.0, 1.0);')
@@ -118,12 +130,54 @@ def make(context_id):
if cycles.emission_found:
frag.write('vec3 color = basecol;')
else:
frag.write('vec3 color = basecol * visibility * lightColor * dotNL * attenuate(distance(wposition * voxelgiDimensions.x, lightPos));')
frag.write('vec3 color = basecol * visibility * lightColor * dotNL * attenuate(distance(wposition * voxelgiDimensions, lightPos));')
frag.write('vec3 voxel = wposition * 0.5 + vec3(0.5);')
if wrd.lighting_model == 'Cycles':
frag.write('color = min(color * 0.9, vec3(0.9)) + min(color / 200.0, 0.1);') # Higher range to allow emission
frag.write('imageStore(voxels, ivec3(voxelgiResolution * voxel), vec4(color, 1.0));')
# if bpy.data.cameras[0].rp_voxelgi_hdr:
# frag.write('imageStore(voxels, ivec3(voxelgiResolution * voxel), vec4(color, 1.0));')
# else:
frag.write('color = clamp(color, vec3(0.0), vec3(1.0));')
frag.write('uint val = convVec4ToRGBA8(vec4(color, 1.0) * 255);')
frag.write('imageAtomicMax(voxels, ivec3(voxelgiResolution * voxel), val);')
# frag.write('imageStore(voxels, ivec3(voxelgiResolution * voxel), vec4(color, 1.0));')
# frag.write('imageAtomicRGBA8Avg(voxels, ivec3(voxelgiResolution * voxel), vec4(color, 1.0));')
# frag.write('ivec3 coords = ivec3(voxelgiResolution * voxel);')
# frag.write('vec4 val = vec4(color, 1.0);')
# frag.write('val *= 255.0;')
# frag.write('uint newVal = encUnsignedNibble(convVec4ToRGBA8(val), 1);')
# frag.write('uint prevStoredVal = 0;')
# frag.write('uint currStoredVal;')
# # frag.write('int counter = 0;')
# frag.write('// Loop as long as destination value gets changed by other threads')
# # frag.write('while ((currStoredVal = imageAtomicCompSwap(voxels, coords, prevStoredVal, newVal)) != prevStoredVal && counter < 16) {')
# frag.write('while ((currStoredVal = imageAtomicCompSwap(voxels, coords, prevStoredVal, newVal)) != prevStoredVal) {')
# frag.write(' vec4 rval = convRGBA8ToVec4(currStoredVal & 0xFEFEFEFE);')
# frag.write(' uint n = decUnsignedNibble(currStoredVal);')
# frag.write(' rval = rval * n + val;')
# frag.write(' rval /= ++n;')
# frag.write(' rval = round(rval / 2) * 2;')
# frag.write(' newVal = encUnsignedNibble(convVec4ToRGBA8(rval), n);')
# frag.write(' prevStoredVal = currStoredVal;')
# # frag.write(' counter++;')
# frag.write('}')
# frag.write('val.rgb *= 255.0f;')
# frag.write('uint newVal = convVec4ToRGBA8(val);')
# frag.write('uint prevStoredVal = 0;')
# frag.write('uint curStoredVal;')
# frag.write('while ((curStoredVal = imageAtomicCompSwap(voxels, coords, prevStoredVal, newVal)) != prevStoredVal) {')
# frag.write(' prevStoredVal = curStoredVal;')
# frag.write(' vec4 rval = convRGBA8ToVec4(curStoredVal);')
# frag.write(' rval.xyz = (rval.xyz * rval.w);')
# frag.write(' vec4 curValF = rval + val;')
# frag.write(' curValF.xyz /= (curValF.w);')
# frag.write(' newVal = convVec4ToRGBA8(curValF);')
# frag.write('}')
return con_voxel
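Stitched together from the frag.write strings above, the active (non-HDR) write path amounts to roughly this GLSL; a sketch assembled from the diff, not a verbatim shader dump:

vec3 voxel = wposition * 0.5 + vec3(0.5);                      // remap volume coords from [-1,1] to [0,1]
color = clamp(color, vec3(0.0), vec3(1.0));                    // clamp to LDR before 8-bit packing
uint val = convVec4ToRGBA8(vec4(color, 1.0) * 255);            // pack color and alpha into one 32-bit word
imageAtomicMax(voxels, ivec3(voxelgiResolution * voxel), val); // keep the largest packed value per voxel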

View file

@@ -34,8 +34,8 @@ class Shader:
# layout(RGBA8) image3D voxels
utype = ar[-2]
uname = ar[-1]
if utype.startswith('sampler') or utype.startswith('image'):
is_image = True if utype.startswith('image') else None
if utype.startswith('sampler') or utype.startswith('image') or utype.startswith('uimage'):
is_image = True if (utype.startswith('image') or utype.startswith('uimage')) else None
self.context.add_texture_unit(utype, uname, link=link, is_image=is_image)
else:
# Prefer vec4[] for d3d to avoid padding

View file

@@ -410,10 +410,19 @@ def init_properties():
bpy.types.Camera.rp_greasepencil = bpy.props.BoolProperty(name="Grease Pencil", description="Render Grease Pencil data", default=False, update=update_renderpath)
bpy.types.Camera.rp_ocean = bpy.props.BoolProperty(name="Ocean", description="Ocean pass", default=False, update=update_renderpath)
bpy.types.Camera.rp_voxelgi = bpy.props.BoolProperty(name="Voxel GI", description="Voxel-based Global Illumination", default=False, update=update_renderpath)
bpy.types.Camera.rp_voxelgi_resolution = bpy.props.FloatVectorProperty(name="Resolution", description="3D texture resolution", size=3, default=[128, 128, 128], update=update_renderpath)
bpy.types.Camera.rp_voxelgi_resolution = bpy.props.EnumProperty(
items=[('32', '32', '32'),
('64', '64', '64'),
('128', '128', '128'),
('256', '256', '256'),
('512', '512', '512')],
name="Resolution", description="3D texture resolution", default='128', update=update_renderpath)
bpy.types.Camera.rp_voxelgi_hdr = bpy.props.BoolProperty(name="HDR", description="Store voxels in RGBA64 instead of RGBA32", default=False, update=update_renderpath)
bpy.types.World.generate_voxelgi_dimensions = bpy.props.FloatProperty(name="Dimensions", description="Voxelization bounds",default=16, update=assets.invalidate_shader_cache)
bpy.types.World.voxelgi_revoxelize = bpy.props.BoolProperty(name="Revoxelize", description="Revoxelize scene each frame", default=False, update=assets.invalidate_shader_cache)
bpy.types.World.voxelgi_multibounce = bpy.props.BoolProperty(name="Multi-bounce", description="Accumulate multiple light bounces", default=False, update=assets.invalidate_shader_cache)
bpy.types.World.voxelgi_camera = bpy.props.BoolProperty(name="Camera", description="Use camera as voxelization origin", default=False, update=assets.invalidate_shader_cache)
bpy.types.World.voxelgi_anisotropic = bpy.props.BoolProperty(name="Anisotropic", description="Use anisotropic voxels", default=False, update=assets.invalidate_shader_cache)
bpy.types.World.voxelgi_diff = bpy.props.FloatProperty(name="Diffuse", description="", default=1.0, update=assets.invalidate_shader_cache)
bpy.types.World.voxelgi_spec = bpy.props.FloatProperty(name="Specular", description="", default=1.0, update=assets.invalidate_shader_cache)
bpy.types.World.voxelgi_occ = bpy.props.FloatProperty(name="Occlusion", description="", default=1.0, update=assets.invalidate_shader_cache)
@@ -538,7 +547,6 @@ def init_properties():
items=[('PBR', 'PBR', 'PBR'),
('Cycles', 'Cycles', 'Cycles')],
name="Lighting", description="Preferred lighting calibration", default='PBR', update=assets.invalidate_shader_cache)
bpy.types.World.generate_voxelgi_dimensions = bpy.props.FloatVectorProperty(name="Dimensions", description="Voxelization bounds", size=3, default=[16, 16, 16], update=assets.invalidate_shader_cache)
# For material
bpy.types.NodeSocket.is_uniform = bpy.props.BoolProperty(name="Is Uniform", description="Mark node sockets to be processed as material uniforms", default=False)
bpy.types.NodeTree.is_cached = bpy.props.BoolProperty(name="Node Tree Cached", description="No need to reexport node tree", default=False)

View file

@@ -142,9 +142,7 @@ def set_preset(self, context, preset):
cam.rp_stereo = False
cam.rp_greasepencil = False
cam.rp_voxelgi = True
cam.rp_voxelgi_resolution[0] = 256
cam.rp_voxelgi_resolution[1] = 256
cam.rp_voxelgi_resolution[2] = 256
cam.rp_voxelgi_resolution = '256'
cam.rp_render_to_texture = True
cam.rp_supersampling = '2'
cam.rp_antialiasing = 'TAA'
@@ -339,7 +337,10 @@ class GenRPDataPropsPanel(bpy.types.Panel):
row = layout.row()
row.prop(wrd, 'voxelgi_revoxelize')
row.prop(wrd, 'voxelgi_multibounce')
row.prop(dat, 'rp_voxelgi_hdr')
row = layout.row()
row.prop(wrd, 'voxelgi_camera')
row.prop(wrd, 'voxelgi_anisotropic')
layout.prop(dat, 'rp_voxelgi_hdr')
layout.separator()
layout.prop(dat, "rp_render_to_texture")

View file

@@ -371,8 +371,8 @@ const float compoDOFLength = 160.0;
if bpy.data.cameras[0].rp_voxelgi:
f.write(
"""const vec3 voxelgiResolution = ivec3(""" + str(round(bpy.data.cameras[0].rp_voxelgi_resolution[0])) + """, """ + str(round(bpy.data.cameras[0].rp_voxelgi_resolution[1])) + """, """ + str(round(bpy.data.cameras[0].rp_voxelgi_resolution[2])) + """);
const vec3 voxelgiDimensions = ivec3(""" + str(round(wrd.generate_voxelgi_dimensions[0])) + """, """ + str(round(wrd.generate_voxelgi_dimensions[1])) + """, """ + str(round(wrd.generate_voxelgi_dimensions[2])) + """);
"""const float voxelgiResolution = """ + str(bpy.data.cameras[0].rp_voxelgi_resolution) + """;
const float voxelgiDimensions = """ + str(round(wrd.generate_voxelgi_dimensions)) + """;
const float voxelgiDiff = """ + str(round(wrd.voxelgi_diff * 100) / 100) + """;
const float voxelgiSpec = """ + str(round(wrd.voxelgi_spec * 100) / 100) + """;
const float voxelgiOcc = """ + str(round(wrd.voxelgi_occ * 100) / 100) + """;
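With the defaults introduced by this commit (resolution '128', dimensions 16, diffuse/specular/occlusion factors of 1.0), the emitted constants block (included by the shaders above as compiled.glsl) would come out roughly as:

const float voxelgiResolution = 128;
const float voxelgiDimensions = 16;
const float voxelgiDiff = 1.0;
const float voxelgiSpec = 1.0;
const float voxelgiOcc = 1.0;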