Replace render tree bvh with version from 3.x

Includes some modifications to the collision routines so that the tree works more efficiently with the template callback mechanism of the old dynamic_bvh API.
lawnjelly 2021-06-10 07:24:46 +01:00
parent 8473062dcc
commit 29805e797d
10 changed files with 212 additions and 556 deletions
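
For reference, the "template callback mechanism" here is the DynamicBVH-style query interface: instead of filling a fixed result array, the caller passes a functor whose bool operator()(T *) receives each hit and returns whether to keep querying. A rough sketch of that contract (the functor name is illustrative; the operator() convention follows DefaultQueryResult in bvh_simple.h below):

// Illustrative functor satisfying the QueryResult contract used by
// aabb_query / convex_query / ray_query in bvh_simple.h.
struct CountHits {
	int count = 0;
	bool operator()(void *p_userdata) {
		(void)p_userdata;
		count++;
		return true; // true = continue the query
	}
};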


@@ -48,7 +48,7 @@
#include "bvh_tree.h"
#define BVHTREE_CLASS BVH_Tree<T, 2, MAX_ITEMS, USE_PAIRS, Bounds, Point>
#define BVHTREE_CLASS BVH_Tree<T, 2, MAX_ITEMS, USE_PAIRS, false, Bounds, Point>
template <class T, bool USE_PAIRS = false, int MAX_ITEMS = 32, class Bounds = AABB, class Point = Vector3>
class BVH_Manager {


@@ -1,534 +0,0 @@
public:
// Cull parameters are a convenient way of passing a bunch
// of arguments through the culling functions without
// writing loads of code. Not every member is used by every cull check.
struct CullParams {
int result_count_overall; // both trees
int result_count; // this tree only
int result_max;
T **result_array;
int *subindex_array;
// nobody truly understands how masks are intended to work.
uint32_t mask;
uint32_t pairable_type;
// optional components for different tests
Point point;
BVHABB_CLASS abb;
typename BVHABB_CLASS::ConvexHull hull;
typename BVHABB_CLASS::Segment segment;
// when collision testing, non pairable moving items
// only need to be tested against the pairable tree.
// collisions with other non pairable items are irrelevant.
bool test_pairable_only;
};
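
For illustration only (this helper is not part of the diff), a wrapper in the style of the BVH_Manager above might fill CullParams for a point query roughly as follows; the p_* parameter names and the `tree` member are assumptions:

// Sketch: fill CullParams for a point query and return the translated hit count.
int cull_point_example(const Point &p_point, T **p_result_array, int p_result_max, int *p_subindex_array = nullptr, uint32_t p_mask = 0xFFFFFFFF) {
	typename BVHTREE_CLASS::CullParams params;
	params.result_count_overall = 0;
	params.result_max = p_result_max;
	params.result_array = p_result_array;
	params.subindex_array = p_subindex_array;
	params.mask = p_mask;
	params.pairable_type = 0;
	params.test_pairable_only = false;
	params.point = p_point;
	tree.cull_point(params); // hits are translated into p_result_array
	return params.result_count_overall;
}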
private:
void _cull_translate_hits(CullParams &p) {
int num_hits = _cull_hits.size();
int left = p.result_max - p.result_count_overall;
if (num_hits > left) {
num_hits = left;
}
int out_n = p.result_count_overall;
for (int n = 0; n < num_hits; n++) {
uint32_t ref_id = _cull_hits[n];
const ItemExtra &ex = _extra[ref_id];
p.result_array[out_n] = ex.userdata;
if (p.subindex_array) {
p.subindex_array[out_n] = ex.subindex;
}
out_n++;
}
p.result_count = num_hits;
p.result_count_overall += num_hits;
}
public:
int cull_convex(CullParams &r_params, bool p_translate_hits = true) {
_cull_hits.clear();
r_params.result_count = 0;
for (int n = 0; n < NUM_TREES; n++) {
if (_root_node_id[n] == BVHCommon::INVALID) {
continue;
}
_cull_convex_iterative(_root_node_id[n], r_params);
}
if (p_translate_hits) {
_cull_translate_hits(r_params);
}
return r_params.result_count;
}
int cull_segment(CullParams &r_params, bool p_translate_hits = true) {
_cull_hits.clear();
r_params.result_count = 0;
for (int n = 0; n < NUM_TREES; n++) {
if (_root_node_id[n] == BVHCommon::INVALID) {
continue;
}
_cull_segment_iterative(_root_node_id[n], r_params);
}
if (p_translate_hits) {
_cull_translate_hits(r_params);
}
return r_params.result_count;
}
int cull_point(CullParams &r_params, bool p_translate_hits = true) {
_cull_hits.clear();
r_params.result_count = 0;
for (int n = 0; n < NUM_TREES; n++) {
if (_root_node_id[n] == BVHCommon::INVALID) {
continue;
}
_cull_point_iterative(_root_node_id[n], r_params);
}
if (p_translate_hits) {
_cull_translate_hits(r_params);
}
return r_params.result_count;
}
int cull_aabb(CullParams &r_params, bool p_translate_hits = true) {
_cull_hits.clear();
r_params.result_count = 0;
for (int n = 0; n < NUM_TREES; n++) {
if (_root_node_id[n] == BVHCommon::INVALID) {
continue;
}
if ((n == 0) && r_params.test_pairable_only) {
continue;
}
_cull_aabb_iterative(_root_node_id[n], r_params);
}
if (p_translate_hits) {
_cull_translate_hits(r_params);
}
return r_params.result_count;
}
bool _cull_hits_full(const CullParams &p) {
// instead of checking every hit, we can do a lazy check for this condition.
// it isn't a problem if we write too many _cull_hits, because only the
// result_max amount will be translated and output. But we might as
// well stop our cull checks after the maximum has been reached.
return (int)_cull_hits.size() >= p.result_max;
}
// write this logic once for use in all routines
// double check this as a possible source of bugs in future.
bool _cull_pairing_mask_test_hit(uint32_t p_maskA, uint32_t p_typeA, uint32_t p_maskB, uint32_t p_typeB) const {
// double check this as a possible source of bugs in future.
bool A_match_B = p_maskA & p_typeB;
if (!A_match_B) {
bool B_match_A = p_maskB & p_typeA;
if (!B_match_A) {
return false;
}
}
return true;
}
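
A standalone equivalent of the pairing test, with a worked case (names and mask values are illustrative, not from this diff):

#include <cassert>
#include <cstdint>

// Same logic as _cull_pairing_mask_test_hit: keep the hit if either side's
// mask matches the other side's type.
static bool pairing_mask_test_hit(uint32_t p_maskA, uint32_t p_typeA, uint32_t p_maskB, uint32_t p_typeB) {
	return (p_maskA & p_typeB) || (p_maskB & p_typeA);
}

int main() {
	// A's mask (0b0010) misses B's type (0b0001), but B's mask (0b0010)
	// matches A's type (0b0010), so the pair is still reported.
	assert(pairing_mask_test_hit(0b0010, 0b0010, 0b0010, 0b0001));
	// Neither direction matches, so the hit is culled.
	assert(!pairing_mask_test_hit(0b0001, 0b0001, 0b0010, 0b0010));
	return 0;
}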
void _cull_hit(uint32_t p_ref_id, CullParams &p) {
// take into account masks etc
// this would be more efficient to do before plane checks,
// but done here for ease to get started
if (USE_PAIRS) {
const ItemExtra &ex = _extra[p_ref_id];
if (!_cull_pairing_mask_test_hit(p.mask, p.pairable_type, ex.pairable_mask, ex.pairable_type)) {
return;
}
}
_cull_hits.push_back(p_ref_id);
}
bool _cull_segment_iterative(uint32_t p_node_id, CullParams &r_params) {
// our function parameters to keep on a stack
struct CullSegParams {
uint32_t node_id;
};
// most of the iterative functionality is contained in this helper class
BVH_IterativeInfo<CullSegParams> ii;
// alloca must allocate the stack from this function, it cannot be allocated in the
// helper class
ii.stack = (CullSegParams *)alloca(ii.get_alloca_stacksize());
// seed the stack
ii.get_first()->node_id = p_node_id;
CullSegParams csp;
// while there are still more nodes on the stack
while (ii.pop(csp)) {
TNode &tnode = _nodes[csp.node_id];
if (tnode.is_leaf()) {
// lazy check for hits full up condition
if (_cull_hits_full(r_params)) {
return false;
}
TLeaf &leaf = _node_get_leaf(tnode);
// test children individually
for (int n = 0; n < leaf.num_items; n++) {
const BVHABB_CLASS &aabb = leaf.get_aabb(n);
if (aabb.intersects_segment(r_params.segment)) {
uint32_t child_id = leaf.get_item_ref_id(n);
// register hit
_cull_hit(child_id, r_params);
}
}
} else {
// test children individually
for (int n = 0; n < tnode.num_children; n++) {
uint32_t child_id = tnode.children[n];
const BVHABB_CLASS &child_abb = _nodes[child_id].aabb;
if (child_abb.intersects_segment(r_params.segment)) {
// add to the stack
CullSegParams *child = ii.request();
child->node_id = child_id;
}
}
}
} // while more nodes to pop
// true indicates results are not full
return true;
}
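
The iterative helper itself is not shown in this hunk; inferred from how it is used above, its shape is roughly the following (a sketch only; the real BVH_IterativeInfo presumably also guards against overflowing the fixed-size alloca stack):

// Sketch of the iterative-traversal helper: a small fixed-size stack of
// per-node parameters, allocated by the calling function with alloca() so
// that the storage lives in that function's frame.
template <class T>
struct IterativeInfoSketch {
	enum { ALLOCA_STACK_SIZE = 128 };
	T *stack = nullptr; // set by the caller to alloca(get_alloca_stacksize())
	int depth = 1;      // slot 0 is reserved for the seed entry

	int get_alloca_stacksize() const { return ALLOCA_STACK_SIZE * sizeof(T); }
	T *get_first() { return &stack[0]; }

	// Pop the top entry into r_dest; returns false when traversal is done.
	bool pop(T &r_dest) {
		if (!depth) {
			return false;
		}
		depth--;
		r_dest = stack[depth];
		return true;
	}

	// Reserve a new entry for a child node and return it for filling in.
	T *request() { return &stack[depth++]; }
};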
bool _cull_point_iterative(uint32_t p_node_id, CullParams &r_params) {
// our function parameters to keep on a stack
struct CullPointParams {
uint32_t node_id;
};
// most of the iterative functionality is contained in this helper class
BVH_IterativeInfo<CullPointParams> ii;
// alloca must allocate the stack from this function, it cannot be allocated in the
// helper class
ii.stack = (CullPointParams *)alloca(ii.get_alloca_stacksize());
// seed the stack
ii.get_first()->node_id = p_node_id;
CullPointParams cpp;
// while there are still more nodes on the stack
while (ii.pop(cpp)) {
TNode &tnode = _nodes[cpp.node_id];
// no hit with this node?
if (!tnode.aabb.intersects_point(r_params.point)) {
continue;
}
if (tnode.is_leaf()) {
// lazy check for hits full up condition
if (_cull_hits_full(r_params)) {
return false;
}
TLeaf &leaf = _node_get_leaf(tnode);
// test children individually
for (int n = 0; n < leaf.num_items; n++) {
if (leaf.get_aabb(n).intersects_point(r_params.point)) {
uint32_t child_id = leaf.get_item_ref_id(n);
// register hit
_cull_hit(child_id, r_params);
}
}
} else {
// test children individually
for (int n = 0; n < tnode.num_children; n++) {
uint32_t child_id = tnode.children[n];
// add to the stack
CullPointParams *child = ii.request();
child->node_id = child_id;
}
}
} // while more nodes to pop
// true indicates results are not full
return true;
}
bool _cull_aabb_iterative(uint32_t p_node_id, CullParams &r_params, bool p_fully_within = false) {
// our function parameters to keep on a stack
struct CullAABBParams {
uint32_t node_id;
bool fully_within;
};
// most of the iterative functionality is contained in this helper class
BVH_IterativeInfo<CullAABBParams> ii;
// alloca must allocate the stack from this function, it cannot be allocated in the
// helper class
ii.stack = (CullAABBParams *)alloca(ii.get_alloca_stacksize());
// seed the stack
ii.get_first()->node_id = p_node_id;
ii.get_first()->fully_within = p_fully_within;
CullAABBParams cap;
// while there are still more nodes on the stack
while (ii.pop(cap)) {
TNode &tnode = _nodes[cap.node_id];
if (tnode.is_leaf()) {
// lazy check for hits full up condition
if (_cull_hits_full(r_params)) {
return false;
}
TLeaf &leaf = _node_get_leaf(tnode);
// if fully within we can just add all items
// as long as they pass mask checks
if (cap.fully_within) {
for (int n = 0; n < leaf.num_items; n++) {
uint32_t child_id = leaf.get_item_ref_id(n);
// register hit
_cull_hit(child_id, r_params);
}
} else {
for (int n = 0; n < leaf.num_items; n++) {
const BVHABB_CLASS &aabb = leaf.get_aabb(n);
if (aabb.intersects(r_params.abb)) {
uint32_t child_id = leaf.get_item_ref_id(n);
// register hit
_cull_hit(child_id, r_params);
}
}
} // not fully within
} else {
if (!cap.fully_within) {
// test children individually
for (int n = 0; n < tnode.num_children; n++) {
uint32_t child_id = tnode.children[n];
const BVHABB_CLASS &child_abb = _nodes[child_id].aabb;
if (child_abb.intersects(r_params.abb)) {
// is the node totally within the aabb?
bool fully_within = r_params.abb.is_other_within(child_abb);
// add to the stack
CullAABBParams *child = ii.request();
// should always return valid child
child->node_id = child_id;
child->fully_within = fully_within;
}
}
} else {
for (int n = 0; n < tnode.num_children; n++) {
uint32_t child_id = tnode.children[n];
// add to the stack
CullAABBParams *child = ii.request();
// should always return valid child
child->node_id = child_id;
child->fully_within = true;
}
}
}
} // while more nodes to pop
// true indicates results are not full
return true;
}
// returns false when the results buffer is full up
bool _cull_convex_iterative(uint32_t p_node_id, CullParams &r_params, bool p_fully_within = false) {
// our function parameters to keep on a stack
struct CullConvexParams {
uint32_t node_id;
bool fully_within;
};
// most of the iterative functionality is contained in this helper class
BVH_IterativeInfo<CullConvexParams> ii;
// alloca must allocate the stack from this function, it cannot be allocated in the
// helper class
ii.stack = (CullConvexParams *)alloca(ii.get_alloca_stacksize());
// seed the stack
ii.get_first()->node_id = p_node_id;
ii.get_first()->fully_within = p_fully_within;
// preallocate these as a once off to be reused
uint32_t max_planes = r_params.hull.num_planes;
uint32_t *plane_ids = (uint32_t *)alloca(sizeof(uint32_t) * max_planes);
CullConvexParams ccp;
// while there are still more nodes on the stack
while (ii.pop(ccp)) {
const TNode &tnode = _nodes[ccp.node_id];
if (!ccp.fully_within) {
typename BVHABB_CLASS::IntersectResult res = tnode.aabb.intersects_convex(r_params.hull);
switch (res) {
default: {
continue; // miss, just move on to the next node in the stack
} break;
case BVHABB_CLASS::IR_PARTIAL: {
} break;
case BVHABB_CLASS::IR_FULL: {
ccp.fully_within = true;
} break;
}
} // if not fully within already
if (tnode.is_leaf()) {
// lazy check for hits full up condition
if (_cull_hits_full(r_params)) {
return false;
}
const TLeaf &leaf = _node_get_leaf(tnode);
// if fully within, simply add all items to the result
// (taking into account masks)
if (ccp.fully_within) {
for (int n = 0; n < leaf.num_items; n++) {
uint32_t child_id = leaf.get_item_ref_id(n);
// register hit
_cull_hit(child_id, r_params);
}
} else {
// we can either use a naive check of all the planes against the AABB,
// or an optimized check, which finds in advance which of the planes can possibly
// cut the AABB, and only tests those. This can be much faster.
#define BVH_CONVEX_CULL_OPTIMIZED
#ifdef BVH_CONVEX_CULL_OPTIMIZED
// first find which planes cut the aabb
uint32_t num_planes = tnode.aabb.find_cutting_planes(r_params.hull, plane_ids);
BVH_ASSERT(num_planes <= max_planes);
//#define BVH_CONVEX_CULL_OPTIMIZED_RIGOR_CHECK
#ifdef BVH_CONVEX_CULL_OPTIMIZED_RIGOR_CHECK
// rigorous check
uint32_t results[MAX_ITEMS];
uint32_t num_results = 0;
#endif
// test children individually
for (int n = 0; n < leaf.num_items; n++) {
//const Item &item = leaf.get_item(n);
const BVHABB_CLASS &aabb = leaf.get_aabb(n);
if (aabb.intersects_convex_optimized(r_params.hull, plane_ids, num_planes)) {
uint32_t child_id = leaf.get_item_ref_id(n);
#ifdef BVH_CONVEX_CULL_OPTIMIZED_RIGOR_CHECK
results[num_results++] = child_id;
#endif
// register hit
_cull_hit(child_id, r_params);
}
}
#ifdef BVH_CONVEX_CULL_OPTIMIZED_RIGOR_CHECK
uint32_t test_count = 0;
for (int n = 0; n < leaf.num_items; n++) {
const BVHABB_CLASS &aabb = leaf.get_aabb(n);
if (aabb.intersects_convex_partial(r_params.hull)) {
uint32_t child_id = leaf.get_item_ref_id(n);
CRASH_COND(child_id != results[test_count++]);
CRASH_COND(test_count > num_results);
}
}
#endif
#else
// not BVH_CONVEX_CULL_OPTIMIZED
// test children individually
for (int n = 0; n < leaf.num_items; n++) {
const BVHABB_CLASS &aabb = leaf.get_aabb(n);
if (aabb.intersects_convex_partial(r_params.hull)) {
uint32_t child_id = leaf.get_item_ref_id(n);
// register hit
_cull_hit(child_id, r_params);
// full up with results? exit early, no point in further testing
if (_cull_hits_full(r_params)) {
return false;
}
}
}
#endif // BVH_CONVEX_CULL_OPTIMIZED
} // if not fully within
} else {
for (int n = 0; n < tnode.num_children; n++) {
uint32_t child_id = tnode.children[n];
// add to the stack
CullConvexParams *child = ii.request();
// should always return valid child
child->node_id = child_id;
child->fully_within = ccp.fully_within;
}
}
} // while more nodes to pop
// true indicates results are not full
return true;
}


@@ -421,3 +421,31 @@ void update() {
}
#endif
}
void clear() {
_refs.clear();
_extra.clear();
_pairs.clear();
_nodes.clear();
_leaves.clear();
_active_refs.clear();
_current_active_ref = 0;
_cull_hits.clear();
for (int n = 0; n < NUM_TREES; n++) {
_root_node_id[n] = BVHCommon::INVALID;
}
// disallow zero leaf ids
// (as these ids are stored as negative numbers in the node)
uint32_t dummy_leaf_id;
_leaves.request(dummy_leaf_id);
}
bool is_empty() const {
for (int n = 0; n < NUM_TREES; n++) {
if (_root_node_id[n] != BVHCommon::INVALID)
return false;
}
return true;
}

core/math/bvh_simple.h (new file, 157 lines)

@@ -0,0 +1,157 @@
/*************************************************************************/
/* bvh_simple.h */
/*************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/*************************************************************************/
/* Copyright (c) 2007-2021 Juan Linietsky, Ariel Manzur. */
/* Copyright (c) 2014-2021 Godot Engine contributors (cf. AUTHORS.md). */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/*************************************************************************/
#ifndef BVH_SIMPLE_H
#define BVH_SIMPLE_H
#include "bvh_tree.h"
#include "dynamic_bvh.h"
#define GODOT_BVH_SIMPLE_TREE_CLASS BVH_Tree<T, 2, 256, false, true>
template <class T>
class BVH_SimpleT {
public:
struct ID {
ID() { _handle.set_invalid(); }
BVHHandle _handle;
public:
bool is_valid() const { return !_handle.is_invalid(); }
};
ID insert(const AABB &p_box, T *p_userdata) {
ID id;
id._handle = _tree.item_add(p_userdata, true, p_box, 0, false, 0, 0);
return id;
}
bool update(const ID &p_id, const AABB &p_box) {
return _tree.item_move(p_id._handle, p_box);
}
void remove(const ID &p_id) {
_tree.item_remove(p_id._handle);
}
void set_index(uint32_t p_index) { _index = p_index; }
uint32_t get_index() const { return _index; }
void clear() { _tree.clear(); }
bool is_empty() const { return _tree.is_empty(); }
void optimize_incremental(int passes) {
if (passes) {
_tree.incremental_optimize();
}
}
/* Discouraged, but works as a reference for how it must be used */
struct DefaultQueryResult {
virtual bool operator()(T *p_data) = 0; // return true if you want to continue the query
virtual ~DefaultQueryResult() {}
};
template <class QueryResult>
_FORCE_INLINE_ void aabb_query(const AABB &p_aabb, QueryResult &r_result) {
typename GODOT_BVH_SIMPLE_TREE_CLASS::CullParams params;
params.result_count_overall = 0;
params.result_max = 0;
params.result_array = nullptr;
params.subindex_array = nullptr;
params.mask = 0;
params.pairable_type = 0;
params.test_pairable_only = false;
params.abb.from(p_aabb);
_tree.cull_aabb(params, false, &r_result);
}
template <class QueryResult>
_FORCE_INLINE_ void convex_query(const Plane *p_planes, int p_plane_count, const Vector3 *p_points, int p_point_count, QueryResult &r_result) {
if (!p_plane_count) {
return;
}
Vector<Vector3> convex_points = Geometry3D::compute_convex_mesh_points(p_planes, p_plane_count);
if (convex_points.size() == 0) {
return;
}
typename GODOT_BVH_SIMPLE_TREE_CLASS::CullParams params;
params.result_count_overall = 0;
params.result_max = 0;
params.result_array = nullptr;
params.subindex_array = nullptr;
params.mask = 0;
params.pairable_type = 0;
params.hull.planes = p_planes;
params.hull.num_planes = p_plane_count;
params.hull.points = &convex_points[0];
params.hull.num_points = convex_points.size();
_tree.cull_convex(params, false, &r_result);
}
template <class QueryResult>
_FORCE_INLINE_ void ray_query(const Vector3 &p_from, const Vector3 &p_to, QueryResult &r_result) {
typename GODOT_BVH_SIMPLE_TREE_CLASS::CullParams params;
params.result_count_overall = 0;
params.result_max = 0;
params.result_array = nullptr;
params.subindex_array = nullptr;
params.mask = 0;
params.pairable_type = 0;
params.segment.from = p_from;
params.segment.to = p_to;
_tree.cull_segment(params, false, &r_result);
}
private:
GODOT_BVH_SIMPLE_TREE_CLASS _tree;
uint32_t _index = 0;
};
// Define this if you want to compare the performance of the old dynamic BVH.
// There is no runtime switching, only this compile time switching...
//#define GODOT_USE_OLD_DYNAMIC_BVH
#ifdef GODOT_USE_OLD_DYNAMIC_BVH
class BVH_Simple : public DynamicBVH {
};
#else
class BVH_Simple : public BVH_SimpleT<void> {
};
#endif
#endif
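
A hypothetical end-to-end usage of the wrapper above (not part of this commit; it assumes the usual Godot AABB / Vector3 / LocalVector types are available and follows the DefaultQueryResult convention):

// Collect every userdata pointer that overlaps the query box.
struct GatherHits {
	LocalVector<void *> hits;
	bool operator()(void *p_data) {
		hits.push_back(p_data);
		return true; // keep querying until exhausted
	}
};

void bvh_simple_usage_example() {
	BVH_Simple bvh;

	int dummy_object = 0;
	BVH_Simple::ID id = bvh.insert(AABB(Vector3(0, 0, 0), Vector3(1, 1, 1)), &dummy_object);

	GatherHits result;
	bvh.aabb_query(AABB(Vector3(-1, -1, -1), Vector3(4, 4, 4)), result);
	// result.hits should now contain &dummy_object.

	bvh.update(id, AABB(Vector3(5, 0, 0), Vector3(1, 1, 1)));
	bvh.remove(id);
}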


@@ -15,10 +15,10 @@ struct ItemRef {
// as this is less used, which keeps the cache better
struct ItemExtra {
uint32_t last_updated_tick;
uint32_t pairable;
uint32_t pairable_mask;
uint32_t pairable_type;
int32_t subindex;
// the active reference is a separate list of which references


@@ -148,7 +148,7 @@ public:
}
};
template <class T, int MAX_CHILDREN, int MAX_ITEMS, bool USE_PAIRS = false, class Bounds = AABB, class Point = Vector3>
template <class T, int MAX_CHILDREN, int MAX_ITEMS, bool USE_PAIRS = false, bool USE_SIMPLE = false, class Bounds = AABB, class Point = Vector3>
class BVH_Tree {
friend class BVH;
@@ -157,14 +157,7 @@ class BVH_Tree {
public:
BVH_Tree() {
for (int n = 0; n < NUM_TREES; n++) {
_root_node_id[n] = BVHCommon::INVALID;
}
// disallow zero leaf ids
// (as these ids are stored as negative numbers in the node)
uint32_t dummy_leaf_id;
_leaves.request(dummy_leaf_id);
clear();
}
private:


@@ -92,6 +92,18 @@ public:
freelist.push_back(p_id);
_used_size--;
}
void clear() {
list.clear();
freelist.clear();
_used_size = 0;
}
void reset() {
list.reset();
freelist.reset();
_used_size = 0;
}
};
#endif // POOLED_LIST_H


@@ -35,7 +35,7 @@
#include "godot_collision_object_3d.h"
#include "core/math/aabb.h"
#include "core/math/dynamic_bvh.h"
#include "core/math/bvh_simple.h"
#include "core/math/vector3.h"
#include "core/templates/local_vector.h"
#include "core/templates/set.h"
@@ -56,7 +56,7 @@ class GodotSoftBody3D : public GodotCollisionObject3D {
Vector3 n; // Normal
real_t area = 0.0; // Area
real_t im = 0.0; // 1/mass
DynamicBVH::ID leaf; // Leaf data
BVH_Simple::ID leaf; // Leaf data
uint32_t index = 0;
};
@@ -74,7 +74,7 @@ class GodotSoftBody3D : public GodotCollisionObject3D {
Node *n[3] = { nullptr, nullptr, nullptr }; // Node pointers
Vector3 normal; // Normal
real_t ra = 0.0; // Rest area
DynamicBVH::ID leaf; // Leaf data
BVH_Simple::ID leaf; // Leaf data
uint32_t index = 0;
};
@@ -82,8 +82,8 @@ class GodotSoftBody3D : public GodotCollisionObject3D {
LocalVector<Link> links;
LocalVector<Face> faces;
DynamicBVH node_tree;
DynamicBVH face_tree;
BVH_Simple node_tree;
BVH_Simple face_tree;
LocalVector<uint32_t> map_visual_to_physics;


@@ -1707,7 +1707,7 @@ void RendererSceneCull::_unpair_instance(Instance *p_instance) {
p_instance->scenario->indexers[Scenario::INDEXER_VOLUMES].remove(p_instance->indexer_id);
}
p_instance->indexer_id = DynamicBVH::ID();
p_instance->indexer_id = BVH_Simple::ID();
//replace this by last
int32_t swap_with_index = p_instance->scenario->instance_data.size() - 1;


@@ -35,7 +35,7 @@
#include "core/templates/pass_func.h"
#include "servers/rendering/renderer_compositor.h"
#include "core/math/dynamic_bvh.h"
#include "core/math/bvh_simple.h"
#include "core/math/geometry_3d.h"
#include "core/math/octree.h"
#include "core/os/semaphore.h"
@@ -314,7 +314,7 @@ public:
INDEXER_MAX
};
DynamicBVH indexers[INDEXER_MAX];
BVH_Simple indexers[INDEXER_MAX];
RID self;
@@ -433,7 +433,7 @@ public:
RID self;
//scenario stuff
DynamicBVH::ID indexer_id;
BVH_Simple::ID indexer_id;
int32_t array_index;
int32_t visibility_index = -1;
float visibility_range_begin;
@@ -717,8 +717,8 @@ public:
Instance *instance = nullptr;
PagedAllocator<InstancePair> *pair_allocator = nullptr;
SelfList<InstancePair>::List pairs_found;
DynamicBVH *bvh = nullptr;
DynamicBVH *bvh2 = nullptr; //some may need to cull in two
BVH_Simple *bvh = nullptr;
BVH_Simple *bvh2 = nullptr; //some may need to cull in two
uint32_t pair_mask;
uint64_t pair_pass;