Bullet Collision Detection & Physics Library
btQuantizedBvh.cpp
/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2006 Erwin Coumans http://continuousphysics.com/Bullet/

This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/

#include "btQuantizedBvh.h"

#include "LinearMath/btAabbUtil2.h"
#include "LinearMath/btIDebugDraw.h"
#include "LinearMath/btSerializer.h"

#define RAYAABB2

btQuantizedBvh::btQuantizedBvh() :
    m_bulletVersion(BT_BULLET_VERSION),
    m_useQuantization(false),
    //m_traversalMode(TRAVERSAL_STACKLESS_CACHE_FRIENDLY)
    m_traversalMode(TRAVERSAL_STACKLESS)
    //m_traversalMode(TRAVERSAL_RECURSIVE)
    ,m_subtreeHeaderCount(0) //PCK: add this line
{
    m_bvhAabbMin.setValue(-SIMD_INFINITY,-SIMD_INFINITY,-SIMD_INFINITY);
    m_bvhAabbMax.setValue(SIMD_INFINITY,SIMD_INFINITY,SIMD_INFINITY);
}

void btQuantizedBvh::buildInternal()
{
    ///assumes that caller filled in the m_quantizedLeafNodes
    m_useQuantization = true;
    int numLeafNodes = 0;

    if (m_useQuantization)
    {
        //now we have an array of leafnodes in m_leafNodes
        numLeafNodes = m_quantizedLeafNodes.size();

        m_quantizedContiguousNodes.resize(2*numLeafNodes);
    }

    m_curNodeIndex = 0;

    buildTree(0,numLeafNodes);

    ///if the entire tree is smaller than the subtree size, we need to create a header info for the tree
    if (m_useQuantization && !m_SubtreeHeaders.size())
    {
        btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
        subtree.setAabbFromQuantizeNode(m_quantizedContiguousNodes[0]);
        subtree.m_rootNodeIndex = 0;
        subtree.m_subtreeSize = m_quantizedContiguousNodes[0].isLeafNode() ? 1 : m_quantizedContiguousNodes[0].getEscapeIndex();
    }

    //PCK: update the copy of the size
    m_subtreeHeaderCount = m_SubtreeHeaders.size();

    //PCK: clear m_quantizedLeafNodes and m_leafNodes, they are temporary
    m_quantizedLeafNodes.clear();
    m_leafNodes.clear();
}
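
/*
Usage sketch (not part of the library): one way a caller could drive buildInternal().
'meshAabbMin/Max', 'numTriangles' and 'triAabbMin/Max' are assumed caller-side names;
the production code path that does this for triangle meshes lives in btOptimizedBvh.

    btQuantizedBvh bvh;
    bvh.setQuantizationValues(meshAabbMin,meshAabbMax);    //must be called before quantizing leaves
    for (int i=0;i<numTriangles;i++)
    {
        btQuantizedBvhNode leaf;
        bvh.quantize(&leaf.m_quantizedAabbMin[0],triAabbMin[i],0);
        bvh.quantize(&leaf.m_quantizedAabbMax[0],triAabbMax[i],1);
        leaf.m_escapeIndexOrTriangleIndex = i;    //leaf: triangle index i in part 0 (see getTriangleIndex/getPartId packing)
        bvh.getLeafNodeArray().push_back(leaf);
    }
    bvh.buildInternal();    //consumes the leaf array and fills m_quantizedContiguousNodes
*/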


#ifdef DEBUG_PATCH_COLORS
btVector3 color[4]=
{
    btVector3(1,0,0),
    btVector3(0,1,0),
    btVector3(0,0,1),
    btVector3(0,1,1)
};
#endif //DEBUG_PATCH_COLORS


void btQuantizedBvh::setQuantizationValues(const btVector3& bvhAabbMin,const btVector3& bvhAabbMax,btScalar quantizationMargin)
{
    //enlarge the AABB to avoid division by zero when initializing the quantization values
    btVector3 clampValue(quantizationMargin,quantizationMargin,quantizationMargin);
    m_bvhAabbMin = bvhAabbMin - clampValue;
    m_bvhAabbMax = bvhAabbMax + clampValue;
    btVector3 aabbSize = m_bvhAabbMax - m_bvhAabbMin;
    m_bvhQuantization = btVector3(btScalar(65533.0),btScalar(65533.0),btScalar(65533.0)) / aabbSize;

    m_useQuantization = true;

    {
        unsigned short vecIn[3];
        btVector3 v;
        {
            quantize(vecIn,m_bvhAabbMin,false);
            v = unQuantize(vecIn);
            m_bvhAabbMin.setMin(v-clampValue);
        }
        aabbSize = m_bvhAabbMax - m_bvhAabbMin;
        m_bvhQuantization = btVector3(btScalar(65533.0),btScalar(65533.0),btScalar(65533.0)) / aabbSize;
        {
            quantize(vecIn,m_bvhAabbMax,true);
            v = unQuantize(vecIn);
            m_bvhAabbMax.setMax(v+clampValue);
        }
        aabbSize = m_bvhAabbMax - m_bvhAabbMin;
        m_bvhQuantization = btVector3(btScalar(65533.0),btScalar(65533.0),btScalar(65533.0)) / aabbSize;
    }
}
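
/*
Worked example of the quantization set up above (illustrative numbers, single precision assumed):
with bvhAabbMin=(-100,-100,-100), bvhAabbMax=(100,100,100) and the default 1.0 margin,
aabbSize=(202,202,202) and m_bvhQuantization is 65533/202, roughly 324.42 per axis, so quantize()
maps x=-101 to 0, x=0 to 32766 and x=+101 to 65533. The two quantize/unQuantize round-trips
re-derive m_bvhQuantization so the final 16-bit grid still conservatively contains the original AABB
after rounding.
*/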



btQuantizedBvh::~btQuantizedBvh()
{
}

#ifdef DEBUG_TREE_BUILDING
int gStackDepth = 0;
int gMaxStackDepth = 0;
#endif //DEBUG_TREE_BUILDING

void btQuantizedBvh::buildTree(int startIndex,int endIndex)
{
#ifdef DEBUG_TREE_BUILDING
    gStackDepth++;
    if (gStackDepth > gMaxStackDepth)
        gMaxStackDepth = gStackDepth;
#endif //DEBUG_TREE_BUILDING

    int splitAxis, splitIndex, i;
    int numIndices = endIndex-startIndex;
    int curIndex = m_curNodeIndex;

    btAssert(numIndices>0);

    if (numIndices==1)
    {
#ifdef DEBUG_TREE_BUILDING
        gStackDepth--;
#endif //DEBUG_TREE_BUILDING

        assignInternalNodeFromLeafNode(m_curNodeIndex,startIndex);

        m_curNodeIndex++;
        return;
    }
    //calculate the best splitting axis and where to split it. Sort the incoming 'leafNodes' array within range 'startIndex/endIndex'.

    splitAxis = calcSplittingAxis(startIndex,endIndex);

    splitIndex = sortAndCalcSplittingIndex(startIndex,endIndex,splitAxis);

    int internalNodeIndex = m_curNodeIndex;

    //set the min aabb to 'inf' or a max value, and set the max aabb to a -inf/minimum value.
    //the aabb will be expanded during buildTree/mergeInternalNodeAabb with actual node values
    setInternalNodeAabbMin(m_curNodeIndex,m_bvhAabbMax);//can't use btVector3(SIMD_INFINITY,SIMD_INFINITY,SIMD_INFINITY) because of quantization
    setInternalNodeAabbMax(m_curNodeIndex,m_bvhAabbMin);//can't use btVector3(-SIMD_INFINITY,-SIMD_INFINITY,-SIMD_INFINITY) because of quantization

    for (i=startIndex;i<endIndex;i++)
    {
        mergeInternalNodeAabb(m_curNodeIndex,getAabbMin(i),getAabbMax(i));
    }

    m_curNodeIndex++;

    //internalNode->m_escapeIndex;

    int leftChildNodexIndex = m_curNodeIndex;

    //build left child tree
    buildTree(startIndex,splitIndex);

    int rightChildNodexIndex = m_curNodeIndex;
    //build right child tree
    buildTree(splitIndex,endIndex);

#ifdef DEBUG_TREE_BUILDING
    gStackDepth--;
#endif //DEBUG_TREE_BUILDING

    int escapeIndex = m_curNodeIndex - curIndex;

    if (m_useQuantization)
    {
        //escapeIndex is the number of nodes of this subtree
        const int sizeQuantizedNode = sizeof(btQuantizedBvhNode);
        const int treeSizeInBytes = escapeIndex * sizeQuantizedNode;
        if (treeSizeInBytes > MAX_SUBTREE_SIZE_IN_BYTES)
        {
            updateSubtreeHeaders(leftChildNodexIndex,rightChildNodexIndex);
        }
    } else
    {

    }

    setInternalNodeEscapeIndex(internalNodeIndex,escapeIndex);
}

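/*
Layout sketch: buildTree() stores the nodes in depth-first order, so the escape index is simply
the total node count of a subtree. For three leaves A,B,C split as {A,B}|{C} the array becomes

    index 0: internal, escapeIndex=5 (whole tree)
    index 1: internal, escapeIndex=3 (subtree {A,B})
    index 2: leaf A
    index 3: leaf B
    index 4: leaf C

A stackless walker that rejects the node at index 1 jumps ahead by its escape index (1+3=4) and
lands directly on leaf C, without needing a traversal stack.
*/
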
void btQuantizedBvh::updateSubtreeHeaders(int leftChildNodexIndex,int rightChildNodexIndex)
{
    btAssert(m_useQuantization);

    btQuantizedBvhNode& leftChildNode = m_quantizedContiguousNodes[leftChildNodexIndex];
    int leftSubTreeSize = leftChildNode.isLeafNode() ? 1 : leftChildNode.getEscapeIndex();
    int leftSubTreeSizeInBytes = leftSubTreeSize * static_cast<int>(sizeof(btQuantizedBvhNode));

    btQuantizedBvhNode& rightChildNode = m_quantizedContiguousNodes[rightChildNodexIndex];
    int rightSubTreeSize = rightChildNode.isLeafNode() ? 1 : rightChildNode.getEscapeIndex();
    int rightSubTreeSizeInBytes = rightSubTreeSize * static_cast<int>(sizeof(btQuantizedBvhNode));

    if(leftSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES)
    {
        btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
        subtree.setAabbFromQuantizeNode(leftChildNode);
        subtree.m_rootNodeIndex = leftChildNodexIndex;
        subtree.m_subtreeSize = leftSubTreeSize;
    }

    if(rightSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES)
    {
        btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
        subtree.setAabbFromQuantizeNode(rightChildNode);
        subtree.m_rootNodeIndex = rightChildNodexIndex;
        subtree.m_subtreeSize = rightSubTreeSize;
    }

    //PCK: update the copy of the size
    m_subtreeHeaderCount = m_SubtreeHeaders.size();
}


int btQuantizedBvh::sortAndCalcSplittingIndex(int startIndex,int endIndex,int splitAxis)
{
    int i;
    int splitIndex = startIndex;
    int numIndices = endIndex - startIndex;
    btScalar splitValue;

    btVector3 means(btScalar(0.),btScalar(0.),btScalar(0.));
    for (i=startIndex;i<endIndex;i++)
    {
        btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
        means+=center;
    }
    means *= (btScalar(1.)/(btScalar)numIndices);

    splitValue = means[splitAxis];

    //sort leafNodes so that all values larger than splitValue come first, and smaller values start from 'splitIndex'.
    for (i=startIndex;i<endIndex;i++)
    {
        btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
        if (center[splitAxis] > splitValue)
        {
            //swap
            swapLeafNodes(i,splitIndex);
            splitIndex++;
        }
    }

    //if the splitIndex causes unbalanced trees, fix this by using the center in between startIndex and endIndex
    //otherwise the tree-building might fail due to stack-overflows in certain cases.
    //unbalanced1 is unsafe: it can cause stack overflows
    //bool unbalanced1 = ((splitIndex==startIndex) || (splitIndex == (endIndex-1)));

    //unbalanced2 should work too: always use center (perfectly balanced trees)
    //bool unbalanced2 = true;

    //this should be safe too:
    int rangeBalancedIndices = numIndices/3;
    bool unbalanced = ((splitIndex<=(startIndex+rangeBalancedIndices)) || (splitIndex >=(endIndex-1-rangeBalancedIndices)));

    if (unbalanced)
    {
        splitIndex = startIndex+ (numIndices>>1);
    }

    bool unbal = (splitIndex==startIndex) || (splitIndex == (endIndex));
    (void)unbal;
    btAssert(!unbal);

    return splitIndex;
}
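
/*
Worked example of the balancing fixup above: take 4 leaves whose centers along the split axis are
1, 2, 3 and 100. The mean is 26.5, so only the leaf at 100 is swapped to the front and splitIndex
ends up at startIndex+1. With numIndices=4, rangeBalancedIndices=1, so splitIndex<=startIndex+1
triggers the 'unbalanced' branch and splitIndex is reset to startIndex+2, a median split.
*/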

int btQuantizedBvh::calcSplittingAxis(int startIndex,int endIndex)
{
    int i;

    btVector3 means(btScalar(0.),btScalar(0.),btScalar(0.));
    btVector3 variance(btScalar(0.),btScalar(0.),btScalar(0.));
    int numIndices = endIndex-startIndex;

    for (i=startIndex;i<endIndex;i++)
    {
        btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
        means+=center;
    }
    means *= (btScalar(1.)/(btScalar)numIndices);

    for (i=startIndex;i<endIndex;i++)
    {
        btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
        btVector3 diff2 = center-means;
        diff2 = diff2 * diff2;
        variance += diff2;
    }
    variance *= (btScalar(1.)/ ((btScalar)numIndices-1) );

    return variance.maxAxis();
}


void btQuantizedBvh::reportAabbOverlappingNodex(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const
{
    //either choose recursive traversal (walkTree) or stackless (walkStacklessTree)

    if (m_useQuantization)
    {
        ///quantize query AABB
        unsigned short int quantizedQueryAabbMin[3];
        unsigned short int quantizedQueryAabbMax[3];
        quantizeWithClamp(quantizedQueryAabbMin,aabbMin,0);
        quantizeWithClamp(quantizedQueryAabbMax,aabbMax,1);

        switch (m_traversalMode)
        {
        case TRAVERSAL_STACKLESS:
            walkStacklessQuantizedTree(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax,0,m_curNodeIndex);
            break;
        case TRAVERSAL_STACKLESS_CACHE_FRIENDLY:
            walkStacklessQuantizedTreeCacheFriendly(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);
            break;
        case TRAVERSAL_RECURSIVE:
            {
                const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[0];
                walkRecursiveQuantizedTreeAgainstQueryAabb(rootNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);
            }
            break;
        default:
            //unsupported
            btAssert(0);
        }
    } else
    {
        walkStacklessTree(nodeCallback,aabbMin,aabbMax);
    }
}
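
/*
Usage sketch (hypothetical caller code; 'MyOverlapCallback' is an assumed name, only the
btNodeOverlapCallback interface is real API):

    struct MyOverlapCallback : public btNodeOverlapCallback
    {
        virtual void processNode(int subPart, int triangleIndex)
        {
            printf("overlapping leaf: part %d triangle %d\n",subPart,triangleIndex);
        }
    };

    MyOverlapCallback callback;
    bvh->reportAabbOverlappingNodex(&callback,queryAabbMin,queryAabbMax);
*/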


int maxIterations = 0;

void btQuantizedBvh::walkStacklessTree(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const
{
    btAssert(!m_useQuantization);

    const btOptimizedBvhNode* rootNode = &m_contiguousNodes[0];
    int escapeIndex, curIndex = 0;
    int walkIterations = 0;
    bool isLeafNode;
    //PCK: unsigned instead of bool
    unsigned aabbOverlap;

    while (curIndex < m_curNodeIndex)
    {
        //catch bugs in tree data
        btAssert (walkIterations < m_curNodeIndex);

        walkIterations++;
        aabbOverlap = TestAabbAgainstAabb2(aabbMin,aabbMax,rootNode->m_aabbMinOrg,rootNode->m_aabbMaxOrg);
        isLeafNode = rootNode->m_escapeIndex == -1;

        //PCK: unsigned instead of bool
        if (isLeafNode && (aabbOverlap != 0))
        {
            nodeCallback->processNode(rootNode->m_subPart,rootNode->m_triangleIndex);
        }

        //PCK: unsigned instead of bool
        if ((aabbOverlap != 0) || isLeafNode)
        {
            rootNode++;
            curIndex++;
        } else
        {
            escapeIndex = rootNode->m_escapeIndex;
            rootNode += escapeIndex;
            curIndex += escapeIndex;
        }
    }
    if (maxIterations < walkIterations)
        maxIterations = walkIterations;
}

/*
///this was the original recursive traversal, before we optimized for stackless traversal
void btQuantizedBvh::walkTree(btOptimizedBvhNode* rootNode,btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const
{
    bool isLeafNode, aabbOverlap = TestAabbAgainstAabb2(aabbMin,aabbMax,rootNode->m_aabbMin,rootNode->m_aabbMax);
    if (aabbOverlap)
    {
        isLeafNode = (!rootNode->m_leftChild && !rootNode->m_rightChild);
        if (isLeafNode)
        {
            nodeCallback->processNode(rootNode);
        } else
        {
            walkTree(rootNode->m_leftChild,nodeCallback,aabbMin,aabbMax);
            walkTree(rootNode->m_rightChild,nodeCallback,aabbMin,aabbMax);
        }
    }
}
*/

void btQuantizedBvh::walkRecursiveQuantizedTreeAgainstQueryAabb(const btQuantizedBvhNode* currentNode,btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const
{
    btAssert(m_useQuantization);

    bool isLeafNode;
    //PCK: unsigned instead of bool
    unsigned aabbOverlap;

    //PCK: unsigned instead of bool
    aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,currentNode->m_quantizedAabbMin,currentNode->m_quantizedAabbMax);
    isLeafNode = currentNode->isLeafNode();

    //PCK: unsigned instead of bool
    if (aabbOverlap != 0)
    {
        if (isLeafNode)
        {
            nodeCallback->processNode(currentNode->getPartId(),currentNode->getTriangleIndex());
        } else
        {
            //process left and right children
            const btQuantizedBvhNode* leftChildNode = currentNode+1;
            walkRecursiveQuantizedTreeAgainstQueryAabb(leftChildNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);

            //the right child sits just past the left subtree: +1 if the left child is a leaf, else +escapeIndex
            const btQuantizedBvhNode* rightChildNode = leftChildNode->isLeafNode() ? leftChildNode+1 : leftChildNode+leftChildNode->getEscapeIndex();
            walkRecursiveQuantizedTreeAgainstQueryAabb(rightChildNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);
        }
    }
}


void btQuantizedBvh::walkStacklessTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex,int endNodeIndex) const
{
    btAssert(!m_useQuantization);

    const btOptimizedBvhNode* rootNode = &m_contiguousNodes[0];
    int escapeIndex, curIndex = 0;
    int walkIterations = 0;
    bool isLeafNode;
    //PCK: unsigned instead of bool
    unsigned aabbOverlap=0;
    unsigned rayBoxOverlap=0;
    btScalar lambda_max = 1.0;

    /* Quick pruning by quantized box */
    btVector3 rayAabbMin = raySource;
    btVector3 rayAabbMax = raySource;
    rayAabbMin.setMin(rayTarget);
    rayAabbMax.setMax(rayTarget);

    /* Add box cast extents to bounding box */
    rayAabbMin += aabbMin;
    rayAabbMax += aabbMax;

#ifdef RAYAABB2
    btVector3 rayDir = (rayTarget-raySource);
    rayDir.normalize ();
    lambda_max = rayDir.dot(rayTarget-raySource);
    ///what about division by zero? --> just set rayDirection[i] to 1.0
    btVector3 rayDirectionInverse;
    rayDirectionInverse[0] = rayDir[0] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[0];
    rayDirectionInverse[1] = rayDir[1] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[1];
    rayDirectionInverse[2] = rayDir[2] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[2];
    unsigned int sign[3] = { rayDirectionInverse[0] < 0.0, rayDirectionInverse[1] < 0.0, rayDirectionInverse[2] < 0.0};
#endif

    btVector3 bounds[2];

    while (curIndex < m_curNodeIndex)
    {
        btScalar param = 1.0;
        //catch bugs in tree data
        btAssert (walkIterations < m_curNodeIndex);

        walkIterations++;

        bounds[0] = rootNode->m_aabbMinOrg;
        bounds[1] = rootNode->m_aabbMaxOrg;
        /* Add box cast extents */
        bounds[0] -= aabbMax;
        bounds[1] -= aabbMin;

        aabbOverlap = TestAabbAgainstAabb2(rayAabbMin,rayAabbMax,rootNode->m_aabbMinOrg,rootNode->m_aabbMaxOrg);
        //perhaps profile if it is worth doing the aabbOverlap test first

#ifdef RAYAABB2
        rayBoxOverlap = aabbOverlap ? btRayAabb2 (raySource, rayDirectionInverse, sign, bounds, param, 0.0f, lambda_max) : false;

#else
        btVector3 normal;
        rayBoxOverlap = btRayAabb(raySource, rayTarget,bounds[0],bounds[1],param, normal);
#endif

        isLeafNode = rootNode->m_escapeIndex == -1;

        //PCK: unsigned instead of bool
        if (isLeafNode && (rayBoxOverlap != 0))
        {
            nodeCallback->processNode(rootNode->m_subPart,rootNode->m_triangleIndex);
        }

        //PCK: unsigned instead of bool
        if ((rayBoxOverlap != 0) || isLeafNode)
        {
            rootNode++;
            curIndex++;
        } else
        {
            escapeIndex = rootNode->m_escapeIndex;
            rootNode += escapeIndex;
            curIndex += escapeIndex;
        }
    }
    if (maxIterations < walkIterations)
        maxIterations = walkIterations;
}


void btQuantizedBvh::walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex,int endNodeIndex) const
{
    btAssert(m_useQuantization);

    int curIndex = startNodeIndex;
    int walkIterations = 0;
    int subTreeSize = endNodeIndex - startNodeIndex;
    (void)subTreeSize;

    const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[startNodeIndex];
    int escapeIndex;

    bool isLeafNode;
    //PCK: unsigned instead of bool
    unsigned boxBoxOverlap = 0;
    unsigned rayBoxOverlap = 0;

    btScalar lambda_max = 1.0;

#ifdef RAYAABB2
    btVector3 rayDirection = (rayTarget-raySource);
    rayDirection.normalize ();
    lambda_max = rayDirection.dot(rayTarget-raySource);
    ///what about division by zero? --> just set rayDirection[i] to 1.0
    rayDirection[0] = rayDirection[0] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDirection[0];
    rayDirection[1] = rayDirection[1] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDirection[1];
    rayDirection[2] = rayDirection[2] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDirection[2];
    unsigned int sign[3] = { rayDirection[0] < 0.0, rayDirection[1] < 0.0, rayDirection[2] < 0.0};
#endif

    /* Quick pruning by quantized box */
    btVector3 rayAabbMin = raySource;
    btVector3 rayAabbMax = raySource;
    rayAabbMin.setMin(rayTarget);
    rayAabbMax.setMax(rayTarget);

    /* Add box cast extents to bounding box */
    rayAabbMin += aabbMin;
    rayAabbMax += aabbMax;

    unsigned short int quantizedQueryAabbMin[3];
    unsigned short int quantizedQueryAabbMax[3];
    quantizeWithClamp(quantizedQueryAabbMin,rayAabbMin,0);
    quantizeWithClamp(quantizedQueryAabbMax,rayAabbMax,1);

    while (curIndex < endNodeIndex)
    {

//#define VISUALLY_ANALYZE_BVH 1
#ifdef VISUALLY_ANALYZE_BVH
        //some code snippet to debugDraw aabb, to visually analyze bvh structure
        static int drawPatch = 0;
        //need some global access to a debugDrawer
        extern btIDebugDraw* debugDrawerPtr;
        if (curIndex==drawPatch)
        {
            btVector3 aabbMin,aabbMax;
            aabbMin = unQuantize(rootNode->m_quantizedAabbMin);
            aabbMax = unQuantize(rootNode->m_quantizedAabbMax);
            btVector3 color(1,0,0);
            debugDrawerPtr->drawAabb(aabbMin,aabbMax,color);
        }
#endif//VISUALLY_ANALYZE_BVH

        //catch bugs in tree data
        btAssert (walkIterations < subTreeSize);

        walkIterations++;
        //PCK: unsigned instead of bool
        // only interested if this is closer than any previous hit
        btScalar param = 1.0;
        rayBoxOverlap = 0;
        boxBoxOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,rootNode->m_quantizedAabbMin,rootNode->m_quantizedAabbMax);
        isLeafNode = rootNode->isLeafNode();
        if (boxBoxOverlap)
        {
            btVector3 bounds[2];
            bounds[0] = unQuantize(rootNode->m_quantizedAabbMin);
            bounds[1] = unQuantize(rootNode->m_quantizedAabbMax);
            /* Add box cast extents */
            bounds[0] -= aabbMax;
            bounds[1] -= aabbMin;
            btVector3 normal;
#if 0
            bool ra2 = btRayAabb2 (raySource, rayDirection, sign, bounds, param, 0.0, lambda_max);
            bool ra = btRayAabb (raySource, rayTarget, bounds[0], bounds[1], param, normal);
            if (ra2 != ra)
            {
                printf("functions don't match\n");
            }
#endif
#ifdef RAYAABB2
            ///careful with this check: need to check division by zero (above) and fix the unQuantize method
            ///thanks Joerg/hiker for the reproduction case!
            ///http://www.bulletphysics.com/Bullet/phpBB3/viewtopic.php?f=9&t=1858
            //BT_PROFILE("btRayAabb2");
            rayBoxOverlap = btRayAabb2 (raySource, rayDirection, sign, bounds, param, 0.0f, lambda_max);

#else
            rayBoxOverlap = true;//btRayAabb(raySource, rayTarget, bounds[0], bounds[1], param, normal);
#endif
        }

        if (isLeafNode && rayBoxOverlap)
        {
            nodeCallback->processNode(rootNode->getPartId(),rootNode->getTriangleIndex());
        }

        //PCK: unsigned instead of bool
        if ((rayBoxOverlap != 0) || isLeafNode)
        {
            rootNode++;
            curIndex++;
        } else
        {
            escapeIndex = rootNode->getEscapeIndex();
            rootNode += escapeIndex;
            curIndex += escapeIndex;
        }
    }
    if (maxIterations < walkIterations)
        maxIterations = walkIterations;
}

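/*
Sketch of the RAYAABB2 precomputation used above: btRayAabb2 is a 'slab' test that takes the
per-axis inverse ray direction plus a sign array selecting the near/far bound for each axis.
For example, a ray direction (1,0,-1) normalizes to roughly (0.707,0,-0.707); the inverse becomes
(1.414, BT_LARGE_FLOAT, -1.414) (zero components are replaced to avoid dividing by zero) and
sign = {0,0,1}. Each node visit then costs a few multiplies and comparisons instead of a full
ray/box clip like btRayAabb.
*/
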
void btQuantizedBvh::walkStacklessQuantizedTree(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax,int startNodeIndex,int endNodeIndex) const
{
    btAssert(m_useQuantization);

    int curIndex = startNodeIndex;
    int walkIterations = 0;
    int subTreeSize = endNodeIndex - startNodeIndex;
    (void)subTreeSize;

    const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[startNodeIndex];
    int escapeIndex;

    bool isLeafNode;
    //PCK: unsigned instead of bool
    unsigned aabbOverlap;

    while (curIndex < endNodeIndex)
    {

//#define VISUALLY_ANALYZE_BVH 1
#ifdef VISUALLY_ANALYZE_BVH
        //some code snippet to debugDraw aabb, to visually analyze bvh structure
        static int drawPatch = 0;
        //need some global access to a debugDrawer
        extern btIDebugDraw* debugDrawerPtr;
        if (curIndex==drawPatch)
        {
            btVector3 aabbMin,aabbMax;
            aabbMin = unQuantize(rootNode->m_quantizedAabbMin);
            aabbMax = unQuantize(rootNode->m_quantizedAabbMax);
            btVector3 color(1,0,0);
            debugDrawerPtr->drawAabb(aabbMin,aabbMax,color);
        }
#endif//VISUALLY_ANALYZE_BVH

        //catch bugs in tree data
        btAssert (walkIterations < subTreeSize);

        walkIterations++;
        //PCK: unsigned instead of bool
        aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,rootNode->m_quantizedAabbMin,rootNode->m_quantizedAabbMax);
        isLeafNode = rootNode->isLeafNode();

        if (isLeafNode && aabbOverlap)
        {
            nodeCallback->processNode(rootNode->getPartId(),rootNode->getTriangleIndex());
        }

        //PCK: unsigned instead of bool
        if ((aabbOverlap != 0) || isLeafNode)
        {
            rootNode++;
            curIndex++;
        } else
        {
            escapeIndex = rootNode->getEscapeIndex();
            rootNode += escapeIndex;
            curIndex += escapeIndex;
        }
    }
    if (maxIterations < walkIterations)
        maxIterations = walkIterations;
}

//This traversal can be called from Playstation 3 SPU
void btQuantizedBvh::walkStacklessQuantizedTreeCacheFriendly(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const
{
    btAssert(m_useQuantization);

    int i;

    for (i=0;i<this->m_SubtreeHeaders.size();i++)
    {
        const btBvhSubtreeInfo& subtree = m_SubtreeHeaders[i];

        //PCK: unsigned instead of bool
        unsigned overlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,subtree.m_quantizedAabbMin,subtree.m_quantizedAabbMax);
        if (overlap != 0)
        {
            walkStacklessQuantizedTree(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax,
                subtree.m_rootNodeIndex,
                subtree.m_rootNodeIndex+subtree.m_subtreeSize);
        }
    }
}

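/*
Size note: with MAX_SUBTREE_SIZE_IN_BYTES = 2048 (see btQuantizedBvh.h) and 16-byte
btQuantizedBvhNode entries, each subtree header covers at most 2048/16 = 128 nodes, so a whole
subtree can be copied into the small local store of an SPU before it is walked.
*/
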
void btQuantizedBvh::reportRayOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget) const
{
    reportBoxCastOverlappingNodex(nodeCallback,raySource,rayTarget,btVector3(0,0,0),btVector3(0,0,0));
}

void btQuantizedBvh::reportBoxCastOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin,const btVector3& aabbMax) const
{
    //always use stackless

    if (m_useQuantization)
    {
        walkStacklessQuantizedTreeAgainstRay(nodeCallback, raySource, rayTarget, aabbMin, aabbMax, 0, m_curNodeIndex);
    }
    else
    {
        walkStacklessTreeAgainstRay(nodeCallback, raySource, rayTarget, aabbMin, aabbMax, 0, m_curNodeIndex);
    }
    /*
    {
        //recursive traversal
        btVector3 qaabbMin = raySource;
        btVector3 qaabbMax = raySource;
        qaabbMin.setMin(rayTarget);
        qaabbMax.setMax(rayTarget);
        qaabbMin += aabbMin;
        qaabbMax += aabbMax;
        reportAabbOverlappingNodex(nodeCallback,qaabbMin,qaabbMax);
    }
    */
}

void btQuantizedBvh::swapLeafNodes(int i,int splitIndex)
{
    if (m_useQuantization)
    {
        btQuantizedBvhNode tmp = m_quantizedLeafNodes[i];
        m_quantizedLeafNodes[i] = m_quantizedLeafNodes[splitIndex];
        m_quantizedLeafNodes[splitIndex] = tmp;
    } else
    {
        btOptimizedBvhNode tmp = m_leafNodes[i];
        m_leafNodes[i] = m_leafNodes[splitIndex];
        m_leafNodes[splitIndex] = tmp;
    }
}

void btQuantizedBvh::assignInternalNodeFromLeafNode(int internalNode,int leafNodeIndex)
{
    if (m_useQuantization)
    {
        m_quantizedContiguousNodes[internalNode] = m_quantizedLeafNodes[leafNodeIndex];
    } else
    {
        m_contiguousNodes[internalNode] = m_leafNodes[leafNodeIndex];
    }
}

//PCK: include
#include <new>

#if 0
//PCK: consts
static const unsigned BVH_ALIGNMENT = 16;
static const unsigned BVH_ALIGNMENT_MASK = BVH_ALIGNMENT-1;

static const unsigned BVH_ALIGNMENT_BLOCKS = 2;
#endif


unsigned int btQuantizedBvh::getAlignmentSerializationPadding()
{
    // I changed this to 0 since the extra padding is not needed or used.
    return 0;//BVH_ALIGNMENT_BLOCKS * BVH_ALIGNMENT;
}

unsigned btQuantizedBvh::calculateSerializeBufferSize() const
{
    unsigned baseSize = sizeof(btQuantizedBvh) + getAlignmentSerializationPadding();
    baseSize += sizeof(btBvhSubtreeInfo) * m_subtreeHeaderCount;
    if (m_useQuantization)
    {
        return baseSize + m_curNodeIndex * sizeof(btQuantizedBvhNode);
    }
    return baseSize + m_curNodeIndex * sizeof(btOptimizedBvhNode);
}

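/*
Worked example (illustrative; exact struct sizes are compiler/platform dependent): for a quantized
tree with m_curNodeIndex = 1000 nodes and 8 subtree headers, the buffer is
sizeof(btQuantizedBvh) + 0 bytes of padding + 8*sizeof(btBvhSubtreeInfo) + 1000*16 bytes,
i.e. roughly 16 KB, dominated by the 16-byte quantized nodes.
*/
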
bool btQuantizedBvh::serialize(void *o_alignedDataBuffer, unsigned /*i_dataBufferSize */, bool i_swapEndian) const
{
    btAssert(m_subtreeHeaderCount == m_SubtreeHeaders.size());
    m_subtreeHeaderCount = m_SubtreeHeaders.size();

/*  if (i_dataBufferSize < calculateSerializeBufferSize() || o_alignedDataBuffer == NULL || (((unsigned)o_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0))
    {
        ///check alignment for buffer?
        btAssert(0);
        return false;
    }
*/

    btQuantizedBvh *targetBvh = (btQuantizedBvh *)o_alignedDataBuffer;

    // construct the class so the virtual function table, etc. will be set up
    // Also, m_leafNodes and m_quantizedLeafNodes will be initialized to default values by the constructor
    new (targetBvh) btQuantizedBvh;

    if (i_swapEndian)
    {
        targetBvh->m_curNodeIndex = static_cast<int>(btSwapEndian(m_curNodeIndex));

        btSwapVector3Endian(m_bvhAabbMin,targetBvh->m_bvhAabbMin);
        btSwapVector3Endian(m_bvhAabbMax,targetBvh->m_bvhAabbMax);
        btSwapVector3Endian(m_bvhQuantization,targetBvh->m_bvhQuantization);

        targetBvh->m_traversalMode = (btTraversalMode)btSwapEndian(m_traversalMode);
        targetBvh->m_subtreeHeaderCount = static_cast<int>(btSwapEndian(m_subtreeHeaderCount));
    }
    else
    {
        targetBvh->m_curNodeIndex = m_curNodeIndex;
        targetBvh->m_bvhAabbMin = m_bvhAabbMin;
        targetBvh->m_bvhAabbMax = m_bvhAabbMax;
        targetBvh->m_bvhQuantization = m_bvhQuantization;
        targetBvh->m_traversalMode = m_traversalMode;
        targetBvh->m_subtreeHeaderCount = m_subtreeHeaderCount;
    }

    targetBvh->m_useQuantization = m_useQuantization;

    unsigned char *nodeData = (unsigned char *)targetBvh;
    nodeData += sizeof(btQuantizedBvh);

    unsigned sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
    nodeData += sizeToAdd;

    int nodeCount = m_curNodeIndex;

    if (m_useQuantization)
    {
        targetBvh->m_quantizedContiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);

        if (i_swapEndian)
        {
            for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
            {
                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0]);
                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1]);
                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2]);

                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0]);
                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1]);
                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2]);

                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = static_cast<int>(btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex));
            }
        }
        else
        {
            for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
            {
                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0];
                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1];
                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2];

                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0];
                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1];
                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2];

                targetBvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex;
            }
        }
        nodeData += sizeof(btQuantizedBvhNode) * nodeCount;

        // this clears the pointer in the member variable; it doesn't really do anything to the data
        // it does call the destructor on the contained objects, but they are all classes with no destructor defined
        // so the memory (which is not freed) is left alone
        targetBvh->m_quantizedContiguousNodes.initializeFromBuffer(NULL, 0, 0);
    }
    else
    {
        targetBvh->m_contiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);

        if (i_swapEndian)
        {
            for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
            {
                btSwapVector3Endian(m_contiguousNodes[nodeIndex].m_aabbMinOrg, targetBvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg);
                btSwapVector3Endian(m_contiguousNodes[nodeIndex].m_aabbMaxOrg, targetBvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg);

                targetBvh->m_contiguousNodes[nodeIndex].m_escapeIndex = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_escapeIndex));
                targetBvh->m_contiguousNodes[nodeIndex].m_subPart = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_subPart));
                targetBvh->m_contiguousNodes[nodeIndex].m_triangleIndex = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_triangleIndex));
            }
        }
        else
        {
            for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
            {
                targetBvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg = m_contiguousNodes[nodeIndex].m_aabbMinOrg;
                targetBvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg = m_contiguousNodes[nodeIndex].m_aabbMaxOrg;

                targetBvh->m_contiguousNodes[nodeIndex].m_escapeIndex = m_contiguousNodes[nodeIndex].m_escapeIndex;
                targetBvh->m_contiguousNodes[nodeIndex].m_subPart = m_contiguousNodes[nodeIndex].m_subPart;
                targetBvh->m_contiguousNodes[nodeIndex].m_triangleIndex = m_contiguousNodes[nodeIndex].m_triangleIndex;
            }
        }
        nodeData += sizeof(btOptimizedBvhNode) * nodeCount;

        // this clears the pointer in the member variable; it doesn't really do anything to the data
        // it does call the destructor on the contained objects, but they are all classes with no destructor defined
        // so the memory (which is not freed) is left alone
        targetBvh->m_contiguousNodes.initializeFromBuffer(NULL, 0, 0);
    }

    sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
    nodeData += sizeToAdd;

    // Now serialize the subtree headers
    targetBvh->m_SubtreeHeaders.initializeFromBuffer(nodeData, m_subtreeHeaderCount, m_subtreeHeaderCount);
    if (i_swapEndian)
    {
        for (int i = 0; i < m_subtreeHeaderCount; i++)
        {
            targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
            targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
            targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[2]);

            targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
            targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
            targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[2]);

            targetBvh->m_SubtreeHeaders[i].m_rootNodeIndex = static_cast<int>(btSwapEndian(m_SubtreeHeaders[i].m_rootNodeIndex));
            targetBvh->m_SubtreeHeaders[i].m_subtreeSize = static_cast<int>(btSwapEndian(m_SubtreeHeaders[i].m_subtreeSize));
        }
    }
    else
    {
        for (int i = 0; i < m_subtreeHeaderCount; i++)
        {
            targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = (m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
            targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = (m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
            targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = (m_SubtreeHeaders[i].m_quantizedAabbMin[2]);

            targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = (m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
            targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = (m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
            targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = (m_SubtreeHeaders[i].m_quantizedAabbMax[2]);

            targetBvh->m_SubtreeHeaders[i].m_rootNodeIndex = (m_SubtreeHeaders[i].m_rootNodeIndex);
            targetBvh->m_SubtreeHeaders[i].m_subtreeSize = (m_SubtreeHeaders[i].m_subtreeSize);

            // need to clear padding in destination buffer
            targetBvh->m_SubtreeHeaders[i].m_padding[0] = 0;
            targetBvh->m_SubtreeHeaders[i].m_padding[1] = 0;
            targetBvh->m_SubtreeHeaders[i].m_padding[2] = 0;
        }
    }
    nodeData += sizeof(btBvhSubtreeInfo) * m_subtreeHeaderCount;

    // this clears the pointer in the member variable; it doesn't really do anything to the data
    // it does call the destructor on the contained objects, but they are all classes with no destructor defined
    // so the memory (which is not freed) is left alone
    targetBvh->m_SubtreeHeaders.initializeFromBuffer(NULL, 0, 0);

    // this wipes the virtual function table pointer at the start of the buffer for the class
    *((void**)o_alignedDataBuffer) = NULL;

    return true;
}

btQuantizedBvh *btQuantizedBvh::deSerializeInPlace(void *i_alignedDataBuffer, unsigned int i_dataBufferSize, bool i_swapEndian)
{
    if (i_alignedDataBuffer == NULL)// || (((unsigned)i_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0))
    {
        return NULL;
    }
    btQuantizedBvh *bvh = (btQuantizedBvh *)i_alignedDataBuffer;

    if (i_swapEndian)
    {
        bvh->m_curNodeIndex = static_cast<int>(btSwapEndian(bvh->m_curNodeIndex));

        btUnSwapVector3Endian(bvh->m_bvhAabbMin);
        btUnSwapVector3Endian(bvh->m_bvhAabbMax);
        btUnSwapVector3Endian(bvh->m_bvhQuantization);

        bvh->m_traversalMode = (btTraversalMode)btSwapEndian(bvh->m_traversalMode);
        bvh->m_subtreeHeaderCount = static_cast<int>(btSwapEndian(bvh->m_subtreeHeaderCount));
    }

    unsigned int calculatedBufSize = bvh->calculateSerializeBufferSize();
    btAssert(calculatedBufSize <= i_dataBufferSize);

    if (calculatedBufSize > i_dataBufferSize)
    {
        return NULL;
    }

    unsigned char *nodeData = (unsigned char *)bvh;
    nodeData += sizeof(btQuantizedBvh);

    unsigned sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
    nodeData += sizeToAdd;

    int nodeCount = bvh->m_curNodeIndex;

    // Must call placement new to fill in virtual function table, etc, but we don't want to overwrite most data, so call a special version of the constructor
    // Also, m_leafNodes and m_quantizedLeafNodes will be initialized to default values by the constructor
    new (bvh) btQuantizedBvh(*bvh, false);

    if (bvh->m_useQuantization)
    {
        bvh->m_quantizedContiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);

        if (i_swapEndian)
        {
            for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
            {
                bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0]);
                bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1]);
                bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2]);

                bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0]);
                bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1]);
                bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2]);

                bvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = static_cast<int>(btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex));
            }
        }
        nodeData += sizeof(btQuantizedBvhNode) * nodeCount;
    }
    else
    {
        bvh->m_contiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);

        if (i_swapEndian)
        {
            for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
            {
                btUnSwapVector3Endian(bvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg);
                btUnSwapVector3Endian(bvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg);

                bvh->m_contiguousNodes[nodeIndex].m_escapeIndex = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_escapeIndex));
                bvh->m_contiguousNodes[nodeIndex].m_subPart = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_subPart));
                bvh->m_contiguousNodes[nodeIndex].m_triangleIndex = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_triangleIndex));
            }
        }
        nodeData += sizeof(btOptimizedBvhNode) * nodeCount;
    }

    sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
    nodeData += sizeToAdd;

    // Now deserialize the subtree headers
    bvh->m_SubtreeHeaders.initializeFromBuffer(nodeData, bvh->m_subtreeHeaderCount, bvh->m_subtreeHeaderCount);
    if (i_swapEndian)
    {
        for (int i = 0; i < bvh->m_subtreeHeaderCount; i++)
        {
            bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
            bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
            bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2]);

            bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
            bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
            bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2]);

            bvh->m_SubtreeHeaders[i].m_rootNodeIndex = static_cast<int>(btSwapEndian(bvh->m_SubtreeHeaders[i].m_rootNodeIndex));
            bvh->m_SubtreeHeaders[i].m_subtreeSize = static_cast<int>(btSwapEndian(bvh->m_SubtreeHeaders[i].m_subtreeSize));
        }
    }

    return bvh;
}

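/*
Round-trip sketch (hypothetical caller code): serialize into a 16-byte aligned buffer and later
revive the tree in place from that same memory. btAlignedAlloc comes from LinearMath.

    unsigned bufferSize = bvh->calculateSerializeBufferSize();
    void* buffer = btAlignedAlloc(bufferSize,16);    //data buffer MUST be 16 byte aligned
    bool swapEndian = false;    //set true when writing for a target with the other endianness
    bvh->serialize(buffer,bufferSize,swapEndian);
    //...write the buffer to disk, load/map it back (again 16-byte aligned)...
    btQuantizedBvh* loaded = btQuantizedBvh::deSerializeInPlace(buffer,bufferSize,swapEndian);
*/
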
// Constructor that prevents btVector3's default constructor from being called
btQuantizedBvh::btQuantizedBvh(btQuantizedBvh &self, bool /* ownsMemory */) :
    m_bvhAabbMin(self.m_bvhAabbMin),
    m_bvhAabbMax(self.m_bvhAabbMax),
    m_bvhQuantization(self.m_bvhQuantization),
    m_bulletVersion(BT_BULLET_VERSION)
{

}

void btQuantizedBvh::deSerializeFloat(struct btQuantizedBvhFloatData& quantizedBvhFloatData)
{
    m_bvhAabbMax.deSerializeFloat(quantizedBvhFloatData.m_bvhAabbMax);
    m_bvhAabbMin.deSerializeFloat(quantizedBvhFloatData.m_bvhAabbMin);
    m_bvhQuantization.deSerializeFloat(quantizedBvhFloatData.m_bvhQuantization);

    m_curNodeIndex = quantizedBvhFloatData.m_curNodeIndex;
    m_useQuantization = quantizedBvhFloatData.m_useQuantization!=0;

    {
        int numElem = quantizedBvhFloatData.m_numContiguousLeafNodes;
        m_contiguousNodes.resize(numElem);

        if (numElem)
        {
            btOptimizedBvhNodeFloatData* memPtr = quantizedBvhFloatData.m_contiguousNodesPtr;

            for (int i=0;i<numElem;i++,memPtr++)
            {
                m_contiguousNodes[i].m_aabbMaxOrg.deSerializeFloat(memPtr->m_aabbMaxOrg);
                m_contiguousNodes[i].m_aabbMinOrg.deSerializeFloat(memPtr->m_aabbMinOrg);
                m_contiguousNodes[i].m_escapeIndex = memPtr->m_escapeIndex;
                m_contiguousNodes[i].m_subPart = memPtr->m_subPart;
                m_contiguousNodes[i].m_triangleIndex = memPtr->m_triangleIndex;
            }
        }
    }

    {
        int numElem = quantizedBvhFloatData.m_numQuantizedContiguousNodes;
        m_quantizedContiguousNodes.resize(numElem);

        if (numElem)
        {
            btQuantizedBvhNodeData* memPtr = quantizedBvhFloatData.m_quantizedContiguousNodesPtr;
            for (int i=0;i<numElem;i++,memPtr++)
            {
                m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex = memPtr->m_escapeIndexOrTriangleIndex;
                m_quantizedContiguousNodes[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
                m_quantizedContiguousNodes[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
                m_quantizedContiguousNodes[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
                m_quantizedContiguousNodes[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
                m_quantizedContiguousNodes[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
                m_quantizedContiguousNodes[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
            }
        }
    }

    m_traversalMode = btTraversalMode(quantizedBvhFloatData.m_traversalMode);

    {
        int numElem = quantizedBvhFloatData.m_numSubtreeHeaders;
        m_SubtreeHeaders.resize(numElem);
        if (numElem)
        {
            btBvhSubtreeInfoData* memPtr = quantizedBvhFloatData.m_subTreeInfoPtr;
            for (int i=0;i<numElem;i++,memPtr++)
            {
                m_SubtreeHeaders[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
                m_SubtreeHeaders[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
                m_SubtreeHeaders[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
                m_SubtreeHeaders[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
                m_SubtreeHeaders[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
                m_SubtreeHeaders[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
                m_SubtreeHeaders[i].m_rootNodeIndex = memPtr->m_rootNodeIndex;
                m_SubtreeHeaders[i].m_subtreeSize = memPtr->m_subtreeSize;
            }
        }
    }
}

void btQuantizedBvh::deSerializeDouble(struct btQuantizedBvhDoubleData& quantizedBvhDoubleData)
{
    m_bvhAabbMax.deSerializeDouble(quantizedBvhDoubleData.m_bvhAabbMax);
    m_bvhAabbMin.deSerializeDouble(quantizedBvhDoubleData.m_bvhAabbMin);
    m_bvhQuantization.deSerializeDouble(quantizedBvhDoubleData.m_bvhQuantization);

    m_curNodeIndex = quantizedBvhDoubleData.m_curNodeIndex;
    m_useQuantization = quantizedBvhDoubleData.m_useQuantization!=0;

    {
        int numElem = quantizedBvhDoubleData.m_numContiguousLeafNodes;
        m_contiguousNodes.resize(numElem);

        if (numElem)
        {
            btOptimizedBvhNodeDoubleData* memPtr = quantizedBvhDoubleData.m_contiguousNodesPtr;

            for (int i=0;i<numElem;i++,memPtr++)
            {
                m_contiguousNodes[i].m_aabbMaxOrg.deSerializeDouble(memPtr->m_aabbMaxOrg);
                m_contiguousNodes[i].m_aabbMinOrg.deSerializeDouble(memPtr->m_aabbMinOrg);
                m_contiguousNodes[i].m_escapeIndex = memPtr->m_escapeIndex;
                m_contiguousNodes[i].m_subPart = memPtr->m_subPart;
                m_contiguousNodes[i].m_triangleIndex = memPtr->m_triangleIndex;
            }
        }
    }

    {
        int numElem = quantizedBvhDoubleData.m_numQuantizedContiguousNodes;
        m_quantizedContiguousNodes.resize(numElem);

        if (numElem)
        {
            btQuantizedBvhNodeData* memPtr = quantizedBvhDoubleData.m_quantizedContiguousNodesPtr;
            for (int i=0;i<numElem;i++,memPtr++)
            {
                m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex = memPtr->m_escapeIndexOrTriangleIndex;
                m_quantizedContiguousNodes[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
                m_quantizedContiguousNodes[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
                m_quantizedContiguousNodes[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
                m_quantizedContiguousNodes[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
                m_quantizedContiguousNodes[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
                m_quantizedContiguousNodes[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
            }
        }
    }

    m_traversalMode = btTraversalMode(quantizedBvhDoubleData.m_traversalMode);

    {
        int numElem = quantizedBvhDoubleData.m_numSubtreeHeaders;
        m_SubtreeHeaders.resize(numElem);
        if (numElem)
        {
            btBvhSubtreeInfoData* memPtr = quantizedBvhDoubleData.m_subTreeInfoPtr;
            for (int i=0;i<numElem;i++,memPtr++)
            {
                m_SubtreeHeaders[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
                m_SubtreeHeaders[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
                m_SubtreeHeaders[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
                m_SubtreeHeaders[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
                m_SubtreeHeaders[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
                m_SubtreeHeaders[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
                m_SubtreeHeaders[i].m_rootNodeIndex = memPtr->m_rootNodeIndex;
                m_SubtreeHeaders[i].m_subtreeSize = memPtr->m_subtreeSize;
            }
        }
    }
}

///fills the dataBuffer and returns the struct name (and 0 on failure)
const char* btQuantizedBvh::serialize(void* dataBuffer, btSerializer* serializer) const
{
    btQuantizedBvhData* quantizedData = (btQuantizedBvhData*)dataBuffer;

    m_bvhAabbMax.serialize(quantizedData->m_bvhAabbMax);
    m_bvhAabbMin.serialize(quantizedData->m_bvhAabbMin);
    m_bvhQuantization.serialize(quantizedData->m_bvhQuantization);

    quantizedData->m_curNodeIndex = m_curNodeIndex;
    quantizedData->m_useQuantization = m_useQuantization;

    quantizedData->m_numContiguousLeafNodes = m_contiguousNodes.size();
    quantizedData->m_contiguousNodesPtr = (btOptimizedBvhNodeData*) (m_contiguousNodes.size() ? serializer->getUniquePointer((void*)&m_contiguousNodes[0]) : 0);
    if (quantizedData->m_contiguousNodesPtr)
    {
        int sz = sizeof(btOptimizedBvhNodeData);
        int numElem = m_contiguousNodes.size();
        btChunk* chunk = serializer->allocate(sz,numElem);
        btOptimizedBvhNodeData* memPtr = (btOptimizedBvhNodeData*)chunk->m_oldPtr;
        for (int i=0;i<numElem;i++,memPtr++)
        {
            m_contiguousNodes[i].m_aabbMaxOrg.serialize(memPtr->m_aabbMaxOrg);
            m_contiguousNodes[i].m_aabbMinOrg.serialize(memPtr->m_aabbMinOrg);
            memPtr->m_escapeIndex = m_contiguousNodes[i].m_escapeIndex;
            memPtr->m_subPart = m_contiguousNodes[i].m_subPart;
            memPtr->m_triangleIndex = m_contiguousNodes[i].m_triangleIndex;
            // Fill padding with zeros to appease msan.
            memset(memPtr->m_pad, 0, sizeof(memPtr->m_pad));
        }
        serializer->finalizeChunk(chunk,"btOptimizedBvhNodeData",BT_ARRAY_CODE,(void*)&m_contiguousNodes[0]);
    }

    quantizedData->m_numQuantizedContiguousNodes = m_quantizedContiguousNodes.size();
//  printf("quantizedData->m_numQuantizedContiguousNodes=%d\n",quantizedData->m_numQuantizedContiguousNodes);
    quantizedData->m_quantizedContiguousNodesPtr = (btQuantizedBvhNodeData*) (m_quantizedContiguousNodes.size() ? serializer->getUniquePointer((void*)&m_quantizedContiguousNodes[0]) : 0);
    if (quantizedData->m_quantizedContiguousNodesPtr)
    {
        int sz = sizeof(btQuantizedBvhNodeData);
        int numElem = m_quantizedContiguousNodes.size();
        btChunk* chunk = serializer->allocate(sz,numElem);
        btQuantizedBvhNodeData* memPtr = (btQuantizedBvhNodeData*)chunk->m_oldPtr;
        for (int i=0;i<numElem;i++,memPtr++)
        {
            memPtr->m_escapeIndexOrTriangleIndex = m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex;
            memPtr->m_quantizedAabbMax[0] = m_quantizedContiguousNodes[i].m_quantizedAabbMax[0];
            memPtr->m_quantizedAabbMax[1] = m_quantizedContiguousNodes[i].m_quantizedAabbMax[1];
            memPtr->m_quantizedAabbMax[2] = m_quantizedContiguousNodes[i].m_quantizedAabbMax[2];
            memPtr->m_quantizedAabbMin[0] = m_quantizedContiguousNodes[i].m_quantizedAabbMin[0];
            memPtr->m_quantizedAabbMin[1] = m_quantizedContiguousNodes[i].m_quantizedAabbMin[1];
            memPtr->m_quantizedAabbMin[2] = m_quantizedContiguousNodes[i].m_quantizedAabbMin[2];
        }
        serializer->finalizeChunk(chunk,"btQuantizedBvhNodeData",BT_ARRAY_CODE,(void*)&m_quantizedContiguousNodes[0]);
    }

    quantizedData->m_traversalMode = int(m_traversalMode);
    quantizedData->m_numSubtreeHeaders = m_SubtreeHeaders.size();

    quantizedData->m_subTreeInfoPtr = (btBvhSubtreeInfoData*) (m_SubtreeHeaders.size() ? serializer->getUniquePointer((void*)&m_SubtreeHeaders[0]) : 0);
    if (quantizedData->m_subTreeInfoPtr)
    {
        int sz = sizeof(btBvhSubtreeInfoData);
        int numElem = m_SubtreeHeaders.size();
        btChunk* chunk = serializer->allocate(sz,numElem);
        btBvhSubtreeInfoData* memPtr = (btBvhSubtreeInfoData*)chunk->m_oldPtr;
        for (int i=0;i<numElem;i++,memPtr++)
        {
            memPtr->m_quantizedAabbMax[0] = m_SubtreeHeaders[i].m_quantizedAabbMax[0];
            memPtr->m_quantizedAabbMax[1] = m_SubtreeHeaders[i].m_quantizedAabbMax[1];
            memPtr->m_quantizedAabbMax[2] = m_SubtreeHeaders[i].m_quantizedAabbMax[2];
            memPtr->m_quantizedAabbMin[0] = m_SubtreeHeaders[i].m_quantizedAabbMin[0];
            memPtr->m_quantizedAabbMin[1] = m_SubtreeHeaders[i].m_quantizedAabbMin[1];
            memPtr->m_quantizedAabbMin[2] = m_SubtreeHeaders[i].m_quantizedAabbMin[2];

            memPtr->m_rootNodeIndex = m_SubtreeHeaders[i].m_rootNodeIndex;
            memPtr->m_subtreeSize = m_SubtreeHeaders[i].m_subtreeSize;
        }
        serializer->finalizeChunk(chunk,"btBvhSubtreeInfoData",BT_ARRAY_CODE,(void*)&m_SubtreeHeaders[0]);
    }
    return btQuantizedBvhDataName;
}

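/*
Usage sketch (hypothetical, simplified): this chunk-based serialize() is normally reached through
btDefaultSerializer when a shape or world is written to a .bullet file, along the lines of:

    btDefaultSerializer* serializer = new btDefaultSerializer(1024*1024);
    serializer->startSerialization();
    btChunk* chunk = serializer->allocate(sizeof(btQuantizedBvhData),1);
    const char* structType = bvh->serialize(chunk->m_oldPtr,serializer);
    serializer->finalizeChunk(chunk,structType,BT_QUANTIZED_BVH_CODE,(void*)bvh);
    serializer->finishSerialization();
    //serializer->getBufferPointer() / getCurrentBufferSize() -> write to file
*/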