feat: switch to the new Bvh from parry for the broad-phase (#853)

* feat: switch to the new Bvh from parry for the broad-phase

* chore: cargo fmt + update testbed

* chore: remove the multi-grid SAP broad-phase

* fix soft-ccd handling in broad-phase

* Fix contact cleanup in broad-phase after collider removal

* chore: clippy fixes

* fix CCD regression

* chore: update changelog

* fix build with the parallel feature enabled

* chore: remove the now useless broad-phase proxy index from colliders

* fix tests
This commit is contained in:
Sébastien Crozet
2025-07-11 22:36:40 +02:00
committed by GitHub
parent 86a257d4f1
commit 95bd6fcfeb
212 changed files with 2140 additions and 3953 deletions

View File

@@ -1,7 +1,6 @@
use crate::dynamics::RigidBodySet;
use crate::geometry::{ColliderHandle, ColliderSet, ContactManifold, Shape, ShapeCastHit};
use crate::geometry::{ColliderHandle, ContactManifold, Shape, ShapeCastHit};
use crate::math::{Isometry, Point, Real, UnitVector, Vector};
use crate::pipeline::{QueryFilter, QueryFilterFlags, QueryPipeline};
use crate::pipeline::{QueryFilterFlags, QueryPipeline, QueryPipelineMut};
use crate::utils;
use na::{RealField, Vector2};
use parry::bounding_volume::BoundingVolume;
@@ -215,13 +214,10 @@ impl KinematicCharacterController {
pub fn move_shape(
&self,
dt: Real,
bodies: &RigidBodySet,
colliders: &ColliderSet,
queries: &QueryPipeline,
character_shape: &dyn Shape,
character_pos: &Isometry<Real>,
desired_translation: Vector<Real>,
filter: QueryFilter,
mut events: impl FnMut(CharacterCollision),
) -> EffectiveCharacterMovement {
let mut result = EffectiveCharacterMovement {
@@ -238,13 +234,10 @@ impl KinematicCharacterController {
let grounded_at_starting_pos = self.detect_grounded_status_and_apply_friction(
dt,
bodies,
colliders,
queries,
character_shape,
character_pos,
&dims,
filter,
None,
None,
);
@@ -266,8 +259,6 @@ impl KinematicCharacterController {
// 2. Cast towards the movement direction.
if let Some((handle, hit)) = queries.cast_shape(
bodies,
colliders,
&(Translation::from(result.translation) * character_pos),
&translation_dir,
character_shape,
@@ -277,7 +268,6 @@ impl KinematicCharacterController {
max_time_of_impact: translation_dist,
compute_impact_geometry_on_penetration: true,
},
filter,
) {
// We hit something, compute and apply the allowed interference-free translation.
let allowed_dist = hit.time_of_impact;
@@ -297,13 +287,10 @@ impl KinematicCharacterController {
// Try to go upstairs.
if !self.handle_stairs(
bodies,
colliders,
queries,
*queries,
character_shape,
&(Translation::from(result.translation) * character_pos),
&dims,
filter,
handle,
&hit_info,
&mut translation_remaining,
@@ -324,13 +311,10 @@ impl KinematicCharacterController {
translation_remaining.fill(0.0);
result.grounded = self.detect_grounded_status_and_apply_friction(
dt,
bodies,
colliders,
queries,
character_shape,
&(Translation::from(result.translation) * character_pos),
&dims,
filter,
None,
None,
);
@@ -338,13 +322,10 @@ impl KinematicCharacterController {
}
result.grounded = self.detect_grounded_status_and_apply_friction(
dt,
bodies,
colliders,
queries,
character_shape,
&(Translation::from(result.translation) * character_pos),
&dims,
filter,
Some(&mut kinematic_friction_translation),
Some(&mut translation_remaining),
);
@@ -358,13 +339,10 @@ impl KinematicCharacterController {
if !is_moving {
result.grounded = self.detect_grounded_status_and_apply_friction(
dt,
bodies,
colliders,
queries,
character_shape,
&(Translation::from(result.translation) * character_pos),
&dims,
filter,
None,
None,
);
@@ -372,13 +350,10 @@ impl KinematicCharacterController {
// If needed, and if we are not already grounded, snap to the ground.
if grounded_at_starting_pos {
self.snap_to_ground(
bodies,
colliders,
queries,
character_shape,
&(Translation::from(result.translation) * character_pos),
&dims,
filter,
&mut result,
);
}
@@ -389,13 +364,10 @@ impl KinematicCharacterController {
fn snap_to_ground(
&self,
bodies: &RigidBodySet,
colliders: &ColliderSet,
queries: &QueryPipeline,
character_shape: &dyn Shape,
character_pos: &Isometry<Real>,
dims: &Vector2<Real>,
filter: QueryFilter,
result: &mut EffectiveCharacterMovement,
) -> Option<(ColliderHandle, ShapeCastHit)> {
if let Some(snap_distance) = self.snap_to_ground {
@@ -403,8 +375,6 @@ impl KinematicCharacterController {
let snap_distance = snap_distance.eval(dims.y);
let offset = self.offset.eval(dims.y);
if let Some((hit_handle, hit)) = queries.cast_shape(
bodies,
colliders,
character_pos,
&-self.up,
character_shape,
@@ -414,7 +384,6 @@ impl KinematicCharacterController {
max_time_of_impact: snap_distance,
compute_impact_geometry_on_penetration: true,
},
filter,
) {
// Apply the snap.
result.translation -= *self.up * hit.time_of_impact;
@@ -435,13 +404,10 @@ impl KinematicCharacterController {
fn detect_grounded_status_and_apply_friction(
&self,
dt: Real,
bodies: &RigidBodySet,
colliders: &ColliderSet,
queries: &QueryPipeline,
character_shape: &dyn Shape,
character_pos: &Isometry<Real>,
dims: &Vector2<Real>,
filter: QueryFilter,
mut kinematic_friction_translation: Option<&mut Vector<Real>>,
mut translation_remaining: Option<&mut Vector<Real>>,
) -> bool {
@@ -457,88 +423,82 @@ impl KinematicCharacterController {
let mut grounded = false;
queries.colliders_with_aabb_intersecting_aabb(&character_aabb, |handle| {
if let Some(collider) = colliders.get(*handle) {
if filter.test(bodies, *handle, collider) {
manifolds.clear();
let pos12 = character_pos.inv_mul(collider.position());
let _ = dispatcher.contact_manifolds(
&pos12,
character_shape,
collider.shape(),
prediction,
&mut manifolds,
&mut None,
);
'outer: for (_, collider) in queries.intersect_aabb_conservative(&character_aabb) {
manifolds.clear();
let pos12 = character_pos.inv_mul(collider.position());
let _ = dispatcher.contact_manifolds(
&pos12,
character_shape,
collider.shape(),
prediction,
&mut manifolds,
&mut None,
);
if let (Some(kinematic_friction_translation), Some(translation_remaining)) = (
kinematic_friction_translation.as_deref_mut(),
translation_remaining.as_deref_mut(),
) {
let init_kinematic_friction_translation = *kinematic_friction_translation;
let kinematic_parent = collider
.parent
.and_then(|p| bodies.get(p.handle))
.filter(|rb| rb.is_kinematic());
if let (Some(kinematic_friction_translation), Some(translation_remaining)) = (
kinematic_friction_translation.as_deref_mut(),
translation_remaining.as_deref_mut(),
) {
let init_kinematic_friction_translation = *kinematic_friction_translation;
let kinematic_parent = collider
.parent
.and_then(|p| queries.bodies.get(p.handle))
.filter(|rb| rb.is_kinematic());
for m in &manifolds {
if self.is_grounded_at_contact_manifold(m, character_pos, dims) {
grounded = true;
}
for m in &manifolds {
if self.is_grounded_at_contact_manifold(m, character_pos, dims) {
grounded = true;
}
if let Some(kinematic_parent) = kinematic_parent {
let mut num_active_contacts = 0;
let mut manifold_center = Point::origin();
let normal = -(character_pos * m.local_n1);
if let Some(kinematic_parent) = kinematic_parent {
let mut num_active_contacts = 0;
let mut manifold_center = Point::origin();
let normal = -(character_pos * m.local_n1);
for contact in &m.points {
if contact.dist <= prediction {
num_active_contacts += 1;
let contact_point = collider.position() * contact.local_p2;
let target_vel =
kinematic_parent.velocity_at_point(&contact_point);
for contact in &m.points {
if contact.dist <= prediction {
num_active_contacts += 1;
let contact_point = collider.position() * contact.local_p2;
let target_vel = kinematic_parent.velocity_at_point(&contact_point);
let normal_target_mvt = target_vel.dot(&normal) * dt;
let normal_current_mvt = translation_remaining.dot(&normal);
let normal_target_mvt = target_vel.dot(&normal) * dt;
let normal_current_mvt = translation_remaining.dot(&normal);
manifold_center += contact_point.coords;
*translation_remaining +=
normal * (normal_target_mvt - normal_current_mvt);
}
}
if num_active_contacts > 0 {
let target_vel = kinematic_parent.velocity_at_point(
&(manifold_center / num_active_contacts as Real),
);
let tangent_platform_mvt =
(target_vel - normal * target_vel.dot(&normal)) * dt;
kinematic_friction_translation.zip_apply(
&tangent_platform_mvt,
|y, x| {
if x.abs() > (*y).abs() {
*y = x;
}
},
);
}
manifold_center += contact_point.coords;
*translation_remaining +=
normal * (normal_target_mvt - normal_current_mvt);
}
}
*translation_remaining +=
*kinematic_friction_translation - init_kinematic_friction_translation;
} else {
for m in &manifolds {
if self.is_grounded_at_contact_manifold(m, character_pos, dims) {
grounded = true;
return false; // We can stop the search early.
}
if num_active_contacts > 0 {
let target_vel = kinematic_parent.velocity_at_point(
&(manifold_center / num_active_contacts as Real),
);
let tangent_platform_mvt =
(target_vel - normal * target_vel.dot(&normal)) * dt;
kinematic_friction_translation.zip_apply(
&tangent_platform_mvt,
|y, x| {
if x.abs() > (*y).abs() {
*y = x;
}
},
);
}
}
}
*translation_remaining +=
*kinematic_friction_translation - init_kinematic_friction_translation;
} else {
for m in &manifolds {
if self.is_grounded_at_contact_manifold(m, character_pos, dims) {
grounded = true;
break 'outer; // We can stop the search early.
}
}
}
true
});
}
grounded
}
@@ -662,13 +622,10 @@ impl KinematicCharacterController {
#[profiling::function]
fn handle_stairs(
&self,
bodies: &RigidBodySet,
colliders: &ColliderSet,
queries: &QueryPipeline,
mut queries: QueryPipeline,
character_shape: &dyn Shape,
character_pos: &Isometry<Real>,
dims: &Vector2<Real>,
mut filter: QueryFilter,
stair_handle: ColliderHandle,
hit: &HitInfo,
translation_remaining: &mut Vector<Real>,
@@ -688,10 +645,11 @@ impl KinematicCharacterController {
let max_height = autostep.max_height.eval(dims.y) + offset;
if !autostep.include_dynamic_bodies {
if colliders
if queries
.colliders
.get(stair_handle)
.and_then(|co| co.parent)
.and_then(|p| bodies.get(p.handle))
.and_then(|p| queries.bodies.get(p.handle))
.map(|b| b.is_dynamic())
== Some(true)
{
@@ -699,7 +657,7 @@ impl KinematicCharacterController {
return false;
}
filter.flags |= QueryFilterFlags::EXCLUDE_DYNAMIC;
queries.filter.flags |= QueryFilterFlags::EXCLUDE_DYNAMIC;
}
let shifted_character_pos = Translation::from(*self.up * max_height) * character_pos;
@@ -712,8 +670,6 @@ impl KinematicCharacterController {
if queries
.cast_shape(
bodies,
colliders,
character_pos,
&self.up,
character_shape,
@@ -723,7 +679,6 @@ impl KinematicCharacterController {
max_time_of_impact: max_height,
compute_impact_geometry_on_penetration: true,
},
filter,
)
.is_some()
{
@@ -733,8 +688,6 @@ impl KinematicCharacterController {
if queries
.cast_shape(
bodies,
colliders,
&shifted_character_pos,
&horizontal_dir,
character_shape,
@@ -744,7 +697,6 @@ impl KinematicCharacterController {
max_time_of_impact: min_width,
compute_impact_geometry_on_penetration: true,
},
filter,
)
.is_some()
{
@@ -755,8 +707,6 @@ impl KinematicCharacterController {
// Check that we are not getting into a ramp that is too steep
// after stepping.
if let Some((_, hit)) = queries.cast_shape(
bodies,
colliders,
&(Translation::from(horizontal_dir * min_width) * shifted_character_pos),
&-self.up,
character_shape,
@@ -766,7 +716,6 @@ impl KinematicCharacterController {
max_time_of_impact: max_height,
compute_impact_geometry_on_penetration: true,
},
filter,
) {
let [vertical_slope_translation, horizontal_slope_translation] = self
.split_into_components(translation_remaining)
@@ -786,8 +735,6 @@ impl KinematicCharacterController {
let step_height = max_height
- queries
.cast_shape(
bodies,
colliders,
&(Translation::from(horizontal_dir * min_width) * shifted_character_pos),
&-self.up,
character_shape,
@@ -797,7 +744,6 @@ impl KinematicCharacterController {
max_time_of_impact: max_height,
compute_impact_geometry_on_penetration: true,
},
filter,
)
.map(|hit| hit.1.time_of_impact)
.unwrap_or(max_height);
@@ -824,24 +770,18 @@ impl KinematicCharacterController {
pub fn solve_character_collision_impulses<'a>(
&self,
dt: Real,
bodies: &mut RigidBodySet,
colliders: &ColliderSet,
queries: &QueryPipeline,
mut queries: QueryPipelineMut,
character_shape: &dyn Shape,
character_mass: Real,
collisions: impl IntoIterator<Item = &'a CharacterCollision>,
filter: QueryFilter,
) {
for collision in collisions {
self.solve_single_character_collision_impulse(
dt,
bodies,
colliders,
queries,
&mut queries,
character_shape,
character_mass,
collision,
filter,
);
}
}
@@ -854,13 +794,10 @@ impl KinematicCharacterController {
fn solve_single_character_collision_impulse(
&self,
dt: Real,
bodies: &mut RigidBodySet,
colliders: &ColliderSet,
queries: &QueryPipeline,
queries: &mut QueryPipelineMut,
character_shape: &dyn Shape,
character_mass: Real,
collision: &CharacterCollision,
filter: QueryFilter,
) {
let extents = character_shape.compute_local_aabb().extents();
let up_extent = extents.dot(&self.up.abs());
@@ -876,41 +813,39 @@ impl KinematicCharacterController {
.compute_aabb(&collision.character_pos)
.loosened(prediction);
queries.colliders_with_aabb_intersecting_aabb(&character_aabb, |handle| {
if let Some(collider) = colliders.get(*handle) {
if let Some(parent) = collider.parent {
if filter.test(bodies, *handle, collider) {
if let Some(body) = bodies.get(parent.handle) {
if body.is_dynamic() {
manifolds.clear();
let pos12 = collision.character_pos.inv_mul(collider.position());
let prev_manifolds_len = manifolds.len();
let _ = dispatcher.contact_manifolds(
&pos12,
character_shape,
collider.shape(),
prediction,
&mut manifolds,
&mut None,
);
for (_, collider) in queries
.as_ref()
.intersect_aabb_conservative(&character_aabb)
{
if let Some(parent) = collider.parent {
if let Some(body) = queries.bodies.get(parent.handle) {
if body.is_dynamic() {
manifolds.clear();
let pos12 = collision.character_pos.inv_mul(collider.position());
let prev_manifolds_len = manifolds.len();
let _ = dispatcher.contact_manifolds(
&pos12,
character_shape,
collider.shape(),
prediction,
&mut manifolds,
&mut None,
);
for m in &mut manifolds[prev_manifolds_len..] {
m.data.rigid_body2 = Some(parent.handle);
m.data.normal = collision.character_pos * m.local_n1;
}
}
for m in &mut manifolds[prev_manifolds_len..] {
m.data.rigid_body2 = Some(parent.handle);
m.data.normal = collision.character_pos * m.local_n1;
}
}
}
}
true
});
}
let velocity_to_transfer = movement_to_transfer * utils::inv(dt);
for manifold in &manifolds {
let body_handle = manifold.data.rigid_body2.unwrap();
let body = &mut bodies[body_handle];
let body = &mut queries.bodies[body_handle];
for pt in &manifold.points {
if pt.dist <= prediction {
@@ -953,10 +888,9 @@ mod test {
let mut impulse_joints = ImpulseJointSet::new();
let mut multibody_joints = MultibodyJointSet::new();
let mut pipeline = PhysicsPipeline::new();
let mut bf = BroadPhaseMultiSap::new();
let mut bf = BroadPhaseBvh::new();
let mut nf = NarrowPhase::new();
let mut islands = IslandManager::new();
let mut query_pipeline = QueryPipeline::new();
let mut bodies = RigidBodySet::new();
@@ -1019,8 +953,23 @@ mod test {
let character_shape = collider.shape();
colliders.insert_with_parent(collider.clone(), character_handle_cannot_climb, &mut bodies);
query_pipeline.update(&colliders);
for i in 0..200 {
// Step once
pipeline.step(
&gravity,
&integration_parameters,
&mut islands,
&mut bf,
&mut nf,
&mut bodies,
&mut colliders,
&mut impulse_joints,
&mut multibody_joints,
&mut CCDSolver::new(),
&(),
&(),
);
let mut update_character_controller =
|controller: KinematicCharacterController, handle: RigidBodyHandle| {
let character_body = bodies.get(handle).unwrap();
@@ -1028,15 +977,18 @@ mod test {
// the character is being moved.
let mut collisions = vec![];
let filter_character_controller = QueryFilter::new().exclude_rigid_body(handle);
let effective_movement = controller.move_shape(
integration_parameters.dt,
let query_pipeline = bf.as_query_pipeline(
nf.query_dispatcher(),
&bodies,
&colliders,
filter_character_controller,
);
let effective_movement = controller.move_shape(
integration_parameters.dt,
&query_pipeline,
character_shape,
character_body.position(),
Vector::new(0.1, -0.1, 0.0),
filter_character_controller,
|collision| collisions.push(collision),
);
let character_body = bodies.get_mut(handle).unwrap();
@@ -1044,7 +996,8 @@ mod test {
assert!(
effective_movement.grounded,
"movement should be grounded at all times for current setup (iter: {}), pos: {}.",
i, translation + effective_movement.translation
i,
translation + effective_movement.translation
);
character_body.set_next_kinematic_translation(
translation + effective_movement.translation,
@@ -1064,22 +1017,6 @@ mod test {
character_handle_cannot_climb,
);
update_character_controller(character_controller_can_climb, character_handle_can_climb);
// Step once
pipeline.step(
&gravity,
&integration_parameters,
&mut islands,
&mut bf,
&mut nf,
&mut bodies,
&mut colliders,
&mut impulse_joints,
&mut multibody_joints,
&mut CCDSolver::new(),
Some(&mut query_pipeline),
&(),
&(),
);
}
let character_body = bodies.get(character_handle_can_climb).unwrap();
assert!(character_body.translation().x > 6.0);
@@ -1095,10 +1032,9 @@ mod test {
let mut impulse_joints = ImpulseJointSet::new();
let mut multibody_joints = MultibodyJointSet::new();
let mut pipeline = PhysicsPipeline::new();
let mut bf = BroadPhaseMultiSap::new();
let mut bf = BroadPhaseBvh::new();
let mut nf = NarrowPhase::new();
let mut islands = IslandManager::new();
let mut query_pipeline = QueryPipeline::new();
let mut bodies = RigidBodySet::new();
@@ -1146,40 +1082,7 @@ mod test {
let character_shape = collider.shape();
colliders.insert_with_parent(collider.clone(), character_handle_no_snap, &mut bodies);
query_pipeline.update(&colliders);
for i in 0..10000 {
let mut update_character_controller =
|controller: KinematicCharacterController, handle: RigidBodyHandle| {
let character_body = bodies.get(handle).unwrap();
// Use a closure to handle or collect the collisions while
// the character is being moved.
let mut collisions = vec![];
let filter_character_controller = QueryFilter::new().exclude_rigid_body(handle);
let effective_movement = controller.move_shape(
integration_parameters.dt,
&bodies,
&colliders,
&query_pipeline,
character_shape,
character_body.position(),
Vector::new(0.1, -0.1, 0.1),
filter_character_controller,
|collision| collisions.push(collision),
);
let character_body = bodies.get_mut(handle).unwrap();
let translation = character_body.translation();
assert!(
effective_movement.grounded,
"movement should be grounded at all times for current setup (iter: {}), pos: {}.",
i, translation + effective_movement.translation
);
character_body.set_next_kinematic_translation(
translation + effective_movement.translation,
);
};
update_character_controller(character_controller_no_snap, character_handle_no_snap);
update_character_controller(character_controller_snap, character_handle_snap);
// Step once
pipeline.step(
&gravity,
@@ -1192,11 +1095,48 @@ mod test {
&mut impulse_joints,
&mut multibody_joints,
&mut CCDSolver::new(),
Some(&mut query_pipeline),
&(),
&(),
);
let mut update_character_controller =
|controller: KinematicCharacterController, handle: RigidBodyHandle| {
let character_body = bodies.get(handle).unwrap();
// Use a closure to handle or collect the collisions while
// the character is being moved.
let mut collisions = vec![];
let filter_character_controller = QueryFilter::new().exclude_rigid_body(handle);
let query_pipeline = bf.as_query_pipeline(
nf.query_dispatcher(),
&bodies,
&colliders,
filter_character_controller,
);
let effective_movement = controller.move_shape(
integration_parameters.dt,
&query_pipeline,
character_shape,
character_body.position(),
Vector::new(0.1, -0.1, 0.1),
|collision| collisions.push(collision),
);
let character_body = bodies.get_mut(handle).unwrap();
let translation = character_body.translation();
assert!(
effective_movement.grounded,
"movement should be grounded at all times for current setup (iter: {}), pos: {}.",
i,
translation + effective_movement.translation
);
character_body.set_next_kinematic_translation(
translation + effective_movement.translation,
);
};
update_character_controller(character_controller_no_snap, character_handle_no_snap);
update_character_controller(character_controller_snap, character_handle_snap);
}
let character_body = bodies.get_mut(character_handle_no_snap).unwrap();
let translation = character_body.translation();

View File

@@ -3,7 +3,8 @@
use crate::dynamics::{RigidBody, RigidBodyHandle, RigidBodySet};
use crate::geometry::{ColliderHandle, ColliderSet, Ray};
use crate::math::{Point, Real, Rotation, Vector};
use crate::pipeline::{QueryFilter, QueryPipeline};
use crate::pipeline::QueryPipeline;
use crate::prelude::QueryPipelineMut;
use crate::utils::{SimdCross, SimdDot};
/// A character controller to simulate vehicles using ray-casting for the wheels.
@@ -322,28 +323,20 @@ impl DynamicRayCastVehicleController {
}
#[profiling::function]
fn ray_cast(
&mut self,
bodies: &RigidBodySet,
colliders: &ColliderSet,
queries: &QueryPipeline,
filter: QueryFilter,
chassis: &RigidBody,
wheel_id: usize,
) {
fn ray_cast(&mut self, queries: &QueryPipeline, chassis: &RigidBody, wheel_id: usize) {
let wheel = &mut self.wheels[wheel_id];
let raylen = wheel.suspension_rest_length + wheel.radius;
let rayvector = wheel.wheel_direction_ws * raylen;
let source = wheel.raycast_info.hard_point_ws;
wheel.raycast_info.contact_point_ws = source + rayvector;
let ray = Ray::new(source, rayvector);
let hit = queries.cast_ray_and_get_normal(bodies, colliders, &ray, 1.0, true, filter);
let hit = queries.cast_ray_and_get_normal(&ray, 1.0, true);
wheel.raycast_info.ground_object = None;
if let Some((collider_hit, mut hit)) = hit {
if hit.time_of_impact == 0.0 {
let collider = &colliders[collider_hit];
let collider = &queries.colliders[collider_hit];
let up_ray = Ray::new(source + rayvector, -rayvector);
if let Some(hit2) =
collider
@@ -405,16 +398,9 @@ impl DynamicRayCastVehicleController {
/// Updates the vehicles velocity based on its suspension, engine force, and brake.
#[profiling::function]
pub fn update_vehicle(
&mut self,
dt: Real,
bodies: &mut RigidBodySet,
colliders: &ColliderSet,
queries: &QueryPipeline,
filter: QueryFilter,
) {
pub fn update_vehicle(&mut self, dt: Real, queries: QueryPipelineMut) {
let num_wheels = self.wheels.len();
let chassis = &bodies[self.chassis];
let chassis = &queries.bodies[self.chassis];
for i in 0..num_wheels {
self.update_wheel_transform(chassis, i);
@@ -433,13 +419,14 @@ impl DynamicRayCastVehicleController {
//
for wheel_id in 0..self.wheels.len() {
self.ray_cast(bodies, colliders, queries, filter, chassis, wheel_id);
self.ray_cast(&queries.as_ref(), chassis, wheel_id);
}
let chassis_mass = chassis.mass();
self.update_suspension(chassis_mass);
let chassis = bodies
let chassis = queries
.bodies
.get_mut_internal_with_modification_tracking(self.chassis)
.unwrap();
@@ -459,9 +446,10 @@ impl DynamicRayCastVehicleController {
chassis.apply_impulse_at_point(impulse, wheel.raycast_info.contact_point_ws, false);
}
self.update_friction(bodies, colliders, dt);
self.update_friction(queries.bodies, queries.colliders, dt);
let chassis = bodies
let chassis = queries
.bodies
.get_mut_internal_with_modification_tracking(self.chassis)
.unwrap();

View File

@@ -193,12 +193,6 @@ measure_method!(
stages.solver_time
);
measure_method!(ccd_started, ccd_completed, ccd_time_ms, stages.ccd_time);
measure_method!(
query_pipeline_update_started,
query_pipeline_update_completed,
query_pipeline_update_time_ms,
stages.query_pipeline_time
);
measure_method!(
assembly_started,

View File

@@ -14,8 +14,6 @@ pub struct StagesCounters {
pub solver_time: Timer,
/// Total time spent for CCD and CCD resolution.
pub ccd_time: Timer,
/// Total time spent updating the query pipeline (if provided to `PhysicsPipeline::step`).
pub query_pipeline_time: Timer,
/// Total time spent propagating user changes.
pub user_changes: Timer,
}
@@ -29,7 +27,6 @@ impl StagesCounters {
island_construction_time: Timer::new(),
solver_time: Timer::new(),
ccd_time: Timer::new(),
query_pipeline_time: Timer::new(),
user_changes: Timer::new(),
}
}
@@ -41,7 +38,6 @@ impl StagesCounters {
self.island_construction_time.reset();
self.solver_time.reset();
self.ccd_time.reset();
self.query_pipeline_time.reset();
self.user_changes.reset();
}
}
@@ -61,7 +57,6 @@ impl Display for StagesCounters {
)?;
writeln!(f, "Solver time: {}", self.solver_time)?;
writeln!(f, "CCD time: {}", self.ccd_time)?;
writeln!(f, "Query pipeline time: {}", self.query_pipeline_time)?;
writeln!(f, "User changes time: {}", self.user_changes)
}
}

View File

@@ -3,7 +3,6 @@
//! See <https://github.com/fitzgen/generational-arena/blob/master/src/lib.rs>.
//! This has been modified to have a fully deterministic deserialization (including for the order of
//! Index attribution after a deserialization of the arena).
use parry::partitioning::IndexedData;
use std::cmp;
use std::iter::{self, Extend, FromIterator, FusedIterator};
use std::mem;
@@ -58,16 +57,6 @@ impl Default for Index {
}
}
impl IndexedData for Index {
fn default() -> Self {
Default::default()
}
fn index(&self) -> usize {
self.into_raw_parts().0 as usize
}
}
impl Index {
/// Create a new `Index` from its raw parts.
///

View File

@@ -13,6 +13,11 @@ impl<T> Coarena<T> {
Self { data: Vec::new() }
}
/// Pre-allocates capacity for `additional` extra elements in this arena.
pub fn reserve(&mut self, additional: usize) {
self.data.reserve(additional);
}
/// Iterates through all the elements of this coarena.
pub fn iter(&self) -> impl Iterator<Item = (Index, &T)> {
self.data
@@ -30,8 +35,18 @@ impl<T> Coarena<T> {
self.data.get(index as usize).map(|(_, t)| t)
}
/// Gets a specific mutable element from the coarena without specifying its generation number.
///
/// It is strongly encouraged to use `Coarena::get_mut` instead of this method because this method
/// can suffer from the ABA problem.
pub fn get_mut_unknown_gen(&mut self, index: u32) -> Option<&mut T> {
self.data.get_mut(index as usize).map(|(_, t)| t)
}
pub(crate) fn get_gen(&self, index: u32) -> Option<u32> {
self.data.get(index as usize).map(|(gen, _)| *gen)
self.data
.get(index as usize)
.map(|(generation, _)| *generation)
}
/// Deletes an element for the coarena and returns its value.

View File

@@ -3,9 +3,9 @@ use crate::dynamics::{IslandManager, RigidBodyHandle, RigidBodySet};
use crate::geometry::{ColliderParent, ColliderSet, CollisionEvent, NarrowPhase};
use crate::math::Real;
use crate::parry::utils::SortedPair;
use crate::pipeline::{EventHandler, QueryPipeline};
use crate::prelude::{query_pipeline_generators, ActiveEvents, CollisionEventFlags};
use parry::query::{DefaultQueryDispatcher, QueryDispatcher};
use crate::pipeline::{EventHandler, QueryFilter, QueryPipeline};
use crate::prelude::{ActiveEvents, CollisionEventFlags};
use parry::partitioning::{Bvh, BvhBuildStrategy};
use parry::utils::hashmap::HashMap;
use std::collections::BinaryHeap;
@@ -19,8 +19,11 @@ pub enum PredictedImpacts {
#[derive(Clone)]
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
pub struct CCDSolver {
// TODO PERF: for now the CCD solver maintains its own bvh for CCD queries.
// At each frame it gets rebuilt.
// We should consider an alternative to directly use the broad-phases.
#[cfg_attr(feature = "serde-serialize", serde(skip))]
query_pipeline: QueryPipeline,
bvh: Bvh,
}
impl Default for CCDSolver {
@@ -32,20 +35,7 @@ impl Default for CCDSolver {
impl CCDSolver {
/// Initializes a new CCD solver
pub fn new() -> Self {
Self::with_query_dispatcher(DefaultQueryDispatcher)
}
/// Initializes a CCD solver with a custom `QueryDispatcher` used for computing time-of-impacts.
///
/// Use this constructor in order to use a custom `QueryDispatcher` that is aware of your own
/// user-defined shapes.
pub fn with_query_dispatcher<D>(d: D) -> Self
where
D: 'static + QueryDispatcher,
{
CCDSolver {
query_pipeline: QueryPipeline::with_query_dispatcher(d),
}
Self { bvh: Bvh::new() }
}
/// Apply motion-clamping to the bodies affected by the given `impacts`.
@@ -117,15 +107,37 @@ impl CCDSolver {
colliders: &ColliderSet,
narrow_phase: &NarrowPhase,
) -> Option<Real> {
// Update the query pipeline.
self.query_pipeline.update_with_generator(
query_pipeline_generators::SweptAabbWithPredictedPosition {
bodies,
colliders,
dt,
},
// Update the query pipeline with the colliders predicted positions.
self.bvh = Bvh::from_iter(
BvhBuildStrategy::Binned,
colliders.iter_enabled().map(|(co_handle, co)| {
let id = co_handle.into_raw_parts().0;
if let Some(co_parent) = co.parent {
let rb = &bodies[co_parent.handle];
let predicted_pos = rb
.pos
.integrate_forces_and_velocities(dt, &rb.forces, &rb.vels, &rb.mprops);
let next_position = predicted_pos * co_parent.pos_wrt_parent;
(
id as usize,
co.shape.compute_swept_aabb(&co.pos, &next_position),
)
} else {
(id as usize, co.shape.compute_aabb(&co.pos))
}
}),
);
let query_pipeline = QueryPipeline {
// NOTE: the upcast needs at least rust 1.86
dispatcher: narrow_phase.query_dispatcher(),
bvh: &self.bvh,
bodies,
colliders,
filter: QueryFilter::default(),
};
let mut pairs_seen = HashMap::default();
let mut min_toi = dt;
@@ -156,73 +168,66 @@ impl CCDSolver {
.shape
.compute_swept_aabb(&co1.pos, &predicted_collider_pos1);
self.query_pipeline
.colliders_with_aabb_intersecting_aabb(&aabb1, |ch2| {
if *ch1 == *ch2 {
// Ignore self-intersection.
return true;
}
for (ch2, _) in query_pipeline.intersect_aabb_conservative(&aabb1) {
if *ch1 == ch2 {
// Ignore self-intersection.
continue;
}
if pairs_seen
.insert(
SortedPair::new(ch1.into_raw_parts().0, ch2.into_raw_parts().0),
(),
)
.is_none()
{
let co1 = &colliders[*ch1];
let co2 = &colliders[*ch2];
if pairs_seen
.insert(
SortedPair::new(ch1.into_raw_parts().0, ch2.into_raw_parts().0),
(),
)
.is_none()
{
let co1 = &colliders[*ch1];
let co2 = &colliders[ch2];
let bh1 = co1.parent.map(|p| p.handle);
let bh2 = co2.parent.map(|p| p.handle);
let bh1 = co1.parent.map(|p| p.handle);
let bh2 = co2.parent.map(|p| p.handle);
// Ignore self-intersection and sensors and apply collision groups filter.
if bh1 == bh2 // Ignore self-intersection.
// Ignore self-intersection and sensors and apply collision groups filter.
if bh1 == bh2 // Ignore self-intersection.
|| (co1.is_sensor() || co2.is_sensor()) // Ignore sensors.
|| !co1.flags.collision_groups.test(co2.flags.collision_groups) // Apply collision groups.
|| !co1.flags.solver_groups.test(co2.flags.solver_groups)
// Apply solver groups.
{
return true;
}
let smallest_dist = narrow_phase
.contact_pair(*ch1, *ch2)
.and_then(|p| p.find_deepest_contact())
.map(|c| c.1.dist)
.unwrap_or(0.0);
let rb2 = bh2.and_then(|h| bodies.get(h));
if let Some(toi) = TOIEntry::try_from_colliders(
self.query_pipeline.query_dispatcher(),
*ch1,
*ch2,
co1,
co2,
Some(rb1),
rb2,
None,
None,
0.0,
min_toi,
smallest_dist,
) {
min_toi = min_toi.min(toi.toi);
}
// Apply solver groups.
{
continue;
}
true
});
let smallest_dist = narrow_phase
.contact_pair(*ch1, ch2)
.and_then(|p| p.find_deepest_contact())
.map(|c| c.1.dist)
.unwrap_or(0.0);
let rb2 = bh2.and_then(|h| bodies.get(h));
if let Some(toi) = TOIEntry::try_from_colliders(
narrow_phase.query_dispatcher(),
*ch1,
ch2,
co1,
co2,
Some(rb1),
rb2,
None,
None,
0.0,
min_toi,
smallest_dist,
) {
min_toi = min_toi.min(toi.toi);
}
}
}
}
}
}
if min_toi < dt {
Some(min_toi)
} else {
None
}
if min_toi < dt { Some(min_toi) } else { None }
}
/// Outputs the set of bodies as well as their first time-of-impact event.
@@ -241,11 +246,32 @@ impl CCDSolver {
let mut pairs_seen = HashMap::default();
let mut min_overstep = dt;
// Update the query pipeline.
self.query_pipeline.update_with_generator(
query_pipeline_generators::SweptAabbWithNextPosition { bodies, colliders },
// Update the query pipeline with the colliders `next_position`.
self.bvh = Bvh::from_iter(
BvhBuildStrategy::Binned,
colliders.iter_enabled().map(|(co_handle, co)| {
let id = co_handle.into_raw_parts().0;
if let Some(co_parent) = co.parent {
let rb_next_pos = &bodies[co_parent.handle].pos.next_position;
let next_position = rb_next_pos * co_parent.pos_wrt_parent;
(
id as usize,
co.shape.compute_swept_aabb(&co.pos, &next_position),
)
} else {
(id as usize, co.shape.compute_aabb(&co.pos))
}
}),
);
let query_pipeline = QueryPipeline {
dispatcher: narrow_phase.query_dispatcher(),
bvh: &self.bvh,
bodies,
colliders,
filter: QueryFilter::default(),
};
/*
*
* First, collect all TOIs.
@@ -275,69 +301,66 @@ impl CCDSolver {
.shape
.compute_swept_aabb(&co1.pos, &predicted_collider_pos1);
self.query_pipeline
.colliders_with_aabb_intersecting_aabb(&aabb1, |ch2| {
if *ch1 == *ch2 {
// Ignore self-intersection.
return true;
}
for (ch2, _) in query_pipeline.intersect_aabb_conservative(&aabb1) {
if *ch1 == ch2 {
// Ignore self-intersection.
continue;
}
if pairs_seen
.insert(
SortedPair::new(ch1.into_raw_parts().0, ch2.into_raw_parts().0),
(),
)
.is_none()
if pairs_seen
.insert(
SortedPair::new(ch1.into_raw_parts().0, ch2.into_raw_parts().0),
(),
)
.is_none()
{
let co1 = &colliders[*ch1];
let co2 = &colliders[ch2];
let bh1 = co1.parent.map(|p| p.handle);
let bh2 = co2.parent.map(|p| p.handle);
// Ignore self-intersections and apply groups filter.
if bh1 == bh2
|| !co1.flags.collision_groups.test(co2.flags.collision_groups)
{
let co1 = &colliders[*ch1];
let co2 = &colliders[*ch2];
let bh1 = co1.parent.map(|p| p.handle);
let bh2 = co2.parent.map(|p| p.handle);
// Ignore self-intersections and apply groups filter.
if bh1 == bh2
|| !co1.flags.collision_groups.test(co2.flags.collision_groups)
{
return true;
}
let smallest_dist = narrow_phase
.contact_pair(*ch1, *ch2)
.and_then(|p| p.find_deepest_contact())
.map(|c| c.1.dist)
.unwrap_or(0.0);
let rb1 = bh1.map(|h| &bodies[h]);
let rb2 = bh2.map(|h| &bodies[h]);
if let Some(toi) = TOIEntry::try_from_colliders(
self.query_pipeline.query_dispatcher(),
*ch1,
*ch2,
co1,
co2,
rb1,
rb2,
None,
None,
0.0,
// NOTE: we use dt here only once we know that
// there is at least one TOI before dt.
min_overstep,
smallest_dist,
) {
if toi.toi > dt {
min_overstep = min_overstep.min(toi.toi);
} else {
min_overstep = dt;
all_toi.push(toi);
}
}
continue;
}
true
});
let smallest_dist = narrow_phase
.contact_pair(*ch1, ch2)
.and_then(|p| p.find_deepest_contact())
.map(|c| c.1.dist)
.unwrap_or(0.0);
let rb1 = bh1.map(|h| &bodies[h]);
let rb2 = bh2.map(|h| &bodies[h]);
if let Some(toi) = TOIEntry::try_from_colliders(
query_pipeline.dispatcher,
*ch1,
ch2,
co1,
co2,
rb1,
rb2,
None,
None,
0.0,
// NOTE: we use dt here only once we know that
// there is at least one TOI before dt.
min_overstep,
smallest_dist,
) {
if toi.toi > dt {
min_overstep = min_overstep.min(toi.toi);
} else {
min_overstep = dt;
all_toi.push(toi);
}
}
}
}
}
}
}
@@ -378,10 +401,10 @@ impl CCDSolver {
if toi.is_pseudo_intersection_test {
// NOTE: this test is redundant with the previous `if !should_freeze && ...`
// but let's keep it to avoid tricky regressions if we end up swapping both
// `if` for some reasons in the future.
// `if` for some reason in the future.
if should_freeze1 || should_freeze2 {
// This is only an intersection so we don't have to freeze and there is no
// need to resweep. However we will need to see if we have to generate
// need to resweep. However, we will need to see if we have to generate
// intersection events, so push the TOI for further testing.
pseudo_intersections_to_check.push(toi);
}
@@ -410,59 +433,53 @@ impl CCDSolver {
let co_next_pos1 = rb1.pos.next_position * co1_parent.pos_wrt_parent;
let aabb = co1.shape.compute_swept_aabb(&co1.pos, &co_next_pos1);
self.query_pipeline
.colliders_with_aabb_intersecting_aabb(&aabb, |ch2| {
let co2 = &colliders[*ch2];
for (ch2, _) in query_pipeline.intersect_aabb_conservative(&aabb) {
let co2 = &colliders[ch2];
let bh1 = co1.parent.map(|p| p.handle);
let bh2 = co2.parent.map(|p| p.handle);
let bh1 = co1.parent.map(|p| p.handle);
let bh2 = co2.parent.map(|p| p.handle);
// Ignore self-intersection and apply groups filter.
if bh1 == bh2
|| !co1.flags.collision_groups.test(co2.flags.collision_groups)
{
return true;
}
// Ignore self-intersection and apply groups filter.
if bh1 == bh2 || !co1.flags.collision_groups.test(co2.flags.collision_groups) {
continue;
}
let frozen1 = bh1.and_then(|h| frozen.get(&h));
let frozen2 = bh2.and_then(|h| frozen.get(&h));
let frozen1 = bh1.and_then(|h| frozen.get(&h));
let frozen2 = bh2.and_then(|h| frozen.get(&h));
let rb1 = bh1.and_then(|h| bodies.get(h));
let rb2 = bh2.and_then(|h| bodies.get(h));
let rb1 = bh1.and_then(|h| bodies.get(h));
let rb2 = bh2.and_then(|h| bodies.get(h));
if (frozen1.is_some() || !rb1.map(|b| b.ccd.ccd_active).unwrap_or(false))
&& (frozen2.is_some()
|| !rb2.map(|b| b.ccd.ccd_active).unwrap_or(false))
{
// We already did a resweep.
return true;
}
if (frozen1.is_some() || !rb1.map(|b| b.ccd.ccd_active).unwrap_or(false))
&& (frozen2.is_some() || !rb2.map(|b| b.ccd.ccd_active).unwrap_or(false))
{
// We already did a resweep.
continue;
}
let smallest_dist = narrow_phase
.contact_pair(*ch1, *ch2)
.and_then(|p| p.find_deepest_contact())
.map(|c| c.1.dist)
.unwrap_or(0.0);
let smallest_dist = narrow_phase
.contact_pair(*ch1, ch2)
.and_then(|p| p.find_deepest_contact())
.map(|c| c.1.dist)
.unwrap_or(0.0);
if let Some(toi) = TOIEntry::try_from_colliders(
self.query_pipeline.query_dispatcher(),
*ch1,
*ch2,
co1,
co2,
rb1,
rb2,
frozen1.copied(),
frozen2.copied(),
start_time,
dt,
smallest_dist,
) {
all_toi.push(toi);
}
true
});
if let Some(toi) = TOIEntry::try_from_colliders(
query_pipeline.dispatcher,
*ch1,
ch2,
co1,
co2,
rb1,
rb2,
frozen1.copied(),
frozen2.copied(),
start_time,
dt,
smallest_dist,
) {
all_toi.push(toi);
}
}
}
}
@@ -523,12 +540,13 @@ impl CCDSolver {
let prev_coll_pos12 = co1.pos.inv_mul(&co2.pos);
let next_coll_pos12 = co_next_pos1.inv_mul(&co_next_pos2);
let query_dispatcher = self.query_pipeline.query_dispatcher();
let intersect_before = query_dispatcher
let intersect_before = query_pipeline
.dispatcher
.intersection_test(&prev_coll_pos12, co1.shape.as_ref(), co2.shape.as_ref())
.unwrap_or(false);
let intersect_after = query_dispatcher
let intersect_after = query_pipeline
.dispatcher
.intersection_test(&next_coll_pos12, co1.shape.as_ref(), co2.shape.as_ref())
.unwrap_or(false);

View File

@@ -111,11 +111,7 @@ impl IntegrationParameters {
/// This is zero if `self.dt` is zero.
#[inline(always)]
pub fn inv_dt(&self) -> Real {
if self.dt == 0.0 {
0.0
} else {
1.0 / self.dt
}
if self.dt == 0.0 { 0.0 } else { 1.0 / self.dt }
}
/// Sets the time-stepping length.

View File

@@ -2,7 +2,7 @@
use crate::dynamics::solver::MotorParameters;
use crate::dynamics::{FixedJoint, MotorModel, PrismaticJoint, RevoluteJoint, RopeJoint};
use crate::math::{Isometry, Point, Real, Rotation, UnitVector, Vector, SPATIAL_DIM};
use crate::math::{Isometry, Point, Real, Rotation, SPATIAL_DIM, UnitVector, Vector};
use crate::utils::{SimdBasis, SimdRealCopy};
#[cfg(feature = "dim3")]

View File

@@ -3,8 +3,8 @@ use parry::utils::hashset::HashSet;
use super::ImpulseJoint;
use crate::geometry::{InteractionGraph, RigidBodyGraphIndex, TemporaryInteractionIndex};
use crate::data::arena::Arena;
use crate::data::Coarena;
use crate::data::arena::Arena;
use crate::dynamics::{GenericJoint, IslandManager, RigidBodyHandle, RigidBodySet};
/// The unique identifier of a joint added to the joint set.

View File

@@ -4,13 +4,13 @@ use crate::dynamics::{RigidBodyHandle, RigidBodySet, RigidBodyType, RigidBodyVel
#[cfg(feature = "dim3")]
use crate::math::Matrix;
use crate::math::{
AngDim, AngVector, Dim, Isometry, Jacobian, Point, Real, Vector, ANG_DIM, DIM, SPATIAL_DIM,
ANG_DIM, AngDim, AngVector, DIM, Dim, Isometry, Jacobian, Point, Real, SPATIAL_DIM, Vector,
};
use crate::prelude::MultibodyJoint;
use crate::utils::{IndexMut2, SimdAngularInertia, SimdCross, SimdCrossMatrix};
use na::{
self, DMatrix, DVector, DVectorView, DVectorViewMut, Dyn, OMatrix, SMatrix, SVector,
StorageMut, LU,
self, DMatrix, DVector, DVectorView, DVectorViewMut, Dyn, LU, OMatrix, SMatrix, SVector,
StorageMut,
};
#[cfg(doc)]
@@ -1083,7 +1083,7 @@ impl Multibody {
/// - All the indices must be part of the same kinematic branch.
/// - If a link is `branch[i]`, then `branch[i - 1]` must be its parent.
///
/// In general, this method shouldnt be used directly and [`Self::forward_kinematics_single_link`̦]
/// In general, this method shouldnt be used directly and [`Self::forward_kinematics_single_link`]
/// should be preferred since it computes the branch indices automatically.
///
/// If you want to calculate the branch indices manually, see [`Self::kinematic_branch`].

View File

@@ -1,5 +1,5 @@
use crate::dynamics::{JointAxesMask, Multibody, MultibodyLink, RigidBodySet};
use crate::math::{Isometry, Jacobian, Real, ANG_DIM, DIM, SPATIAL_DIM};
use crate::math::{ANG_DIM, DIM, Isometry, Jacobian, Real, SPATIAL_DIM};
use na::{self, DVector, SMatrix};
use parry::math::SpacialVector;

View File

@@ -1,11 +1,11 @@
use crate::dynamics::solver::JointGenericOneBodyConstraint;
use crate::dynamics::{
joint, FixedJointBuilder, GenericJoint, IntegrationParameters, Multibody, MultibodyLink,
RigidBodyVelocity,
FixedJointBuilder, GenericJoint, IntegrationParameters, Multibody, MultibodyLink,
RigidBodyVelocity, joint,
};
use crate::math::{
Isometry, JacobianViewMut, Real, Rotation, SpacialVector, Translation, Vector, ANG_DIM, DIM,
SPATIAL_DIM,
ANG_DIM, DIM, Isometry, JacobianViewMut, Real, Rotation, SPATIAL_DIM, SpacialVector,
Translation, Vector,
};
use na::{DVector, DVectorViewMut};
#[cfg(feature = "dim3")]

View File

@@ -4,7 +4,6 @@ use crate::data::{Arena, Coarena, Index};
use crate::dynamics::joint::MultibodyLink;
use crate::dynamics::{GenericJoint, Multibody, MultibodyJoint, RigidBodyHandle};
use crate::geometry::{InteractionGraph, RigidBodyGraphIndex};
use crate::parry::partitioning::IndexedData;
/// The unique handle of an multibody_joint added to a `MultibodyJointSet`.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
@@ -44,15 +43,6 @@ impl Default for MultibodyJointHandle {
}
}
impl IndexedData for MultibodyJointHandle {
fn default() -> Self {
Self(IndexedData::default())
}
fn index(&self) -> usize {
self.0.index()
}
}
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
/// Indexes usable to get a multibody link from a `MultibodyJointSet`.
@@ -371,12 +361,12 @@ impl MultibodyJointSet {
/// suffer form the ABA problem.
pub fn get_unknown_gen(&self, i: u32) -> Option<(&Multibody, usize, MultibodyJointHandle)> {
let link = self.rb2mb.get_unknown_gen(i)?;
let gen = self.rb2mb.get_gen(i)?;
let generation = self.rb2mb.get_gen(i)?;
let multibody = self.multibodies.get(link.multibody.0)?;
Some((
multibody,
link.id,
MultibodyJointHandle(Index::from_raw_parts(i, gen)),
MultibodyJointHandle(Index::from_raw_parts(i, generation)),
))
}
@@ -427,7 +417,7 @@ impl MultibodyJointSet {
.flat_map(move |link| self.connectivity_graph.interactions_with(link.graph_id))
.map(|inter| {
// NOTE: the joint handle is always equal to the handle of the second rigid-body.
(inter.0, inter.1, MultibodyJointHandle(inter.1 .0))
(inter.0, inter.1, MultibodyJointHandle(inter.1.0))
})
}

View File

@@ -9,7 +9,6 @@ use crate::geometry::{
use crate::math::{
AngVector, AngularInertia, Isometry, Point, Real, Rotation, Translation, Vector,
};
use crate::parry::partitioning::IndexedData;
use crate::utils::{SimdAngularInertia, SimdCross, SimdDot};
use num::Zero;
@@ -42,16 +41,6 @@ impl RigidBodyHandle {
}
}
impl IndexedData for RigidBodyHandle {
fn default() -> Self {
Self(IndexedData::default())
}
fn index(&self) -> usize {
self.0.index()
}
}
/// The type of a body, governing the way it is affected by external forces.
#[deprecated(note = "renamed as RigidBodyType")]
pub type BodyStatus = RigidBodyType;

View File

@@ -6,13 +6,13 @@ use crate::dynamics::solver::contact_constraint::{
};
use crate::dynamics::solver::solver_body::SolverBody;
use crate::dynamics::solver::solver_vel::SolverVel;
use crate::dynamics::solver::{reset_buffer, ConstraintTypes, SolverConstraintsSet};
use crate::dynamics::solver::{ConstraintTypes, SolverConstraintsSet, reset_buffer};
use crate::dynamics::{
ImpulseJoint, IntegrationParameters, IslandManager, JointAxesMask, MultibodyJointSet,
RigidBodySet,
};
use crate::geometry::{ContactManifold, ContactManifoldIndex};
use crate::math::{Real, MAX_MANIFOLD_POINTS};
use crate::math::{MAX_MANIFOLD_POINTS, Real};
use na::DVector;
use parry::math::DIM;

View File

@@ -3,7 +3,7 @@ use crate::dynamics::{
IntegrationParameters, MultibodyJointSet, MultibodyLinkId, RigidBodySet, RigidBodyVelocity,
};
use crate::geometry::{ContactManifold, ContactManifoldIndex};
use crate::math::{Point, Real, DIM, MAX_MANIFOLD_POINTS};
use crate::math::{DIM, MAX_MANIFOLD_POINTS, Point, Real};
use crate::utils::SimdCross;
use super::{OneBodyConstraintElement, OneBodyConstraintNormalPart};

View File

@@ -1,7 +1,7 @@
use crate::dynamics::solver::{
OneBodyConstraintElement, OneBodyConstraintNormalPart, OneBodyConstraintTangentPart,
};
use crate::math::{Real, DIM};
use crate::math::{DIM, Real};
use na::DVector;
#[cfg(feature = "dim2")]
use na::SimdPartialOrd;

View File

@@ -1,7 +1,7 @@
use crate::dynamics::solver::{GenericRhs, TwoBodyConstraint};
use crate::dynamics::{IntegrationParameters, MultibodyJointSet, RigidBodySet};
use crate::geometry::{ContactManifold, ContactManifoldIndex};
use crate::math::{Real, DIM, MAX_MANIFOLD_POINTS};
use crate::math::{DIM, MAX_MANIFOLD_POINTS, Real};
use crate::utils::{SimdAngularInertia, SimdCross, SimdDot};
use super::{TwoBodyConstraintBuilder, TwoBodyConstraintElement, TwoBodyConstraintNormalPart};

View File

@@ -2,7 +2,7 @@ use crate::dynamics::solver::SolverVel;
use crate::dynamics::solver::{
TwoBodyConstraintElement, TwoBodyConstraintNormalPart, TwoBodyConstraintTangentPart,
};
use crate::math::{AngVector, Real, Vector, DIM};
use crate::math::{AngVector, DIM, Real, Vector};
use crate::utils::SimdDot;
use na::DVector;
#[cfg(feature = "dim2")]

View File

@@ -1,5 +1,5 @@
use super::{OneBodyConstraintElement, OneBodyConstraintNormalPart};
use crate::math::{Point, Real, Vector, DIM, MAX_MANIFOLD_POINTS};
use crate::math::{DIM, MAX_MANIFOLD_POINTS, Point, Real, Vector};
#[cfg(feature = "dim2")]
use crate::utils::SimdBasis;
use crate::utils::{self, SimdAngularInertia, SimdCross, SimdDot, SimdRealCopy};
@@ -7,8 +7,8 @@ use na::Matrix2;
use parry::math::Isometry;
use crate::dynamics::integration_parameters::BLOCK_SOLVER_ENABLED;
use crate::dynamics::solver::solver_body::SolverBody;
use crate::dynamics::solver::SolverVel;
use crate::dynamics::solver::solver_body::SolverBody;
use crate::dynamics::{IntegrationParameters, MultibodyJointSet, RigidBodySet, RigidBodyVelocity};
use crate::geometry::{ContactManifold, ContactManifoldIndex};

View File

@@ -1,7 +1,7 @@
use crate::dynamics::integration_parameters::BLOCK_SOLVER_ENABLED;
use crate::dynamics::solver::contact_constraint::TwoBodyConstraintNormalPart;
use crate::dynamics::solver::SolverVel;
use crate::math::{AngVector, TangentImpulse, Vector, DIM};
use crate::dynamics::solver::contact_constraint::TwoBodyConstraintNormalPart;
use crate::math::{AngVector, DIM, TangentImpulse, Vector};
use crate::utils::{SimdBasis, SimdDot, SimdRealCopy};
use na::Vector2;

View File

@@ -8,8 +8,8 @@ use crate::dynamics::{
};
use crate::geometry::{ContactManifold, ContactManifoldIndex};
use crate::math::{
AngVector, AngularInertia, Isometry, Point, Real, SimdReal, TangentImpulse, Vector, DIM,
MAX_MANIFOLD_POINTS, SIMD_WIDTH,
AngVector, AngularInertia, DIM, Isometry, MAX_MANIFOLD_POINTS, Point, Real, SIMD_WIDTH,
SimdReal, TangentImpulse, Vector,
};
#[cfg(feature = "dim2")]
use crate::utils::SimdBasis;

View File

@@ -5,7 +5,7 @@ use crate::dynamics::solver::{AnyConstraintMut, SolverBody};
use crate::dynamics::integration_parameters::BLOCK_SOLVER_ENABLED;
use crate::dynamics::{IntegrationParameters, MultibodyJointSet, RigidBodySet};
use crate::geometry::{ContactManifold, ContactManifoldIndex};
use crate::math::{Isometry, Real, Vector, DIM, MAX_MANIFOLD_POINTS};
use crate::math::{DIM, Isometry, MAX_MANIFOLD_POINTS, Real, Vector};
use crate::utils::{self, SimdAngularInertia, SimdBasis, SimdCross, SimdDot};
use na::{DVector, Matrix2};

View File

@@ -1,6 +1,6 @@
use crate::dynamics::integration_parameters::BLOCK_SOLVER_ENABLED;
use crate::dynamics::solver::SolverVel;
use crate::math::{AngVector, TangentImpulse, Vector, DIM};
use crate::math::{AngVector, DIM, TangentImpulse, Vector};
use crate::utils::{SimdBasis, SimdDot, SimdRealCopy};
use na::Vector2;
use simba::simd::SimdValue;

View File

@@ -8,8 +8,8 @@ use crate::dynamics::{
};
use crate::geometry::{ContactManifold, ContactManifoldIndex};
use crate::math::{
AngVector, AngularInertia, Isometry, Point, Real, SimdReal, TangentImpulse, Vector, DIM,
MAX_MANIFOLD_POINTS, SIMD_WIDTH,
AngVector, AngularInertia, DIM, Isometry, MAX_MANIFOLD_POINTS, Point, Real, SIMD_WIDTH,
SimdReal, TangentImpulse, Vector,
};
#[cfg(feature = "dim2")]
use crate::utils::SimdBasis;

View File

@@ -1,7 +1,7 @@
use super::{JointConstraintsSet, VelocitySolver};
use crate::counters::Counters;
use crate::dynamics::solver::contact_constraint::ContactConstraintsSet;
use crate::dynamics::IslandManager;
use crate::dynamics::solver::contact_constraint::ContactConstraintsSet;
use crate::dynamics::{IntegrationParameters, JointGraphEdge, JointIndex, RigidBodySet};
use crate::geometry::{ContactManifold, ContactManifoldIndex};
use crate::prelude::MultibodyJointSet;

View File

@@ -1,3 +1,4 @@
use crate::dynamics::JointGraphEdge;
use crate::dynamics::solver::joint_constraint::joint_generic_constraint::{
JointGenericOneBodyConstraint, JointGenericTwoBodyConstraint,
};
@@ -5,7 +6,6 @@ use crate::dynamics::solver::joint_constraint::joint_velocity_constraint::{
JointOneBodyConstraint, JointTwoBodyConstraint,
};
use crate::dynamics::solver::{AnyConstraintMut, ConstraintTypes};
use crate::dynamics::JointGraphEdge;
use crate::math::Real;
use na::DVector;
@@ -18,7 +18,7 @@ use crate::{
dynamics::solver::joint_constraint::joint_constraint_builder::{
JointOneBodyConstraintBuilderSimd, JointTwoBodyConstraintBuilderSimd,
},
math::{SimdReal, SIMD_WIDTH},
math::{SIMD_WIDTH, SimdReal},
};
use crate::dynamics::solver::joint_constraint::joint_constraint_builder::{

View File

@@ -1,12 +1,12 @@
use crate::dynamics::solver::ConstraintsCounts;
use crate::dynamics::solver::MotorParameters;
use crate::dynamics::solver::joint_constraint::JointSolverBody;
use crate::dynamics::solver::joint_constraint::joint_velocity_constraint::{
JointFixedSolverBody, JointOneBodyConstraint, JointTwoBodyConstraint, WritebackId,
};
use crate::dynamics::solver::joint_constraint::JointSolverBody;
use crate::dynamics::solver::solver_body::SolverBody;
use crate::dynamics::solver::ConstraintsCounts;
use crate::dynamics::solver::MotorParameters;
use crate::dynamics::{GenericJoint, ImpulseJoint, IntegrationParameters, JointIndex};
use crate::math::{AngVector, Isometry, Matrix, Point, Real, Rotation, Vector, ANG_DIM, DIM};
use crate::math::{ANG_DIM, AngVector, DIM, Isometry, Matrix, Point, Real, Rotation, Vector};
use crate::prelude::RigidBodySet;
use crate::utils;
use crate::utils::{IndexMut2, SimdCrossMatrix, SimdDot, SimdRealCopy};
@@ -16,7 +16,7 @@ use na::SMatrix;
use crate::utils::{SimdBasis, SimdQuat};
#[cfg(feature = "simd-is-enabled")]
use crate::math::{SimdReal, SIMD_WIDTH};
use crate::math::{SIMD_WIDTH, SimdReal};
pub struct JointTwoBodyConstraintBuilder {
body1: usize,

View File

@@ -2,10 +2,10 @@ use crate::dynamics::solver::categorization::categorize_joints;
use crate::dynamics::solver::solver_body::SolverBody;
use crate::dynamics::solver::solver_vel::SolverVel;
use crate::dynamics::solver::{
reset_buffer, JointConstraintTypes, JointGenericOneBodyConstraint,
JointGenericOneBodyConstraintBuilder, JointGenericTwoBodyConstraint,
JointGenericTwoBodyConstraintBuilder, JointGenericVelocityOneBodyExternalConstraintBuilder,
JointGenericVelocityOneBodyInternalConstraintBuilder, SolverConstraintsSet,
JointConstraintTypes, JointGenericOneBodyConstraint, JointGenericOneBodyConstraintBuilder,
JointGenericTwoBodyConstraint, JointGenericTwoBodyConstraintBuilder,
JointGenericVelocityOneBodyExternalConstraintBuilder,
JointGenericVelocityOneBodyInternalConstraintBuilder, SolverConstraintsSet, reset_buffer,
};
use crate::dynamics::{
IntegrationParameters, IslandManager, JointGraphEdge, JointIndex, MultibodyJointSet,

View File

@@ -1,10 +1,10 @@
use crate::dynamics::solver::SolverVel;
use crate::dynamics::solver::joint_constraint::joint_velocity_constraint::{
JointFixedSolverBody, WritebackId,
};
use crate::dynamics::solver::joint_constraint::{JointSolverBody, JointTwoBodyConstraintHelper};
use crate::dynamics::solver::SolverVel;
use crate::dynamics::{GenericJoint, IntegrationParameters, JointGraphEdge, JointIndex, Multibody};
use crate::math::{Isometry, Real, DIM};
use crate::math::{DIM, Isometry, Real};
use crate::prelude::SPATIAL_DIM;
use na::{DVector, DVectorView, DVectorViewMut};

View File

@@ -1,3 +1,4 @@
use crate::dynamics::solver::MotorParameters;
use crate::dynamics::solver::joint_constraint::joint_generic_constraint::{
JointGenericOneBodyConstraint, JointGenericTwoBodyConstraint,
};
@@ -5,19 +6,18 @@ use crate::dynamics::solver::joint_constraint::joint_velocity_constraint::{
JointFixedSolverBody, WritebackId,
};
use crate::dynamics::solver::joint_constraint::{JointSolverBody, JointTwoBodyConstraintHelper};
use crate::dynamics::solver::MotorParameters;
use crate::dynamics::{
GenericJoint, ImpulseJoint, IntegrationParameters, JointIndex, Multibody, MultibodyJointSet,
MultibodyLinkId, RigidBodySet,
};
use crate::math::{Real, Vector, ANG_DIM, DIM, SPATIAL_DIM};
use crate::math::{ANG_DIM, DIM, Real, SPATIAL_DIM, Vector};
use crate::utils;
use crate::utils::IndexMut2;
use crate::utils::SimdDot;
use na::{DVector, SVector};
use crate::dynamics::solver::solver_body::SolverBody;
use crate::dynamics::solver::ConstraintsCounts;
use crate::dynamics::solver::solver_body::SolverBody;
#[cfg(feature = "dim3")]
use crate::utils::SimdAngularInertia;
#[cfg(feature = "dim2")]

View File

@@ -1,15 +1,15 @@
use crate::dynamics::solver::joint_constraint::JointTwoBodyConstraintHelper;
use crate::dynamics::solver::SolverVel;
use crate::dynamics::solver::joint_constraint::JointTwoBodyConstraintHelper;
use crate::dynamics::{
GenericJoint, IntegrationParameters, JointAxesMask, JointGraphEdge, JointIndex,
};
use crate::math::{AngVector, AngularInertia, Isometry, Point, Real, Vector, DIM, SPATIAL_DIM};
use crate::math::{AngVector, AngularInertia, DIM, Isometry, Point, Real, SPATIAL_DIM, Vector};
use crate::num::Zero;
use crate::utils::{SimdDot, SimdRealCopy};
#[cfg(feature = "simd-is-enabled")]
use {
crate::math::{SimdReal, SIMD_WIDTH},
crate::math::{SIMD_WIDTH, SimdReal},
na::SimdValue,
};

View File

@@ -42,6 +42,9 @@ mod velocity_solver;
pub unsafe fn reset_buffer<T>(buffer: &mut Vec<T>, len: usize) {
buffer.clear();
buffer.reserve(len);
buffer.as_mut_ptr().write_bytes(u8::MAX, len);
buffer.set_len(len);
unsafe {
buffer.as_mut_ptr().write_bytes(u8::MAX, len);
buffer.set_len(len);
}
}

View File

@@ -1,4 +1,4 @@
use crate::math::{AngVector, Vector, SPATIAL_DIM};
use crate::math::{AngVector, SPATIAL_DIM, Vector};
use crate::utils::SimdRealCopy;
use na::{DVectorView, DVectorViewMut, Scalar};
use std::ops::{AddAssign, Sub, SubAssign};

View File

@@ -1,9 +1,9 @@
use super::{JointConstraintTypes, SolverConstraintsSet};
use crate::dynamics::solver::solver_body::SolverBody;
use crate::dynamics::{
solver::{ContactConstraintTypes, SolverVel},
IntegrationParameters, IslandManager, JointGraphEdge, JointIndex, MultibodyJointSet,
MultibodyLinkId, RigidBodySet,
solver::{ContactConstraintTypes, SolverVel},
};
use crate::geometry::{ContactManifold, ContactManifoldIndex};
use crate::math::Real;

View File

@@ -1,9 +1,7 @@
use crate::dynamics::RigidBodySet;
use crate::geometry::{BroadPhasePairEvent, ColliderHandle, ColliderSet};
use parry::math::Real;
/// An internal index stored in colliders by some broad-phase algorithms.
pub type BroadPhaseProxyIndex = u32;
use crate::prelude::IntegrationParameters;
use downcast_rs::DowncastSync;
/// Trait implemented by broad-phase algorithms supported by Rapier.
///
@@ -12,7 +10,7 @@ pub type BroadPhaseProxyIndex = u32;
/// two objects dont actually touch, but it is incorrect to remove a pair between two objects
/// that are still touching. In other words, it can have false-positive (though these induce
/// some computational overhead on the narrow-phase), but cannot have false-negative.
pub trait BroadPhase: Send + Sync + 'static {
pub trait BroadPhase: Send + Sync + 'static + DowncastSync {
/// Updates the broad-phase.
///
/// The results must be output through the `events` struct. The broad-phase algorithm is only
@@ -25,8 +23,7 @@ pub trait BroadPhase: Send + Sync + 'static {
/// **not** be modified during the broad-phase update.
///
/// # Parameters
/// - `prediction_distance`: colliders that are not exactly touching, but closer to this
/// distance must form a collision pair.
/// - `params`: the integration parameters governing the simulation.
/// - `colliders`: the set of colliders. Change detection with `collider.needs_broad_phase_update()`
/// can be relied on at this stage.
/// - `modified_colliders`: colliders that are know to be modified since the last update.
@@ -39,12 +36,13 @@ pub trait BroadPhase: Send + Sync + 'static {
/// are still touching or closer than `prediction_distance`.
fn update(
&mut self,
dt: Real,
prediction_distance: Real,
colliders: &mut ColliderSet,
params: &IntegrationParameters,
colliders: &ColliderSet,
bodies: &RigidBodySet,
modified_colliders: &[ColliderHandle],
removed_colliders: &[ColliderHandle],
events: &mut Vec<BroadPhasePairEvent>,
);
}
downcast_rs::impl_downcast!(sync BroadPhase);

View File

@@ -0,0 +1,270 @@
use crate::dynamics::{IntegrationParameters, RigidBodySet};
use crate::geometry::{BroadPhase, BroadPhasePairEvent, ColliderHandle, ColliderPair, ColliderSet};
use parry::bounding_volume::BoundingVolume;
use parry::partitioning::{Bvh, BvhWorkspace};
use parry::utils::hashmap::{Entry, HashMap};
/// A broad-phase based on parrys [`Bvh`] data structure.
#[derive(Default, Clone)]
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
pub struct BroadPhaseBvh {
pub(crate) tree: Bvh,
#[cfg_attr(feature = "serde-serialize", serde(skip))]
workspace: BvhWorkspace,
pairs: HashMap<(ColliderHandle, ColliderHandle), u32>,
frame_index: u32,
optimization_strategy: BvhOptimizationStrategy,
}
// TODO: would be interesting to try out:
// "Fast Insertion-Based Optimization of Bounding Volume Hierarchies"
// by Bittner et al.
/// Selection of strategies to maintain through time the broad-phase BVH in shape that remains
/// efficient for collision-detection and scene queries.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[derive(Default, PartialEq, Eq, Copy, Clone)]
pub enum BvhOptimizationStrategy {
/// Different sub-trees of the BVH will be optimized at each frame.
#[default]
SubtreeOptimizer,
/// Disables incremental BVH optimization (discouraged).
///
/// This should not be used except for debugging purpose.
None,
}
const ENABLE_TREE_VALIDITY_CHECK: bool = false;
impl BroadPhaseBvh {
/// Initializes a new empty broad-phase.
pub fn new() -> Self {
Self::default()
}
/// Initializes a new empty broad-phase with the specified strategy for incremental
/// BVH optimization.
pub fn with_optimization_strategy(optimization_strategy: BvhOptimizationStrategy) -> Self {
Self {
optimization_strategy,
..Default::default()
}
}
fn update_with_strategy(
&mut self,
params: &IntegrationParameters,
colliders: &ColliderSet,
bodies: &RigidBodySet,
modified_colliders: &[ColliderHandle],
removed_colliders: &[ColliderHandle],
events: &mut Vec<BroadPhasePairEvent>,
strategy: BvhOptimizationStrategy,
) {
const CHANGE_DETECTION_ENABLED: bool = true;
self.frame_index = self.frame_index.overflowing_add(1).0;
// Removals must be handled first, in case another collider in
// `modified_colliders` shares the same index.
for handle in removed_colliders {
self.tree.remove(handle.into_raw_parts().0);
}
if modified_colliders.is_empty() {
return;
}
let first_pass = self.tree.is_empty();
// let t0 = std::time::Instant::now();
for modified in modified_colliders {
if let Some(collider) = colliders.get(*modified) {
if !collider.is_enabled() || !collider.changes.needs_broad_phase_update() {
continue;
}
// Take soft-ccd into account by growing the aabb.
let next_pose = collider.parent.and_then(|p| {
let parent = bodies.get(p.handle)?;
(parent.soft_ccd_prediction() > 0.0).then(|| {
parent.predict_position_using_velocity_and_forces_with_max_dist(
params.dt,
parent.soft_ccd_prediction(),
) * p.pos_wrt_parent
})
});
let prediction_distance = params.prediction_distance();
let mut aabb = collider.compute_collision_aabb(prediction_distance / 2.0);
if let Some(next_pose) = next_pose {
let next_aabb = collider
.shape
.compute_aabb(&next_pose)
.loosened(collider.contact_skin() + prediction_distance / 2.0);
aabb.merge(&next_aabb);
}
let change_detection_skin = if CHANGE_DETECTION_ENABLED {
1.0e-2 * params.length_unit
} else {
0.0
};
self.tree.insert_or_update_partially(
aabb,
modified.into_raw_parts().0,
change_detection_skin,
);
}
}
if ENABLE_TREE_VALIDITY_CHECK {
if first_pass {
self.tree.assert_well_formed();
}
self.tree.assert_well_formed_topology_only();
}
// let t0 = std::time::Instant::now();
match strategy {
BvhOptimizationStrategy::SubtreeOptimizer => {
self.tree.optimize_incremental(&mut self.workspace);
}
BvhOptimizationStrategy::None => {}
};
// println!(
// "Incremental optimization: {}",
// t0.elapsed().as_secs_f32() * 1000.0
// );
// NOTE: we run refit after optimization so we can skip updating internal nodes during
// optimization, and so we can reorder the tree in memory (in depth-first order)
// to make it more cache friendly after the rebuild shuffling everything around.
// let t0 = std::time::Instant::now();
self.tree.refit(&mut self.workspace);
if ENABLE_TREE_VALIDITY_CHECK {
self.tree.assert_well_formed();
}
// println!("Refit: {}", t0.elapsed().as_secs_f32() * 1000.0);
// println!(
// "leaf count: {}/{} (changed: {})",
// self.tree.leaf_count(),
// self.tree.reachable_leaf_count(0),
// self.tree.changed_leaf_count(0),
// );
// self.tree.assert_is_depth_first();
// self.tree.assert_well_formed();
// println!(
// "Is well formed. Tree height: {}",
// self.tree.subtree_height(0),
// );
// // println!("Tree quality: {}", self.tree.quality_metric());
let mut pairs_collector = |co1: u32, co2: u32| {
assert_ne!(co1, co2);
let Some((_, mut handle1)) = colliders.get_unknown_gen(co1) else {
return;
};
let Some((_, mut handle2)) = colliders.get_unknown_gen(co2) else {
return;
};
if co1 > co2 {
std::mem::swap(&mut handle1, &mut handle2);
}
match self.pairs.entry((handle1, handle2)) {
Entry::Occupied(e) => *e.into_mut() = self.frame_index,
Entry::Vacant(e) => {
e.insert(self.frame_index);
events.push(BroadPhasePairEvent::AddPair(ColliderPair::new(
handle1, handle2,
)));
}
}
};
// let t0 = std::time::Instant::now();
self.tree
.traverse_bvtt_single_tree::<CHANGE_DETECTION_ENABLED>(
&mut self.workspace,
&mut pairs_collector,
);
// println!("Detection: {}", t0.elapsed().as_secs_f32() * 1000.0);
// println!(">>>>>> Num events: {}", events.iter().len());
// Find outdated entries.
// TODO PERF:
// Currently, the narrow-phase isnt capable of removing its own outdated
// collision pairs. So we need to run a pass here to find aabbs that are
// no longer overlapping. This, and the pair deduplication happening in
// the `pairs_collector` is expensive and should be done more efficiently
// by the narrow-phase itself (or islands) once we rework it.
//
// let t0 = std::time::Instant::now();
self.pairs.retain(|(h0, h1), timestamp| {
if *timestamp != self.frame_index {
if !colliders.contains(*h0) || !colliders.contains(*h1) {
// At least one of the colliders no longer exist, dont retain the pair.
return false;
}
let Some(node0) = self.tree.leaf_node(h0.into_raw_parts().0) else {
return false;
};
let Some(node1) = self.tree.leaf_node(h1.into_raw_parts().0) else {
return false;
};
if (!CHANGE_DETECTION_ENABLED || node0.is_changed() || node1.is_changed())
&& !node0.intersects(node1)
{
events.push(BroadPhasePairEvent::DeletePair(ColliderPair::new(*h0, *h1)));
false
} else {
true
}
} else {
// If the timestamps match, we already saw this pair during traversal.
// There can be rare occurrences where the timestamp will be equal
// even though we didnt see the pair during traversal. This happens
// if the frame index overflowed. But this is fine, well catch it
// in another frame.
true
}
});
// println!(
// "Post-filtering: {} (added pairs: {}, removed pairs: {})",
// t0.elapsed().as_secs_f32() * 1000.0,
// added_pairs,
// removed_pairs
// );
}
}
impl BroadPhase for BroadPhaseBvh {
    /// Updates the broad-phase, taking the new collider positions into account.
    ///
    /// This simply delegates to `Self::update_with_strategy` using the
    /// optimization strategy currently configured on `self`.
    fn update(
        &mut self,
        params: &IntegrationParameters,
        colliders: &ColliderSet,
        bodies: &RigidBodySet,
        modified_colliders: &[ColliderHandle],
        removed_colliders: &[ColliderHandle],
        events: &mut Vec<BroadPhasePairEvent>,
    ) {
        self.update_with_strategy(
            params,
            colliders,
            bodies,
            modified_colliders,
            removed_colliders,
            events,
            self.optimization_strategy,
        );
    }
}

View File

@@ -1,702 +0,0 @@
use super::{
BroadPhasePairEvent, ColliderPair, SAPLayer, SAPProxies, SAPProxy, SAPProxyData, SAPRegionPool,
};
use crate::geometry::{
BroadPhaseProxyIndex, Collider, ColliderBroadPhaseData, ColliderChanges, ColliderHandle,
ColliderSet,
};
use crate::math::{Isometry, Real};
use crate::prelude::{BroadPhase, RigidBodySet};
use crate::utils::IndexMut2;
use parry::bounding_volume::BoundingVolume;
use parry::utils::hashmap::HashMap;
/// A broad-phase combining a Hierarchical Grid and Sweep-and-Prune.
///
/// The basic Sweep-and-Prune (SAP) algorithm has one significant flaw:
/// the interactions between far-away objects. This means that objects
/// that are very far away will still have some of their endpoints swapped
/// within the SAP data-structure. This results in poor scaling because this
/// results in lots of swapping between endpoints of Aabbs that won't ever
/// actually interact.
///
/// The first optimization to address this problem is to use the Multi-SAP
/// method. This basically combines an SAP with a grid. The grid subdivides
/// the spaces into equally-sized subspaces (grid cells). Each subspace, which we call
/// a "region" contains an SAP instance (i.e. three SAP axes responsible for
/// collecting endpoints and swapping them when they move to detect interaction pairs).
/// Each Aabb is inserted in all the regions it intersects.
/// This prevents the far-away problem because two objects that are far away will
/// be located on different regions. So their endpoints will never meet.
///
/// However, the Multi-SAP approach has one notable problem: the region size must
/// be chosen wisely. It could be user-defined, but that makes it more difficult
/// to use (for the end-user). Or it can be given a fixed value. Using a fixed
/// value may result in large objects intersecting lots of regions, resulting in
/// poor performances and very high memory usage.
///
/// So a solution to that large-objects problem of the Multi-SAP approach is to
/// replace the grid by a hierarchical grid. A hierarchical grid is composed of
/// several layers. And each layer has different region sizes. For example all
/// the regions on layer 0 will have the size 1x1x1. All the regions on the layer
/// 1 will have the size 10x10x10, etc. That way, a given Aabb will be inserted
/// on the layer that has regions big enough to avoid the large-object problem.
/// For example a 20x20x20 object will be inserted in the layer with region
/// of size 10x10x10, resulting in only 8 regions being intersected by the Aabb.
/// (If it was inserted in the layer with regions of size 1x1x1, it would have intersected
/// 8000 regions, which is a problem performance-wise.)
///
/// We call this new method the Hierarchical-SAP.
///
/// Now with the Hierarchical-SAP, we can update each layer independently from one another.
/// However, objects belonging to different layers will never be detected as intersecting that
/// way. So we need a way to do inter-layer interference detection. There are a lot of ways of doing
/// this: performing inter-layer Multi-Box-Pruning passes is one example (but this is not what we do).
/// In our implementation, we do the following:
/// - The Aabb bounds of each region of the layer `n` are inserted into the corresponding larger region
///   of the layer `n + 1`.
/// - When an Aabb in the region of the layer `n + 1` intersects the Aabb corresponding to one of the
///   regions at the smaller layer `n`, we add that Aabb to that smaller region.
///
/// So in the end it means that a given Aabb will be inserted into all the regions it intersects at
/// the layer `n`. And it will also be inserted into all the regions it intersects at the smaller layers
/// (the layers `< n`), but only for the regions that already exist (so we don't have to discretize
/// our Aabb into the layers `< n`). This involves a fair amount of bookkeeping unfortunately, but
/// this has the benefit of keeping the overall complexity of the algorithm O(1) in the typical
/// spatially coherent scenario.
///
/// From an implementation point-of-view, our hierarchical SAP is implemented with the following structures:
/// - There is one `SAPLayer` per layer of the hierarchical grid.
/// - Each `SAPLayer` contains multiple `SAPRegion` (each being a region of the grid represented by that layer).
/// - Each `SAPRegion` contains three `SAPAxis`, representing the "classical" SAP algorithm running on this region.
/// - Each `SAPAxis` maintains a sorted list of `SAPEndpoints` representing the endpoints of the Aabbs intersecting
///   the bounds on the `SAPRegion` containing this `SAPAxis`.
/// - A set of `SAPProxy` are maintained separately. It contains the Aabbs of all the colliders managed by this
///   broad-phase, as well as the Aabbs of all the regions part of this broad-phase.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[derive(Clone)]
pub struct BroadPhaseMultiSap {
    proxies: SAPProxies,
    layers: Vec<SAPLayer>,
    smallest_layer: u8,
    largest_layer: u8,
    // NOTE: we maintain this hashmap to simplify collider removal.
    // This information is also present in the ColliderProxyId
    // component. However if that component is removed, we need
    // a way to access it to do some cleanup.
    // Note that we could just remove the ColliderProxyId component
    // altogether but that would be slow because of the need to
    // always access this hashmap. Instead, we access this hashmap
    // only when the collider has been added/removed.
    // Another alternative would be to remove ColliderProxyId and
    // just use a Coarena. But this seems like it could use too
    // much memory.
    #[cfg_attr(
        feature = "serde-serialize",
        serde(
            serialize_with = "crate::utils::serde::serialize_to_vec_tuple",
            deserialize_with = "crate::utils::serde::deserialize_from_vec_tuple"
        )
    )]
    colliders_proxy_ids: HashMap<ColliderHandle, BroadPhaseProxyIndex>,
    #[cfg_attr(feature = "serde-serialize", serde(skip))]
    region_pool: SAPRegionPool, // To avoid repeated allocations.
    // We could think serializing this workspace is useless.
    // It turns out it is important to serialize at least its capacity
    // and restore this capacity when deserializing the hashmap.
    // This is because the order of future elements inserted into the
    // hashmap depends on its capacity (because the internal bucket indices
    // depend on this capacity). So not restoring this capacity may alter
    // the order at which future elements are reported. This will in turn
    // alter the order at which the pairs are registered in the narrow-phase,
    // thus altering the order of the contact manifold. In the end, this
    // alters the order of the resolution of contacts, resulting in
    // diverging simulation after restoration of a snapshot.
    #[cfg_attr(
        feature = "serde-serialize",
        serde(
            serialize_with = "parry::utils::hashmap::serialize_hashmap_capacity",
            deserialize_with = "parry::utils::hashmap::deserialize_hashmap_capacity"
        )
    )]
    reporting: HashMap<(u32, u32), bool>, // Workspace
}
impl Default for BroadPhaseMultiSap {
fn default() -> Self {
Self::new()
}
}
impl BroadPhaseMultiSap {
/// Create a new empty broad-phase.
///
/// Layers are created lazily, as colliders of various sizes are added.
pub fn new() -> Self {
    BroadPhaseMultiSap {
        proxies: SAPProxies::new(),
        layers: Vec::new(),
        smallest_layer: 0,
        largest_layer: 0,
        region_pool: Vec::new(),
        reporting: HashMap::default(),
        colliders_proxy_ids: HashMap::default(),
    }
}
/// Maintain the broad-phase internal state by taking collider removal into account.
///
/// For each collider marked as removed, we make their containing layer mark
/// its proxy as pre-deleted. The actual proxy removal will happen at the end
/// of the `BroadPhaseMultiSap::update`.
fn handle_removed_colliders(&mut self, removed_colliders: &[ColliderHandle]) {
    // For each removed collider, remove the corresponding proxy.
    for removed in removed_colliders {
        // The lookup can fail if the collider was never added to this
        // broad-phase; in that case there is nothing to clean up.
        if let Some(proxy_id) = self.colliders_proxy_ids.get(removed).copied() {
            self.predelete_proxy(proxy_id);
        }
    }
}
/// Pre-deletes a proxy from this broad-phase.
///
/// The removal of a proxy is a semi-lazy process. It will mark
/// the proxy as predeleted, and will set its Aabb as +infinity.
/// After this method has been called with all the proxies to
/// remove, the `complete_removals` method MUST be called to
/// complete the removal of these proxies, by actually removing them
/// from all the relevant layers/regions/axes.
fn predelete_proxy(&mut self, proxy_index: BroadPhaseProxyIndex) {
    if proxy_index == crate::INVALID_U32 {
        // This collider has not been added to the broad-phase yet.
        return;
    }
    let proxy = &mut self.proxies[proxy_index];
    let layer = &mut self.layers[proxy.layer_id as usize];
    // Let the layer know that the proxy is being deleted.
    layer.predelete_proxy(&mut self.proxies, proxy_index);
}
/// Completes the removal of the deleted proxies.
///
/// If `self.predelete_proxy` was called, then this `complete_removals`
/// method must be called to complete the removals.
///
/// This method will actually remove from the proxy list all the proxies
/// marked as deletable by `self.predelete_proxy`, making their proxy
/// handles re-usable by new proxies.
fn complete_removals(
    &mut self,
    colliders: &mut ColliderSet,
    removed_colliders: &[ColliderHandle],
) {
    // If there is no layer, there is nothing to remove.
    if self.layers.is_empty() {
        return;
    }
    // This is a bottom-up pass:
    // - Complete the removal on the layer `n`. This may cause some regions to be deleted.
    // - Continue with the layer `n + 1`. This will delete from `n + 1` all the proxies
    //   of the regions originating from `n`.
    // This bottom-up approach will propagate region removal from the smallest layer up
    // to the largest layer.
    let mut curr_layer_id = self.smallest_layer;
    loop {
        let curr_layer = &mut self.layers[curr_layer_id as usize];
        if let Some(larger_layer_id) = curr_layer.larger_layer {
            let (curr_layer, larger_layer) = self
                .layers
                .index_mut2(curr_layer_id as usize, larger_layer_id as usize);
            curr_layer.complete_removals(
                Some(larger_layer),
                &mut self.proxies,
                &mut self.region_pool,
            );
            // NOTE: we don't care about reporting pairs.
            self.reporting.clear();
            curr_layer_id = larger_layer_id;
        } else {
            // Reached the largest layer: no larger layer to propagate into.
            curr_layer.complete_removals(None, &mut self.proxies, &mut self.region_pool);
            // NOTE: we don't care about reporting pairs.
            self.reporting.clear();
            break;
        }
    }
    /*
     * Actually remove the colliders proxies.
     */
    for removed in removed_colliders {
        #[cfg(feature = "enhanced-determinism")]
        let proxy_id = self.colliders_proxy_ids.swap_remove(removed);
        #[cfg(not(feature = "enhanced-determinism"))]
        let proxy_id = self.colliders_proxy_ids.remove(removed);
        if let Some(proxy_id) = proxy_id {
            if proxy_id != crate::INVALID_U32 {
                self.proxies.remove(proxy_id);
            }
        }
        if let Some(co) = colliders.get_mut_internal(*removed) {
            // Reset the proxy index.
            co.bf_data.proxy_index = crate::INVALID_U32;
        }
    }
}
/// Finalize the insertion of the layer identified by `layer_id`.
///
/// This will:
/// - Remove all the subregion proxies from the larger layer.
/// - Pre-insert all the smaller layer's region proxies into this layer.
#[profiling::function]
fn finalize_layer_insertion(&mut self, layer_id: u8) {
    // Remove all the region endpoints from the larger layer.
    // They will be automatically replaced by the new layer's regions.
    if let Some(larger_layer) = self.layers[layer_id as usize].larger_layer {
        self.layers[larger_layer as usize].unregister_all_subregions(&mut self.proxies);
    }
    // Add all the regions from the smaller layer to the new layer.
    // This will result in new regions to be created in the new layer.
    // These new regions will automatically propagate to the larger layers in
    // the Phase 3 of `Self::update`.
    if let Some(smaller_layer) = self.layers[layer_id as usize].smaller_layer {
        let (smaller_layer, new_layer) = self
            .layers
            .index_mut2(smaller_layer as usize, layer_id as usize);
        smaller_layer.propagate_existing_regions(
            new_layer,
            &mut self.proxies,
            &mut self.region_pool,
        );
    }
}
/// Ensures that a given layer exists.
///
/// Returns the index of the layer with depth `new_depth`, creating it first
/// if needed. If the layer does not exist then:
/// 1. It is created and added to `self.layers`.
/// 2. The smaller/larger layer indices are updated to order them
///    properly depending on their depth.
/// 3. All the subregion proxies from the larger layer are deleted:
///    they will be replaced by this new layer's regions later in
///    the `update` function.
/// 4. All the regions from the smaller layer are added to that new
///    layer.
#[profiling::function]
fn ensure_layer_exists(&mut self, new_depth: i8) -> u8 {
    // Special case: we don't have any layers yet.
    if self.layers.is_empty() {
        let layer_id = self.layers.len() as u8; // TODO: check overflow.
        self.layers
            .push(SAPLayer::new(new_depth, layer_id, None, None));
        return 0;
    }
    // Find the first layer with a depth larger or equal to new_depth.
    let mut larger_layer_id = Some(self.smallest_layer);
    while let Some(curr_layer_id) = larger_layer_id {
        if self.layers[curr_layer_id as usize].depth >= new_depth {
            break;
        }
        larger_layer_id = self.layers[curr_layer_id as usize].larger_layer;
    }
    match larger_layer_id {
        None => {
            // The layer we are currently creating is the new largest layer. So
            // we need to update `self.largest_layer` accordingly then call
            // `self.finalize_layer_insertion`.
            assert_ne!(self.layers.len() as u8, u8::MAX, "Not yet implemented.");
            let new_layer_id = self.layers.len() as u8;
            self.layers[self.largest_layer as usize].larger_layer = Some(new_layer_id);
            self.layers.push(SAPLayer::new(
                new_depth,
                new_layer_id,
                Some(self.largest_layer),
                None,
            ));
            self.largest_layer = new_layer_id;
            self.finalize_layer_insertion(new_layer_id);
            new_layer_id
        }
        Some(larger_layer_id) => {
            if self.layers[larger_layer_id as usize].depth == new_depth {
                // Found it! The layer already exists.
                larger_layer_id
            } else {
                // The layer does not exist yet. Create it.
                // And we found another layer that is larger than this one.
                // So we need to adjust the smaller/larger layer indices to
                // keep the list sorted, and then call `self.finalize_layer_insertion`
                // to deal with region propagation.
                let new_layer_id = self.layers.len() as u8;
                let smaller_layer_id = self.layers[larger_layer_id as usize].smaller_layer;
                self.layers[larger_layer_id as usize].smaller_layer = Some(new_layer_id);
                if let Some(smaller_layer_id) = smaller_layer_id {
                    self.layers[smaller_layer_id as usize].larger_layer = Some(new_layer_id);
                } else {
                    // The new layer becomes the smallest one.
                    self.smallest_layer = new_layer_id;
                }
                self.layers.push(SAPLayer::new(
                    new_depth,
                    new_layer_id,
                    smaller_layer_id,
                    Some(larger_layer_id),
                ));
                self.finalize_layer_insertion(new_layer_id);
                new_layer_id
            }
        }
    }
}
/// Updates (or creates) the broad-phase proxy of a single modified collider.
///
/// Computes the collider's loosened collision Aabb (merged with an Aabb at
/// `next_position` when soft-CCD provides one), rejects non-finite Aabbs,
/// promotes the proxy to a larger layer if its shape changed and grew, and
/// pre-updates the collider in its layer.
///
/// Returns `true` if new regions were created by this update, meaning that
/// region propagation to larger layers is needed. Returns `false` (doing
/// nothing) if the computed Aabb contains non-finite values.
fn handle_modified_collider(
    &mut self,
    prediction_distance: Real,
    handle: ColliderHandle,
    proxy_index: &mut u32,
    collider: &Collider,
    next_position: Option<&Isometry<Real>>,
) -> bool {
    let mut aabb = collider.compute_collision_aabb(prediction_distance / 2.0);
    if let Some(next_position) = next_position {
        // Soft-CCD: sweep the Aabb so it also covers the predicted position.
        let next_aabb = collider
            .shape
            .compute_aabb(next_position)
            .loosened(collider.contact_skin() + prediction_distance / 2.0);
        aabb.merge(&next_aabb);
    }
    if aabb.mins.coords.iter().any(|e| !e.is_finite())
        || aabb.maxs.coords.iter().any(|e| !e.is_finite())
    {
        // Reject Aabbs with non-finite values.
        return false;
    }
    aabb.mins = super::clamp_point(aabb.mins);
    aabb.maxs = super::clamp_point(aabb.maxs);
    let prev_aabb;
    let layer_id = if let Some(proxy) = self.proxies.get_mut(*proxy_index) {
        let mut layer_id = proxy.layer_id;
        prev_aabb = proxy.aabb;
        proxy.aabb = aabb;
        if collider.changes.contains(ColliderChanges::SHAPE) {
            // If the shape was changed, then we need to see if this proxy should be
            // migrated to a larger layer. Indeed, if the shape was replaced by
            // a much larger shape, we need to promote the proxy to a bigger layer
            // to avoid the O(n²) discretization problem.
            let new_layer_depth = super::layer_containing_aabb(&aabb);
            if new_layer_depth > proxy.layer_depth {
                self.layers[proxy.layer_id as usize]
                    .proper_proxy_moved_to_bigger_layer(&mut self.proxies, *proxy_index);
                // We need to promote the proxy to the bigger layer.
                layer_id = self.ensure_layer_exists(new_layer_depth);
                self.proxies[*proxy_index].layer_id = layer_id;
                self.proxies[*proxy_index].layer_depth = new_layer_depth;
            }
        }
        layer_id
    } else {
        // First time we see this collider: pick its layer from the Aabb size.
        let layer_depth = super::layer_containing_aabb(&aabb);
        let layer_id = self.ensure_layer_exists(layer_depth);
        // Create the proxy.
        let proxy = SAPProxy::collider(handle, aabb, layer_id, layer_depth);
        prev_aabb = aabb;
        *proxy_index = self.proxies.insert(proxy);
        layer_id
    };
    let layer = &mut self.layers[layer_id as usize];
    // Preupdate the collider in the layer.
    // We need to use both the prev Aabb and the new Aabb for this update, to
    // handle special cases where one Aabb has left a region that doesn't contain
    // any other modified Aabbs.
    // If the combination of both previous and new aabbs isn't more than 25% bigger
    // than the new Aabb, we just merge them to save some computation time (to avoid
    // discretizing twice the area at their intersection). If it's bigger than 25% then
    // we discretize both aabbs individually.
    let merged_aabbs = prev_aabb.merged(&aabb);
    if merged_aabbs.volume() > aabb.volume() * 1.25 {
        layer.preupdate_collider(
            *proxy_index,
            &aabb,
            None,
            &mut self.proxies,
            &mut self.region_pool,
        );
        layer.preupdate_collider(
            *proxy_index,
            &prev_aabb,
            Some(&aabb),
            &mut self.proxies,
            &mut self.region_pool,
        );
    } else {
        layer.preupdate_collider(
            *proxy_index,
            &merged_aabbs,
            Some(&aabb),
            &mut self.proxies,
            &mut self.region_pool,
        );
    }
    // Returns true if propagation is needed.
    !layer.created_regions.is_empty()
}
/// Propagate regions from the smallest layers up to the larger layers.
///
/// Whenever a region is created on a layer `n`, then its Aabb must be
/// added to its larger layer so we can detect when an object
/// in a larger layer may start interacting with objects in a smaller
/// layer.
#[profiling::function]
fn propagate_created_regions(&mut self) {
    // Bottom-up traversal, following the `larger_layer` links.
    let mut curr_layer = Some(self.smallest_layer);
    while let Some(curr_layer_id) = curr_layer {
        let layer = &mut self.layers[curr_layer_id as usize];
        let larger_layer = layer.larger_layer;
        if !layer.created_regions.is_empty() {
            if let Some(larger_layer) = larger_layer {
                let (layer, larger_layer) = self
                    .layers
                    .index_mut2(curr_layer_id as usize, larger_layer as usize);
                layer.propagate_created_regions(
                    larger_layer,
                    &mut self.proxies,
                    &mut self.region_pool,
                );
                layer.created_regions.clear();
            } else {
                // Always clear the set of created regions, even if
                // there is no larger layer.
                layer.created_regions.clear();
            }
        }
        curr_layer = larger_layer;
    }
}
/// Runs the SAP update on every layer (from the largest down to the smallest),
/// converting the reported proxy pairs into `AddPair`/`DeletePair` events and
/// propagating collider proxies into intersecting subregions of smaller layers.
#[profiling::function]
fn update_layers_and_find_pairs(&mut self, out_events: &mut Vec<BroadPhasePairEvent>) {
    if self.layers.is_empty() {
        return;
    }
    // This is a top-down operation: we start by updating the largest
    // layer, and continue down to the smallest layer.
    //
    // This must be top-down because:
    // 1. If a non-region proxy from the layer `n` interacts with one of
    //    the regions from the layer `n - 1`, it must be added to that
    //    smaller layer before that smaller layer is updated.
    // 2. If a region has been updated, then all its subregions at the
    //    layer `n - 1` must be marked as "needs-to-be-updated" too, in
    //    order to account for the fact that a big proxy moved.
    // NOTE: this 2nd point could probably be improved: instead of updating
    //       all the subregions, we could perhaps just update the subregions
    //       that cross the boundary of the Aabb of the big proxies that
    //       moved in the layer `n`.
    let mut layer_id = Some(self.largest_layer);
    while let Some(curr_layer_id) = layer_id {
        self.layers[curr_layer_id as usize]
            .update_regions(&mut self.proxies, &mut self.reporting);
        layer_id = self.layers[curr_layer_id as usize].smaller_layer;
        for ((proxy_id1, proxy_id2), colliding) in &self.reporting {
            let (proxy1, proxy2) = self
                .proxies
                .elements
                .index_mut2(*proxy_id1 as usize, *proxy_id2 as usize);
            match (&mut proxy1.data, &mut proxy2.data) {
                (SAPProxyData::Collider(handle1), SAPProxyData::Collider(handle2)) => {
                    // Collider/collider pair: emit a broad-phase event.
                    if *colliding {
                        out_events.push(BroadPhasePairEvent::AddPair(ColliderPair::new(
                            *handle1, *handle2,
                        )));
                    } else {
                        out_events.push(BroadPhasePairEvent::DeletePair(ColliderPair::new(
                            *handle1, *handle2,
                        )));
                    }
                }
                (SAPProxyData::Collider(_), SAPProxyData::Region(_)) => {
                    if *colliding {
                        // Add the collider to the subregion.
                        proxy2
                            .data
                            .as_region_mut()
                            .preupdate_proxy(*proxy_id1, false);
                    }
                }
                (SAPProxyData::Region(_), SAPProxyData::Collider(_)) => {
                    if *colliding {
                        // Add the collider to the subregion.
                        proxy1
                            .data
                            .as_region_mut()
                            .preupdate_proxy(*proxy_id2, false);
                    }
                }
                (SAPProxyData::Region(_), SAPProxyData::Region(_)) => {
                    // This will only happen between two adjacent subregions because
                    // they share some identical bounds. So this case does not matter.
                }
            }
        }
        // The reporting map is a per-layer workspace: clear it between layers.
        self.reporting.clear();
    }
}
}
impl BroadPhase for BroadPhaseMultiSap {
    /// Updates the broad-phase, taking into account the new collider positions.
    #[profiling::function]
    fn update(
        &mut self,
        dt: Real,
        prediction_distance: Real,
        colliders: &mut ColliderSet,
        bodies: &RigidBodySet,
        modified_colliders: &[ColliderHandle],
        removed_colliders: &[ColliderHandle],
        events: &mut Vec<BroadPhasePairEvent>,
    ) {
        // Phase 1: pre-delete the proxies of the colliders that have been removed.
        self.handle_removed_colliders(removed_colliders);
        let mut need_region_propagation = false;
        // Phase 2: update the proxies of the colliders that have been modified.
        for handle in modified_colliders {
            // NOTE: we use `get` because the collider may no longer
            // exist if it has been removed.
            if let Some(co) = colliders.get_mut_internal(*handle) {
                if !co.is_enabled() || !co.changes.needs_broad_phase_update() {
                    continue;
                }
                let mut new_proxy_id = co.bf_data.proxy_index;
                // Soft-CCD: predict the parent body's next position so the
                // collider's Aabb can be swept to cover it.
                let next_pos = co.parent.and_then(|p| {
                    let parent = bodies.get(p.handle)?;
                    (parent.soft_ccd_prediction() > 0.0).then(|| {
                        parent.predict_position_using_velocity_and_forces_with_max_dist(
                            dt,
                            parent.soft_ccd_prediction(),
                        ) * p.pos_wrt_parent
                    })
                });
                if self.handle_modified_collider(
                    prediction_distance,
                    *handle,
                    &mut new_proxy_id,
                    co,
                    next_pos.as_ref(),
                ) {
                    need_region_propagation = true;
                }
                if co.bf_data.proxy_index != new_proxy_id {
                    self.colliders_proxy_ids.insert(*handle, new_proxy_id);
                    // Make sure we have the new proxy index in case
                    // the collider was added for the first time.
                    co.bf_data = ColliderBroadPhaseData {
                        proxy_index: new_proxy_id,
                    };
                }
            }
        }
        // Phase 3: bottom-up pass to propagate new regions from smaller layers to larger layers.
        if need_region_propagation {
            self.propagate_created_regions();
        }
        // Phase 4: top-down pass to propagate proxies from larger layers to smaller layers.
        self.update_layers_and_find_pairs(events);
        // Phase 5: bottom-up pass to remove proxies, and propagate region removal from smaller
        // layers to possibly remove regions from larger layers that would become empty that way.
        self.complete_removals(colliders, removed_colliders);
    }
}
#[cfg(test)]
mod test {
    use crate::dynamics::{
        ImpulseJointSet, IslandManager, MultibodyJointSet, RigidBodyBuilder, RigidBodySet,
    };
    use crate::geometry::{BroadPhase, BroadPhaseMultiSap, ColliderBuilder, ColliderSet};
    // Regression test: adding a collider, removing it, then adding a new one
    // must recycle the broad-phase proxy handle without panicking.
    #[test]
    fn test_add_update_remove() {
        let mut broad_phase = BroadPhaseMultiSap::new();
        let mut bodies = RigidBodySet::new();
        let mut colliders = ColliderSet::new();
        let mut impulse_joints = ImpulseJointSet::new();
        let mut multibody_joints = MultibodyJointSet::new();
        let mut islands = IslandManager::new();
        let rb = RigidBodyBuilder::dynamic().build();
        let co = ColliderBuilder::ball(0.5).build();
        let hrb = bodies.insert(rb);
        let coh = colliders.insert_with_parent(co, hrb, &mut bodies);
        let mut events = Vec::new();
        broad_phase.update(0.0, 0.0, &mut colliders, &bodies, &[coh], &[], &mut events);
        // Removing the body also removes its attached collider.
        bodies.remove(
            hrb,
            &mut islands,
            &mut colliders,
            &mut impulse_joints,
            &mut multibody_joints,
            true,
        );
        broad_phase.update(0.0, 0.0, &mut colliders, &bodies, &[], &[coh], &mut events);
        // Create another body.
        let rb = RigidBodyBuilder::dynamic().build();
        let co = ColliderBuilder::ball(0.5).build();
        let hrb = bodies.insert(rb);
        let coh = colliders.insert_with_parent(co, hrb, &mut bodies);
        // Make sure the proxy handle is recycled properly.
        broad_phase.update(0.0, 0.0, &mut colliders, &bodies, &[coh], &[], &mut events);
    }
}

View File

@@ -1,18 +0,0 @@
pub use self::broad_phase_multi_sap::BroadPhaseMultiSap;
pub use self::broad_phase_pair_event::{BroadPhasePairEvent, ColliderPair};
use self::sap_axis::*;
use self::sap_endpoint::*;
use self::sap_layer::*;
use self::sap_proxy::*;
use self::sap_region::*;
use self::sap_utils::*;
mod broad_phase_multi_sap;
mod broad_phase_pair_event;
mod sap_axis;
mod sap_endpoint;
mod sap_layer;
mod sap_proxy;
mod sap_region;
mod sap_utils;

View File

@@ -1,286 +0,0 @@
use super::{SAPEndpoint, SAPProxies, NUM_SENTINELS};
use crate::geometry::broad_phase_multi_sap::DELETED_AABB_VALUE;
use crate::geometry::BroadPhaseProxyIndex;
use crate::math::Real;
use bit_vec::BitVec;
use parry::bounding_volume::BoundingVolume;
use parry::utils::hashmap::HashMap;
use std::cmp::Ordering;
/// One axis of the sweep-and-prune algorithm running inside a single region.
///
/// It maintains a sorted list of endpoints (bounded by two sentinels) for the
/// Aabbs of the proxies intersecting that region along one dimension.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[derive(Clone, Debug)]
pub struct SAPAxis {
    /// Lower bound of the interval covered by this axis.
    pub min_bound: Real,
    /// Upper bound of the interval covered by this axis.
    pub max_bound: Real,
    /// Sorted endpoints, always starting with a start-sentinel and ending
    /// with an end-sentinel.
    pub endpoints: Vec<SAPEndpoint>,
    /// Workspace buffer used by `batch_insert`; not serialized.
    #[cfg_attr(feature = "serde-serialize", serde(skip))]
    pub new_endpoints: Vec<(SAPEndpoint, usize)>, // Workspace
}
impl SAPAxis {
/// Creates a new axis covering the range `[min_bound, max_bound]`.
///
/// # Panics
/// Panics if `min_bound > max_bound`.
pub fn new(min_bound: Real, max_bound: Real) -> Self {
    assert!(min_bound <= max_bound);
    Self {
        min_bound,
        max_bound,
        // The two sentinels guarantee the endpoint list is never empty and
        // bound all real endpoints on both sides.
        endpoints: vec![SAPEndpoint::start_sentinel(), SAPEndpoint::end_sentinel()],
        new_endpoints: Vec::new(),
    }
}
/// Resets this axis so that it only contains its two boundary sentinels.
pub fn clear(&mut self) {
    self.new_endpoints.clear();
    self.endpoints.clear();
    let sentinels = [SAPEndpoint::start_sentinel(), SAPEndpoint::end_sentinel()];
    self.endpoints.extend(sentinels.iter().copied());
}
/// Inserts a batch of new proxies into this axis, keeping the endpoint list sorted.
///
/// If `reporting` is provided, each pair of proxies whose Aabbs intersect after
/// this insertion is recorded into it (keyed by the sorted pair of proxy
/// indices, with value `true`).
///
/// # Panics
/// Panics if one of the new proxies' Aabbs lies entirely outside the
/// `[min_bound, max_bound]` range of this axis along `dim`.
#[profiling::function]
pub fn batch_insert(
    &mut self,
    dim: usize,
    new_proxies: &[BroadPhaseProxyIndex],
    proxies: &SAPProxies,
    reporting: Option<&mut HashMap<(u32, u32), bool>>,
) {
    if new_proxies.is_empty() {
        return;
    }
    self.new_endpoints.clear();
    for proxy_id in new_proxies {
        let proxy = &proxies[*proxy_id];
        assert!(
            proxy.aabb.mins[dim] <= self.max_bound,
            "proxy.aabb.mins {} (in {:?}) <= max_bound {}",
            proxy.aabb.mins[dim],
            proxy.aabb,
            self.max_bound
        );
        assert!(
            proxy.aabb.maxs[dim] >= self.min_bound,
            "proxy.aabb.maxs {} (in {:?}) >= min_bound {}",
            proxy.aabb.maxs[dim],
            proxy.aabb,
            self.min_bound
        );
        let start_endpoint = SAPEndpoint::start_endpoint(proxy.aabb.mins[dim], *proxy_id);
        let end_endpoint = SAPEndpoint::end_endpoint(proxy.aabb.maxs[dim], *proxy_id);
        self.new_endpoints.push((start_endpoint, 0));
        self.new_endpoints.push((end_endpoint, 0));
    }
    self.new_endpoints
        .sort_by(|a, b| a.0.value.partial_cmp(&b.0.value).unwrap_or(Ordering::Equal));
    let mut curr_existing_index = self.endpoints.len() - NUM_SENTINELS - 1;
    let new_num_endpoints = self.endpoints.len() + self.new_endpoints.len();
    self.endpoints
        .resize(new_num_endpoints, SAPEndpoint::end_sentinel());
    let mut curr_shift_index = new_num_endpoints - NUM_SENTINELS - 1;
    // Sort the endpoints.
    // TODO: specialize for the case where this is the
    // first time we insert endpoints to this axis?
    for new_endpoint in self.new_endpoints.iter_mut().rev() {
        loop {
            let existing_endpoint = self.endpoints[curr_existing_index];
            if existing_endpoint.value <= new_endpoint.0.value {
                break;
            }
            self.endpoints[curr_shift_index] = existing_endpoint;
            curr_shift_index -= 1;
            curr_existing_index -= 1;
        }
        self.endpoints[curr_shift_index] = new_endpoint.0;
        // Remember the final position of each new endpoint for the reporting pass.
        new_endpoint.1 = curr_shift_index;
        curr_shift_index -= 1;
    }
    // Report pairs using a single mbp pass on each new endpoint.
    let endpoints_wo_last_sentinel = &self.endpoints[..self.endpoints.len() - 1];
    if let Some(reporting) = reporting {
        for (endpoint, endpoint_id) in self.new_endpoints.drain(..).filter(|e| e.0.is_start()) {
            let proxy1 = &proxies[endpoint.proxy()];
            let min = endpoint.value;
            let max = proxy1.aabb.maxs[dim];
            for endpoint2 in &endpoints_wo_last_sentinel[endpoint_id + 1..] {
                if endpoint2.proxy() == endpoint.proxy() {
                    continue;
                }
                let proxy2 = &proxies[endpoint2.proxy()];
                // NOTE: some pairs with equal aabb.mins[dim] may end up being reported twice.
                if (endpoint2.is_start() && endpoint2.value < max)
                    || (endpoint2.is_end() && proxy2.aabb.mins[dim] <= min)
                {
                    // The intervals overlap along `dim`; confirm with a full Aabb test.
                    if proxy1.aabb.intersects(&proxy2.aabb) {
                        // Report pair.
                        let pair = super::sort2(endpoint.proxy(), endpoint2.proxy());
                        reporting.insert(pair, true);
                    }
                }
            }
        }
    }
}
/// Removes from this axis all the endpoints that are out of bounds from this axis.
///
/// Returns the number of deleted proxies as well as the number of proxies deleted
/// such that `proxy.layer_depth <= layer_depth`.
pub fn delete_out_of_bounds_proxies(
    &self,
    proxies: &SAPProxies,
    existing_proxies: &mut BitVec,
    layer_depth: i8,
) -> (usize, usize) {
    let mut num_subproper_proxies_deleted = 0;
    let mut num_proxies_deleted = 0;
    // Left scan: endpoints are sorted, so we can stop at the first endpoint
    // that is >= min_bound. A proxy is fully out on the left iff its *end*
    // endpoint is below min_bound.
    for endpoint in &self.endpoints {
        if endpoint.value < self.min_bound {
            let proxy_id = endpoint.proxy();
            if endpoint.is_end() && existing_proxies[proxy_id as usize] {
                existing_proxies.set(proxy_id as usize, false);
                if proxies[proxy_id].layer_depth <= layer_depth {
                    num_subproper_proxies_deleted += 1;
                }
                num_proxies_deleted += 1;
            }
        } else {
            break;
        }
    }
    // Symmetric right scan for proxies whose *start* endpoint is past max_bound.
    for endpoint in self.endpoints.iter().rev() {
        if endpoint.value > self.max_bound {
            let proxy_id = endpoint.proxy();
            if endpoint.is_start() && existing_proxies[proxy_id as usize] {
                existing_proxies.set(proxy_id as usize, false);
                if proxies[proxy_id].layer_depth <= layer_depth {
                    num_subproper_proxies_deleted += 1;
                }
                num_proxies_deleted += 1;
            }
        } else {
            break;
        }
    }
    (num_proxies_deleted, num_subproper_proxies_deleted)
}
/// Removes from this axis every endpoint whose proxy is no longer flagged in
/// `existing_proxies`. Sentinels are always kept.
pub fn delete_out_of_bounds_endpoints(&mut self, existing_proxies: &BitVec) {
    self.endpoints
        .retain(|endpt| endpt.is_sentinel() || existing_proxies[endpt.proxy() as usize])
}
/// Removes from this axis all the endpoints corresponding to a proxy with an Aabb mins/maxs values
/// equal to DELETED_AABB_VALUE, indicating that the endpoints should be deleted.
///
/// Returns the number of deleted proxies such that `proxy.layer_depth <= layer_depth`.
pub fn delete_deleted_proxies_and_endpoints_after_subregion_removal(
    &mut self,
    proxies: &SAPProxies,
    existing_proxies: &mut BitVec,
    layer_depth: i8,
) -> usize {
    let mut num_subproper_proxies_deleted = 0;
    self.endpoints.retain(|endpt| {
        if !endpt.is_sentinel() {
            let proxy = &proxies[endpt.proxy()];
            // Checking `mins.x` is sufficient to detect the deleted marker.
            if proxy.aabb.mins.x == DELETED_AABB_VALUE {
                // Only unset/count each proxy once, even though it has two
                // endpoints on this axis.
                if existing_proxies.get(endpt.proxy() as usize) == Some(true) {
                    existing_proxies.set(endpt.proxy() as usize, false);
                    if proxy.layer_depth <= layer_depth {
                        num_subproper_proxies_deleted += 1;
                    }
                }
                return false;
            }
        }
        true
    });
    num_subproper_proxies_deleted
}
/// Incrementally re-sorts the endpoints along dimension `dim` after their proxies'
/// Aabbs changed, reporting pairs whose overlap status may have changed.
///
/// This is one pass of an insertion sort: endpoints are mostly sorted between frames,
/// so each endpoint only travels a few slots. Whenever a `start` endpoint is swapped
/// below an `end` endpoint (or vice versa), the relative order of the two proxies on
/// this axis changed, which is exactly when an overlap can begin or stop:
/// - a moving `start` passing an `end` may create an overlap (reported as `true` if
///   the full Aabbs actually intersect);
/// - a moving `end` passing a `start` may remove an overlap (reported as `false` if
///   the full Aabbs no longer intersect).
pub fn update_endpoints(
    &mut self,
    dim: usize,
    proxies: &SAPProxies,
    reporting: &mut HashMap<(u32, u32), bool>,
) {
    // Skip the sentinels at both ends: they never move and bound the sweep.
    let last_endpoint = self.endpoints.len() - NUM_SENTINELS;
    for i in NUM_SENTINELS..last_endpoint {
        let mut endpoint_i = self.endpoints[i];
        let aabb_i = proxies[endpoint_i.proxy()].aabb;
        // Refresh this endpoint's coordinate from its proxy's current Aabb.
        if endpoint_i.is_start() {
            endpoint_i.value = aabb_i.mins[dim];
        } else {
            endpoint_i.value = aabb_i.maxs[dim];
        }
        // Shift `endpoint_i` down to its sorted position, examining every
        // endpoint it moves past.
        let mut j = i;
        if endpoint_i.is_start() {
            while endpoint_i.value < self.endpoints[j - 1].value {
                let endpoint_j = self.endpoints[j - 1];
                self.endpoints[j] = endpoint_j;
                if endpoint_j.is_end() {
                    // Report start collision.
                    let proxy_j = &proxies[endpoint_j.proxy()];
                    if aabb_i.intersects(&proxy_j.aabb) {
                        let pair = super::sort2(endpoint_i.proxy(), endpoint_j.proxy());
                        reporting.insert(pair, true);
                    }
                }
                j -= 1;
            }
        } else {
            while endpoint_i.value < self.endpoints[j - 1].value {
                let endpoint_j = self.endpoints[j - 1];
                self.endpoints[j] = endpoint_j;
                if endpoint_j.is_start() {
                    // Report end collision.
                    if !aabb_i.intersects(&proxies[endpoint_j.proxy()].aabb) {
                        let pair = super::sort2(endpoint_i.proxy(), endpoint_j.proxy());
                        reporting.insert(pair, false);
                    }
                }
                j -= 1;
            }
        }
        // Drop the endpoint into the slot freed by the swaps.
        self.endpoints[j] = endpoint_i;
    }
    // println!(
    //     "Num start swaps: {}, end swaps: {}, dim: {}",
    //     num_start_swaps, num_end_swaps, dim
    // );
}
}

View File

@@ -1,60 +0,0 @@
use super::SENTINEL_VALUE;
use crate::math::Real;
/// One boundary (start or end) of a proxy's Aabb projected onto a SAP axis.
///
/// The proxy index and the start/end flag are packed into a single `u32`:
/// the most-significant bit is the "start" flag, and the remaining 31 bits
/// hold the proxy index. Sentinels use an all-ones proxy index.
#[derive(Copy, Clone, Debug, PartialEq)]
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
pub struct SAPEndpoint {
    pub value: Real,
    pub packed_flag_proxy: u32,
}
const START_FLAG_MASK: u32 = 0b1 << 31;
const PROXY_MASK: u32 = u32::MAX ^ START_FLAG_MASK;
const START_SENTINEL_TAG: u32 = u32::MAX;
const END_SENTINEL_TAG: u32 = u32::MAX ^ START_FLAG_MASK;
impl SAPEndpoint {
    /// A start (lower-bound) endpoint for `proxy` at coordinate `value`.
    pub fn start_endpoint(value: Real, proxy: u32) -> Self {
        Self {
            value,
            packed_flag_proxy: START_FLAG_MASK | proxy,
        }
    }
    /// An end (upper-bound) endpoint for `proxy` at coordinate `value`.
    pub fn end_endpoint(value: Real, proxy: u32) -> Self {
        Self {
            value,
            packed_flag_proxy: PROXY_MASK & proxy,
        }
    }
    /// The sentinel endpoint placed before every real endpoint on an axis.
    pub fn start_sentinel() -> Self {
        Self {
            value: -SENTINEL_VALUE,
            packed_flag_proxy: START_SENTINEL_TAG,
        }
    }
    /// The sentinel endpoint placed after every real endpoint on an axis.
    pub fn end_sentinel() -> Self {
        Self {
            value: SENTINEL_VALUE,
            packed_flag_proxy: END_SENTINEL_TAG,
        }
    }
    /// Is this endpoint one of the two sentinels? (Their proxy bits are all ones.)
    pub fn is_sentinel(self) -> bool {
        (self.packed_flag_proxy & PROXY_MASK) == PROXY_MASK
    }
    /// The index of the proxy this endpoint belongs to.
    pub fn proxy(self) -> u32 {
        self.packed_flag_proxy & PROXY_MASK
    }
    /// Is this a start (lower-bound) endpoint? (Top bit set.)
    pub fn is_start(self) -> bool {
        self.packed_flag_proxy >= START_FLAG_MASK
    }
    /// Is this an end (upper-bound) endpoint? (Top bit clear.)
    pub fn is_end(self) -> bool {
        self.packed_flag_proxy < START_FLAG_MASK
    }
}

View File

@@ -1,409 +0,0 @@
use super::{RegionKey, SAPProxies, SAPProxy, SAPRegion, SAPRegionPool};
use crate::geometry::broad_phase_multi_sap::DELETED_AABB_VALUE;
use crate::geometry::{Aabb, BroadPhaseProxyIndex};
use crate::math::{Point, Real};
use parry::bounding_volume::BoundingVolume;
use parry::utils::hashmap::{Entry, HashMap};
/// One layer of the hierarchical grid used by the multi-SAP broad-phase.
///
/// A layer is a grid of regions of uniform width (`region_width`, derived from
/// `depth`). Layers are chained through `smaller_layer`/`larger_layer`, and each
/// region of this layer is registered as a "subregion" proxy inside the larger layer.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[derive(Clone)]
pub(crate) struct SAPLayer {
    // Depth of this layer; determines `region_width` (see `super::region_width`).
    pub depth: i8,
    // Identifier of this layer, stored in the proxies created for its regions.
    pub layer_id: u8,
    // Layer with the next smaller region width, if any.
    pub smaller_layer: Option<u8>,
    // Layer with the next larger region width, if any.
    pub larger_layer: Option<u8>,
    // Width of every region of this layer.
    region_width: Real,
    #[cfg_attr(
        feature = "serde-serialize",
        serde(
            serialize_with = "crate::utils::serde::serialize_to_vec_tuple",
            deserialize_with = "crate::utils::serde::deserialize_from_vec_tuple"
        )
    )]
    // Maps a region's grid coordinates to the index of that region's proxy.
    pub regions: HashMap<Point<RegionKey>, BroadPhaseProxyIndex>,
    #[cfg_attr(feature = "serde-serialize", serde(skip))]
    regions_to_potentially_remove: Vec<Point<RegionKey>>, // Workspace
    #[cfg_attr(feature = "serde-serialize", serde(skip))]
    // Proxies of regions created since the last update; drained when they are
    // propagated to the larger layer.
    pub created_regions: Vec<BroadPhaseProxyIndex>,
}
impl SAPLayer {
/// Creates a new, empty layer at the given `depth`, with no region yet.
pub fn new(
    depth: i8,
    layer_id: u8,
    smaller_layer: Option<u8>,
    larger_layer: Option<u8>,
) -> Self {
    Self {
        depth,
        layer_id,
        smaller_layer,
        larger_layer,
        // The region width only depends on the layer depth.
        region_width: super::region_width(depth),
        regions: HashMap::default(),
        regions_to_potentially_remove: Vec::new(),
        created_regions: Vec::new(),
    }
}
/// Deletes from all the regions of this layer, all the endpoints corresponding
/// to subregions. Clears the arrays of subregions indices from all the regions of
/// this layer.
#[profiling::function]
pub fn unregister_all_subregions(&mut self, proxies: &mut SAPProxies) {
    for region_id in self.regions.values() {
        // Extract the region to make the borrow-checker happy: we need `proxies`
        // mutably below while the region itself is stored inside `proxies`.
        let mut region = proxies[*region_id]
            .data
            .take_region()
            .expect("Should be a region proxy.");
        // Delete the endpoints.
        region.delete_all_region_endpoints(proxies);
        // Clear the subregions vec and reset the subregions parent ids, so the
        // subregions can be re-registered from scratch later.
        for subregion in region.subregions.drain(..) {
            proxies[subregion]
                .data
                .as_region_mut()
                .id_in_parent_subregion = crate::INVALID_U32;
        }
        // Re set the region to make the borrow-checker happy.
        proxies[*region_id].data.set_region(region);
    }
}
/// Registers into `larger_layer` the region proxies of every region recently
/// created in `self`, draining `self.created_regions`.
///
/// This method must be called in a bottom-up loop, propagating new regions from
/// the smallest layer up to the largest layer. That loop is done by Phase 3 of
/// `BroadPhaseMultiSap::update`.
pub fn propagate_created_regions(
    &mut self,
    larger_layer: &mut Self,
    proxies: &mut SAPProxies,
    pool: &mut SAPRegionPool,
) {
    // NOTE: drain in order so the larger layer sees regions in creation order.
    for created_proxy_id in self.created_regions.drain(..) {
        larger_layer.register_subregion(created_proxy_id, proxies, pool);
    }
}
/// Registers into `larger_layer` the region proxies of every region of `self`.
pub fn propagate_existing_regions(
    &mut self,
    larger_layer: &mut Self,
    proxies: &mut SAPProxies,
    pool: &mut SAPRegionPool,
) {
    for &region_proxy_id in self.regions.values() {
        larger_layer.register_subregion(region_proxy_id, proxies, pool);
    }
}
/// Registers a subregion of this layer.
///
/// The subregion proxy will be added to the region of `self` that contains
/// that subregion center. Because the hierarchical grid cells have aligned boundaries
/// at each depth, we have the guarantee that a given subregion will only be part of
/// one region on its parent "larger" layer.
///
/// If the subregion is already registered (its `id_in_parent_subregion` is set),
/// this only sanity-checks that the parent's bookkeeping is consistent.
#[profiling::function]
fn register_subregion(
    &mut self,
    proxy_id: BroadPhaseProxyIndex,
    proxies: &mut SAPProxies,
    pool: &mut SAPRegionPool,
) {
    if let Some(proxy) = proxies.get(proxy_id) {
        let curr_id_in_parent_subregion = proxy.data.as_region().id_in_parent_subregion;
        if curr_id_in_parent_subregion == crate::INVALID_U32 {
            // Not registered yet: insert the subregion into the region of this
            // layer containing its Aabb center, creating that region if needed.
            let region_key = super::point_key(proxy.aabb.center(), self.region_width);
            let region_id = self.ensure_region_exists(region_key, proxies, pool);
            let region = proxies[region_id].data.as_region_mut();
            let id_in_parent_subregion = region.register_subregion(proxy_id);
            proxies[proxy_id]
                .data
                .as_region_mut()
                .id_in_parent_subregion = id_in_parent_subregion as u32;
        } else {
            // NOTE: this is just an assertion to make sure the region ids are
            // correctly wired. If this piece of code causes any performance
            // problem, it can be deleted completely without hesitation.
            // (A previous version re-tested `curr_id_in_parent_subregion !=
            // crate::INVALID_U32` here, which is always true in this branch,
            // so that redundant check was removed.)
            let region_key = super::point_key(proxy.aabb.center(), self.region_width);
            let region_id = self.regions.get(&region_key).unwrap();
            let region = proxies[*region_id].data.as_region_mut();
            assert_eq!(
                region.subregions[curr_id_in_parent_subregion as usize],
                proxy_id
            );
        }
    }
}
/// Removes the subregion proxy `proxy_id` from the region of this layer containing it.
///
/// The containing region is looked up from the subregion proxy's Aabb center. The
/// subregion entry is `swap_remove`d from that region's `subregions` list, and the
/// subregion swapped into its slot (if any) gets its `id_in_parent_subregion`
/// re-adjusted. The affected region is queued into `regions_to_potentially_remove`
/// so `complete_removals` can finalize the cleanup.
#[profiling::function]
fn unregister_subregion(
    &mut self,
    proxy_id: BroadPhaseProxyIndex,
    proxy_region: &SAPRegion,
    proxies: &mut SAPProxies,
) {
    if let Some(proxy) = proxies.get(proxy_id) {
        let id_in_parent_subregion = proxy_region.id_in_parent_subregion;
        let region_key = super::point_key(proxy.aabb.center(), self.region_width);
        if let Some(region_id) = self.regions.get(&region_key) {
            let proxy = &mut proxies[*region_id];
            let region = proxy.data.as_region_mut();
            // Queue this region for a post-removal update, only once.
            if !region.needs_update_after_subregion_removal {
                self.regions_to_potentially_remove.push(region_key);
                region.needs_update_after_subregion_removal = true;
            }
            let removed = region
                .subregions
                .swap_remove(id_in_parent_subregion as usize); // Remove the subregion index from the subregion list.
            assert_eq!(removed, proxy_id);
            // Re-adjust the id_in_parent_subregion of the subregion that was swapped in place
            // of the deleted one.
            if let Some(subregion_to_update) = region
                .subregions
                .get(id_in_parent_subregion as usize)
                .copied()
            {
                proxies[subregion_to_update]
                    .data
                    .as_region_mut()
                    .id_in_parent_subregion = id_in_parent_subregion;
            }
        }
    }
}
/// Ensures a given region exists in this layer.
///
/// If the region with the given region key does not exist yet, it is created
/// (recycling a region from `pool` when possible), a new proxy is allocated for
/// it, and that proxy's ID is pushed to `self.created_regions` so it can be
/// propagated during Phase 3 of `BroadPhaseMultiSap::update`.
///
/// Returns the proxy ID of the already-existing region, or of the newly created one.
pub fn ensure_region_exists(
    &mut self,
    region_key: Point<RegionKey>,
    proxies: &mut SAPProxies,
    pool: &mut SAPRegionPool,
) -> BroadPhaseProxyIndex {
    // Reborrow the fields needed inside the closure so it doesn't capture `self`
    // (which is already mutably borrowed through `self.regions`).
    let created_regions = &mut self.created_regions;
    let region_width = self.region_width;
    let layer_id = self.layer_id;
    let depth = self.depth;
    *self.regions.entry(region_key).or_insert_with(|| {
        // The region does not exist: create (or recycle) it and give it a proxy.
        let region_bounds = super::region_aabb(region_key, region_width);
        let region = SAPRegion::recycle_or_new(region_bounds, pool);
        let region_proxy_id =
            proxies.insert(SAPProxy::subregion(region, region_bounds, layer_id, depth));
        // Remember the new region so it gets propagated to the larger layer.
        created_regions.push(region_proxy_id);
        region_proxy_id
    })
}
/// Pre-registers the proxy `proxy_id` into every region of this layer overlapped
/// by `aabb_to_discretize`, creating the missing regions on the fly.
///
/// If `actual_aabb` is provided, the proxy is instead pre-deleted from the regions
/// that `aabb_to_discretize` touches but `actual_aabb` does not: these are regions
/// the proxy intersected during the last update but no longer does.
pub fn preupdate_collider(
    &mut self,
    proxy_id: u32,
    aabb_to_discretize: &Aabb,
    actual_aabb: Option<&Aabb>,
    proxies: &mut SAPProxies,
    pool: &mut SAPRegionPool,
) {
    // Range of region grid keys covered by the Aabb.
    let start = super::point_key(aabb_to_discretize.mins, self.region_width);
    let end = super::point_key(aabb_to_discretize.maxs, self.region_width);
    // Discretize the aabb.
    #[cfg(feature = "dim2")]
    let k_range = 0..1;
    #[cfg(feature = "dim3")]
    let k_range = start.z..=end.z;
    for i in start.x..=end.x {
        for j in start.y..=end.y {
            for _k in k_range.clone() {
                #[cfg(feature = "dim2")]
                let region_key = Point::new(i, j);
                #[cfg(feature = "dim3")]
                let region_key = Point::new(i, j, _k);
                let region_id = self.ensure_region_exists(region_key, proxies, pool);
                let region_proxy = &mut proxies[region_id];
                let region = region_proxy.data.as_region_mut();
                // NOTE: sometimes, rounding errors will generate start/end indices
                // that lie outside of the actual regions Aabb.
                // TODO: is there a smarter, more efficient way of dealing with this?
                if !region_proxy.aabb.intersects(aabb_to_discretize) {
                    continue;
                }
                if let Some(actual_aabb) = actual_aabb {
                    // NOTE: if the actual Aabb doesn't intersect the
                    // regions Aabb, then we need to delete the
                    // proxy from that region because it means that
                    // during the last update the proxy intersected
                    // that region, but it doesn't intersect it any
                    // more during the current update.
                    if !region_proxy.aabb.intersects(actual_aabb) {
                        region.predelete_proxy(proxy_id);
                        continue;
                    }
                }
                region.preupdate_proxy(proxy_id, true);
            }
        }
    }
}
/// Marks the proxy `proxy_index` for deletion from this layer.
///
/// Its Aabb is filled with `DELETED_AABB_VALUE` (so the axis sweeps can detect
/// and drop its endpoints during the next update), and every region that its
/// previous Aabb overlapped is flagged as needing an update.
#[profiling::function]
pub fn predelete_proxy(&mut self, proxies: &mut SAPProxies, proxy_index: BroadPhaseProxyIndex) {
    // Discretize the Aabb to find the regions that need to be invalidated.
    let proxy_aabb = &mut proxies[proxy_index].aabb;
    let start = super::point_key(proxy_aabb.mins, self.region_width);
    let end = super::point_key(proxy_aabb.maxs, self.region_width);
    // Set the Aabb of the proxy to a very large value.
    proxy_aabb.mins.coords.fill(DELETED_AABB_VALUE);
    proxy_aabb.maxs.coords.fill(DELETED_AABB_VALUE);
    #[cfg(feature = "dim2")]
    let k_range = 0..1;
    #[cfg(feature = "dim3")]
    let k_range = start.z..=end.z;
    for i in start.x..=end.x {
        for j in start.y..=end.y {
            for _k in k_range.clone() {
                #[cfg(feature = "dim2")]
                let key = Point::new(i, j);
                #[cfg(feature = "dim3")]
                let key = Point::new(i, j, _k);
                if let Some(region_id) = self.regions.get(&key) {
                    // Flag the region so its next update removes the endpoints.
                    let region = proxies[*region_id].data.as_region_mut();
                    region.predelete_proxy(proxy_index);
                }
            }
        }
    }
}
/// Runs the SAP update of every region of this layer, collecting into `reporting`
/// the pairs whose overlap status changed.
///
/// After a region is updated, all its subregions are marked dirty so they get
/// updated too (on the smaller layer), and regions left without any subproper
/// proxy are queued into `regions_to_potentially_remove`.
pub fn update_regions(
    &mut self,
    proxies: &mut SAPProxies,
    reporting: &mut HashMap<(u32, u32), bool>,
) {
    // println!(
    //     "Num regions at depth {}: {}",
    //     self.depth,
    //     self.regions.len(),
    // );
    for (point, region_id) in &self.regions {
        // Take the region out of its proxy so `proxies` can be borrowed during
        // the update; it is put back below.
        if let Some(mut region) = proxies[*region_id].data.take_region() {
            // Update the region.
            region.update(proxies, self.depth, reporting);
            // Mark all subregions as to-be-updated.
            for subregion_id in &region.subregions {
                proxies[*subregion_id].data.as_region_mut().mark_as_dirty();
            }
            if !region.contains_subproper_proxies() {
                self.regions_to_potentially_remove.push(*point);
            }
            proxies[*region_id].data.set_region(region);
        }
    }
}
/// Complete the removals of empty regions on this layer.
///
/// This method must be called in a bottom-up fashion, i.e.,
/// by starting with the smallest layer up to the larger layer.
/// This is needed in order to properly propagate the removal
/// of a region (which is a subregion registered into the larger
/// layer).
pub fn complete_removals(
    &mut self,
    mut larger_layer: Option<&mut Self>,
    proxies: &mut SAPProxies,
    pool: &mut SAPRegionPool,
) {
    // Delete all the empty regions and store them in the region pool.
    for region_to_remove in self.regions_to_potentially_remove.drain(..) {
        if let Entry::Occupied(region_id) = self.regions.entry(region_to_remove) {
            if let Some(proxy) = proxies.get_mut(*region_id.get()) {
                // First, we need to remove the endpoints of the deleted subregions.
                let mut region = proxy.data.take_region().unwrap();
                region.update_after_subregion_removal(proxies, self.depth);
                // Check if we can actually delete this region.
                if !region.contains_subproper_proxies() {
                    // NOTE(review): the enhanced-determinism hashmap only exposes
                    // `swap_remove` — presumably an order-preserving map type;
                    // confirm against `parry::utils::hashmap`.
                    #[cfg(feature = "enhanced-determinism")]
                    let region_id = region_id.swap_remove();
                    #[cfg(not(feature = "enhanced-determinism"))]
                    let region_id = region_id.remove();
                    // We can delete this region. So we need to tell the larger
                    // layer that one of its subregion is being deleted.
                    // The next call to `complete_removals` on the larger layer
                    // will take care of updating its own regions by taking into
                    // account this deleted subregion.
                    if let Some(larger_layer) = &mut larger_layer {
                        larger_layer.unregister_subregion(region_id, &region, proxies);
                    }
                    // Move the proxy to infinity.
                    let proxy = &mut proxies[region_id];
                    proxy.aabb.mins.coords.fill(DELETED_AABB_VALUE);
                    proxy.aabb.maxs.coords.fill(DELETED_AABB_VALUE);
                    // Mark the proxy as deleted.
                    proxies.remove(region_id);
                    // Recycle the region's allocations for future regions.
                    pool.push(region);
                } else {
                    // The region still contains subproper proxies: keep it alive.
                    proxies[*region_id.get()].data.set_region(region);
                }
            }
        }
    }
}
/// Notifies every region of this layer that the proper proxy `proxy_id` moved to a
/// bigger layer (so it no longer counts as a subproper proxy here).
///
/// Any region that was kept alive only by that proxy is queued for removal.
pub fn proper_proxy_moved_to_bigger_layer(
    &mut self,
    proxies: &mut SAPProxies,
    proxy_id: BroadPhaseProxyIndex,
) {
    for (point, region_id) in &self.regions {
        let region = proxies[*region_id].data.as_region_mut();
        let region_contained_proxy = region.proper_proxy_moved_to_a_bigger_layer(proxy_id);
        // If that proper proxy was the last one keeping that region
        // alive, mark the region as potentially removable.
        if region_contained_proxy && !region.contains_subproper_proxies() {
            self.regions_to_potentially_remove.push(*point);
        }
    }
}
}

View File

@@ -1,138 +0,0 @@
use super::NEXT_FREE_SENTINEL;
use crate::geometry::broad_phase_multi_sap::SAPRegion;
use crate::geometry::{BroadPhaseProxyIndex, ColliderHandle};
use parry::bounding_volume::Aabb;
use std::ops::{Index, IndexMut};
/// The payload of a broad-phase proxy: either a collider handle, or a region.
///
/// The region is stored as an `Option<Box<_>>` so it can be temporarily moved
/// out (`take_region`) while the proxy array is mutated, then put back
/// (`set_region`).
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[derive(Clone)]
pub enum SAPProxyData {
    Collider(ColliderHandle),
    Region(Option<Box<SAPRegion>>),
}
impl SAPProxyData {
    /// Does this proxy wrap a region (possibly taken out)?
    pub fn is_region(&self) -> bool {
        matches!(self, Self::Region(_))
    }
    /// The wrapped region.
    ///
    /// Panics if this is a collider proxy, or if the region is currently taken out.
    pub fn as_region(&self) -> &SAPRegion {
        let SAPProxyData::Region(region) = self else {
            panic!("Invalid proxy type.")
        };
        region.as_ref().unwrap()
    }
    /// The wrapped region, mutably.
    ///
    /// Panics if this is a collider proxy, or if the region is currently taken out.
    pub fn as_region_mut(&mut self) -> &mut SAPRegion {
        let SAPProxyData::Region(region) = self else {
            panic!("Invalid proxy type.")
        };
        region.as_mut().unwrap()
    }
    /// Moves the region out of this proxy, leaving `Region(None)` behind.
    /// Returns `None` for collider proxies or already-taken regions.
    pub fn take_region(&mut self) -> Option<Box<SAPRegion>> {
        if let Self::Region(region) = self {
            region.take()
        } else {
            None
        }
    }
    /// Puts a region (back) into this proxy.
    pub fn set_region(&mut self, region: Box<SAPRegion>) {
        *self = Self::Region(Some(region));
    }
}
/// A broad-phase proxy: a collider or a (sub)region, together with its Aabb and
/// the layer it belongs to. Freed proxies are chained through `next_free`.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[derive(Clone)]
pub struct SAPProxy {
    pub data: SAPProxyData,
    pub aabb: Aabb,
    pub next_free: BroadPhaseProxyIndex,
    // TODO: pack the layer_id and layer_depth into a single u16?
    pub layer_id: u8,
    pub layer_depth: i8,
}
impl SAPProxy {
    /// A proxy wrapping a collider.
    pub fn collider(handle: ColliderHandle, aabb: Aabb, layer_id: u8, layer_depth: i8) -> Self {
        Self {
            data: SAPProxyData::Collider(handle),
            next_free: NEXT_FREE_SENTINEL,
            aabb,
            layer_id,
            layer_depth,
        }
    }
    /// A proxy wrapping a region, to be registered as a subregion of a larger layer.
    pub fn subregion(subregion: Box<SAPRegion>, aabb: Aabb, layer_id: u8, layer_depth: i8) -> Self {
        Self {
            data: SAPProxyData::Region(Some(subregion)),
            next_free: NEXT_FREE_SENTINEL,
            aabb,
            layer_id,
            layer_depth,
        }
    }
}
/// The set of all broad-phase proxies, with slot reuse through an intrusive
/// free-list threaded through `SAPProxy::next_free`.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[derive(Clone)]
pub struct SAPProxies {
    pub elements: Vec<SAPProxy>,
    // Head of the free-list, or `NEXT_FREE_SENTINEL` if no slot is free.
    pub first_free: BroadPhaseProxyIndex,
}
impl Default for SAPProxies {
    fn default() -> Self {
        Self::new()
    }
}
impl SAPProxies {
    /// An empty proxy set.
    pub fn new() -> Self {
        Self {
            elements: Vec::new(),
            first_free: NEXT_FREE_SENTINEL,
        }
    }
    /// Inserts a proxy, reusing a freed slot when one is available, and returns
    /// the index of the slot it was stored into.
    pub fn insert(&mut self, proxy: SAPProxy) -> BroadPhaseProxyIndex {
        if self.first_free == NEXT_FREE_SENTINEL {
            // No free slot: append at the end.
            self.elements.push(proxy);
            return self.elements.len() as u32 - 1;
        }
        // Pop the head of the free-list and overwrite it.
        let slot = self.first_free;
        self.first_free = self.elements[slot as usize].next_free;
        self.elements[slot as usize] = proxy;
        slot
    }
    /// Marks the slot `proxy_id` as free. Its content is left in place until the
    /// slot is reused by a later `insert`.
    pub fn remove(&mut self, proxy_id: BroadPhaseProxyIndex) {
        let slot = &mut self.elements[proxy_id as usize];
        slot.next_free = self.first_free;
        self.first_free = proxy_id;
    }
    // NOTE: this must not take holes into account.
    pub fn get_mut(&mut self, i: BroadPhaseProxyIndex) -> Option<&mut SAPProxy> {
        self.elements.get_mut(i as usize)
    }
    // NOTE: this must not take holes into account.
    pub fn get(&self, i: BroadPhaseProxyIndex) -> Option<&SAPProxy> {
        self.elements.get(i as usize)
    }
}
impl Index<BroadPhaseProxyIndex> for SAPProxies {
    type Output = SAPProxy;
    fn index(&self, i: BroadPhaseProxyIndex) -> &SAPProxy {
        &self.elements[i as usize]
    }
}
impl IndexMut<BroadPhaseProxyIndex> for SAPProxies {
    fn index_mut(&mut self, i: BroadPhaseProxyIndex) -> &mut SAPProxy {
        &mut self.elements[i as usize]
    }
}

View File

@@ -1,260 +0,0 @@
use super::{SAPAxis, SAPProxies};
use crate::geometry::BroadPhaseProxyIndex;
use crate::math::DIM;
use bit_vec::BitVec;
use parry::bounding_volume::Aabb;
use parry::utils::hashmap::HashMap;
pub type SAPRegionPool = Vec<Box<SAPRegion>>;
/// One rectangular cell of a SAP layer, running a sweep-and-prune along each dimension.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[derive(Clone)]
pub struct SAPRegion {
    // One sorted endpoint list per dimension.
    pub axes: [SAPAxis; DIM],
    // Bitmask of the proxies currently part of this region, indexed by proxy id.
    pub existing_proxies: BitVec,
    #[cfg_attr(feature = "serde-serialize", serde(skip))]
    // Proxies queued for insertion by `preupdate_proxy`, consumed by `update`.
    pub to_insert: Vec<BroadPhaseProxyIndex>, // Workspace
    // Proxies of the subregions registered into this region.
    pub subregions: Vec<BroadPhaseProxyIndex>,
    // Index of this region inside the `subregions` list of its parent region
    // (on the larger layer), or `crate::INVALID_U32` if not registered.
    pub id_in_parent_subregion: u32,
    // Number of pending `update` passes still needed for this region.
    pub update_count: u8,
    // Set when a subregion was unregistered; handled by `update_after_subregion_removal`.
    pub needs_update_after_subregion_removal: bool,
    // Number of proxies (added to this region) that originates
    // from the layer at depth <= the depth of the layer containing
    // this region.
    subproper_proxy_count: usize,
}
impl SAPRegion {
pub fn new(bounds: Aabb) -> Self {
let axes = [
SAPAxis::new(bounds.mins.x, bounds.maxs.x),
SAPAxis::new(bounds.mins.y, bounds.maxs.y),
#[cfg(feature = "dim3")]
SAPAxis::new(bounds.mins.z, bounds.maxs.z),
];
SAPRegion {
axes,
existing_proxies: BitVec::new(),
to_insert: Vec::new(),
subregions: Vec::new(),
id_in_parent_subregion: crate::INVALID_U32,
update_count: 0,
needs_update_after_subregion_removal: false,
subproper_proxy_count: 0,
}
}
/// Reinitializes a pooled region so it covers `bounds`, reusing its allocations.
pub fn recycle(bounds: Aabb, mut old: Box<Self>) -> Box<Self> {
    // Correct the bounds of each axis. The axis may still contain endpoints of
    // non-proper proxies, so it must be cleared too.
    for (i, axis) in old.axes.iter_mut().enumerate() {
        axis.clear();
        axis.min_bound = bounds.mins[i];
        axis.max_bound = bounds.maxs[i];
    }
    old.update_count = 0;
    if cfg!(feature = "enhanced-determinism") {
        // Allocate a fresh bitmask so its storage doesn't depend on past usage.
        old.existing_proxies = BitVec::new();
    } else {
        old.existing_proxies.clear();
    }
    old.id_in_parent_subregion = crate::INVALID_U32;
    old.subregions.clear();
    old.needs_update_after_subregion_removal = false;
    // The rest of the fields should already be "empty".
    assert_eq!(old.subproper_proxy_count, 0);
    assert!(old.to_insert.is_empty());
    old
}
#[allow(clippy::vec_box)] // PERF: see if Box actually makes it faster (due to less copying).
/// Pops a region from `pool` and recycles it, or allocates a new one if the pool
/// is empty.
pub fn recycle_or_new(bounds: Aabb, pool: &mut Vec<Box<Self>>) -> Box<Self> {
    match pool.pop() {
        Some(old) => Self::recycle(bounds, old),
        None => Box::new(Self::new(bounds)),
    }
}
/// Does this region still contain endpoints of subproper proxies?
pub fn contains_subproper_proxies(&self) -> bool {
    self.subproper_proxy_count != 0
}
/// If this region contains the given proxy, decrements this region's subproper
/// proxy count.
///
/// Returns `true` if this region contained the proxy, `false` otherwise.
pub fn proper_proxy_moved_to_a_bigger_layer(&mut self, proxy_id: BroadPhaseProxyIndex) -> bool {
    if self.existing_proxies.get(proxy_id as usize) != Some(true) {
        return false;
    }
    // NOTE: we only record that this proxy is no longer a subproper proxy.
    // It is still part of this region, so we must neither clear its bit in
    // `existing_proxies` nor delete its endpoints.
    self.subproper_proxy_count -= 1;
    true
}
/// Deletes from the axes of this region all the endpoints that point
/// to a region (i.e. subregion proxies), clearing their `existing_proxies`
/// bits and decrementing `subproper_proxy_count` accordingly.
pub fn delete_all_region_endpoints(&mut self, proxies: &SAPProxies) {
    // Per-axis count of deleted endpoints, used for the consistency checks below.
    let mut num_deleted_subregion_endpoints = [0; DIM];
    for (i, axis) in self.axes.iter_mut().enumerate() {
        let existing_proxies = &mut self.existing_proxies;
        axis.endpoints.retain(|e| {
            // NOTE: we use `if let` instead of `unwrap` because no
            // proxy will be found for the sentinels.
            if let Some(proxy) = proxies.get(e.proxy()) {
                if proxy.data.is_region() {
                    existing_proxies.set(e.proxy() as usize, false);
                    num_deleted_subregion_endpoints[i] += 1;
                    return false;
                }
            }
            true
        });
    }
    // All axes should have deleted the same number of region endpoints.
    for k in 1..DIM {
        assert_eq!(
            num_deleted_subregion_endpoints[0],
            num_deleted_subregion_endpoints[k]
        );
    }
    // The number of deleted endpoints should be even because there
    // are two endpoints per proxy on each axes.
    assert_eq!(num_deleted_subregion_endpoints[0] % 2, 0);
    // All the region endpoints are subproper proxies.
    // So update the subproper proxy count accordingly.
    self.subproper_proxy_count -= num_deleted_subregion_endpoints[0] / 2;
}
/// Flags this region so its next update removes the deleted proxy's endpoints.
///
/// The `_proxy_id` argument is only kept for uniformity with `preupdate_proxy`;
/// the deletion itself is handled transparently during the next update.
pub fn predelete_proxy(&mut self, _proxy_id: BroadPhaseProxyIndex) {
    self.mark_as_dirty();
}
/// Ensures at least one more update pass will run on this region.
pub fn mark_as_dirty(&mut self) {
    if self.update_count == 0 {
        self.update_count = 1;
    }
}
/// Adds the subregion proxy `proxy_id` to this region, returning its index
/// inside `self.subregions`.
pub fn register_subregion(&mut self, proxy_id: BroadPhaseProxyIndex) -> usize {
    let index_in_subregions = self.subregions.len();
    self.subregions.push(proxy_id);
    self.preupdate_proxy(proxy_id, true);
    index_in_subregions
}
/// Queues the proxy `proxy_id` for insertion into this region, if it isn't
/// already part of it.
///
/// `is_subproper_proxy` indicates whether the proxy counts towards this region's
/// subproper proxy count. Returns `true` if the proxy was already present.
pub fn preupdate_proxy(
    &mut self,
    proxy_id: BroadPhaseProxyIndex,
    is_subproper_proxy: bool,
) -> bool {
    // Grow the bitmask so `proxy_id` is addressable.
    let mask_len = self.existing_proxies.len();
    if proxy_id as usize >= mask_len {
        self.existing_proxies
            .grow(proxy_id as usize + 1 - mask_len, false);
    }
    if self.existing_proxies[proxy_id as usize] {
        // Here we need a second update if all proxies exit this region. In this
        // case, we need to delete the final proxy, but the region may not have
        // Aabbs overlapping it, so it wouldn't get an update otherwise.
        self.update_count = 2;
        return true;
    }
    self.existing_proxies.set(proxy_id as usize, true);
    self.to_insert.push(proxy_id);
    if is_subproper_proxy {
        self.subproper_proxy_count += 1;
    }
    false
}
/// Flushes the endpoint deletions pending after one or more subregions were
/// removed from this region.
///
/// Does nothing unless `needs_update_after_subregion_removal` was set (by
/// `SAPLayer::unregister_subregion`). Only the first axis that encounters a
/// deleted proxy counts it (its `existing_proxies` bit is cleared on first
/// encounter), so the per-axis subtraction does not double-count.
pub fn update_after_subregion_removal(&mut self, proxies: &SAPProxies, layer_depth: i8) {
    if self.needs_update_after_subregion_removal {
        for axis in &mut self.axes {
            let removed_count = axis
                .delete_deleted_proxies_and_endpoints_after_subregion_removal(
                    proxies,
                    &mut self.existing_proxies,
                    layer_depth,
                );
            // Defensive: the counter should never underflow; log and clamp to 0
            // instead of panicking in release builds.
            if removed_count > self.subproper_proxy_count {
                log::debug!(
                    "Reached unexpected state: attempted to remove more sub-proper proxies than added (removing: {}, total: {}).",
                    removed_count,
                    self.subproper_proxy_count
                );
                self.subproper_proxy_count = 0;
            } else {
                self.subproper_proxy_count -= removed_count;
            }
        }
        self.needs_update_after_subregion_removal = false;
    }
}
/// Runs one SAP update pass on this region: re-sorts the endpoints of each axis,
/// drops out-of-bounds proxies, then batch-inserts the queued `to_insert`
/// proxies, collecting overlap changes into `reporting`.
#[profiling::function]
pub fn update(
    &mut self,
    proxies: &SAPProxies,
    layer_depth: i8,
    reporting: &mut HashMap<(u32, u32), bool>,
) {
    if self.update_count > 0 {
        // Update endpoints.
        let mut total_deleted = 0;
        let mut total_deleted_subproper = 0;
        for dim in 0..DIM {
            self.axes[dim].update_endpoints(dim, proxies, reporting);
            let (num_deleted, num_deleted_subproper) = self.axes[dim]
                .delete_out_of_bounds_proxies(proxies, &mut self.existing_proxies, layer_depth);
            total_deleted += num_deleted;
            total_deleted_subproper += num_deleted_subproper;
        }
        if total_deleted > 0 {
            // Some proxies left this region: purge their remaining endpoints
            // from every axis.
            self.subproper_proxy_count -= total_deleted_subproper;
            for dim in 0..DIM {
                self.axes[dim].delete_out_of_bounds_endpoints(&self.existing_proxies);
            }
        }
        self.update_count -= 1;
    }
    if !self.to_insert.is_empty() {
        // Insert new proxies. Only the first axis receives `reporting`, so each
        // new pair is reported once rather than once per dimension.
        for dim in 1..DIM {
            self.axes[dim].batch_insert(dim, &self.to_insert, proxies, None);
        }
        self.axes[0].batch_insert(0, &self.to_insert, proxies, Some(reporting));
        self.to_insert.clear();
        // In the rare event that all proxies leave this region in the next step, we need an
        // update to remove them.
        self.update_count = 1;
    }
}
}

View File

@@ -1,72 +0,0 @@
use crate::math::{Point, Real, Vector};
use parry::bounding_volume::Aabb;
// Integer type used for region grid coordinates (wider when `f64` precision is enabled).
#[cfg(feature = "f32")]
pub type RegionKey = i32;
#[cfg(feature = "f64")]
pub type RegionKey = i64;
// Number of sentinel endpoints stored at each end of a SAP axis.
pub(crate) const NUM_SENTINELS: usize = 1;
// Marker for the end of the proxies free-list (see `SAPProxies::first_free`).
pub(crate) const NEXT_FREE_SENTINEL: u32 = u32::MAX;
// Endpoint value of the axis sentinels (`±SENTINEL_VALUE`).
pub(crate) const SENTINEL_VALUE: Real = Real::MAX;
// Aabb coordinate used to mark a proxy as deleted; strictly below SENTINEL_VALUE.
pub(crate) const DELETED_AABB_VALUE: Real = SENTINEL_VALUE / 2.0;
// Largest coordinate magnitude allowed for proxy Aabbs (see `clamp_point`).
pub(crate) const MAX_AABB_EXTENT: Real = SENTINEL_VALUE / 4.0;
// Region width at depth 0 (see `region_width`).
pub(crate) const REGION_WIDTH_BASE: Real = 1.0;
// Factor by which the region width grows per depth increment.
pub(crate) const REGION_WIDTH_POWER_BASIS: Real = 5.0;
/// Returns the pair `(a, b)` ordered so that the smaller value comes first.
///
/// Panics if `a == b`: a broad-phase pair must involve two distinct proxies.
pub(crate) fn sort2(a: u32, b: u32) -> (u32, u32) {
    assert_ne!(a, b);
    (a.min(b), a.max(b))
}
/// Clamps each coordinate of `point` into `[-MAX_AABB_EXTENT, MAX_AABB_EXTENT]`,
/// keeping proxy coordinates well below `SENTINEL_VALUE`.
/// NOTE(review): `na::clamp` relies on `PartialOrd` comparisons, so NaN coordinates
/// appear to collapse to the lower bound rather than propagate — confirm.
pub(crate) fn clamp_point(point: Point<Real>) -> Point<Real> {
    point.map(|e| na::clamp(e, -MAX_AABB_EXTENT, MAX_AABB_EXTENT))
}
/// Converts a world-space point to the grid coordinates of the region containing it,
/// for a grid with cells of width `region_width`.
///
/// Panics if a coordinate lies outside the representable `RegionKey` range (region
/// keys would otherwise overlap).
pub(crate) fn point_key(point: Point<Real>, region_width: Real) -> Point<RegionKey> {
    (point / region_width)
        .coords
        .map(|e| {
            let floored = e.floor();
            // If the region is outside this range, the region keys will overlap.
            assert!(floored < RegionKey::MAX as Real);
            assert!(floored > RegionKey::MIN as Real);
            floored as RegionKey
        })
        .into()
}
/// The world-space Aabb of the grid region with coordinates `index` and cell width
/// `region_width`.
pub(crate) fn region_aabb(index: Point<RegionKey>, region_width: Real) -> Aabb {
    let mins: Point<Real> = index.coords.map(|i| i as Real * region_width).into();
    Aabb::new(mins, mins + Vector::repeat(region_width))
}
/// The region width of the layer at the given `depth`, capped by `MAX_AABB_EXTENT`.
pub(crate) fn region_width(depth: i8) -> Real {
    let width = REGION_WIDTH_BASE * REGION_WIDTH_POWER_BASIS.powi(i32::from(depth));
    width.min(MAX_AABB_EXTENT)
}
/// Computes the depth of the layer the given [`Aabb`] should be part of.
///
/// The idea here is that an [`Aabb`] should be part of a layer which has
/// regions large enough so that one [`Aabb`] doesn't cross too many
/// regions. But the regions must also not be too large, otherwise
/// we are losing the benefits of Multi-SAP.
///
/// In the code below, we select a layer such that each region can
/// contain at least a chain of 10 contiguous objects with that [`Aabb`].
pub(crate) fn layer_containing_aabb(aabb: &Aabb) -> i8 {
    // Max number of elements of this size we would like one region to be able to contain.
    const NUM_ELEMENTS_PER_DIMENSION: Real = 10.0;
    let target_region_width = 2.0 * aabb.half_extents().norm() * NUM_ELEMENTS_PER_DIMENSION;
    let raw_depth = (target_region_width / REGION_WIDTH_BASE)
        .log(REGION_WIDTH_POWER_BASIS)
        .round();
    // Clamp into the `i8` range before casting.
    raw_depth.max(i8::MIN as Real).min(i8::MAX as Real) as i8
}

View File

@@ -1,93 +0,0 @@
use crate::dynamics::RigidBodySet;
use crate::geometry::{BroadPhase, BroadPhasePairEvent, ColliderHandle, ColliderPair, ColliderSet};
use parry::math::Real;
use parry::partitioning::Qbvh;
use parry::partitioning::QbvhUpdateWorkspace;
use parry::query::visitors::BoundingVolumeIntersectionsSimultaneousVisitor;
/// A broad-phase based on parry's `Qbvh` acceleration structure, with one leaf
/// per collider.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[derive(Clone)]
pub struct BroadPhaseQbvh {
    qbvh: Qbvh<ColliderHandle>,
    // Traversal stack kept across updates to avoid re-allocating it.
    stack: Vec<(u32, u32)>,
    #[cfg_attr(feature = "serde-serialize", serde(skip))]
    // Scratch space reused by the Qbvh refit/rebalance steps.
    workspace: QbvhUpdateWorkspace,
}
impl Default for BroadPhaseQbvh {
    fn default() -> Self {
        Self::new()
    }
}
impl BroadPhaseQbvh {
    /// Creates an empty Qbvh-based broad-phase.
    pub fn new() -> Self {
        Self {
            qbvh: Qbvh::new(),
            stack: Vec::new(),
            workspace: QbvhUpdateWorkspace::default(),
        }
    }
}
impl BroadPhase for BroadPhaseQbvh {
    /// Updates the Qbvh and reports new collision pairs into `events`.
    ///
    /// On the first call (empty tree) the Qbvh is rebuilt from every collider and
    /// a full tree-vs-self traversal reports all intersecting pairs. On later
    /// calls, modified/removed colliders are refitted incrementally and only the
    /// modified subtrees are traversed.
    fn update(
        &mut self,
        _dt: Real,
        prediction_distance: Real,
        colliders: &mut ColliderSet,
        _bodies: &RigidBodySet,
        modified_colliders: &[ColliderHandle],
        removed_colliders: &[ColliderHandle],
        events: &mut Vec<BroadPhasePairEvent>,
    ) {
        // Margin added around the Aabbs stored in the tree.
        let margin = 0.01;
        // Nothing moved since the last update: nothing to do.
        if modified_colliders.is_empty() {
            return;
        }
        // Visitor to find collision pairs.
        let mut visitor = BoundingVolumeIntersectionsSimultaneousVisitor::new(
            |co1: &ColliderHandle, co2: &ColliderHandle| {
                // Skip the trivial self-pair produced by the self-traversal.
                if *co1 != *co2 {
                    events.push(BroadPhasePairEvent::AddPair(ColliderPair::new(*co1, *co2)));
                }
                true
            },
        );
        let full_rebuild = self.qbvh.raw_nodes().is_empty();
        if full_rebuild {
            // First update: build the tree from scratch with every collider's
            // Aabb inflated by half the prediction distance.
            self.qbvh.clear_and_rebuild(
                colliders.iter().map(|(handle, collider)| {
                    (
                        handle,
                        collider.compute_collision_aabb(prediction_distance / 2.0),
                    )
                }),
                margin,
            );
            self.qbvh
                .traverse_bvtt_with_stack(&self.qbvh, &mut visitor, &mut self.stack);
        } else {
            // Incremental update: flag modified leaves, drop removed ones, then
            // refit and traverse only the modified subtrees.
            for modified in modified_colliders {
                self.qbvh.pre_update_or_insert(*modified);
            }
            for removed in removed_colliders {
                self.qbvh.remove(*removed);
            }
            let _ = self.qbvh.refit(margin, &mut self.workspace, |handle| {
                colliders[*handle].compute_collision_aabb(prediction_distance / 2.0)
            });
            // self.qbvh
            //     .traverse_bvtt_with_stack(&self.qbvh, &mut visitor, &mut self.stack);
            self.qbvh
                .traverse_modified_bvtt_with_stack(&self.qbvh, &mut visitor, &mut self.stack);
            self.qbvh.rebalance(margin, &mut self.workspace);
        }
    }
}

View File

@@ -2,11 +2,11 @@ use crate::dynamics::{CoefficientCombineRule, MassProperties, RigidBodyHandle};
#[cfg(feature = "dim3")]
use crate::geometry::HeightFieldFlags;
use crate::geometry::{
ActiveCollisionTypes, BroadPhaseProxyIndex, ColliderBroadPhaseData, ColliderChanges,
ColliderFlags, ColliderMassProps, ColliderMaterial, ColliderParent, ColliderPosition,
ColliderShape, ColliderType, InteractionGroups, MeshConverter, MeshConverterError, SharedShape,
ActiveCollisionTypes, ColliderChanges, ColliderFlags, ColliderMassProps, ColliderMaterial,
ColliderParent, ColliderPosition, ColliderShape, ColliderType, InteractionGroups,
MeshConverter, MeshConverterError, SharedShape,
};
use crate::math::{AngVector, Isometry, Point, Real, Rotation, Vector, DIM};
use crate::math::{AngVector, DIM, Isometry, Point, Real, Rotation, Vector};
use crate::parry::transformation::vhacd::VHACDParameters;
use crate::pipeline::{ActiveEvents, ActiveHooks};
use crate::prelude::ColliderEnabled;
@@ -29,7 +29,6 @@ pub struct Collider {
pub(crate) pos: ColliderPosition,
pub(crate) material: ColliderMaterial,
pub(crate) flags: ColliderFlags,
pub(crate) bf_data: ColliderBroadPhaseData,
contact_skin: Real,
contact_force_event_threshold: Real,
/// User-defined data associated to this collider.
@@ -38,7 +37,6 @@ pub struct Collider {
impl Collider {
pub(crate) fn reset_internal_references(&mut self) {
self.bf_data.proxy_index = crate::INVALID_U32;
self.changes = ColliderChanges::all();
}
@@ -54,21 +52,6 @@ impl Collider {
}
}
/// An internal index associated to this collider by the broad-phase algorithm.
pub fn internal_broad_phase_proxy_index(&self) -> BroadPhaseProxyIndex {
self.bf_data.proxy_index
}
/// Sets the internal index associated to this collider by the broad-phase algorithm.
///
/// This must **not** be called, unless you are implementing your own custom broad-phase
/// that require storing an index in the collider struct.
/// Modifying that index outside of a custom broad-phase code will most certainly break
/// the physics engine.
pub fn set_internal_broad_phase_proxy_index(&mut self, id: BroadPhaseProxyIndex) {
self.bf_data.proxy_index = id;
}
/// The rigid body this collider is attached to.
pub fn parent(&self) -> Option<RigidBodyHandle> {
self.parent.map(|parent| parent.handle)
@@ -107,7 +90,6 @@ impl Collider {
pos,
material,
flags,
bf_data: _bf_data, // Internal ids must not be overwritten.
contact_force_event_threshold,
user_data,
contact_skin,
@@ -1079,7 +1061,6 @@ impl ColliderBuilder {
};
let changes = ColliderChanges::all();
let pos = ColliderPosition(self.position);
let bf_data = ColliderBroadPhaseData::default();
let coll_type = if self.is_sensor {
ColliderType::Sensor
} else {
@@ -1093,7 +1074,6 @@ impl ColliderBuilder {
parent: None,
changes,
pos,
bf_data,
flags,
coll_type,
contact_force_event_threshold: self.contact_force_event_threshold,

View File

@@ -1,7 +1,6 @@
use crate::dynamics::{CoefficientCombineRule, MassProperties, RigidBodyHandle, RigidBodyType};
use crate::geometry::{BroadPhaseProxyIndex, InteractionGroups, Shape, SharedShape};
use crate::geometry::{InteractionGroups, Shape, SharedShape};
use crate::math::{Isometry, Real};
use crate::parry::partitioning::IndexedData;
use crate::pipeline::{ActiveEvents, ActiveHooks};
use std::ops::{Deref, DerefMut};
@@ -31,16 +30,6 @@ impl ColliderHandle {
}
}
impl IndexedData for ColliderHandle {
fn default() -> Self {
Self(IndexedData::default())
}
fn index(&self) -> usize {
self.0.index()
}
}
bitflags::bitflags! {
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
@@ -115,21 +104,6 @@ impl ColliderType {
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
/// Data associated to a collider that takes part to a broad-phase algorithm.
pub struct ColliderBroadPhaseData {
pub(crate) proxy_index: BroadPhaseProxyIndex,
}
impl Default for ColliderBroadPhaseData {
fn default() -> Self {
ColliderBroadPhaseData {
proxy_index: crate::INVALID_U32,
}
}
}
/// The shape of a collider.
pub type ColliderShape = SharedShape;

View File

@@ -75,7 +75,8 @@ impl MeshConverter {
SharedShape::new(cuboid)
}
MeshConverter::Aabb => {
let aabb = bounding_volume::details::local_point_cloud_aabb(&vertices);
let aabb =
bounding_volume::details::local_point_cloud_aabb(vertices.iter().copied());
let cuboid = Cuboid::new(aabb.half_extents());
transform = Isometry::from(aabb.center().coords);
SharedShape::new(cuboid)

View File

@@ -1,7 +1,8 @@
//! Structures related to geometry: colliders, shapes, etc.
pub use self::broad_phase::BroadPhase;
pub use self::broad_phase_multi_sap::{BroadPhaseMultiSap, BroadPhasePairEvent, ColliderPair};
pub use self::broad_phase_bvh::{BroadPhaseBvh, BvhOptimizationStrategy};
pub use self::broad_phase_pair_event::{BroadPhasePairEvent, ColliderPair};
pub use self::collider::{Collider, ColliderBuilder};
pub use self::collider_components::*;
pub use self::collider_set::ColliderSet;
@@ -16,6 +17,7 @@ pub use self::mesh_converter::{MeshConverter, MeshConverterError};
pub use self::narrow_phase::NarrowPhase;
pub use parry::bounding_volume::BoundingVolume;
pub use parry::partitioning::{Bvh, BvhBuildStrategy};
pub use parry::query::{PointQuery, PointQueryWithLocation, RayCast, TrackedContact};
pub use parry::shape::{SharedShape, VoxelState, VoxelType, Voxels};
@@ -54,7 +56,7 @@ pub type PointProjection = parry::query::PointProjection;
/// The result of a shape-cast between two shapes.
pub type ShapeCastHit = parry::query::ShapeCastHit;
/// The default broad-phase implementation recommended for general-purpose usage.
pub type DefaultBroadPhase = BroadPhaseMultiSap;
pub type DefaultBroadPhase = BroadPhaseBvh;
bitflags::bitflags! {
/// Flags providing more information regarding a collision event.
@@ -181,24 +183,16 @@ impl ContactForceEvent {
}
}
pub(crate) use self::broad_phase::BroadPhaseProxyIndex;
pub(crate) use self::collider_set::ModifiedColliders;
pub(crate) use self::narrow_phase::ContactManifoldIndex;
pub(crate) use parry::partitioning::Qbvh;
pub use parry::shape::*;
#[cfg(feature = "serde-serialize")]
pub(crate) fn default_persistent_query_dispatcher(
) -> std::sync::Arc<dyn parry::query::PersistentQueryDispatcher<ContactManifoldData, ContactData>> {
pub(crate) fn default_persistent_query_dispatcher()
-> std::sync::Arc<dyn parry::query::PersistentQueryDispatcher<ContactManifoldData, ContactData>> {
std::sync::Arc::new(parry::query::DefaultQueryDispatcher)
}
#[cfg(feature = "serde-serialize")]
pub(crate) fn default_query_dispatcher() -> std::sync::Arc<dyn parry::query::QueryDispatcher> {
std::sync::Arc::new(parry::query::DefaultQueryDispatcher)
}
mod broad_phase_multi_sap;
mod collider_components;
mod contact_pair;
mod interaction_graph;
@@ -206,7 +200,8 @@ mod interaction_groups;
mod narrow_phase;
mod broad_phase;
mod broad_phase_qbvh;
mod broad_phase_bvh;
mod broad_phase_pair_event;
mod collider;
mod collider_set;
mod mesh_converter;

View File

@@ -1,8 +1,8 @@
#[cfg(feature = "parallel")]
use rayon::prelude::*;
use crate::data::graph::EdgeIndex;
use crate::data::Coarena;
use crate::data::graph::EdgeIndex;
use crate::dynamics::{
CoefficientCombineRule, ImpulseJointSet, IslandManager, RigidBodyDominance, RigidBodySet,
RigidBodyType,
@@ -89,7 +89,7 @@ impl NarrowPhase {
}
/// The query dispatcher used by this narrow-phase to select the right collision-detection
/// algorithms depending of the shape types.
/// algorithms depending on the shape types.
pub fn query_dispatcher(
&self,
) -> &dyn PersistentQueryDispatcher<ContactManifoldData, ContactData> {
@@ -1189,7 +1189,7 @@ mod test {
use crate::prelude::{
CCDSolver, ColliderBuilder, DefaultBroadPhase, IntegrationParameters, PhysicsPipeline,
QueryPipeline, RigidBodyBuilder,
RigidBodyBuilder,
};
use super::*;
@@ -1237,7 +1237,6 @@ mod test {
let mut impulse_joint_set = ImpulseJointSet::new();
let mut multibody_joint_set = MultibodyJointSet::new();
let mut ccd_solver = CCDSolver::new();
let mut query_pipeline = QueryPipeline::new();
let physics_hooks = ();
let event_handler = ();
@@ -1252,7 +1251,6 @@ mod test {
&mut impulse_joint_set,
&mut multibody_joint_set,
&mut ccd_solver,
Some(&mut query_pipeline),
&physics_hooks,
&event_handler,
);
@@ -1288,7 +1286,6 @@ mod test {
&mut impulse_joint_set,
&mut multibody_joint_set,
&mut ccd_solver,
Some(&mut query_pipeline),
&physics_hooks,
&event_handler,
);
@@ -1317,7 +1314,6 @@ mod test {
&mut impulse_joint_set,
&mut multibody_joint_set,
&mut ccd_solver,
Some(&mut query_pipeline),
&physics_hooks,
&event_handler,
);
@@ -1385,7 +1381,6 @@ mod test {
let mut impulse_joint_set = ImpulseJointSet::new();
let mut multibody_joint_set = MultibodyJointSet::new();
let mut ccd_solver = CCDSolver::new();
let mut query_pipeline = QueryPipeline::new();
let physics_hooks = ();
let event_handler = ();
@@ -1400,7 +1395,6 @@ mod test {
&mut impulse_joint_set,
&mut multibody_joint_set,
&mut ccd_solver,
Some(&mut query_pipeline),
&physics_hooks,
&event_handler,
);
@@ -1435,7 +1429,6 @@ mod test {
&mut impulse_joint_set,
&mut multibody_joint_set,
&mut ccd_solver,
Some(&mut query_pipeline),
&physics_hooks,
&event_handler,
);
@@ -1462,7 +1455,6 @@ mod test {
&mut impulse_joint_set,
&mut multibody_joint_set,
&mut ccd_solver,
Some(&mut query_pipeline),
&physics_hooks,
&event_handler,
);

View File

@@ -40,7 +40,9 @@ pub use rayon;
not(feature = "simd-stable"),
not(feature = "simd-nightly")
))]
std::compile_error!("The `simd-is-enabled` feature should not be enabled explicitly. Please enable the `simd-stable` or the `simd-nightly` feature instead.");
std::compile_error!(
"The `simd-is-enabled` feature should not be enabled explicitly. Please enable the `simd-stable` or the `simd-nightly` feature instead."
);
#[cfg(all(feature = "simd-is-enabled", feature = "enhanced-determinism"))]
std::compile_error!(
"SIMD cannot be enabled when the `enhanced-determinism` feature is also enabled."
@@ -216,6 +218,6 @@ pub mod prelude {
pub use crate::geometry::*;
pub use crate::math::*;
pub use crate::pipeline::*;
pub use na::{point, vector, DMatrix, DVector};
pub use na::{DMatrix, DVector, point, vector};
pub extern crate nalgebra;
}

View File

@@ -1,12 +1,12 @@
//! Physics pipeline structures.
use crate::dynamics::{ImpulseJointSet, MultibodyJointSet};
use crate::dynamics::{ImpulseJointSet, IntegrationParameters, MultibodyJointSet};
use crate::geometry::{
BroadPhase, BroadPhasePairEvent, ColliderChanges, ColliderHandle, ColliderPair,
ModifiedColliders, NarrowPhase,
};
use crate::math::Real;
use crate::pipeline::{EventHandler, PhysicsHooks, QueryPipeline};
use crate::pipeline::{EventHandler, PhysicsHooks};
use crate::{dynamics::RigidBodySet, geometry::ColliderSet};
/// The collision pipeline, responsible for performing collision detection between colliders.
@@ -58,9 +58,14 @@ impl CollisionPipeline {
self.broad_phase_events.clear();
self.broadphase_collider_pairs.clear();
let params = IntegrationParameters {
normalized_prediction_distance: prediction_distance,
dt: 0.0,
..Default::default()
};
broad_phase.update(
0.0,
prediction_distance,
&params,
colliders,
bodies,
modified_colliders,
@@ -117,7 +122,6 @@ impl CollisionPipeline {
narrow_phase: &mut NarrowPhase,
bodies: &mut RigidBodySet,
colliders: &mut ColliderSet,
query_pipeline: Option<&mut QueryPipeline>,
hooks: &dyn PhysicsHooks,
events: &dyn EventHandler,
) {
@@ -161,10 +165,6 @@ impl CollisionPipeline {
true,
);
if let Some(queries) = query_pipeline {
queries.update_incremental(colliders, &modified_colliders, &removed_colliders, true);
}
self.clear_modified_colliders(colliders, &mut modified_colliders);
removed_colliders.clear();
}
@@ -198,7 +198,7 @@ mod tests {
let _ = collider_set.insert(collider_b);
let integration_parameters = IntegrationParameters::default();
let mut broad_phase = BroadPhaseMultiSap::new();
let mut broad_phase = BroadPhaseBvh::new();
let mut narrow_phase = NarrowPhase::new();
let mut collision_pipeline = CollisionPipeline::new();
let physics_hooks = ();
@@ -209,7 +209,6 @@ mod tests {
&mut narrow_phase,
&mut rigid_body_set,
&mut collider_set,
None,
&physics_hooks,
&(),
);
@@ -250,7 +249,7 @@ mod tests {
let _ = collider_set.insert(collider_b);
let integration_parameters = IntegrationParameters::default();
let mut broad_phase = BroadPhaseMultiSap::new();
let mut broad_phase = BroadPhaseBvh::new();
let mut narrow_phase = NarrowPhase::new();
let mut collision_pipeline = CollisionPipeline::new();
let physics_hooks = ();
@@ -261,7 +260,6 @@ mod tests {
&mut narrow_phase,
&mut rigid_body_set,
&mut collider_set,
None,
&physics_hooks,
&(),
);

View File

@@ -1,13 +1,13 @@
use super::{outlines, DebugColor, DebugRenderBackend};
use super::{DebugColor, DebugRenderBackend, outlines};
use crate::dynamics::{
GenericJoint, ImpulseJointSet, MultibodyJointSet, RigidBodySet, RigidBodyType,
};
use crate::geometry::{Ball, ColliderSet, Cuboid, NarrowPhase, Shape, TypedShape};
#[cfg(feature = "dim3")]
use crate::geometry::{Cone, Cylinder};
use crate::math::{Isometry, Point, Real, Vector, DIM};
use crate::pipeline::debug_render_pipeline::debug_render_backend::DebugRenderObject;
use crate::math::{DIM, Isometry, Point, Real, Vector};
use crate::pipeline::debug_render_pipeline::DebugRenderStyle;
use crate::pipeline::debug_render_pipeline::debug_render_backend::DebugRenderObject;
use crate::utils::SimdBasis;
use parry::utils::IsometryOpt;
use std::any::TypeId;

View File

@@ -4,9 +4,7 @@ pub use collision_pipeline::CollisionPipeline;
pub use event_handler::{ActiveEvents, ChannelEventCollector, EventHandler};
pub use physics_hooks::{ActiveHooks, ContactModificationContext, PairFilterContext, PhysicsHooks};
pub use physics_pipeline::PhysicsPipeline;
pub use query_pipeline::{
generators as query_pipeline_generators, QueryFilter, QueryFilterFlags, QueryPipeline,
};
pub use query_pipeline::{QueryFilter, QueryFilterFlags, QueryPipeline, QueryPipelineMut};
#[cfg(feature = "debug-render")]
pub use self::debug_render_pipeline::{

View File

@@ -14,7 +14,7 @@ use crate::geometry::{
ContactManifoldIndex, ModifiedColliders, NarrowPhase, TemporaryInteractionIndex,
};
use crate::math::{Real, Vector};
use crate::pipeline::{EventHandler, PhysicsHooks, QueryPipeline};
use crate::pipeline::{EventHandler, PhysicsHooks};
use crate::prelude::ModifiedRigidBodies;
use {crate::dynamics::RigidBodySet, crate::geometry::ColliderSet};
@@ -117,8 +117,7 @@ impl PhysicsPipeline {
self.broad_phase_events.clear();
self.broadphase_collider_pairs.clear();
broad_phase.update(
integration_parameters.dt,
integration_parameters.prediction_distance(),
integration_parameters,
colliders,
bodies,
modified_colliders,
@@ -422,7 +421,6 @@ impl PhysicsPipeline {
impulse_joints: &mut ImpulseJointSet,
multibody_joints: &mut MultibodyJointSet,
ccd_solver: &mut CCDSolver,
mut query_pipeline: Option<&mut QueryPipeline>,
hooks: &dyn PhysicsHooks,
events: &dyn EventHandler,
) {
@@ -502,12 +500,6 @@ impl PhysicsPipeline {
true,
);
if let Some(queries) = query_pipeline.as_deref_mut() {
self.counters.stages.query_pipeline_time.start();
queries.update_incremental(colliders, &modified_colliders, &removed_colliders, false);
self.counters.stages.query_pipeline_time.pause();
}
self.counters.stages.user_changes.resume();
self.clear_modified_colliders(colliders, &mut modified_colliders);
self.clear_modified_bodies(bodies, &mut modified_bodies);
@@ -640,17 +632,6 @@ impl PhysicsPipeline {
false,
);
if let Some(queries) = query_pipeline.as_deref_mut() {
self.counters.stages.query_pipeline_time.resume();
queries.update_incremental(
colliders,
&modified_colliders,
&[],
remaining_substeps == 0,
);
self.counters.stages.query_pipeline_time.pause();
}
self.clear_modified_colliders(colliders, &mut modified_colliders);
}
@@ -679,7 +660,7 @@ mod test {
CCDSolver, ImpulseJointSet, IntegrationParameters, IslandManager, RigidBodyBuilder,
RigidBodySet,
};
use crate::geometry::{BroadPhaseMultiSap, ColliderBuilder, ColliderSet, NarrowPhase};
use crate::geometry::{BroadPhaseBvh, ColliderBuilder, ColliderSet, NarrowPhase};
use crate::math::Vector;
use crate::pipeline::PhysicsPipeline;
use crate::prelude::{MultibodyJointSet, RevoluteJointBuilder, RigidBodyType};
@@ -690,7 +671,7 @@ mod test {
let mut impulse_joints = ImpulseJointSet::new();
let mut multibody_joints = MultibodyJointSet::new();
let mut pipeline = PhysicsPipeline::new();
let mut bf = BroadPhaseMultiSap::new();
let mut bf = BroadPhaseBvh::new();
let mut nf = NarrowPhase::new();
let mut bodies = RigidBodySet::new();
let mut islands = IslandManager::new();
@@ -716,7 +697,6 @@ mod test {
&mut impulse_joints,
&mut multibody_joints,
&mut CCDSolver::new(),
None,
&(),
&(),
);
@@ -728,7 +708,7 @@ mod test {
let mut impulse_joints = ImpulseJointSet::new();
let mut multibody_joints = MultibodyJointSet::new();
let mut pipeline = PhysicsPipeline::new();
let mut bf = BroadPhaseMultiSap::new();
let mut bf = BroadPhaseBvh::new();
let mut nf = NarrowPhase::new();
let mut islands = IslandManager::new();
@@ -772,7 +752,6 @@ mod test {
&mut impulse_joints,
&mut multibody_joints,
&mut CCDSolver::new(),
None,
&(),
&(),
);
@@ -838,7 +817,7 @@ mod test {
let mut pipeline = PhysicsPipeline::new();
let gravity = Vector::y() * -9.81;
let integration_parameters = IntegrationParameters::default();
let mut broad_phase = BroadPhaseMultiSap::new();
let mut broad_phase = BroadPhaseBvh::new();
let mut narrow_phase = NarrowPhase::new();
let mut bodies = RigidBodySet::new();
let mut colliders = ColliderSet::new();
@@ -875,7 +854,6 @@ mod test {
&mut impulse_joints,
&mut multibody_joints,
&mut ccd,
None,
&physics_hooks,
&event_handler,
);
@@ -888,7 +866,7 @@ mod test {
let mut impulse_joints = ImpulseJointSet::new();
let mut multibody_joints = MultibodyJointSet::new();
let mut pipeline = PhysicsPipeline::new();
let mut bf = BroadPhaseMultiSap::new();
let mut bf = BroadPhaseBvh::new();
let mut nf = NarrowPhase::new();
let mut islands = IslandManager::new();
@@ -913,7 +891,6 @@ mod test {
&mut impulse_joints,
&mut multibody_joints,
&mut CCDSolver::new(),
None,
&(),
&(),
);
@@ -936,7 +913,6 @@ mod test {
&mut impulse_joints,
&mut multibody_joints,
&mut CCDSolver::new(),
None,
&(),
&(),
);
@@ -957,7 +933,7 @@ mod test {
let mut impulse_joints = ImpulseJointSet::new();
let mut multibody_joints = MultibodyJointSet::new();
let mut pipeline = PhysicsPipeline::new();
let mut bf = BroadPhaseMultiSap::new();
let mut bf = BroadPhaseBvh::new();
let mut nf = NarrowPhase::new();
let mut islands = IslandManager::new();
@@ -997,7 +973,6 @@ mod test {
&mut impulse_joints,
&mut multibody_joints,
&mut CCDSolver::new(),
None,
&(),
&(),
);
@@ -1041,7 +1016,7 @@ mod test {
let integration_parameters = IntegrationParameters::default();
let mut physics_pipeline = PhysicsPipeline::new();
let mut island_manager = IslandManager::new();
let mut broad_phase = BroadPhaseMultiSap::new();
let mut broad_phase = BroadPhaseBvh::new();
let mut narrow_phase = NarrowPhase::new();
let mut impulse_joint_set = ImpulseJointSet::new();
let mut multibody_joint_set = MultibodyJointSet::new();
@@ -1060,7 +1035,6 @@ mod test {
&mut impulse_joint_set,
&mut multibody_joint_set,
&mut ccd_solver,
None,
&physics_hooks,
&event_handler,
);
@@ -1087,7 +1061,6 @@ mod test {
&mut impulse_joint_set,
&mut multibody_joint_set,
&mut ccd_solver,
None,
&physics_hooks,
&event_handler,
);
@@ -1114,7 +1087,6 @@ mod test {
&mut impulse_joint_set,
&mut multibody_joint_set,
&mut ccd_solver,
None,
&physics_hooks,
&event_handler,
);

View File

@@ -0,0 +1,597 @@
use crate::dynamics::RigidBodyHandle;
use crate::geometry::{Aabb, Collider, ColliderHandle, PointProjection, Ray, RayIntersection};
use crate::geometry::{BroadPhaseBvh, InteractionGroups};
use crate::math::{Isometry, Point, Real, Vector};
use crate::{dynamics::RigidBodySet, geometry::ColliderSet};
use parry::bounding_volume::BoundingVolume;
use parry::partitioning::{Bvh, BvhNode};
use parry::query::details::{NormalConstraints, ShapeCastOptions};
use parry::query::{NonlinearRigidMotion, QueryDispatcher, RayCast, ShapeCastHit};
use parry::shape::{CompositeShape, CompositeShapeRef, FeatureId, Shape, TypedCompositeShape};
/// The query pipeline responsible for running scene queries on the physics world.
///
/// This structure is generally obtained by calling [`BroadPhaseBvh::as_query_pipeline`].
/// It is a lightweight, `Copy`able bundle of borrows: it does not own any data and
/// reflects the broad-phase BVH as of its last update.
#[derive(Copy, Clone)]
pub struct QueryPipeline<'a> {
    /// The query dispatcher for running geometric queries on leaf geometries.
    pub dispatcher: &'a dyn QueryDispatcher,
    /// A bvh containing collider indices at its leaves.
    pub bvh: &'a Bvh,
    /// Rigid-bodies potentially involved in the scene queries.
    pub bodies: &'a RigidBodySet,
    /// Colliders potentially involved in the scene queries.
    pub colliders: &'a ColliderSet,
    /// The query filters for controlling what colliders should be ignored by the queries.
    pub filter: QueryFilter<'a>,
}
/// Same as [`QueryPipeline`] but holds mutable references to the body and collider sets.
///
/// This structure is generally obtained by calling [`BroadPhaseBvh::as_query_pipeline_mut`].
/// This is useful for argument passing. Call `.as_ref()` for obtaining a `QueryPipeline`
/// to run the scene queries.
pub struct QueryPipelineMut<'a> {
    /// The query dispatcher for running geometric queries on leaf geometries.
    pub dispatcher: &'a dyn QueryDispatcher,
    /// A bvh containing collider indices at its leaves.
    pub bvh: &'a Bvh,
    /// Rigid-bodies potentially involved in the scene queries.
    pub bodies: &'a mut RigidBodySet,
    /// Colliders potentially involved in the scene queries.
    pub colliders: &'a mut ColliderSet,
    /// The query filters for controlling what colliders should be ignored by the queries.
    pub filter: QueryFilter<'a>,
}
impl QueryPipelineMut<'_> {
    /// Downgrades the mutable reference to an immutable reference.
    ///
    /// The returned [`QueryPipeline`] borrows from `self`, so it must be dropped
    /// before `self` can be used mutably again.
    pub fn as_ref(&self) -> QueryPipeline {
        // Destructure once so every field reborrow is explicit.
        let Self {
            dispatcher,
            bvh,
            bodies,
            colliders,
            filter,
        } = self;
        QueryPipeline {
            dispatcher: *dispatcher,
            bvh: *bvh,
            // Reborrow the `&mut` sets as shared references.
            bodies: &**bodies,
            colliders: &**colliders,
            filter: *filter,
        }
    }
}
// Treat the whole query pipeline as a parry composite shape whose "parts" are the
// colliders stored in the sets, so all of parry's composite-shape queries
// (ray-cast, point projection, shape-cast) can run directly on it.
impl CompositeShape for QueryPipeline<'_> {
    fn map_part_at(
        &self,
        shape_id: u32,
        f: &mut dyn FnMut(Option<&Isometry<Real>>, &dyn Shape, Option<&dyn NormalConstraints>),
    ) {
        // Forward to the typed implementation; the result (whether the collider
        // existed and passed the filter) is intentionally discarded here.
        self.map_untyped_part_at(shape_id, f);
    }
    fn bvh(&self) -> &Bvh {
        self.bvh
    }
}
// Typed counterpart of the `CompositeShape` impl: each BVH leaf id is a raw
// collider index, and a "part" is that collider's shape posed at its world position.
impl TypedCompositeShape for QueryPipeline<'_> {
    type PartNormalConstraints = ();
    type PartShape = dyn Shape;

    fn map_typed_part_at<T>(
        &self,
        shape_id: u32,
        mut f: impl FnMut(
            Option<&Isometry<Real>>,
            &Self::PartShape,
            Option<&Self::PartNormalConstraints>,
        ) -> T,
    ) -> Option<T> {
        // Resolve the raw index back into a collider; bail out if it no longer exists.
        let (collider, handle) = self.colliders.get_unknown_gen(shape_id)?;
        // Only map colliders accepted by the scene-query filter.
        self.filter
            .test(self.bodies, handle, collider)
            .then(|| f(Some(collider.position()), collider.shape(), None))
    }

    fn map_untyped_part_at<T>(
        &self,
        shape_id: u32,
        mut f: impl FnMut(Option<&Isometry<Real>>, &dyn Shape, Option<&dyn NormalConstraints>) -> T,
    ) -> Option<T> {
        // Same logic as `map_typed_part_at`, with the shape passed as `&dyn Shape`.
        let (collider, handle) = self.colliders.get_unknown_gen(shape_id)?;
        self.filter
            .test(self.bodies, handle, collider)
            .then(|| f(Some(collider.position()), collider.shape(), None))
    }
}
impl BroadPhaseBvh {
    /// Initialize a [`QueryPipeline`] for scene queries from this broad-phase.
    ///
    /// The returned pipeline borrows this broad-phase's BVH, so queries reflect
    /// the collider AABBs as of the last broad-phase update.
    pub fn as_query_pipeline<'a>(
        &'a self,
        dispatcher: &'a dyn QueryDispatcher,
        bodies: &'a RigidBodySet,
        colliders: &'a ColliderSet,
        filter: QueryFilter<'a>,
    ) -> QueryPipeline<'a> {
        QueryPipeline {
            dispatcher,
            bvh: &self.tree,
            bodies,
            colliders,
            filter,
        }
    }
    /// Initialize a [`QueryPipelineMut`] for scene queries from this broad-phase.
    ///
    /// Same as [`Self::as_query_pipeline`] but holds mutable borrows of the body
    /// and collider sets (useful for passing both around as a single argument).
    pub fn as_query_pipeline_mut<'a>(
        &'a self,
        dispatcher: &'a dyn QueryDispatcher,
        bodies: &'a mut RigidBodySet,
        colliders: &'a mut ColliderSet,
        filter: QueryFilter<'a>,
    ) -> QueryPipelineMut<'a> {
        QueryPipelineMut {
            dispatcher,
            bvh: &self.tree,
            bodies,
            colliders,
            filter,
        }
    }
}
impl<'a> QueryPipeline<'a> {
    // Converts a raw collider index (as stored in the BVH leaves) paired with
    // query data into the collider's generational handle. Returns `None` if the
    // index no longer points to a live collider.
    fn id_to_handle<T>(&self, (id, data): (u32, T)) -> Option<(ColliderHandle, T)> {
        self.colliders.get_unknown_gen(id).map(|(_, h)| (h, data))
    }

    /// Replaces [`Self::filter`] with different filtering rules.
    pub fn with_filter(self, filter: QueryFilter<'a>) -> Self {
        Self { filter, ..self }
    }

    /// Find the closest intersection between a ray and a set of colliders.
    ///
    /// Colliders are filtered according to [`Self::filter`]. Returns the handle of
    /// the hit collider and the time-of-impact along the ray.
    ///
    /// # Parameters
    /// * `ray`: the ray to cast.
    /// * `max_toi`: the maximum time-of-impact that can be reported by this cast. This effectively
    ///   limits the length of the ray to `ray.dir.norm() * max_toi`. Use `Real::MAX` for an unbounded ray.
    /// * `solid`: if this is `true` an impact at time 0.0 (i.e. at the ray origin) is returned if
    ///   it starts inside a shape. If this `false` then the ray will hit the shape's boundary
    ///   even if it starts inside of it.
    #[profiling::function]
    pub fn cast_ray(
        &self,
        ray: &Ray,
        max_toi: Real,
        solid: bool,
    ) -> Option<(ColliderHandle, Real)> {
        CompositeShapeRef(self)
            .cast_local_ray(ray, max_toi, solid)
            .and_then(|hit| self.id_to_handle(hit))
    }

    /// Find the closest intersection between a ray and a set of colliders.
    ///
    /// Same as [`Self::cast_ray`], but also computes the impact's normal and
    /// exact intersection details. Colliders are filtered according to [`Self::filter`].
    ///
    /// # Parameters
    /// * `ray`: the ray to cast.
    /// * `max_toi`: the maximum time-of-impact that can be reported by this cast. This effectively
    ///   limits the length of the ray to `ray.dir.norm() * max_toi`. Use `Real::MAX` for an unbounded ray.
    /// * `solid`: if this is `true` an impact at time 0.0 (i.e. at the ray origin) is returned if
    ///   it starts inside a shape. If this `false` then the ray will hit the shape's boundary
    ///   even if it starts inside of it.
    #[profiling::function]
    pub fn cast_ray_and_get_normal(
        &self,
        ray: &Ray,
        max_toi: Real,
        solid: bool,
    ) -> Option<(ColliderHandle, RayIntersection)> {
        CompositeShapeRef(self)
            .cast_local_ray_and_get_normal(ray, max_toi, solid)
            .and_then(|hit| self.id_to_handle(hit))
    }

    /// Iterates through all the colliders intersecting a given ray.
    ///
    /// Unlike [`Self::cast_ray`], this yields *every* filtered collider hit by the
    /// ray, in no particular order.
    ///
    /// # Parameters
    /// * `ray`: the ray to cast.
    /// * `max_toi`: the maximum time-of-impact that can be reported by this cast. This effectively
    ///   limits the length of the ray to `ray.dir.norm() * max_toi`. Use `Real::MAX` for an unbounded ray.
    /// * `solid`: if this is `true` an impact at time 0.0 (i.e. at the ray origin) is returned if
    ///   it starts inside a shape. If this `false` then the ray will hit the shape's boundary
    ///   even if it starts inside of it.
    #[profiling::function]
    pub fn intersect_ray(
        &'a self,
        ray: &'a Ray,
        max_toi: Real,
        solid: bool,
    ) -> impl Iterator<Item = (ColliderHandle, &'a Collider, RayIntersection)> + 'a {
        // TODO: add this to CompositeShapeRef?
        // Broad pass on the BVH leaves, then a narrow ray-cast per candidate collider.
        self.bvh
            .leaves(move |node: &BvhNode| node.aabb().intersects_local_ray(ray, max_toi))
            .filter_map(move |leaf| {
                let (co, co_handle) = self.colliders.get_unknown_gen(leaf)?;
                if self.filter.test(self.bodies, co_handle, co) {
                    if let Some(intersection) =
                        co.shape
                            .cast_ray_and_get_normal(co.position(), ray, max_toi, solid)
                    {
                        return Some((co_handle, co, intersection));
                    }
                }
                None
            })
    }

    /// Find the projection of a point on the closest collider.
    ///
    /// NOTE(review): the `_max_dist` argument is currently ignored — the projection is
    /// always computed on the globally closest collider. TODO: honor the max distance.
    ///
    /// # Parameters
    /// * `point` - The point to project.
    /// * `solid` - If this is set to `true` then the collider shapes are considered to
    ///   be plain (if the point is located inside of a plain shape, its projection is the point
    ///   itself). If it is set to `false` the collider shapes are considered to be hollow
    ///   (if the point is located inside of an hollow shape, it is projected on the shape's
    ///   boundary).
    #[profiling::function]
    pub fn project_point(
        &self,
        point: &Point<Real>,
        _max_dist: Real,
        solid: bool,
    ) -> Option<(ColliderHandle, PointProjection)> {
        self.id_to_handle(CompositeShapeRef(self).project_local_point(point, solid))
    }

    /// Find all the colliders containing the given point.
    ///
    /// Colliders are filtered according to [`Self::filter`].
    ///
    /// # Parameters
    /// * `point` - The point used for the containment test.
    #[profiling::function]
    pub fn intersect_point(
        &'a self,
        point: &'a Point<Real>,
    ) -> impl Iterator<Item = (ColliderHandle, &'a Collider)> + 'a {
        // TODO: add to CompositeShapeRef?
        // Broad pass on the BVH leaves, then an exact containment test per candidate.
        self.bvh
            .leaves(move |node: &BvhNode| node.aabb().contains_local_point(point))
            .filter_map(move |leaf| {
                let (co, co_handle) = self.colliders.get_unknown_gen(leaf)?;
                if self.filter.test(self.bodies, co_handle, co)
                    && co.shape.contains_point(co.position(), point)
                {
                    return Some((co_handle, co));
                }
                None
            })
    }

    /// Find the projection of a point on the closest collider.
    ///
    /// The results include the ID of the feature hit by the point.
    ///
    /// # Parameters
    /// * `point` - The point to project.
    #[profiling::function]
    pub fn project_point_and_get_feature(
        &self,
        point: &Point<Real>,
    ) -> Option<(ColliderHandle, PointProjection, FeatureId)> {
        let (id, (proj, feat)) = CompositeShapeRef(self).project_local_point_and_get_feature(point);
        // The projection may reference a collider that was since removed; bail out then.
        let handle = self.colliders.get_unknown_gen(id)?.1;
        Some((handle, proj, feat))
    }

    /// Finds all handles of all the colliders with an [`Aabb`] intersecting the given [`Aabb`].
    ///
    /// Note that the collider AABB taken into account is the one currently stored in the query
    /// pipeline's BVH. It doesn't recompute the latest collider AABB.
    #[profiling::function]
    pub fn intersect_aabb_conservative(
        &'a self,
        aabb: &'a Aabb,
    ) -> impl Iterator<Item = (ColliderHandle, &'a Collider)> + 'a {
        // TODO: add to ColliderRef?
        self.bvh
            .leaves(move |node: &BvhNode| node.aabb().intersects(aabb))
            .filter_map(move |leaf| {
                let (co, co_handle) = self.colliders.get_unknown_gen(leaf)?;
                // NOTE: do **not** recompute and check the latest collider AABB.
                // Checking only against the one in the BVH is useful, e.g., for conservative
                // scene queries for CCD.
                if self.filter.test(self.bodies, co_handle, co) {
                    return Some((co_handle, co));
                }
                None
            })
    }

    /// Casts a shape at a constant linear velocity and retrieve the first collider it hits.
    ///
    /// This is similar to ray-casting except that we are casting a whole shape instead of just a
    /// point (the ray origin). In the resulting `TOI`, witness and normal 1 refer to the world
    /// collider, and are in world space.
    ///
    /// # Parameters
    /// * `shape_pos` - The initial position of the shape to cast.
    /// * `shape_vel` - The constant velocity of the shape to cast (i.e. the cast direction).
    /// * `shape` - The shape to cast.
    /// * `options` - Options controlling the shape cast limits and behavior.
    #[profiling::function]
    pub fn cast_shape(
        &self,
        shape_pos: &Isometry<Real>,
        shape_vel: &Vector<Real>,
        shape: &dyn Shape,
        options: ShapeCastOptions,
    ) -> Option<(ColliderHandle, ShapeCastHit)> {
        CompositeShapeRef(self)
            .cast_shape(self.dispatcher, shape_pos, shape_vel, shape, options)
            .and_then(|hit| self.id_to_handle(hit))
    }

    /// Casts a shape with an arbitrary continuous motion and retrieve the first collider it hits.
    ///
    /// In the resulting `TOI`, witness and normal 1 refer to the world collider, and are in world
    /// space.
    ///
    /// # Parameters
    /// * `shape_motion` - The motion of the shape.
    /// * `shape` - The shape to cast.
    /// * `start_time` - The starting time of the interval where the motion takes place.
    /// * `end_time` - The end time of the interval where the motion takes place.
    /// * `stop_at_penetration` - If the casted shape starts in a penetration state with any
    ///   collider, two results are possible. If `stop_at_penetration` is `true` then, the
    ///   result will have a `toi` equal to `start_time`. If `stop_at_penetration` is `false`
    ///   then the nonlinear shape-casting will see if further motion with respect to the penetration normal
    ///   would result in tunnelling. If it does not (i.e. we have a separating velocity along
    ///   that normal) then the nonlinear shape-casting will attempt to find another impact,
    ///   at a time `> start_time` that could result in tunnelling.
    #[profiling::function]
    pub fn cast_shape_nonlinear(
        &self,
        shape_motion: &NonlinearRigidMotion,
        shape: &dyn Shape,
        start_time: Real,
        end_time: Real,
        stop_at_penetration: bool,
    ) -> Option<(ColliderHandle, ShapeCastHit)> {
        CompositeShapeRef(self)
            .cast_shape_nonlinear(
                self.dispatcher,
                // The scene itself is static: only the casted shape moves.
                &NonlinearRigidMotion::identity(),
                shape_motion,
                shape,
                start_time,
                end_time,
                stop_at_penetration,
            )
            .and_then(|hit| self.id_to_handle(hit))
    }

    /// Retrieve all the colliders intersecting the given shape.
    ///
    /// # Parameters
    /// * `shape_pos` - The pose of the shape to test.
    /// * `shape` - The shape to test.
    #[profiling::function]
    pub fn intersect_shape(
        &'a self,
        shape_pos: &'a Isometry<Real>,
        shape: &'a dyn Shape,
    ) -> impl Iterator<Item = (ColliderHandle, &'a Collider)> + 'a {
        // TODO: add this to CompositeShapeRef?
        // Broad pass against the shape's AABB, then an exact pairwise intersection test.
        let shape_aabb = shape.compute_aabb(shape_pos);
        self.bvh
            .leaves(move |node: &BvhNode| node.aabb().intersects(&shape_aabb))
            .filter_map(move |leaf| {
                let (co, co_handle) = self.colliders.get_unknown_gen(leaf)?;
                if self.filter.test(self.bodies, co_handle, co) {
                    let pos12 = shape_pos.inv_mul(co.position());
                    if self.dispatcher.intersection_test(&pos12, shape, co.shape()) == Ok(true) {
                        return Some((co_handle, co));
                    }
                }
                None
            })
    }
}
bitflags::bitflags! {
    #[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
    /// Flags for excluding whole sets of colliders from a scene query.
    ///
    /// Flags can be combined with the bitwise OR operator; the default (empty)
    /// value excludes nothing.
    pub struct QueryFilterFlags: u32 {
        /// Exclude from the query any collider attached to a fixed rigid-body and colliders with no rigid-body attached.
        const EXCLUDE_FIXED = 1 << 0;
        /// Exclude from the query any collider attached to a kinematic rigid-body.
        const EXCLUDE_KINEMATIC = 1 << 1;
        /// Exclude from the query any collider attached to a dynamic rigid-body.
        const EXCLUDE_DYNAMIC = 1 << 2;
        /// Exclude from the query any collider that is a sensor.
        const EXCLUDE_SENSORS = 1 << 3;
        /// Exclude from the query any collider that is not a sensor.
        const EXCLUDE_SOLIDS = 1 << 4;
        /// Excludes all colliders not attached to a dynamic rigid-body.
        const ONLY_DYNAMIC = Self::EXCLUDE_FIXED.bits() | Self::EXCLUDE_KINEMATIC.bits();
        /// Excludes all colliders not attached to a kinematic rigid-body.
        const ONLY_KINEMATIC = Self::EXCLUDE_DYNAMIC.bits() | Self::EXCLUDE_FIXED.bits();
        /// Exclude all colliders attached to a non-fixed rigid-body
        /// (this will not exclude colliders not attached to any rigid-body).
        const ONLY_FIXED = Self::EXCLUDE_DYNAMIC.bits() | Self::EXCLUDE_KINEMATIC.bits();
    }
}
impl QueryFilterFlags {
    /// Tests if the given collider should be taken into account by a scene query, based
    /// on the flags on `self`.
    #[inline]
    pub fn test(&self, bodies: &RigidBodySet, collider: &Collider) -> bool {
        if self.is_empty() {
            // No filtering requested: everything passes.
            return true;
        }

        // Sensor/solid exclusions depend only on the collider itself.
        let rejected_sensor =
            self.contains(QueryFilterFlags::EXCLUDE_SENSORS) && collider.is_sensor();
        let rejected_solid =
            self.contains(QueryFilterFlags::EXCLUDE_SOLIDS) && !collider.is_sensor();
        if rejected_sensor || rejected_solid {
            return false;
        }

        // A collider with no parent rigid-body is treated like a fixed one.
        if self.contains(QueryFilterFlags::EXCLUDE_FIXED) && collider.parent.is_none() {
            return false;
        }

        match collider.parent.and_then(|p| bodies.get(p.handle)) {
            Some(parent) => {
                let ty = parent.body_type();
                !((self.contains(QueryFilterFlags::EXCLUDE_FIXED) && ty.is_fixed())
                    || (self.contains(QueryFilterFlags::EXCLUDE_KINEMATIC) && ty.is_kinematic())
                    || (self.contains(QueryFilterFlags::EXCLUDE_DYNAMIC) && ty.is_dynamic()))
            }
            // Parent handle dangling or absent: no body-type-based rejection applies.
            None => true,
        }
    }
}
/// A filter that describes what collider should be included or excluded from a scene query.
///
/// The default filter excludes nothing.
#[derive(Copy, Clone, Default)]
pub struct QueryFilter<'a> {
    /// Flags indicating what particular type of colliders should be excluded from the scene query.
    pub flags: QueryFilterFlags,
    /// If set, only colliders with collision groups compatible with this one will
    /// be included in the scene query.
    pub groups: Option<InteractionGroups>,
    /// If set, this collider will be excluded from the scene query.
    pub exclude_collider: Option<ColliderHandle>,
    /// If set, any collider attached to this rigid-body will be excluded from the scene query.
    pub exclude_rigid_body: Option<RigidBodyHandle>,
    /// If set, any collider for which this closure returns false will be excluded from the scene query.
    #[allow(clippy::type_complexity)] // Type doesn't look really complex?
    pub predicate: Option<&'a dyn Fn(ColliderHandle, &Collider) -> bool>,
}
impl QueryFilter<'_> {
    /// Applies the filters described by `self` to a collider to determine if it has to be
    /// included in a scene query (`true`) or not (`false`).
    #[inline]
    pub fn test(&self, bodies: &RigidBodySet, handle: ColliderHandle, collider: &Collider) -> bool {
        if self.exclude_collider == Some(handle) {
            return false;
        }
        // NOTE: only compare parent handles when an excluded rigid-body is actually set;
        // otherwise a collider with no parent (`None`) would wrongly match
        // `exclude_rigid_body == None`.
        if self.exclude_rigid_body.is_some()
            && self.exclude_rigid_body == collider.parent.map(|p| p.handle)
        {
            return false;
        }
        if let Some(groups) = self.groups {
            if !collider.flags.collision_groups.test(groups) {
                return false;
            }
        }
        if !self.flags.test(bodies, collider) {
            return false;
        }
        self.predicate.map(|f| f(handle, collider)).unwrap_or(true)
    }
}
impl From<QueryFilterFlags> for QueryFilter<'_> {
fn from(flags: QueryFilterFlags) -> Self {
Self {
flags,
..QueryFilter::default()
}
}
}
impl From<InteractionGroups> for QueryFilter<'_> {
fn from(groups: InteractionGroups) -> Self {
Self {
groups: Some(groups),
..QueryFilter::default()
}
}
}
impl<'a> QueryFilter<'a> {
    /// A query filter that doesn't exclude any collider.
    pub fn new() -> Self {
        Self::default()
    }

    /// Exclude from the query any collider attached to a fixed rigid-body and colliders with no rigid-body attached.
    pub fn exclude_fixed() -> Self {
        Self::from(QueryFilterFlags::EXCLUDE_FIXED)
    }

    /// Exclude from the query any collider attached to a kinematic rigid-body.
    pub fn exclude_kinematic() -> Self {
        Self::from(QueryFilterFlags::EXCLUDE_KINEMATIC)
    }

    /// Exclude from the query any collider attached to a dynamic rigid-body.
    pub fn exclude_dynamic() -> Self {
        Self::from(QueryFilterFlags::EXCLUDE_DYNAMIC)
    }

    /// Excludes all colliders not attached to a dynamic rigid-body.
    pub fn only_dynamic() -> Self {
        Self::from(QueryFilterFlags::ONLY_DYNAMIC)
    }

    /// Excludes all colliders not attached to a kinematic rigid-body.
    pub fn only_kinematic() -> Self {
        Self::from(QueryFilterFlags::ONLY_KINEMATIC)
    }

    /// Exclude all colliders attached to a non-fixed rigid-body
    /// (this will not exclude colliders not attached to any rigid-body).
    pub fn only_fixed() -> Self {
        Self::from(QueryFilterFlags::ONLY_FIXED)
    }

    /// Exclude from the query any collider that is a sensor.
    pub fn exclude_sensors(mut self) -> Self {
        self.flags.insert(QueryFilterFlags::EXCLUDE_SENSORS);
        self
    }

    /// Exclude from the query any collider that is not a sensor.
    pub fn exclude_solids(mut self) -> Self {
        self.flags.insert(QueryFilterFlags::EXCLUDE_SOLIDS);
        self
    }

    /// Only colliders with collision groups compatible with this one will
    /// be included in the scene query.
    pub fn groups(mut self, groups: InteractionGroups) -> Self {
        self.groups = Some(groups);
        self
    }

    /// Set the collider that will be excluded from the scene query.
    pub fn exclude_collider(mut self, collider: ColliderHandle) -> Self {
        self.exclude_collider = Some(collider);
        self
    }

    /// Set the rigid-body that will be excluded from the scene query.
    pub fn exclude_rigid_body(mut self, rigid_body: RigidBodyHandle) -> Self {
        self.exclude_rigid_body = Some(rigid_body);
        self
    }

    /// Set the predicate to apply a custom collider filtering during the scene query.
    pub fn predicate(mut self, predicate: &'a impl Fn(ColliderHandle, &Collider) -> bool) -> Self {
        self.predicate = Some(predicate);
        self
    }
}

View File

@@ -1,110 +0,0 @@
//! Structs implementing [`QbvhDataGenerator<ColliderHandle>`] to be used with [`QueryPipeline::update_with_generator`].
use parry::partitioning::QbvhDataGenerator;
use crate::math::Real;
use crate::prelude::{Aabb, ColliderHandle, ColliderSet, RigidBodySet};
#[cfg(doc)]
use crate::{
dynamics::{IntegrationParameters, RigidBody, RigidBodyPosition},
pipeline::QueryPipeline,
prelude::Collider,
};
/// Generates collider AABBs based on the union of their current AABB and the AABB predicted
/// from the velocity and forces of their parent rigid-body.
///
/// The main purpose of this struct is to be passed as a parameter to
/// [`QueryPipeline::update_with_generator`] to update the [`QueryPipeline`].
///
/// The predicted position is calculated as
/// `RigidBody::predict_position_using_velocity_and_forces * Collider::position_wrt_parent`.
/// Colliders without a parent rigid-body only contribute their current AABB.
pub struct SweptAabbWithPredictedPosition<'a> {
    /// The rigid bodies of your simulation.
    pub bodies: &'a RigidBodySet,
    /// The colliders of your simulation.
    pub colliders: &'a ColliderSet,
    /// The delta time to compute predicted position.
    ///
    /// You probably want to set it to [`IntegrationParameters::dt`].
    pub dt: Real,
}
impl QbvhDataGenerator<ColliderHandle> for SweptAabbWithPredictedPosition<'_> {
    fn size_hint(&self) -> usize {
        self.colliders.len()
    }

    #[inline(always)]
    fn for_each(&mut self, mut f: impl FnMut(ColliderHandle, Aabb)) {
        for (handle, collider) in self.colliders.iter_enabled() {
            let aabb = match collider.parent {
                Some(parent) => {
                    let rb = &self.bodies[parent.handle];
                    // Integrate forces and velocities to estimate where the parent body
                    // will be after `dt`, then sweep the collider's AABB to that pose.
                    let predicted = rb.pos.integrate_forces_and_velocities(
                        self.dt,
                        &rb.forces,
                        &rb.vels,
                        &rb.mprops,
                    );
                    collider
                        .shape
                        .compute_swept_aabb(&collider.pos, &(predicted * parent.pos_wrt_parent))
                }
                // No parent rigid-body: the collider doesn't move on its own.
                None => collider.shape.compute_aabb(&collider.pos),
            };
            f(handle, aabb)
        }
    }
}
/// Generates collider AABBs based on the union of their AABB at their current [`Collider::position`]
/// and the AABB predicted from their parent's [`RigidBody::next_position`].
///
/// The main purpose of this struct is to be passed as a parameter to
/// [`QueryPipeline::update_with_generator`] to update the [`QueryPipeline`].
///
/// The predicted position is calculated as
/// `RigidBody::next_position * Collider::position_wrt_parent`.
/// Colliders without a parent rigid-body only contribute their current AABB.
pub struct SweptAabbWithNextPosition<'a> {
    /// The rigid bodies of your simulation.
    pub bodies: &'a RigidBodySet,
    /// The colliders of your simulation.
    pub colliders: &'a ColliderSet,
}
impl QbvhDataGenerator<ColliderHandle> for SweptAabbWithNextPosition<'_> {
    fn size_hint(&self) -> usize {
        self.colliders.len()
    }

    #[inline(always)]
    fn for_each(&mut self, mut f: impl FnMut(ColliderHandle, Aabb)) {
        for (handle, collider) in self.colliders.iter_enabled() {
            let aabb = match collider.parent {
                Some(parent) => {
                    // Sweep between the current pose and the pose derived from the
                    // parent body's already-computed next position.
                    let next_pose =
                        self.bodies[parent.handle].pos.next_position * parent.pos_wrt_parent;
                    collider.shape.compute_swept_aabb(&collider.pos, &next_pose)
                }
                // No parent rigid-body: the collider doesn't move on its own.
                None => collider.shape.compute_aabb(&collider.pos),
            };
            f(handle, aabb)
        }
    }
}
/// Generates collider AABBs based on the AABB at their current [`Collider::position`].
///
/// No motion prediction is applied, making this the cheapest generator.
///
/// The main purpose of this struct is to be passed as a parameter to
/// [`QueryPipeline::update_with_generator`] to update the [`QueryPipeline`].
pub struct CurrentAabb<'a> {
    /// The colliders of your simulation.
    pub colliders: &'a ColliderSet,
}
impl QbvhDataGenerator<ColliderHandle> for CurrentAabb<'_> {
    fn size_hint(&self) -> usize {
        self.colliders.len()
    }

    #[inline(always)]
    fn for_each(&mut self, mut f: impl FnMut(ColliderHandle, Aabb)) {
        // Just report the AABB of every enabled collider at its current pose.
        self.colliders
            .iter_enabled()
            .for_each(|(handle, co)| f(handle, co.shape.compute_aabb(&co.pos)));
    }
}

View File

@@ -1,724 +0,0 @@
pub mod generators;
use crate::dynamics::RigidBodyHandle;
use crate::geometry::{
Aabb, Collider, ColliderHandle, InteractionGroups, PointProjection, Qbvh, Ray, RayIntersection,
};
use crate::math::{Isometry, Point, Real, Vector};
use crate::{dynamics::RigidBodySet, geometry::ColliderSet};
use parry::partitioning::{QbvhDataGenerator, QbvhUpdateWorkspace};
use parry::query::details::{
NonlinearTOICompositeShapeShapeBestFirstVisitor, NormalConstraints,
PointCompositeShapeProjBestFirstVisitor, PointCompositeShapeProjWithFeatureBestFirstVisitor,
RayCompositeShapeToiAndNormalBestFirstVisitor, RayCompositeShapeToiBestFirstVisitor,
ShapeCastOptions, TOICompositeShapeShapeBestFirstVisitor,
};
use parry::query::visitors::{
BoundingVolumeIntersectionsVisitor, PointIntersectionsVisitor, RayIntersectionsVisitor,
};
use parry::query::{DefaultQueryDispatcher, NonlinearRigidMotion, QueryDispatcher, ShapeCastHit};
use parry::shape::{FeatureId, Shape, TypedSimdCompositeShape};
use std::sync::Arc;
/// A pipeline for performing queries on all the colliders of a scene.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[derive(Clone)]
pub struct QueryPipeline {
    /// Dispatcher selecting the pairwise query algorithm for each shape pair;
    /// not serialized (restored to the default dispatcher on deserialization).
    #[cfg_attr(
        feature = "serde-serialize",
        serde(skip, default = "crate::geometry::default_query_dispatcher")
    )]
    query_dispatcher: Arc<dyn QueryDispatcher>,
    /// Acceleration structure indexing every collider by its AABB.
    qbvh: Qbvh<ColliderHandle>,
    /// AABB dilation applied when rebuilding the Qbvh (see `update_with_generator`).
    dilation_factor: Real,
    /// Scratch buffers reused by incremental refit/rebalance; not serialized.
    #[cfg_attr(feature = "serde-serialize", serde(skip))]
    workspace: QbvhUpdateWorkspace,
}
/// Adapter exposing the query pipeline as a single composite shape whose sub-shapes are
/// the colliders accepted by `filter` (see its `TypedSimdCompositeShape` impl below).
///
/// This lets parry's generic composite-shape visitors run directly against the
/// pipeline's Qbvh.
struct QueryPipelineAsCompositeShape<'a> {
    query_pipeline: &'a QueryPipeline,
    bodies: &'a RigidBodySet,
    colliders: &'a ColliderSet,
    filter: QueryFilter<'a>,
}
bitflags::bitflags! {
    #[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
    /// Flags for excluding whole sets of colliders from a scene query.
    ///
    /// The empty (default) flag set excludes nothing.
    pub struct QueryFilterFlags: u32 {
        /// Exclude from the query any collider attached to a fixed rigid-body and colliders with no rigid-body attached.
        const EXCLUDE_FIXED = 1 << 0;
        /// Exclude from the query any collider attached to a kinematic rigid-body.
        const EXCLUDE_KINEMATIC = 1 << 1;
        /// Exclude from the query any collider attached to a dynamic rigid-body.
        const EXCLUDE_DYNAMIC = 1 << 2;
        /// Exclude from the query any collider that is a sensor.
        const EXCLUDE_SENSORS = 1 << 3;
        /// Exclude from the query any collider that is not a sensor.
        const EXCLUDE_SOLIDS = 1 << 4;
        /// Excludes all colliders not attached to a dynamic rigid-body
        /// (union of [`Self::EXCLUDE_FIXED`] and [`Self::EXCLUDE_KINEMATIC`]).
        const ONLY_DYNAMIC = Self::EXCLUDE_FIXED.bits() | Self::EXCLUDE_KINEMATIC.bits();
        /// Excludes all colliders not attached to a kinematic rigid-body
        /// (union of [`Self::EXCLUDE_DYNAMIC`] and [`Self::EXCLUDE_FIXED`]).
        const ONLY_KINEMATIC = Self::EXCLUDE_DYNAMIC.bits() | Self::EXCLUDE_FIXED.bits();
        /// Exclude all colliders attached to a non-fixed rigid-body
        /// (this will not exclude colliders not attached to any rigid-body).
        const ONLY_FIXED = Self::EXCLUDE_DYNAMIC.bits() | Self::EXCLUDE_KINEMATIC.bits();
    }
}
impl QueryFilterFlags {
    /// Tests if the given collider should be taken into account by a scene query, based
    /// on the flags on `self`.
    #[inline]
    pub fn test(&self, bodies: &RigidBodySet, collider: &Collider) -> bool {
        if self.is_empty() {
            // No filtering requested: everything passes.
            return true;
        }

        // Sensor/solid exclusions depend only on the collider itself.
        let rejected_sensor =
            self.contains(QueryFilterFlags::EXCLUDE_SENSORS) && collider.is_sensor();
        let rejected_solid =
            self.contains(QueryFilterFlags::EXCLUDE_SOLIDS) && !collider.is_sensor();
        if rejected_sensor || rejected_solid {
            return false;
        }

        // A collider with no parent rigid-body is treated like a fixed one.
        if self.contains(QueryFilterFlags::EXCLUDE_FIXED) && collider.parent.is_none() {
            return false;
        }

        match collider.parent.and_then(|p| bodies.get(p.handle)) {
            Some(parent) => {
                let ty = parent.body_type();
                !((self.contains(QueryFilterFlags::EXCLUDE_FIXED) && ty.is_fixed())
                    || (self.contains(QueryFilterFlags::EXCLUDE_KINEMATIC) && ty.is_kinematic())
                    || (self.contains(QueryFilterFlags::EXCLUDE_DYNAMIC) && ty.is_dynamic()))
            }
            // Parent handle dangling or absent: no body-type-based rejection applies.
            None => true,
        }
    }
}
/// A filter that describes what collider should be included or excluded from a scene query.
///
/// The default filter excludes nothing.
#[derive(Copy, Clone, Default)]
pub struct QueryFilter<'a> {
    /// Flags indicating what particular type of colliders should be excluded from the scene query.
    pub flags: QueryFilterFlags,
    /// If set, only colliders with collision groups compatible with this one will
    /// be included in the scene query.
    pub groups: Option<InteractionGroups>,
    /// If set, this collider will be excluded from the scene query.
    pub exclude_collider: Option<ColliderHandle>,
    /// If set, any collider attached to this rigid-body will be excluded from the scene query.
    pub exclude_rigid_body: Option<RigidBodyHandle>,
    /// If set, any collider for which this closure returns false will be excluded from the scene query.
    #[allow(clippy::type_complexity)] // Type doesn't look really complex?
    pub predicate: Option<&'a dyn Fn(ColliderHandle, &Collider) -> bool>,
}
impl QueryFilter<'_> {
    /// Applies the filters described by `self` to a collider to determine if it has to be
    /// included in a scene query (`true`) or not (`false`).
    #[inline]
    pub fn test(&self, bodies: &RigidBodySet, handle: ColliderHandle, collider: &Collider) -> bool {
        if self.exclude_collider == Some(handle) {
            return false;
        }
        // NOTE: only compare parent handles when an excluded rigid-body is actually set;
        // otherwise a collider with no parent (`None`) would wrongly match
        // `exclude_rigid_body == None`.
        if self.exclude_rigid_body.is_some()
            && self.exclude_rigid_body == collider.parent.map(|p| p.handle)
        {
            return false;
        }
        if let Some(groups) = self.groups {
            if !collider.flags.collision_groups.test(groups) {
                return false;
            }
        }
        if !self.flags.test(bodies, collider) {
            return false;
        }
        self.predicate.map(|f| f(handle, collider)).unwrap_or(true)
    }
}
impl From<QueryFilterFlags> for QueryFilter<'_> {
fn from(flags: QueryFilterFlags) -> Self {
Self {
flags,
..QueryFilter::default()
}
}
}
impl From<InteractionGroups> for QueryFilter<'_> {
fn from(groups: InteractionGroups) -> Self {
Self {
groups: Some(groups),
..QueryFilter::default()
}
}
}
impl<'a> QueryFilter<'a> {
    /// A query filter that doesn't exclude any collider.
    pub fn new() -> Self {
        Self::default()
    }

    /// Exclude from the query any collider attached to a fixed rigid-body and colliders with no rigid-body attached.
    pub fn exclude_fixed() -> Self {
        Self::from(QueryFilterFlags::EXCLUDE_FIXED)
    }

    /// Exclude from the query any collider attached to a kinematic rigid-body.
    pub fn exclude_kinematic() -> Self {
        Self::from(QueryFilterFlags::EXCLUDE_KINEMATIC)
    }

    /// Exclude from the query any collider attached to a dynamic rigid-body.
    pub fn exclude_dynamic() -> Self {
        Self::from(QueryFilterFlags::EXCLUDE_DYNAMIC)
    }

    /// Excludes all colliders not attached to a dynamic rigid-body.
    pub fn only_dynamic() -> Self {
        Self::from(QueryFilterFlags::ONLY_DYNAMIC)
    }

    /// Excludes all colliders not attached to a kinematic rigid-body.
    pub fn only_kinematic() -> Self {
        Self::from(QueryFilterFlags::ONLY_KINEMATIC)
    }

    /// Exclude all colliders attached to a non-fixed rigid-body
    /// (this will not exclude colliders not attached to any rigid-body).
    pub fn only_fixed() -> Self {
        Self::from(QueryFilterFlags::ONLY_FIXED)
    }

    /// Exclude from the query any collider that is a sensor.
    pub fn exclude_sensors(mut self) -> Self {
        self.flags.insert(QueryFilterFlags::EXCLUDE_SENSORS);
        self
    }

    /// Exclude from the query any collider that is not a sensor.
    pub fn exclude_solids(mut self) -> Self {
        self.flags.insert(QueryFilterFlags::EXCLUDE_SOLIDS);
        self
    }

    /// Only colliders with collision groups compatible with this one will
    /// be included in the scene query.
    pub fn groups(mut self, groups: InteractionGroups) -> Self {
        self.groups = Some(groups);
        self
    }

    /// Set the collider that will be excluded from the scene query.
    pub fn exclude_collider(mut self, collider: ColliderHandle) -> Self {
        self.exclude_collider = Some(collider);
        self
    }

    /// Set the rigid-body that will be excluded from the scene query.
    pub fn exclude_rigid_body(mut self, rigid_body: RigidBodyHandle) -> Self {
        self.exclude_rigid_body = Some(rigid_body);
        self
    }

    /// Set the predicate to apply a custom collider filtering during the scene query.
    pub fn predicate(mut self, predicate: &'a impl Fn(ColliderHandle, &Collider) -> bool) -> Self {
        self.predicate = Some(predicate);
        self
    }
}
impl TypedSimdCompositeShape for QueryPipelineAsCompositeShape<'_> {
    type PartShape = dyn Shape;
    type PartNormalConstraints = dyn NormalConstraints;
    type PartId = ColliderHandle;

    fn map_typed_part_at(
        &self,
        shape_id: Self::PartId,
        mut f: impl FnMut(
            Option<&Isometry<Real>>,
            &Self::PartShape,
            Option<&Self::PartNormalConstraints>,
        ),
    ) {
        // Only expose colliders that still exist and pass the query filter.
        let co = match self.colliders.get(shape_id) {
            Some(co) if self.filter.test(self.bodies, shape_id, co) => co,
            _ => return,
        };
        f(Some(&co.pos), &*co.shape, None)
    }

    fn map_untyped_part_at(
        &self,
        shape_id: Self::PartId,
        f: impl FnMut(Option<&Isometry<Real>>, &Self::PartShape, Option<&dyn NormalConstraints>),
    ) {
        self.map_typed_part_at(shape_id, f);
    }

    fn typed_qbvh(&self) -> &Qbvh<ColliderHandle> {
        &self.query_pipeline.qbvh
    }
}
impl Default for QueryPipeline {
fn default() -> Self {
Self::new()
}
}
impl QueryPipeline {
/// Initializes an empty query pipeline.
///
/// Uses [`DefaultQueryDispatcher`]; see [`QueryPipeline::with_query_dispatcher`] to use a
/// dispatcher aware of user-defined shapes instead.
pub fn new() -> Self {
    Self::with_query_dispatcher(DefaultQueryDispatcher)
}
/// Wraps `self` into a composite-shape adapter so that parry's composite-shape
/// visitors can traverse the pipeline's colliders, applying `filter` on each leaf.
fn as_composite_shape<'a>(
    &'a self,
    bodies: &'a RigidBodySet,
    colliders: &'a ColliderSet,
    filter: QueryFilter<'a>,
) -> QueryPipelineAsCompositeShape<'a> {
    QueryPipelineAsCompositeShape {
        query_pipeline: self,
        bodies,
        colliders,
        filter,
    }
}
/// Initializes an empty query pipeline with a custom `QueryDispatcher`.
///
/// Use this constructor in order to use a custom `QueryDispatcher` that is
/// aware of your own user-defined shapes.
pub fn with_query_dispatcher<D>(d: D) -> Self
where
    D: 'static + QueryDispatcher,
{
    Self {
        qbvh: Qbvh::new(),
        query_dispatcher: Arc::new(d),
        // Default AABB dilation applied when (re)building the acceleration structure.
        dilation_factor: 0.01,
        workspace: QbvhUpdateWorkspace::default(),
    }
}
/// The query dispatcher used by this query pipeline for running scene queries.
pub fn query_dispatcher(&self) -> &dyn QueryDispatcher {
&*self.query_dispatcher
}
/// Update the query pipeline incrementally, avoiding a complete rebuild of its
/// internal data-structure.
///
/// # Parameters
/// * `colliders` - the current set of colliders.
/// * `modified_colliders` - handles of colliders added or modified since the last update.
/// * `removed_colliders` - handles of colliders removed since the last update.
/// * `refit_and_rebalance` - if `true`, the tree's AABBs are recomputed from the
///   colliders' current AABBs and the tree is rebalanced afterwards.
#[profiling::function]
pub fn update_incremental(
    &mut self,
    colliders: &ColliderSet,
    modified_colliders: &[ColliderHandle],
    removed_colliders: &[ColliderHandle],
    refit_and_rebalance: bool,
) {
    // We remove first. This is needed to avoid the ABA problem: if a collider was removed
    // and another added right after with the same handle index, we can remove first, and
    // then update the new one (but only if it actually exists, to address the case where
    // a collider was added/modified and then removed during the same frame).
    for removed in removed_colliders {
        self.qbvh.remove(*removed);
    }
    for modified in modified_colliders {
        // Check that the collider still exists as it may have been removed.
        if colliders.contains(*modified) {
            self.qbvh.pre_update_or_insert(*modified);
        }
    }
    if refit_and_rebalance {
        // Margin 0.0: refit leaves tightly; dilation is only applied on full rebuilds.
        let _ = self.qbvh.refit(0.0, &mut self.workspace, |handle| {
            colliders[*handle].compute_aabb()
        });
        self.qbvh.rebalance(0.0, &mut self.workspace);
    }
}
/// Update the acceleration structure on the query pipeline.
///
/// Uses [`generators::CurrentAabb`] to update, i.e. the internal Qbvh is rebuilt from
/// each enabled collider's AABB at its current position.
pub fn update(&mut self, colliders: &ColliderSet) {
    self.update_with_generator(generators::CurrentAabb { colliders })
}
/// Update the acceleration structure on the query pipeline using a custom collider bounding
/// volume generator.
///
/// See [`generators`] for available generators.
///
/// This clears and rebuilds the internal Qbvh from scratch, dilating each generated AABB
/// by the pipeline's dilation factor.
#[profiling::function]
pub fn update_with_generator(&mut self, mode: impl QbvhDataGenerator<ColliderHandle>) {
    self.qbvh.clear_and_rebuild(mode, self.dilation_factor);
}
/// Find the closest intersection between a ray and a set of colliders.
///
/// Returns the handle of the hit collider and the time-of-impact along the ray.
///
/// # Parameters
/// * `colliders` - The set of colliders taking part in this pipeline.
/// * `ray`: the ray to cast.
/// * `max_toi`: the maximum time-of-impact that can be reported by this cast. This effectively
///   limits the length of the ray to `ray.dir.norm() * max_toi`. Use `Real::MAX` for an unbounded ray.
/// * `solid`: if this is `true` an impact at time 0.0 (i.e. at the ray origin) is returned if
///   it starts inside of a shape. If this is `false` then the ray will hit the shape's boundary
///   even if it starts inside of it.
/// * `filter`: set of rules used to determine which collider is taken into account by this scene query.
#[profiling::function]
pub fn cast_ray(
    &self,
    bodies: &RigidBodySet,
    colliders: &ColliderSet,
    ray: &Ray,
    max_toi: Real,
    solid: bool,
    filter: QueryFilter,
) -> Option<(ColliderHandle, Real)> {
    let composite = self.as_composite_shape(bodies, colliders, filter);
    let mut visitor =
        RayCompositeShapeToiBestFirstVisitor::new(&composite, ray, max_toi, solid);
    // The best-first traversal yields (cost, (handle, toi)); keep only the payload.
    self.qbvh
        .traverse_best_first(&mut visitor)
        .map(|(_, hit)| hit)
}
/// Find the closest intersection between a ray and a set of colliders.
///
/// Unlike [`Self::cast_ray`], the returned [`RayIntersection`] also contains the hit normal.
///
/// # Parameters
/// * `colliders` - The set of colliders taking part in this pipeline.
/// * `ray`: the ray to cast.
/// * `max_toi`: the maximum time-of-impact that can be reported by this cast. This effectively
///   limits the length of the ray to `ray.dir.norm() * max_toi`. Use `Real::MAX` for an unbounded ray.
/// * `solid`: if this is `true` an impact at time 0.0 (i.e. at the ray origin) is returned if
///   it starts inside of a shape. If this is `false` then the ray will hit the shape's boundary
///   even if it starts inside of it.
/// * `filter`: set of rules used to determine which collider is taken into account by this scene query.
#[profiling::function]
pub fn cast_ray_and_get_normal(
    &self,
    bodies: &RigidBodySet,
    colliders: &ColliderSet,
    ray: &Ray,
    max_toi: Real,
    solid: bool,
    filter: QueryFilter,
) -> Option<(ColliderHandle, RayIntersection)> {
    let composite = self.as_composite_shape(bodies, colliders, filter);
    let mut visitor =
        RayCompositeShapeToiAndNormalBestFirstVisitor::new(&composite, ray, max_toi, solid);
    // The best-first traversal yields (cost, (handle, intersection)); keep the payload.
    self.qbvh
        .traverse_best_first(&mut visitor)
        .map(|(_, hit)| hit)
}
/// Find all the intersections between a ray and a set of colliders and pass them to a callback.
///
/// # Parameters
/// * `colliders` - The set of colliders taking part in this pipeline.
/// * `ray`: the ray to cast.
/// * `max_toi`: the maximum time-of-impact that can be reported by this cast. This effectively
///   limits the length of the ray to `ray.dir.norm() * max_toi`. Use `Real::MAX` for an unbounded ray.
/// * `solid`: if this is `true` an impact at time 0.0 (i.e. at the ray origin) is returned if
///   it starts inside of a shape. If this is `false` then the ray will hit the shape's boundary
///   even if it starts inside of it.
/// * `filter`: set of rules used to determine which collider is taken into account by this scene query.
/// * `callback`: function executed on each collider for which a ray intersection has been found.
///   There are no guarantees on the order the results will be yielded. If this callback returns
///   `false`, this method will exit early, ignoring any further raycast.
#[profiling::function]
pub fn intersections_with_ray<'a>(
    &self,
    bodies: &'a RigidBodySet,
    colliders: &'a ColliderSet,
    ray: &Ray,
    max_toi: Real,
    solid: bool,
    filter: QueryFilter,
    mut callback: impl FnMut(ColliderHandle, RayIntersection) -> bool,
) {
    // Returning `true` keeps the traversal going; on an accepted hit the decision is
    // delegated to the user-provided `callback`.
    let mut leaf_callback = &mut |handle: &ColliderHandle| match colliders.get(*handle) {
        Some(co) if filter.test(bodies, *handle, co) => co
            .shape
            .cast_ray_and_get_normal(&co.pos, ray, max_toi, solid)
            .map_or(true, |hit| callback(*handle, hit)),
        _ => true,
    };

    let mut visitor = RayIntersectionsVisitor::new(ray, max_toi, &mut leaf_callback);
    self.qbvh.traverse_depth_first(&mut visitor);
}
/// Gets the handle of up to one collider intersecting the given shape.
///
/// Returns the handle of one (arbitrary) intersecting collider, if any.
///
/// # Parameters
/// * `colliders` - The set of colliders taking part in this pipeline.
/// * `shape_pos` - The position of the shape used for the intersection test.
/// * `shape` - The shape used for the intersection test.
/// * `filter`: set of rules used to determine which collider is taken into account by this scene query.
#[profiling::function]
pub fn intersection_with_shape(
    &self,
    bodies: &RigidBodySet,
    colliders: &ColliderSet,
    shape_pos: &Isometry<Real>,
    shape: &dyn Shape,
    filter: QueryFilter,
) -> Option<ColliderHandle> {
    let pipeline_shape = self.as_composite_shape(bodies, colliders, filter);
    // TODO: replace this with IntersectionCompositeShapeShapeVisitor when it
    // can return the shape part id.
    let mut visitor = parry::query::details::IntersectionCompositeShapeShapeVisitor::new(
        &*self.query_dispatcher,
        shape_pos,
        &pipeline_shape,
        shape,
    );
    self.qbvh.traverse_depth_first(&mut visitor);
    visitor.found_intersection
}
/// Find the projection of a point on the closest collider.
///
/// # Parameters
/// * `colliders` - The set of colliders taking part in this pipeline.
/// * `point` - The point to project.
/// * `solid` - If this is set to `true` then the collider shapes are considered to
///   be plain (if the point is located inside of a plain shape, its projection is the point
///   itself). If it is set to `false` the collider shapes are considered to be hollow
///   (if the point is located inside of an hollow shape, it is projected on the shape's
///   boundary).
/// * `filter`: set of rules used to determine which collider is taken into account by this scene query.
#[profiling::function]
pub fn project_point(
    &self,
    bodies: &RigidBodySet,
    colliders: &ColliderSet,
    point: &Point<Real>,
    solid: bool,
    filter: QueryFilter,
) -> Option<(ColliderHandle, PointProjection)> {
    let composite = self.as_composite_shape(bodies, colliders, filter);
    let mut visitor = PointCompositeShapeProjBestFirstVisitor::new(&composite, point, solid);
    // The visitor yields (cost, (projection, handle)); reorder to (handle, projection).
    self.qbvh
        .traverse_best_first(&mut visitor)
        .map(|(_, (proj, handle))| (handle, proj))
}
/// Find all the colliders containing the given point.
///
/// # Parameters
/// * `colliders` - The set of colliders taking part in this pipeline.
/// * `point` - The point used for the containment test.
/// * `filter`: set of rules used to determine which collider is taken into account by this scene query.
/// * `callback` - A function called with each collider with a shape containing the `point`;
///   returning `false` from it stops the traversal early.
#[profiling::function]
pub fn intersections_with_point(
    &self,
    bodies: &RigidBodySet,
    colliders: &ColliderSet,
    point: &Point<Real>,
    filter: QueryFilter,
    mut callback: impl FnMut(ColliderHandle) -> bool,
) {
    // Returning `true` keeps the traversal going; a containing collider delegates
    // that decision to the user-provided `callback`.
    let mut leaf_callback = &mut |handle: &ColliderHandle| match colliders.get(*handle) {
        Some(co) if filter.test(bodies, *handle, co) && co.shape.contains_point(&co.pos, point) => {
            callback(*handle)
        }
        _ => true,
    };

    let mut visitor = PointIntersectionsVisitor::new(point, &mut leaf_callback);
    self.qbvh.traverse_depth_first(&mut visitor);
}
/// Find the projection of a point on the closest collider.
///
/// The results include the ID of the feature hit by the point.
///
/// Note that the projection is always performed in "hollow" mode (shapes are treated
/// as boundaries, never as plain volumes).
///
/// # Parameters
/// * `colliders` - The set of colliders taking part in this pipeline.
/// * `point` - The point to project.
/// * `filter`: set of rules used to determine which collider is taken into account by this scene query.
#[profiling::function]
pub fn project_point_and_get_feature(
    &self,
    bodies: &RigidBodySet,
    colliders: &ColliderSet,
    point: &Point<Real>,
    filter: QueryFilter,
) -> Option<(ColliderHandle, PointProjection, FeatureId)> {
    let composite = self.as_composite_shape(bodies, colliders, filter);
    let mut visitor =
        PointCompositeShapeProjWithFeatureBestFirstVisitor::new(&composite, point, false);
    // The visitor yields (cost, (projection, (handle, feature))); flatten and reorder.
    self.qbvh
        .traverse_best_first(&mut visitor)
        .map(|(_, (proj, (handle, feature)))| (handle, proj, feature))
}
/// Finds all handles of all the colliders with an [`Aabb`] intersecting the given [`Aabb`].
///
/// # Parameters
/// * `aabb` - The axis-aligned bounding box to test against the colliders' AABBs.
/// * `callback` - Called with the handle of each matching collider; returning `false`
///   stops the traversal early (same convention as [`Self::intersections_with_ray`]).
#[profiling::function]
pub fn colliders_with_aabb_intersecting_aabb(
    &self,
    aabb: &Aabb,
    mut callback: impl FnMut(&ColliderHandle) -> bool,
) {
    let mut visitor = BoundingVolumeIntersectionsVisitor::new(aabb, &mut callback);
    self.qbvh.traverse_depth_first(&mut visitor);
}
/// Casts a shape at a constant linear velocity and retrieve the first collider it hits.
///
/// This is similar to ray-casting except that we are casting a whole shape instead of just a
/// point (the ray origin). In the resulting `TOI`, witness and normal 1 refer to the world
/// collider, and are in world space.
///
/// # Parameters
/// * `colliders` - The set of colliders taking part in this pipeline.
/// * `shape_pos` - The initial position of the shape to cast.
/// * `shape_vel` - The constant velocity of the shape to cast (i.e. the cast direction).
/// * `shape` - The shape to cast.
/// * `options` - Options controlling the shape-cast, including the maximum
///   time-of-impact and the handling of an initially-penetrating configuration
///   (see [`ShapeCastOptions`]).
/// * `filter`: set of rules used to determine which collider is taken into account by this scene query.
#[profiling::function]
pub fn cast_shape(
    &self,
    bodies: &RigidBodySet,
    colliders: &ColliderSet,
    shape_pos: &Isometry<Real>,
    shape_vel: &Vector<Real>,
    shape: &dyn Shape,
    options: ShapeCastOptions,
    filter: QueryFilter,
) -> Option<(ColliderHandle, ShapeCastHit)> {
    let pipeline_shape = self.as_composite_shape(bodies, colliders, filter);
    let mut visitor = TOICompositeShapeShapeBestFirstVisitor::new(
        &*self.query_dispatcher,
        shape_pos,
        shape_vel,
        &pipeline_shape,
        shape,
        options,
    );
    self.qbvh.traverse_best_first(&mut visitor).map(|h| h.1)
}
/// Casts a shape with an arbitrary continuous motion and retrieve the first collider it hits.
///
/// In the resulting `TOI`, witness and normal 1 refer to the world collider, and are in world
/// space.
///
/// # Parameters
/// * `colliders` - The set of colliders taking part in this pipeline.
/// * `shape_motion` - The motion of the shape.
/// * `shape` - The shape to cast.
/// * `start_time` - The starting time of the interval where the motion takes place.
/// * `end_time` - The end time of the interval where the motion takes place.
/// * `stop_at_penetration` - If the casted shape starts in a penetration state with any
///   collider, two results are possible. If `stop_at_penetration` is `true` then, the
///   result will have a `toi` equal to `start_time`. If `stop_at_penetration` is `false`
///   then the nonlinear shape-casting will see if further motion with respect to the penetration normal
///   would result in tunnelling. If it does not (i.e. we have a separating velocity along
///   that normal) then the nonlinear shape-casting will attempt to find another impact,
///   at a time `> start_time` that could result in tunnelling.
/// * `filter`: set of rules used to determine which collider is taken into account by this scene query.
#[profiling::function]
pub fn nonlinear_cast_shape(
    &self,
    bodies: &RigidBodySet,
    colliders: &ColliderSet,
    shape_motion: &NonlinearRigidMotion,
    shape: &dyn Shape,
    start_time: Real,
    end_time: Real,
    stop_at_penetration: bool,
    filter: QueryFilter,
) -> Option<(ColliderHandle, ShapeCastHit)> {
    let pipeline_shape = self.as_composite_shape(bodies, colliders, filter);
    // The scene's colliders are static during the query: identity motion.
    let pipeline_motion = NonlinearRigidMotion::identity();
    let mut visitor = NonlinearTOICompositeShapeShapeBestFirstVisitor::new(
        &*self.query_dispatcher,
        &pipeline_motion,
        &pipeline_shape,
        shape_motion,
        shape,
        start_time,
        end_time,
        stop_at_penetration,
    );
    self.qbvh.traverse_best_first(&mut visitor).map(|h| h.1)
}
/// Retrieve all the colliders intersecting the given shape.
///
/// # Parameters
/// * `colliders` - The set of colliders taking part in this pipeline.
/// * `shapePos` - The position of the shape to test.
/// * `shapeRot` - The orientation of the shape to test.
/// * `shape` - The shape to test.
/// * `filter`: set of rules used to determine which collider is taken into account by this scene query.
/// * `callback` - A function called with the handles of each collider intersecting the `shape`.
#[profiling::function]
pub fn intersections_with_shape(
&self,
bodies: &RigidBodySet,
colliders: &ColliderSet,
shape_pos: &Isometry<Real>,
shape: &dyn Shape,
filter: QueryFilter,
mut callback: impl FnMut(ColliderHandle) -> bool,
) {
let dispatcher = &*self.query_dispatcher;
let inv_shape_pos = shape_pos.inverse();
let mut leaf_callback = &mut |handle: &ColliderHandle| {
if let Some(co) = colliders.get(*handle) {
if filter.test(bodies, *handle, co) {
let pos12 = inv_shape_pos * co.pos.as_ref();
if dispatcher.intersection_test(&pos12, shape, &*co.shape) == Ok(true) {
return callback(*handle);
}
}
}
true
};
let shape_aabb = shape.compute_aabb(shape_pos);
let mut visitor = BoundingVolumeIntersectionsVisitor::new(&shape_aabb, &mut leaf_callback);
self.qbvh.traverse_depth_first(&mut visitor);
}
}

View File

@@ -513,9 +513,9 @@ impl FlushToZeroDenormalsAreZeroFlags {
pub fn flush_denormal_to_zero() -> Self {
unsafe {
#[cfg(target_arch = "x86")]
use std::arch::x86::{_mm_getcsr, _mm_setcsr, _MM_FLUSH_ZERO_ON};
use std::arch::x86::{_MM_FLUSH_ZERO_ON, _mm_getcsr, _mm_setcsr};
#[cfg(target_arch = "x86_64")]
use std::arch::x86_64::{_mm_getcsr, _mm_setcsr, _MM_FLUSH_ZERO_ON};
use std::arch::x86_64::{_MM_FLUSH_ZERO_ON, _mm_getcsr, _mm_setcsr};
// Flush denormals & underflows to zero as this as a significant impact on the solver's performances.
// To enable this we need to set the bit 15 (given by _MM_FLUSH_ZERO_ON) and the bit 6 (for denormals-are-zero).
@@ -596,11 +596,7 @@ impl Drop for DisableFloatingPointExceptionsFlags {
}
pub(crate) fn select_other<T: PartialEq>(pair: (T, T), elt: T) -> T {
if pair.0 == elt {
pair.1
} else {
pair.0
}
if pair.0 == elt { pair.1 } else { pair.0 }
}
/// Methods for simultaneously indexing a container with two distinct indices.