# Source code for elastica._elastica_numpy._joint

__doc__ = """ Joint between rods module of Elastica Numpy implementation """

import numpy as np
from elastica.utils import Tolerance, MaxDimension
from elastica.joint import FreeJoint

class ExternalContact(FreeJoint):
    """Contact interaction between a rod and a rigid body.

    Assumes that the second entity is a rigid body for now, can be
    changed at a later time.

    Most of the cylinder-cylinder contact SHOULD be implemented as
    given in this paper:
    http://larochelle.sdsmt.edu/publications/2005-2009/Collision%20Detection%20of%20Cylindrical%20Rigid%20Bodies%20Using%20Line%20Geometry.pdf

    but, it isn't (the elastica-cpp kernels are implemented)!
    This is maybe to speed-up the kernel, but it's potentially
    dangerous as it does not deal with "end" conditions correctly.
    """
[docs] def __init__(self, k, nu): super().__init__(k, nu) # 0 is min, 1 is max self.aabb_rod = np.empty((MaxDimension.value(), 2)) self.aabb_cylinder = np.empty((MaxDimension.value(), 2))
    # Should be a free function, can be jitted
    def __aabbs_not_intersecting(self, aabb_one, aabb_two):
        """Return True if the two AABBs do NOT intersect, False otherwise.

        Each AABB is indexed as ``aabb[..., 0]`` (minimum corner) and
        ``aabb[..., 1]`` (maximum corner), per-axis.
        """
        # FIXME : Not memory friendly, Not early exit
        first_max_second_min = aabb_one[..., 1] < aabb_two[..., 0]
        first_min_second_max = aabb_two[..., 1] < aabb_one[..., 0]
        # Returns true if aabbs not intersecting, else if it intersects returns false
        return np.any(np.logical_or(first_max_second_min, first_min_second_max))

    def __find_min_dist(self, x1, e1, x2, e2):
        """Minimum-distance vectors between rod segments and one segment.

        The first family of segments is parametrized as ``x1 + e1 * t`` and
        the single second segment as ``x2 + e2 * s``, with both parameters
        clipped to [0, 1]. Returns the difference vector
        ``(x2 + e2 * s) - (x1 + e1 * t)`` evaluated at the closest points.

        Assumes x2, e2 is one element for now.
        Will definitely get speedup from numba.
        """

        def _batch_inner(first, second):
            # Element-wise dot product over a batch of 3-vectors: (3, n) x (3, n) -> (n,)
            return np.einsum("ij,ij->j", first, second)

        def _batch_inner_spec(first, second):
            # Dot of each batched 3-vector with one fixed 3-vector: (3, n) x (3,) -> (n,)
            return np.einsum("ij,i->j", first, second)

        # Compute distances for all cases
        def _difference_impl_for_multiple_st(ax1, ae1, at1, ax2, ae2, as2):
            # Difference vectors for 4 candidate (t, s) pairs per segment at once.
            return (
                ax2.reshape(3, 1, -1)
                + np.einsum("i,jk->ijk", ae2, as2)
                - ax1.reshape(3, 1, -1)
                - np.einsum("ik,jk->ijk", ae1, at1)
            )

        def _difference_impl(ax1, ae1, at1, ax2, ae2, as2):
            # Difference vectors (x2 + e2*s) - (x1 + e1*t) for one (t, s) per segment.
            return (
                ax2.reshape(3, -1)
                + np.einsum("i,j->ij", ae2, as2)
                - ax1.reshape(3, -1)
                - np.einsum("ik,k->ik", ae1, at1)
            )

        # Precompute all pairwise inner products used by the closed-form solution.
        e1e1 = _batch_inner(e1, e1)
        e1e2 = _batch_inner_spec(e1, e2)
        e2e2 = np.inner(e2, e2)

        x1e1 = _batch_inner(x1, e1)
        x1e2 = _batch_inner_spec(x1, e2)
        x2e1 = _batch_inner_spec(e1, x2)
        x2e2 = np.inner(x2, e2)

        # Parametric representation of line
        # First line is x_1 + e_1 * t
        # Second line is x_2 + e_2 * s
        s = np.empty_like(e1e1)
        t = np.empty_like(e1e1)

        # Near-zero when e1 and e2 are (anti-)parallel, by Cauchy-Schwarz.
        parallel_condition = np.abs(1.0 - e1e2 ** 2 / (e1e1 * e2e2))
        parallel_idx = parallel_condition < 1e-5
        not_parallel_idx = np.bitwise_not(parallel_idx)
        anything_not_parallel = np.any(not_parallel_idx)

        if np.any(parallel_idx):
            # Some are parallel, so do processing
            t[parallel_idx] = (x2e1[parallel_idx] - x1e1[parallel_idx]) / e1e1[
                parallel_idx
            ]  # Comes from taking dot of e1 with a normal
            t[parallel_idx] = np.clip(
                t[parallel_idx], 0.0, 1.0
            )  # Note : the out version doesn't work
            s[parallel_idx] = (
                x1e2[parallel_idx] + t[parallel_idx] * e1e2[parallel_idx] - x2e2
            ) / e2e2  # Same as before
            s[parallel_idx] = np.clip(s[parallel_idx], 0.0, 1.0)

        if anything_not_parallel:
            # Using the Cauchy-Binet formula on eq(7) in docstring reference
            s[not_parallel_idx] = (
                e1e1[not_parallel_idx] * (x1e2[not_parallel_idx] - x2e2)
                + e1e2[not_parallel_idx]
                * (x2e1[not_parallel_idx] - x1e1[not_parallel_idx])
            ) / (e1e1[not_parallel_idx] * e2e2 - (e1e2[not_parallel_idx]) ** 2)
            t[not_parallel_idx] = (
                e1e2[not_parallel_idx] * s[not_parallel_idx]
                + x2e1[not_parallel_idx]
                - x1e1[not_parallel_idx]
            ) / e1e1[not_parallel_idx]

        if anything_not_parallel:
            # Done here rather than the other loop to avoid
            # creating copies by selection of selections such as s[not_parallel_idx][idx]
            # which creates copies (and so assignments would not write back)
            # Remnants for non-parallel indices
            # as parallel selections are always clipped
            idx1 = s < 0.0
            idx2 = s > 1.0
            idx3 = t < 0.0
            idx4 = t > 1.0
            idx = idx1 | idx2 | idx3 | idx4

            if np.any(idx):
                # Unconstrained minimum fell outside [0,1]^2 for these segments;
                # evaluate the four clamped boundary candidates and keep the closest.
                local_e1e1 = e1e1[idx]
                local_e1e2 = e1e2[idx]
                local_x1e1 = x1e1[idx]
                local_x1e2 = x1e2[idx]
                local_x2e1 = x2e1[idx]

                potential_t = np.empty((4, local_e1e1.shape[0]))
                potential_s = np.empty_like(potential_t)
                potential_dist = np.empty_like(potential_t)

                # Fill in the possibilities
                potential_t[0, ...] = (local_x2e1 - local_x1e1) / local_e1e1
                potential_s[0, ...] = 0.0

                potential_t[1, ...] = (
                    local_x2e1 + local_e1e2 - local_x1e1
                ) / local_e1e1
                potential_s[1, ...] = 1.0

                potential_t[2, ...] = 0.0
                potential_s[2, ...] = (local_x1e2 - x2e2) / e2e2

                potential_t[3, ...] = 1.0
                potential_s[3, ...] = (local_x1e2 + local_e1e2 - x2e2) / e2e2

                np.clip(potential_t, 0.0, 1.0, out=potential_t)
                np.clip(potential_s, 0.0, 1.0, out=potential_s)

                potential_difference = _difference_impl_for_multiple_st(
                    x1[..., idx], e1[..., idx], potential_t, x2, e2, potential_s
                )
                np.sqrt(
                    np.einsum(
                        "ijk,ijk->jk", potential_difference, potential_difference
                    ),
                    out=potential_dist,
                )

                min_idx = np.expand_dims(np.argmin(potential_dist, axis=0), axis=0)
                s[idx] = np.take_along_axis(potential_s, min_idx, axis=0)[
                    0
                ]  # [0] at the end reduces the dimension, you can also squeeze
                t[idx] = np.take_along_axis(potential_t, min_idx, axis=0)[
                    0
                ]  # [0] at the end reduces the dimension, you can also squeeze

        return _difference_impl(x1, e1, t, x2, e2, s)