From 4591175c85e6c458200e3891a73c7e2edebde06b Mon Sep 17 00:00:00 2001 From: Egor Larionov Date: Thu, 23 Jul 2020 17:30:19 -0700 Subject: [PATCH 01/19] Add a filtered tetmesh surface extractor Extract the surface of the tetmesh according to a given filter function. This allows users to skip faces they don't care about. --- src/mesh/tetmesh/surface.rs | 107 ++++++++++++++++++++++++++---------- 1 file changed, 77 insertions(+), 30 deletions(-) diff --git a/src/mesh/tetmesh/surface.rs b/src/mesh/tetmesh/surface.rs index bf19660..9ad1779 100644 --- a/src/mesh/tetmesh/surface.rs +++ b/src/mesh/tetmesh/surface.rs @@ -39,14 +39,18 @@ impl SortedTri { } } +/// A triangle face of a tetrahedron within a `TetMesh`. #[derive(Copy, Clone, Eq)] -struct TriFace { +pub struct TetFace { + /// Vertex indices in the source tetmesh forming this face. pub tri: [usize; 3], - pub tet_index: usize, // Index of the original tet. - pub face_index: usize, // Index of the face within the tet betweeen 0 and 4 + /// Index of the corresponding tet within the source tetmesh. + pub tet_index: usize, + /// Index of the face within the tet betweeen 0 and 4. + pub face_index: usize, } -impl TriFace { +impl TetFace { const PERMUTATIONS: [[usize; 3]; 6] = [ [0, 1, 2], [1, 2, 0], @@ -64,8 +68,8 @@ fn tri_at(slice: &[T], tri: &[usize; 3]) -> [T; 3] { } /// Consider any permutation of the triangle to be equivalent to the original. -impl PartialEq for TriFace { - fn eq(&self, other: &TriFace) -> bool { +impl PartialEq for TetFace { + fn eq(&self, other: &TetFace) -> bool { for p in Self::PERMUTATIONS.iter() { if tri_at(&other.tri, p) == self.tri { return true; @@ -76,15 +80,15 @@ impl PartialEq for TriFace { } } -impl PartialOrd for TriFace { - fn partial_cmp(&self, other: &TriFace) -> Option { +impl PartialOrd for TetFace { + fn partial_cmp(&self, other: &TetFace) -> Option { Some(self.cmp(other)) } } /// Lexicographic ordering of the sorted indices. -impl Ord for TriFace { - fn cmp(&self, other: &TriFace) -> std::cmp::Ordering { +impl Ord for TetFace { + fn cmp(&self, other: &TetFace) -> std::cmp::Ordering { let mut tri = self.tri; tri.sort(); let mut other_tri = other.tri; @@ -94,17 +98,20 @@ impl Ord for TriFace { } impl TetMesh { - /// A helper function to compute surface topology of this `TetMesh`. The algorithm is to - /// iterate over every tet face and upon seeing a duplicate, remove it from the list. this will - /// leave only unique faces, which correspond to the surface of the `TetMesh`. + /// A helper function to compute surface topology of this `TetMesh`. + /// + /// The algorithm is to iterate over every tet face and upon seeing a duplicate, remove it from + /// the list. this will leave only unique faces, which correspond to the surface of the + /// `TetMesh`. + /// /// This function assumes that the given tetmesh is a manifold. - fn surface_triangle_set(&self) -> HashMap { - let mut triangles: HashMap = + fn surface_triangle_set(&self) -> HashMap { + let mut triangles: HashMap = HashMap::with_capacity_and_hasher(self.num_cells() * 4, Default::default()); let add_tet_faces = |(i, cell): (usize, &[usize; 4])| { for (face_idx, tet_face) in Self::TET_FACES.iter().enumerate() { - let face = TriFace { + let face = TetFace { tri: tri_at(cell, tet_face), tet_index: i, face_index: face_idx, @@ -135,20 +142,27 @@ impl TetMesh { surface_topo } - /// Extract the surface triangle information of the `TetMesh`. 
This includes the triangle - /// topology, which tet each triangle came from and which face on the originating tet it - /// belongs to. The returned vectors have the same size. This function assumes that - /// the given tetmesh is a manifold. - pub fn surface_triangle_data(&self) -> (Vec<[usize; 3]>, Vec, Vec) { + /// Extract the surface triangle information of the `TetMesh`. + /// + /// Only record those faces that are accepted by `filter`. + /// + /// This includes the triangle topology, which tet each triangle came from and which face on + /// the originating tet it belongs to. The returned vectors have the same size. + /// + /// This function assumes that the given tetmesh is a manifold. + pub fn surface_triangle_data(&self, filter: F) -> (Vec<[usize; 3]>, Vec, Vec) + where + F: FnMut(&TetFace) -> bool, + { let triangles = self.surface_triangle_set(); let mut surface_topo = Vec::with_capacity(triangles.len()); let mut tet_indices = Vec::with_capacity(triangles.len()); let mut tet_face_indices = Vec::with_capacity(triangles.len()); - for (_, elem) in triangles.into_iter() { - surface_topo.push(elem.tri); - tet_indices.push(elem.tet_index); - tet_face_indices.push(elem.face_index); + for face in triangles.into_iter().map(|(_, face)| face).filter(filter) { + surface_topo.push(face.tri); + tet_indices.push(face.tet_index); + tet_face_indices.push(face.face_index); } (surface_topo, tet_indices, tet_face_indices) @@ -164,10 +178,11 @@ impl TetMesh { /// Convert into a mesh of triangles representing the surface of this `TetMesh`. pub fn surface_trimesh(&self) -> TriMesh { - self.surface_trimesh_with_mapping(None, None, None, None) + self.surface_trimesh_with_mapping_and_filter(None, None, None, None, |_| true) } /// Convert into a mesh of triangles representing the surface of this `TetMesh`. + /// /// Additionally this function adds attributes that map the new triangle mesh to the original /// tetmesh. /// @@ -179,18 +194,45 @@ impl TetMesh { original_tet_index_name: Option<&str>, original_tet_vertex_index_name: Option<&str>, original_tet_face_index_name: Option<&str>, + ) -> TriMesh { + self.surface_trimesh_with_mapping_and_filter( + original_vertex_index_name, + original_tet_index_name, + original_tet_vertex_index_name, + original_tet_face_index_name, + |_| true, + ) + } + + /// Convert into a mesh of triangles representing the surface of this `TetMesh`. + /// + /// Filter out surface triangles using the `filter` closure which takes a references to a + /// `TetFace` representing the triangular face of a tetrahedron from this `TetMesh`. + /// + /// Additionally this function adds attributes that map the new triangle mesh to the original + /// tetmesh. + /// + /// Note that if the given attribute name coincides with an existing vertex attribute, that + /// attribute will be replaced with the original tetmesh vertex attribute. + pub fn surface_trimesh_with_mapping_and_filter( + &self, + original_vertex_index_name: Option<&str>, + original_tet_index_name: Option<&str>, + original_tet_vertex_index_name: Option<&str>, + original_tet_face_index_name: Option<&str>, + filter: impl FnMut(&TetFace) -> bool, ) -> TriMesh { // Get the surface topology of this tetmesh. - let (mut topo, tet_indices, tet_face_indices) = self.surface_triangle_data(); + let (mut topo, tet_indices, tet_face_indices) = self.surface_triangle_data(filter); // Record which vertices we have already handled. let mut seen = vec![-1isize; self.num_vertices()]; // Record the mapping back to tet vertices. 
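A usage sketch for the filter-based extraction introduced in this commit. The method name and the `TetFace::tet_index` field come from the patch above; the `gut::mesh` paths and the `keep` flag slice are assumptions for illustration only.

```rust
// Sketch: keep only surface triangles whose originating tet is flagged.
use gut::mesh::{TetMesh, TriMesh};

fn surface_of_flagged_tets(mesh: &TetMesh<f64>, keep: &[bool]) -> TriMesh<f64> {
    mesh.surface_trimesh_with_mapping_and_filter(None, None, None, None, |face| {
        // `face.tet_index` is the tet this candidate surface triangle came from.
        keep[face.tet_index]
    })
}
```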
- let mut original_vertex_index = Vec::new(); + let mut original_vertex_index = Vec::with_capacity(topo.len()); // Accumulate surface vertex positions for the new trimesh. - let mut surf_vert_pos = Vec::new(); + let mut surf_vert_pos = Vec::with_capacity(topo.len()); for face in topo.iter_mut() { for idx in face.iter_mut() { @@ -203,6 +245,9 @@ impl TetMesh { } } + surf_vert_pos.shrink_to_fit(); + original_vertex_index.shrink_to_fit(); + let num_surf_verts = surf_vert_pos.len(); // Transfer vertex attributes. @@ -236,6 +281,7 @@ impl TetMesh { // Mapping from face vertex index to its original tet vertex index. let mut tet_vertex_index = Vec::new(); if original_tet_vertex_index_name.is_some() { + tet_vertex_index.reserve(topo.len() * 3); for (&tet_idx, &tet_face_idx) in tet_indices.iter().zip(tet_face_indices.iter()) { let tri = &Self::TET_FACES[tet_face_idx]; for &i in tri.iter() { @@ -476,11 +522,12 @@ mod tests { mesh.add_attrib_data::<_, CellVertexIndex>("cell_vtx_attrib", cell_vtx_data) .unwrap(); - let trimesh = mesh.surface_trimesh_with_mapping( + let trimesh = mesh.surface_trimesh_with_mapping_and_filter( Some("vtx_idx"), Some("face_idx"), Some("face_vtx_idx"), Some("tet_face_idx"), + |_| true, ); assert_eq!(trimesh.num_vertices(), expected_pos.len()); -- GitLab From 380fb2e3b8e5010d4332fb5dcc2ad4de285fe7ab Mon Sep 17 00:00:00 2001 From: Egor Larionov Date: Sat, 25 Jul 2020 19:39:10 -0700 Subject: [PATCH 02/19] Refactor + partition + bytemuck Added a partitioning algorithm to split meshes by. Refactored code for splitting meshes. Added an AttribValueCache to tetmeshes. Moved some of the slice casts to the safe bytemuck crate instead of using the unsafe reinterpret crate. --- Cargo.toml | 3 +- src/algo/connectivity.rs | 1251 +------------------------------ src/algo/merge.rs | 11 + src/algo/mod.rs | 4 + src/algo/partition.rs | 264 +++++++ src/algo/split.rs | 1294 +++++++++++++++++++++++++++++++++ src/index.rs | 5 + src/lib.rs | 7 +- src/mesh/attrib.rs | 9 +- src/mesh/attrib/attribute.rs | 5 +- src/mesh/polymesh.rs | 4 +- src/mesh/tetmesh.rs | 3 + src/mesh/tetmesh/surface.rs | 2 +- src/mesh/uniform_poly_mesh.rs | 11 +- src/utils/slice.rs | 44 +- 15 files changed, 1649 insertions(+), 1268 deletions(-) create mode 100644 src/algo/partition.rs create mode 100644 src/algo/split.rs diff --git a/Cargo.toml b/Cargo.toml index 03c24a5..ccd6e00 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,12 +18,13 @@ gut-derive = { path = "gut-derive", version = "0.5.0" } num-traits = "0.2" dync = { version = "0.4", features = ['numeric'] } reinterpret = "0.2" +bytemuck = "1.2" autodiff = { version = "0.2", features = ["cgmath"] } vtkio = { version = "0.3", optional = true } serde = { version = "1.0", features = ["derive"], optional = true } chashmap = { version = "2", optional = true } rayon = { version = "1", optional = true } -fnv = { version = "1" } +hashbrown = { version = "0.8" } math = { package = "cgmath", git = "https://github.com/elrnv/cgmath.git", version = "0.17" } [dependencies.objio] diff --git a/src/algo/connectivity.rs b/src/algo/connectivity.rs index 4b85d8c..727313c 100644 --- a/src/algo/connectivity.rs +++ b/src/algo/connectivity.rs @@ -3,8 +3,9 @@ */ use crate::index::*; -use crate::mesh::attrib::AttribValueCache; use crate::mesh::topology::*; +use crate::mesh::{attrib::*, PolyMesh, TetMeshExt, TriMesh}; +use crate::Real; /// A trait defining the primary method for determining connectivity in a mesh. 
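The commit message above advertises moving slice casts from the unsafe `reinterpret` crate to `bytemuck`. A minimal sketch of the kind of cast this enables, independent of this crate's own types:

```rust
// View a slice of [f64; 3] vertex positions as a flat &[f64] without unsafe code.
// `[f64; 3]` is `Pod`, so `bytemuck::cast_slice` checks size and alignment and casts safely.
fn flatten_positions(positions: &[[f64; 3]]) -> &[f64] {
    bytemuck::cast_slice(positions)
}

// e.g. flatten_positions(&[[1.0, 2.0, 3.0]]) == &[1.0, 2.0, 3.0]
```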
/// @@ -17,6 +18,8 @@ pub trait Connectivity, Via: ElementIndex> { /// This is computed with `precompute_topo` and used in `push_neighbours`. type Topo: Default; + /// Precompute additional topology information prior to determining connectivity. + /// /// An optional function that allows implementers to precompute topology information to help /// with the implementation of `push_neighbours` when the mesh doesn't already support a /// certain type of topology. @@ -24,13 +27,15 @@ pub trait Connectivity, Via: ElementIndex> { Default::default() } - /// Get a list of indices for the elements which are considered for connectivity (e.g. - /// triangles in triangle meshes or tets in a tetmesh). + /// Get the number of elements which are considered for connectivity + /// + /// E.g. triangles in triangle meshes or tets in a tetmesh. fn num_elements(&self) -> usize; /// Push all neighbours of the element at the given `index` to the given `stack`. + /// /// Additionally, topology data `topo` computed using `precomute_topo` and an - /// optional `attribute` on the target topology is provided to help determine connectivity. + /// optional `attribute` on the `Src` topology is provided to help determine connectivity. fn push_neighbours( &self, index: Src, @@ -41,7 +46,7 @@ pub trait Connectivity, Via: ElementIndex> { /// Determine the connectivity of a set of meshes. /// - /// Return a `Vec` with the size of `self.indices().len()` indicating a unique ID of the + /// Return a `Vec` with the size of `self.num_elements()` indicating a unique ID of the /// connected component each element belongs to. For instance, if two triangles in a triangle /// mesh blong to the same connected component, they will have the same ID. Also return the /// total number of components generated. @@ -51,13 +56,13 @@ pub trait Connectivity, Via: ElementIndex> { /// Determine the connectivity of a set of meshes. /// - /// Return a `Vec` with the size of `self.indices().len()` indicating a unique ID of the + /// Return a `Vec` with the size of `self.num_elements()` indicating a unique ID of the /// connected component each element belongs to. For instance, if two triangles in a triangle /// mesh blong to the same connected component, they will have the same ID. Also return the /// total number of components generated. /// /// This is a more general version of `connectivity` that accepts an optional attribute of type - /// `T` on the target topology to determine connectivity. + /// `T` on the `Src` topology to determine connectivity. fn connectivity_via_attrib(&self, attrib: Option<&str>) -> (Vec, usize) where Self: Attrib, @@ -71,13 +76,13 @@ pub trait Connectivity, Via: ElementIndex> { /// Determine the connectivity of a set of meshes. /// - /// Return a `Vec` with the size of `self.indices().len()` indicating a unique ID of the + /// Return a `Vec` with the size of `self.num_elements()` indicating a unique ID of the /// connected component each element belongs to. For instance, if two triangles in a triangle /// mesh blong to the same connected component, they will have the same ID. Also return the /// total number of components generated. /// /// This is the most general version of `connectivity` that accepts a function that providees - /// attribute data of type `T` on the target topology to determine connectivity. + /// attribute data of type `T` on the `Src` topology to determine connectivity. /// Note that the provided slice must have the same length as the number of `Src` indices. 
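The `Connectivity` trait documented here is typically invoked with the source and intermediate topologies spelled out, since a mesh may implement several connectivity flavours. A hedged sketch (crate and module paths assumed):

```rust
use gut::algo::connectivity::Connectivity;
use gut::mesh::topology::{FaceIndex, VertexIndex};
use gut::mesh::TriMesh;

// Component ID per vertex, where vertices are connected through shared faces,
// plus the total number of connected components.
fn vertex_components(mesh: &TriMesh<f64>) -> (Vec<usize>, usize) {
    Connectivity::<VertexIndex, FaceIndex>::connectivity(mesh)
}
```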
fn connectivity_via_attrib_fn<'a, T, F>(&self, f: F) -> (Vec, usize) where @@ -251,704 +256,9 @@ impl TetMeshExt { } } -/// Helper to split attributes based on the given connectivity info. -fn split_attributes>>( - src_dict: &AttribDict, - num_components: usize, - connectivity: impl Iterator + Clone, - caches: &mut [AttribValueCache], -) -> Vec> { - split_attributes_with(src_dict, num_components, |attrib, num_components| { - let mut new_attribs = vec![attrib.duplicate_empty(); num_components]; - // Get an iterator of typeless values for this attribute. - match &attrib.data { - AttributeData::Direct(d) => { - connectivity - .clone() - .zip(d.data_ref().iter()) - .filter_map(|(comp_id, val_ref)| { - comp_id.into().map(|comp_id| (comp_id, val_ref)) - }) - .for_each(|(valid_idx, val_ref)| { - new_attribs[valid_idx] - .data - .direct_data_mut() - .unwrap() - .push_cloned(val_ref) - .unwrap(); - }); - } - AttributeData::Indirect(i) => { - for (valid_comp_id, val_ref) in - connectivity.clone().zip(i.data_ref().iter()).filter_map( - |(comp_id, val_ref)| comp_id.into().map(|comp_id| (comp_id, val_ref)), - ) - { - new_attribs[valid_comp_id] - .data - .indirect_data_mut() - .unwrap() - .push_cloned(val_ref, &mut caches[valid_comp_id]) - .unwrap(); - } - } - } - - new_attribs - }) -} - -/// Helper to split attributes using a given closure to transfer data from each source attribute to -/// the destination collection of individual empty component attributes. -fn split_attributes_with( - src_dict: &AttribDict, - num_components: usize, - mut split_attribute: impl FnMut(&Attribute, usize) -> Vec>, -) -> Vec> { - let mut comp_attributes = vec![AttribDict::new(); num_components]; - for (name, attrib) in src_dict.iter() { - // Split the given attribute into one attribute per component. - let new_attribs = split_attribute(&attrib, num_components); - assert_eq!(new_attribs.len(), num_components); - - // Save the new attributes to their corresponding attribute dictionaries. - for (attrib_dict, new_attrib) in comp_attributes.iter_mut().zip(new_attribs.into_iter()) { - attrib_dict.insert(name.to_string(), new_attrib); - } - } - comp_attributes -} - -use crate::mesh::{attrib::*, PolyMesh, TetMesh, TetMeshExt, TriMesh}; -use crate::Real; -use reinterpret::reinterpret_vec; - -pub trait SplitIntoConnectedComponents -where - Src: ElementIndex, - Via: ElementIndex, - Self: Sized, -{ - fn split_into_connected_components(self) -> Vec; -} - -// TODO: Refactor the below two implementations by extracting common patterns. This can also be -// combined with implementations conversions between meshes. - -impl SplitIntoConnectedComponents for TetMesh { - fn split_into_connected_components(self) -> Vec { - let tetmesh_ext = TetMeshExt::from(self); - tetmesh_ext - .split_into_connected_components() - .into_iter() - .map(TetMesh::from) - .collect() - } -} - -impl SplitIntoConnectedComponents for TetMeshExt { - fn split_into_connected_components(self) -> Vec { - // First we partition the vertices. - let (vertex_connectivity, num_components) = self.connectivity(); - - // Fast path, when everything is connected. - if num_components == 1 { - return vec![self]; - } - - // Deconstruct the original mesh. - let TetMeshExt { - tetmesh: - TetMesh { - vertex_positions, - indices, - vertex_attributes, - cell_attributes, - cell_vertex_attributes, - cell_face_attributes, - }, - cell_offsets, - cell_indices, - vertex_cell_attributes, - .. 
- } = self; - - // Record where the new vertices end up (just the index within their respective - // components). The component ids themselves are recorded separately. - let mut new_vertex_indices = vec![Index::INVALID; vertex_positions.len()]; - - // Transfer vertex positions - let mut comp_vertex_positions = vec![Vec::new(); num_components]; - for (vidx, &comp_id) in vertex_connectivity.iter().enumerate() { - new_vertex_indices[vidx] = comp_vertex_positions[comp_id].len().into(); - comp_vertex_positions[comp_id].push(vertex_positions[vidx]); - } - - // Validate that all vertices have been properly mapped. - debug_assert!(new_vertex_indices.iter().all(|&idx| idx.is_valid())); - let new_vertex_indices: Vec = unsafe { reinterpret_vec(new_vertex_indices) }; - - // Record cell connectivity. Note that if cells have vertices on different components, - // they will be ignored in the output and their connectivity will be "invalid". - let mut cell_connectivity = vec![Index::INVALID; indices.len()]; - let mut new_cell_indices = vec![Index::INVALID; indices.len()]; - - // Transfer cells - let mut comp_vertex_indices = vec![Vec::new(); num_components]; - for (cell_idx, &cell) in indices.iter().enumerate() { - let comp_id = vertex_connectivity[cell[0]]; - if cell.iter().all(|&i| vertex_connectivity[i] == comp_id) { - let new_cell = [ - new_vertex_indices[cell[0]], - new_vertex_indices[cell[1]], - new_vertex_indices[cell[2]], - new_vertex_indices[cell[3]], - ]; - new_cell_indices[cell_idx] = comp_vertex_indices[comp_id].len().into(); - comp_vertex_indices[comp_id].push(new_cell); - cell_connectivity[cell_idx] = Index::from(comp_id); - } - } - - // Transfer vertex to cell topology - let mut comp_cell_indices = vec![Vec::new(); num_components]; - let mut comp_cell_offsets = vec![vec![0]; num_components]; - for (vidx, &comp_id) in vertex_connectivity.iter().enumerate() { - let off = cell_offsets[vidx]; - for &cell_idx in &cell_indices[off..cell_offsets[vidx + 1]] { - new_cell_indices[cell_idx] - .if_valid(|new_cidx| comp_cell_indices[comp_id].push(new_cidx)); - } - comp_cell_offsets[comp_id].push(comp_cell_indices[comp_id].len()); - } - - // Initialize attribute value caches for indirect attributes. - let mut comp_attribute_value_caches = - vec![AttribValueCache::with_hasher(Default::default()); num_components]; - - // Transfer vertex-cell attributes - - // A helper closure to map a given attribute value to the corresponding component id if any - // `i` is the index of the original attribute value. - let transfer_comp_id = |vtx_idx: &mut usize, i| -> Option { - // Determine the vertex index here using offsets - let off = cell_offsets[*vtx_idx + 1]; - if i == off { - *vtx_idx += 1; - } - let comp_id = vertex_connectivity[*vtx_idx]; - let cell_idx = cell_indices[i]; - - // Add value for this vertex to the appropriate component data. 
- let idx: Index = new_cell_indices[cell_idx]; - idx.map(|_| comp_id).into() - }; - - let comp_vertex_cell_attributes = split_attributes_with( - &vertex_cell_attributes, - num_components, - |attrib, num_components| { - let mut new_attribs = vec![attrib.duplicate_empty(); num_components]; - - let mut vtx_idx = 0; - - match &attrib.data { - AttributeData::Direct(direct) => { - for (i, val_ref) in direct.data_ref().iter().enumerate() { - if let Some(comp_id) = transfer_comp_id(&mut vtx_idx, i) { - new_attribs[comp_id] - .data - .direct_data_mut() - .unwrap() - .push_cloned(val_ref) - .unwrap(); - } - } - } - AttributeData::Indirect(indirect) => { - for (i, val_ref) in indirect.data_ref().iter().enumerate() { - if let Some(comp_id) = transfer_comp_id(&mut vtx_idx, i) { - new_attribs[comp_id] - .data - .indirect_data_mut() - .unwrap() - .push_cloned(val_ref, &mut comp_attribute_value_caches[comp_id]) - .unwrap(); - } - } - } - }; - - new_attribs - }, - ); - - // Transfer vertex attributes - let comp_vertex_attributes = split_attributes( - &vertex_attributes, - num_components, - vertex_connectivity.iter().cloned(), - &mut comp_attribute_value_caches, - ); - - // Transfer cell attributes - let comp_cell_attributes = split_attributes( - &cell_attributes, - num_components, - cell_connectivity.iter().cloned(), - &mut comp_attribute_value_caches, - ); - - // Transfer cell vertex attributes - let comp_cell_vertex_attributes = split_attributes( - &cell_vertex_attributes, - num_components, - cell_connectivity - .iter() - .flat_map(|c| std::iter::repeat(c).take(4).cloned()), - &mut comp_attribute_value_caches, - ); - - // Transfer cell face attributes - let comp_cell_face_attributes = split_attributes( - &cell_face_attributes, - num_components, - cell_connectivity - .iter() - .flat_map(|c| std::iter::repeat(c).take(4).cloned()), - &mut comp_attribute_value_caches, - ); - - // Generate a Vec of meshes. - comp_vertex_positions - .into_iter() - .zip(comp_vertex_indices.into_iter()) - .zip(comp_cell_indices.into_iter()) - .zip(comp_cell_offsets.into_iter()) - .zip(comp_vertex_attributes.into_iter()) - .zip(comp_cell_attributes.into_iter()) - .zip(comp_cell_vertex_attributes.into_iter()) - .zip(comp_cell_face_attributes.into_iter()) - .zip(comp_vertex_cell_attributes.into_iter()) - .map( - |((((((((vp, vi), ci), co), va), ca), cva), cfa), vca)| TetMeshExt { - tetmesh: TetMesh { - vertex_positions: vp.into(), - indices: vi.into(), - vertex_attributes: va, - cell_attributes: ca, - cell_vertex_attributes: cva, - cell_face_attributes: cfa, - }, - cell_indices: ci, - cell_offsets: co, - vertex_cell_attributes: vca, - }, - ) - .collect() - } -} - -impl SplitIntoConnectedComponents for PolyMesh { - fn split_into_connected_components(self) -> Vec { - // First we partition the vertices. - let (vertex_connectivity, num_components) = - Connectivity::::connectivity(&self); - - // Fast path, when everything is connected. - if num_components == 1 { - return vec![self]; - } - - // Record where the new vertices end up (just the index within their respective - // components). The component ids themselves are recorded separately. 
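The lines below compute exactly the per-component renumbering described in the comment above. A standalone sketch of that pattern, using plain `usize` IDs instead of this crate's `Index` type:

```rust
// Each vertex receives a new index local to its connected component.
fn remap_vertices(vertex_connectivity: &[usize], num_components: usize) -> Vec<usize> {
    let mut counts = vec![0usize; num_components];
    vertex_connectivity
        .iter()
        .map(|&comp| {
            let local = counts[comp];
            counts[comp] += 1;
            local
        })
        .collect()
}

// e.g. connectivity [0, 1, 0, 1, 1] yields local indices [0, 0, 1, 1, 2].
```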
- let mut new_vertex_indices = vec![Index::INVALID; self.vertex_positions.len()]; - - // Transfer vertex positions - let mut comp_vertex_positions = vec![Vec::new(); num_components]; - for (vidx, &comp_id) in vertex_connectivity.iter().enumerate() { - new_vertex_indices[vidx] = comp_vertex_positions[comp_id].len().into(); - comp_vertex_positions[comp_id].push(self.vertex_positions[vidx]); - } - - // Validate that all vertices have been properly mapped. - debug_assert!(new_vertex_indices.iter().all(|&idx| idx.is_valid())); - let new_vertex_indices = crate::index_vec_into_usize(new_vertex_indices); - - // Record face connectivity. Note that if faces have vertices on different components, - // they will be ignored in the output and their connectivity will be "invalid". - let mut face_connectivity = vec![Index::INVALID; self.num_faces()]; - - // Transfer faces - let mut comp_indices = vec![Vec::new(); num_components]; - let mut comp_offsets = vec![vec![0]; num_components]; - for (face, face_comp_id) in self.face_iter().zip(face_connectivity.iter_mut()) { - let comp_id = vertex_connectivity[face[0]]; - if face.iter().all(|&i| vertex_connectivity[i] == comp_id) { - let new_face_vtx_iter = face.iter().map(|&vi| new_vertex_indices[vi]); - comp_indices[comp_id].extend(new_face_vtx_iter); - comp_offsets[comp_id].push(comp_indices[comp_id].len()); - *face_comp_id = Index::from(comp_id); - } - } - - // Initialize attribute value caches for indirect attributes. - let mut comp_attribute_value_caches = - vec![AttribValueCache::with_hasher(Default::default()); num_components]; - - // Transfer vertex attributes - let comp_vertex_attributes = split_attributes( - &self.vertex_attributes, - num_components, - vertex_connectivity.iter().cloned(), - &mut comp_attribute_value_caches, - ); - - // Transfer face attributes - let comp_face_attributes = split_attributes( - &self.face_attributes, - num_components, - face_connectivity.iter().cloned(), - &mut comp_attribute_value_caches, - ); - - // Transfer face vertex attributes - let comp_face_vertex_attributes = split_attributes( - &self.face_vertex_attributes, - num_components, - face_connectivity.iter().enumerate().flat_map(|(fi, c)| { - std::iter::repeat(c) - .take(self.num_vertices_at_face(fi)) - .cloned() - }), - &mut comp_attribute_value_caches, - ); - - // Transfer face edge attributes - let comp_face_edge_attributes = split_attributes( - &self.face_edge_attributes, - num_components, - face_connectivity.iter().enumerate().flat_map(|(fi, c)| { - std::iter::repeat(c) - .take(self.num_edges_at_face(fi)) - .cloned() - }), - &mut comp_attribute_value_caches, - ); - - // Generate a Vec of meshes. - comp_vertex_positions - .into_iter() - .zip(comp_indices.into_iter()) - .zip(comp_offsets.into_iter()) - .zip(comp_vertex_attributes.into_iter()) - .zip(comp_face_attributes.into_iter()) - .zip(comp_face_vertex_attributes.into_iter()) - .zip(comp_face_edge_attributes.into_iter()) - .zip(comp_attribute_value_caches.into_iter()) - .map(|(((((((vp, i), o), va), fa), fva), fea), avc)| PolyMesh { - vertex_positions: vp.into(), - indices: i, - offsets: o, - vertex_attributes: va, - face_attributes: fa, - face_vertex_attributes: fva, - face_edge_attributes: fea, - attribute_value_cache: avc, - }) - .collect() - } -} - -// TODO: Generalize split_vertices_by_attrib between the meshes. -// This will involve converging on how to represent/access indices for rewiring meshes -// through a trait. - -impl TriMesh { - /// Split vertices by a given face-vertex attribute. 
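For reference, the `SplitIntoConnectedComponents` implementations above (being reorganized by this refactor) are driven like this; a hedged sketch with crate paths assumed:

```rust
use gut::algo::connectivity::SplitIntoConnectedComponents;
use gut::mesh::PolyMesh;

// Break a polygon mesh into its connected components. Each returned mesh keeps
// only the vertices, faces, and attributes that belong to its component.
fn components(mesh: PolyMesh<f64>) -> Vec<PolyMesh<f64>> {
    mesh.split_into_connected_components()
}
```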
- /// - /// If a pair of face-vertices have different values for the same vertex, then they will be - /// split into distinct vertices. New vertex positions are appended at the end of the vertex - /// position array. - /// - /// If the given attribute doesn't exist, then nothing is changed. - pub fn split_vertices_by_attrib(&mut self, attrib_name: &str) { - // For each vertex, topo contains a set of face-vertex indices. - let (fv_indices, fv_offsets) = self.reverse_source_topo(); - - // This function doesn't affect the number of faces or face-vertex topology. - let TriMesh { - vertex_positions, - indices, - vertex_attributes, - face_vertex_attributes, - // Other attributes remain unchanged. - .. - } = self; - - if let Some(attrib) = face_vertex_attributes.get(attrib_name) { - let attrib_values = attrib.data_slice(); - - // The partitioning of unique values in the neighbourhood of one vertex. - let mut local_partition = Vec::new(); - let mut unique_values = Vec::new(); - - // Remember which vertices were newly created so we can transfer vertex attributes. - let mut new_vertices = Vec::new(); - - for vtx_idx in 0..vertex_positions.len() { - local_partition.clear(); - unique_values.clear(); - - for face_vertex in - (fv_offsets[vtx_idx]..fv_offsets[vtx_idx + 1]).map(|i| fv_indices[i]) - { - let val = attrib_values.get(face_vertex); - if let Some(idx) = unique_values.iter().position(|uv| uv == &val) { - local_partition.push((idx, face_vertex)); - } else { - local_partition.push((unique_values.len(), face_vertex)); - unique_values.push(val); - } - } - - local_partition.sort_by_key(|a| a.0); - let mut partition_iter = local_partition.iter(); - if let Some(mut prev) = partition_iter.next() { - // First element will have a unique vertex by definition. - for next in partition_iter { - if next.0 != prev.0 { - // Found a different face-vertex attribute. Split the vertex. - // Rewire appropriate vertex index to the new vertex. - let pos = vertex_positions[vtx_idx]; - indices[next.1 / 3][next.1 % 3] = vertex_positions.len(); - vertex_positions.as_mut_vec().push(pos); - new_vertices.push(vtx_idx); - prev = next; - } else { - // Same bucket but new vertices may have been created, so we must still - // rewire to the last newly created vertex. - indices[next.1 / 3][next.1 % 3] = indices[prev.1 / 3][prev.1 % 3]; - } - } - } - } - - // Duplicate vertex attributes for newly created vertices. - for (_, attrib) in vertex_attributes.iter_mut() { - let num = attrib.len(); - attrib.extend_by(new_vertices.len()); - - // Split the extended attribute into original byte slice and and newly extended - // uninitialized slice. - let mut data_slice = attrib.data_mut_slice(); - let (old, mut new) = data_slice.split_at(num); - for (&vtx_idx, mut new_val) in new_vertices.iter().zip(new.iter()) { - // Initialize the extended part. - //let bytes = &old[vtx_idx * element_size..(vtx_idx + 1) * element_size]; - //new[i * element_size..(i + 1) * element_size].copy_from_slice(bytes); - new_val.clone_from_other(old.get(vtx_idx)).unwrap(); - } - } - } - } -} - -impl PolyMesh { - /// Split vertices by a given face-vertex attribute. - /// - /// If a pair of face-vertices have different values for the same vertex, then they will be - /// split into distinct vertices. New vertex positions are appended at the end of the vertex - /// position array. - /// - /// If the given attribute doesn't exist, then nothing is changed. 
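A common use of this vertex-splitting routine is preparing per-corner data (such as UVs) for promotion to per-vertex data, as the regression tests further down exercise. A hedged sketch with crate paths assumed; `attrib_promote` is called exactly as in those tests:

```rust
use gut::mesh::attrib::*;
use gut::mesh::TriMesh;

fn promote_uvs(mesh: &mut TriMesh<f64>) {
    // Duplicate any vertex whose incident face-corners disagree on "uv".
    mesh.split_vertices_by_attrib("uv");
    // Afterwards all corners sharing a vertex agree, so promotion can assert
    // equality instead of having to average conflicting values.
    mesh.attrib_promote::<[f32; 2], _>("uv", |a, b| assert_eq!(a, b))
        .unwrap();
}
```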
- pub fn split_vertices_by_attrib( - &mut self, - attrib: &str, - ) { - // For each vertex, topo contains a set of face-vertex indices. - let (fv_indices, fv_offsets) = self.reverse_source_topo(); - - // This function doesn't affect the number of faces or face-vertex topology. - let PolyMesh { - vertex_positions, - indices, - vertex_attributes, - face_vertex_attributes, - // Other attributes remain unchanged. - .. - } = self; - - if let Some(attrib) = face_vertex_attributes - .get(attrib) - .and_then(|a| a.as_slice::().ok()) - { - // The partitioning of unique values in the neighbourhood of one vertex. - let mut local_partition = Vec::new(); - - // Remember which vertices were newly created so we can transfer vertex attributes. - let mut new_vertices = Vec::new(); - - for vtx_idx in 0..vertex_positions.len() { - local_partition.clear(); - for face_vertex in - (fv_offsets[vtx_idx]..fv_offsets[vtx_idx + 1]).map(|i| fv_indices[i]) - { - local_partition.push((face_vertex, &attrib[face_vertex])); - } - local_partition - .sort_by(|a, b| a.1.partial_cmp(b.1).unwrap_or(std::cmp::Ordering::Less)); - let mut partition_iter = local_partition.iter(); - if let Some(mut prev) = partition_iter.next() { - // First element will have a unique vertex by definition. - for next in partition_iter { - if next.1 != prev.1 { - // Found a different face-vertex attribute. Split the vertex. - // Rewire appropriate vertex index to the new vertex. - let pos = vertex_positions[vtx_idx]; - indices[next.0] = vertex_positions.len(); - vertex_positions.as_mut_vec().push(pos); - new_vertices.push(vtx_idx); - prev = next; - } else { - // Same bucket but new vertices may have been created, so we must still - // rewire to the last newly created vertex. - indices[next.0] = indices[prev.0]; - } - } - } - } - - // Duplicate vertex attributes for newly created vertices. - for (_, attrib) in vertex_attributes.iter_mut() { - let num = attrib.len(); - attrib.extend_by(new_vertices.len()); - - // Split the extended attribute into original byte slice and newly extended - // uninitialized slice. - let mut data_slice = attrib.data_mut_slice(); - let (old, mut new) = data_slice.split_at(num); - - for (&vtx_idx, mut new_val) in new_vertices.iter().zip(new.iter()) { - // Initialize the extended part. - //let bytes = &old[vtx_idx * element_size..(vtx_idx + 1) * element_size]; - //new[i * element_size..(i + 1) * element_size].copy_from_slice(bytes); - new_val.clone_from_other(old.get(vtx_idx)).unwrap(); - } - } - } - } -} - -impl SplitIntoConnectedComponents for TriMesh { - fn split_into_connected_components(self) -> Vec { - // First we partition the vertices. - let (vertex_connectivity, num_components) = - Connectivity::::connectivity(&self); - - // Fast path, when everything is connected. - if num_components == 1 { - return vec![self]; - } - - // Record where the new vertices end up (just the index within their respective - // components). The component ids themselves are recorded separately. - let mut new_vertex_indices = vec![Index::INVALID; self.vertex_positions.len()]; - - // Transfer vertex positions - let mut comp_vertex_positions = vec![Vec::new(); num_components]; - for (vidx, &comp_id) in vertex_connectivity.iter().enumerate() { - new_vertex_indices[vidx] = comp_vertex_positions[comp_id].len().into(); - comp_vertex_positions[comp_id].push(self.vertex_positions[vidx]); - } - - // Validate that all vertices have been properly mapped. 
- debug_assert!(new_vertex_indices.iter().all(|&idx| idx.is_valid())); - let new_vertex_indices = crate::index_vec_into_usize(new_vertex_indices); - - // Record face connectivity. Note that if faces have vertices on different components, - // they will be ignored in the output and their connectivity will be "invalid". - let mut face_connectivity = vec![Index::INVALID; self.num_faces()]; - - // Transfer faces - let mut comp_vertex_indices = vec![Vec::new(); num_components]; - for (face, face_comp_id) in self.face_iter().zip(face_connectivity.iter_mut()) { - let comp_id = vertex_connectivity[face[0]]; - if face.iter().all(|&i| vertex_connectivity[i] == comp_id) { - let new_face = [ - new_vertex_indices[face[0]], - new_vertex_indices[face[1]], - new_vertex_indices[face[2]], - ]; - comp_vertex_indices[comp_id].push(new_face); - *face_comp_id = Index::from(comp_id); - } - } - - // Initialize attribute value caches for indirect attributes. - let mut comp_attribute_value_caches = - vec![AttribValueCache::with_hasher(Default::default()); num_components]; - - // Transfer vertex attributes - let comp_vertex_attributes = split_attributes( - &self.vertex_attributes, - num_components, - vertex_connectivity.iter().cloned(), - &mut comp_attribute_value_caches, - ); - - // Transfer face attributes - let comp_face_attributes = split_attributes( - &self.face_attributes, - num_components, - face_connectivity.iter().cloned(), - &mut comp_attribute_value_caches, - ); - - // Transfer face vertex attributes - let comp_face_vertex_attributes = split_attributes( - &self.face_vertex_attributes, - num_components, - face_connectivity - .iter() - .flat_map(|f| std::iter::repeat(f).take(3).cloned()), - &mut comp_attribute_value_caches, - ); - - // Transfer face edge attributes - let comp_face_edge_attributes = split_attributes( - &self.face_edge_attributes, - num_components, - face_connectivity - .iter() - .flat_map(|f| std::iter::repeat(f).take(3).cloned()), - &mut comp_attribute_value_caches, - ); - - // Generate a Vec of meshes. - comp_vertex_positions - .into_iter() - .zip(comp_vertex_indices.into_iter()) - .zip(comp_vertex_attributes.into_iter()) - .zip(comp_face_attributes.into_iter()) - .zip(comp_face_vertex_attributes.into_iter()) - .zip(comp_face_edge_attributes.into_iter()) - .map(|(((((vp, i), va), fa), fva), fea)| TriMesh { - vertex_positions: vp.into(), - indices: i.into(), - vertex_attributes: va, - face_attributes: fa, - face_vertex_attributes: fva, - face_edge_attributes: fea, - }) - .collect() - } -} - #[cfg(test)] mod tests { use super::*; - use crate::algo::test_utils::*; use crate::mesh::{TetMeshExt, TriMesh}; #[test] @@ -984,537 +294,4 @@ mod tests { (vec![0, 0, 0, 0, 1, 1, 1], 2) ); } - - fn build_tetmesh_sample() -> (TetMeshExt, TetMeshExt, TetMeshExt) { - let verts = vec![ - [0.0, 0.0, 0.0], - [0.0, 0.0, 1.0], - [0.0, 1.0, 0.0], - [0.0, 1.0, 1.0], - [1.0, 0.0, 0.0], - [1.0, 0.0, 1.0], - [1.0, 1.0, 0.0], - [1.0, 1.0, 1.0], - [0.5, 0.0, 0.5], - ]; - - // One connected component consisting of two tets connected at a face, and another - // consisting of a single tet. 
- let indices = vec![[7, 6, 2, 4], [5, 7, 2, 4], [0, 1, 3, 8]]; - - let tetmesh = TetMeshExt::new(verts, indices); - let comp1 = TetMeshExt::new( - vec![ - [0.0, 0.0, 0.0], - [0.0, 0.0, 1.0], - [0.0, 1.0, 1.0], - [0.5, 0.0, 0.5], - ], - vec![[0, 1, 2, 3]], - ); - let comp2 = TetMeshExt::new( - vec![ - [0.0, 1.0, 0.0], - [1.0, 0.0, 0.0], - [1.0, 0.0, 1.0], - [1.0, 1.0, 0.0], - [1.0, 1.0, 1.0], - ], - vec![[4, 3, 0, 1], [2, 4, 0, 1]], - ); - (tetmesh, comp1, comp2) - } - - #[test] - fn tetmesh_split() { - let (tetmesh, comp1, comp2) = build_tetmesh_sample(); - - // First lets verify the vertex partitioning. - assert_eq!(tetmesh.connectivity(), (vec![0, 0, 1, 0, 1, 1, 1, 1, 0], 2)); - - let res = tetmesh.split_into_connected_components(); - assert_eq!(res, vec![comp1, comp2]); - } - - #[test] - fn tetmesh_split_with_vertex_attributes() { - let (mut tetmesh, mut comp1, mut comp2) = build_tetmesh_sample(); - tetmesh - .add_attrib_data::("v", (0..tetmesh.num_vertices()).collect()) - .unwrap(); - comp1 - .add_attrib_data::("v", vec![0, 1, 3, 8]) - .unwrap(); - comp2 - .add_attrib_data::("v", vec![2, 4, 5, 6, 7]) - .unwrap(); - let res = tetmesh.split_into_connected_components(); - assert_eq!(res, vec![comp1, comp2]); - } - - #[test] - fn tetmesh_split_with_cell_attributes() { - let (mut tetmesh, mut comp1, mut comp2) = build_tetmesh_sample(); - tetmesh - .add_attrib_data::("c", (0..tetmesh.num_cells()).collect()) - .unwrap(); - comp1 - .add_attrib_data::("c", vec![2]) - .unwrap(); - comp2 - .add_attrib_data::("c", vec![0, 1]) - .unwrap(); - let res = tetmesh.split_into_connected_components(); - assert_eq!(res, vec![comp1, comp2]); - } - - #[test] - fn tetmesh_split_with_cell_vertex_attributes() { - let (mut tetmesh, mut comp1, mut comp2) = build_tetmesh_sample(); - tetmesh - .add_attrib_data::("cv", (0..tetmesh.num_cells() * 4).collect()) - .unwrap(); - - comp1 - .add_attrib_data::("cv", vec![8, 9, 10, 11]) - .unwrap(); - comp2 - .add_attrib_data::("cv", vec![0, 1, 2, 3, 4, 5, 6, 7]) - .unwrap(); - let res = tetmesh.split_into_connected_components(); - assert_eq!(res, vec![comp1, comp2]); - } - - #[test] - fn tetmesh_split_with_cell_face_attributes() { - let (mut tetmesh, mut comp1, mut comp2) = build_tetmesh_sample(); - tetmesh - .add_attrib_data::("cf", (0..tetmesh.num_cells() * 4).collect()) - .unwrap(); - - comp1 - .add_attrib_data::("cf", vec![8, 9, 10, 11]) - .unwrap(); - comp2 - .add_attrib_data::("cf", vec![0, 1, 2, 3, 4, 5, 6, 7]) - .unwrap(); - let res = tetmesh.split_into_connected_components(); - assert_eq!(res, vec![comp1, comp2]); - } - - #[test] - fn tetmesh_split_with_vertex_cell_attributes() { - let (mut tetmesh, mut comp1, mut comp2) = build_tetmesh_sample(); - tetmesh - .add_attrib_data::("vc", (0..tetmesh.num_cells() * 4).collect()) - .unwrap(); - - comp1 - .add_attrib_data::("vc", vec![0, 1, 4, 11]) - .unwrap(); - comp2 - .add_attrib_data::("vc", vec![2, 3, 5, 6, 7, 8, 9, 10]) - .unwrap(); - let res = tetmesh.split_into_connected_components(); - assert_eq!(res, vec![comp1, comp2]); - } - - #[test] - fn tetmesh_split_with_all_attributes() { - let (mut tetmesh, mut comp1, mut comp2) = build_tetmesh_sample(); - tetmesh - .add_attrib_data::("v", (0..tetmesh.num_vertices()).collect()) - .unwrap(); - tetmesh - .add_attrib_data::("c", (0..tetmesh.num_cells()).collect()) - .unwrap(); - tetmesh - .add_attrib_data::("cv", (0..tetmesh.num_cells() * 4).collect()) - .unwrap(); - tetmesh - .add_attrib_data::("cf", (0..tetmesh.num_cells() * 4).collect()) - .unwrap(); - tetmesh - 
.add_attrib_data::("vc", (0..tetmesh.num_cells() * 4).collect()) - .unwrap(); - comp1 - .add_attrib_data::("v", vec![0, 1, 3, 8]) - .unwrap(); - comp1 - .add_attrib_data::("c", vec![2]) - .unwrap(); - comp1 - .add_attrib_data::("cv", vec![8, 9, 10, 11]) - .unwrap(); - comp1 - .add_attrib_data::("cf", vec![8, 9, 10, 11]) - .unwrap(); - comp1 - .add_attrib_data::("vc", vec![0, 1, 4, 11]) - .unwrap(); - - comp2 - .add_attrib_data::("v", vec![2, 4, 5, 6, 7]) - .unwrap(); - comp2 - .add_attrib_data::("c", vec![0, 1]) - .unwrap(); - comp2 - .add_attrib_data::("cv", vec![0, 1, 2, 3, 4, 5, 6, 7]) - .unwrap(); - comp2 - .add_attrib_data::("cf", vec![0, 1, 2, 3, 4, 5, 6, 7]) - .unwrap(); - comp2 - .add_attrib_data::("vc", vec![2, 3, 5, 6, 7, 8, 9, 10]) - .unwrap(); - let res = tetmesh.split_into_connected_components(); - assert_eq!(res, vec![comp1, comp2]); - } - - #[test] - fn polymesh_split() { - let (mesh, comp1, comp2) = build_polymesh_sample(); - - // First lets verify the vertex partitioning. - assert_eq!( - mesh.vertex_connectivity(), - (vec![0, 0, 0, 0, 1, 1, 1, 1], 2) - ); - - let res = mesh.split_into_connected_components(); - assert_eq!(res, vec![comp1, comp2]); - } - - #[test] - fn polymesh_split_with_attributes() { - let mut sample = build_polymesh_sample(); - add_attribs_to_polymeshes(&mut sample); - let (mesh, comp1, comp2) = sample; - let res = mesh.split_into_connected_components(); - assert_eq!(res, vec![comp1, comp2]); - } - - #[test] - fn polymesh_split_vertices_by_attrib() { - let verts = vec![ - [0.0, 0.0, 0.0], - [0.0, 0.0, 1.0], - [0.0, 1.0, 0.0], - [0.0, 1.0, 1.0], - [1.0, 0.0, 0.0], - [1.0, 0.0, 1.0], - [1.0, 1.0, 0.0], - [1.0, 1.0, 1.0], - ]; - - // Two triangles connected at an edge, a quad, and two triangles connecting these - // inbetweeen. 
- let indices = vec![ - 3, 0, 1, 2, 3, 2, 1, 3, 4, 4, 5, 7, 6, 3, 0, 1, 4, 3, 1, 5, 4, - ]; - - let mut polymesh = PolyMesh::new(verts, &indices); - - // Add an arbitrary vertex attribute - polymesh - .add_attrib_data::("v", (0..polymesh.num_vertices()).collect()) - .unwrap(); - - polymesh - .add_attrib_data::( - "no_split", - vec![0, 1, 2, 2, 1, 3, 4, 5, 7, 6, 0, 1, 4, 1, 5, 4], - ) - .unwrap(); - - let mut no_split = polymesh.clone(); - no_split.split_vertices_by_attrib::("no_split"); - assert_eq!(no_split, polymesh); - - polymesh - .add_attrib_data::( - "vertex1_split", - vec![0, 10, 2, 2, 11, 3, 4, 5, 7, 6, 0, 12, 4, 13, 5, 4], - ) - .unwrap(); - - let mut vertex1_split = polymesh.clone(); - vertex1_split.split_vertices_by_attrib::("vertex1_split"); - assert_eq!(vertex1_split.num_vertices(), polymesh.num_vertices() + 3); - assert_eq!( - vertex1_split.num_face_vertices(), - polymesh.num_face_vertices() - ); - assert_eq!( - vertex1_split.attrib::("vertex1_split"), - polymesh.attrib::("vertex1_split") - ); - assert_eq!( - vertex1_split.attrib_as_slice::("v"), - Ok(&[0, 1, 2, 3, 4, 5, 6, 7, 1, 1, 1][..]) - ); - - polymesh - .add_attrib_data::( - "full_split", - (0..polymesh.num_face_vertices()).collect(), - ) - .unwrap(); - - let mut full_split = polymesh.clone(); - full_split.split_vertices_by_attrib::("full_split"); - assert_eq!(full_split.num_vertices(), polymesh.num_face_vertices()); - assert_eq!(full_split.num_face_vertices(), polymesh.num_face_vertices()); - assert_eq!( - full_split.attrib_as_slice::("v"), - Ok(&[0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 1, 1, 2, 4, 4, 5][..]) - ); - } - - #[test] - fn trimesh_split_vertices_by_attrib() { - let verts = vec![ - [0.0, 0.0, 0.0], - [0.0, 0.0, 1.0], - [0.0, 1.0, 0.0], - [0.0, 1.0, 1.0], - [1.0, 0.0, 0.0], - [1.0, 0.0, 1.0], - [1.0, 1.0, 0.0], - [1.0, 1.0, 1.0], - ]; - - let indices = vec![ - [0, 1, 2], - [2, 1, 3], - [4, 5, 6], - [6, 5, 7], - [0, 1, 4], - [1, 5, 4], - ]; - - let mut mesh = TriMesh::new(verts, indices); - - // Add an arbitrary vertex attribute - mesh.add_attrib_data::("v", (0..mesh.num_vertices()).collect()) - .unwrap(); - - mesh.add_attrib_data::( - "no_split", - vec![0, 1, 2, 2, 1, 3, 4, 5, 6, 6, 5, 7, 0, 1, 4, 1, 5, 4], - ) - .unwrap(); - - let mut no_split = mesh.clone(); - no_split.split_vertices_by_attrib("no_split"); - assert_eq!(no_split, mesh); - - mesh.add_attrib_data::( - "vertex1_split", - vec![ - 0.0f32, - 10.0 / 3.0, - 2.0, - 2.0, - 11.0, - 3.0, - 4.0, - 5.0, - 6.0 / 4.0, - 6.0 / 4.0, - 5.0, - 7.0, - 0.0, - 12.0, - 4.0, - 13.0, - 5.0, - 4.0, - ], - ) - .unwrap(); - - let mut vertex1_split = mesh.clone(); - vertex1_split.split_vertices_by_attrib("vertex1_split"); - assert_eq!(vertex1_split.num_vertices(), mesh.num_vertices() + 3); - assert_eq!(vertex1_split.num_face_vertices(), mesh.num_face_vertices()); - assert_eq!( - vertex1_split.attrib::("vertex1_split"), - mesh.attrib::("vertex1_split") - ); - assert_eq!( - vertex1_split.attrib_as_slice::("v"), - Ok(&[0, 1, 2, 3, 4, 5, 6, 7, 1, 1, 1][..]) - ); - - mesh.add_attrib_data::( - "full_split", - (0..mesh.num_face_vertices()).collect(), - ) - .unwrap(); - - let mut full_split = mesh.clone(); - full_split.split_vertices_by_attrib("full_split"); - assert_eq!(full_split.num_vertices(), mesh.num_face_vertices()); - assert_eq!(full_split.num_face_vertices(), mesh.num_face_vertices()); - assert_eq!( - full_split.attrib_as_slice::("v"), - Ok(&[0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 1, 1, 2, 4, 4, 5, 5, 6][..]) - ); - } - - /// This is a more complex regression test for splitting vertices. 
- #[test] - fn trimesh_split_vertices_by_attrib_and_promote_complex() { - let verts = vec![ - [-0.520833, -0.5, 0.5], - [-0.520833, 0.5, 0.5], - [-0.520833, -0.5, -0.5], - [-0.520833, 0.5, -0.5], - [0.520833, -0.5, 0.5], - [0.520833, 0.5, 0.5], - [0.520833, -0.5, -0.5], - [0.520833, 0.5, -0.5], - ]; - - #[rustfmt::skip] - let indices = vec![ - [0, 1, 3], - [4, 5, 7], - [6, 7, 2], - [5, 4, 1], - [5, 0, 2], - [1, 4, 6], - [6, 3, 1], - [2, 7, 5], - [1, 0, 5], - [2, 3, 6], - [7, 6, 4], - [3, 2, 0], - ]; - - let mut mesh = TriMesh::new(verts, indices); - - // We split the vertices according to the following attribute and then test that - // there are no more collisions. - // This tests both functions: split_vertices_by_attrib and attrib_promote. - - mesh.add_attrib_data::<[f32; 2], FaceVertexIndex>( - "uv", - vec![ - [0.630043, 0.00107052], - [0.370129, 0.00107052], - [0.370129, 0.250588], - [0.370129, 0.749623], - [0.630043, 0.749623], - [0.630043, 0.500105], - [0.370129, 0.500105], - [0.630043, 0.500105], - [0.630043, 0.250588], - [0.630043, 0.749623], - [0.370129, 0.749623], - [0.370129, 0.99914], - [0.879561, 0.500105], - [0.879561, 0.250588], - [0.630043, 0.250588], - [0.120612, 0.250588], - [0.120612, 0.500105], - [0.370129, 0.500105], - [0.370129, 0.500105], - [0.370129, 0.250588], - [0.120612, 0.250588], - [0.630043, 0.250588], - [0.630043, 0.500105], - [0.879561, 0.500105], - [0.370129, 0.99914], - [0.630043, 0.99914], - [0.630043, 0.749623], - [0.630043, 0.250588], - [0.370129, 0.250588], - [0.370129, 0.500105], - [0.630043, 0.500105], - [0.370129, 0.500105], - [0.370129, 0.749623], - [0.370129, 0.250588], - [0.630043, 0.250588], - [0.630043, 0.00107052], - ], - ) - .unwrap(); - - mesh.split_vertices_by_attrib("uv"); - - mesh.attrib_promote::<[f32; 2], _>("uv", |a, b| assert_eq!(a, b)) - .unwrap(); - } - - /// The same test for polymeshes. - #[test] - fn polymesh_split_vertices_by_attrib_and_promote_complex() { - let verts = vec![ - [-0.520833, -0.5, 0.5], - [-0.520833, 0.5, 0.5], - [-0.520833, -0.5, -0.5], - [-0.520833, 0.5, -0.5], - [0.520833, -0.5, 0.5], - [0.520833, 0.5, 0.5], - [0.520833, -0.5, -0.5], - [0.520833, 0.5, -0.5], - ]; - - #[rustfmt::skip] - let indices = vec![ - [0, 1, 3], - [2, 4, 5], - [7, 6, 6], - [7, 2, 3], - [5, 4, 1], - [0, 5, 0], - [2, 7, 1], - [4, 6, 3], - ]; - - let mut mesh = TriMesh::new(verts, indices); - - // We split the vertices according to the following attribute and then test that - // there are no more collisions. - // This tests both functions: split_vertices_by_attrib and attrib_promote. 
- - mesh.add_attrib_data::<[f32; 2], FaceVertexIndex>( - "uv", - vec![ - [0.630043, 0.00107052], - [0.370129, 0.00107052], - [0.370129, 0.250588], - [0.630043, 0.250588], - [0.370129, 0.749623], - [0.630043, 0.749623], - [0.630043, 0.500105], - [0.370129, 0.500105], - [0.370129, 0.500105], - [0.630043, 0.500105], - [0.630043, 0.250588], - [0.370129, 0.250588], - [0.630043, 0.749623], - [0.370129, 0.749623], - [0.370129, 0.99914], - [0.630043, 0.99914], - [0.879561, 0.500105], - [0.879561, 0.250588], - [0.630043, 0.250588], - [0.630043, 0.500105], - [0.120612, 0.250588], - [0.120612, 0.500105], - [0.370129, 0.500105], - [0.370129, 0.250588], - ], - ) - .unwrap(); - - mesh.split_vertices_by_attrib("uv"); - - mesh.attrib_promote::<[f32; 2], _>("uv", |a, b| assert_eq!(a, b)) - .unwrap(); - } } diff --git a/src/algo/merge.rs b/src/algo/merge.rs index bdc742f..7820aa4 100644 --- a/src/algo/merge.rs +++ b/src/algo/merge.rs @@ -3,6 +3,7 @@ * Implementations for common mesh types are also included here. */ +use crate::mesh::attrib::AttribValueCache; use crate::mesh::attrib::*; use crate::mesh::polymesh::PolyMesh; use crate::mesh::tetmesh::{TetMesh, TetMeshExt}; @@ -211,6 +212,7 @@ impl Merge for TetMesh { cell_attributes: other_cell_attributes, cell_vertex_attributes: other_cell_vertex_attributes, cell_face_attributes: other_cell_face_attributes, + attribute_value_cache, } = other; self.vertex_positions @@ -247,6 +249,9 @@ impl Merge for TetMesh { other_cell_face_attributes, other_num_cell_faces, ); + for value in attribute_value_cache.into_iter() { + self.attribute_value_cache.insert(value); + } self } } @@ -284,6 +289,7 @@ impl TetMesh { let mut cell_vertex_attributes = AttribDict::new(); let mut cell_face_attributes = AttribDict::new(); let mut num_vertices = 0; + let mut attribute_value_cache = AttribValueCache::default(); for mesh in mesh_iter { let src = mesh.attrib_as_slice::(source_attrib)?; @@ -326,6 +332,10 @@ impl TetMesh { mesh.num_cell_faces(), ); + for value in mesh.attribute_value_cache.iter() { + attribute_value_cache.insert(value.clone()); + } + // Extend the indices AFTER attributes are transfered since // `merge_attribute_dicts` expects num_elements to be the number before // the merge. @@ -342,6 +352,7 @@ impl TetMesh { cell_attributes, cell_vertex_attributes, cell_face_attributes, + attribute_value_cache, }) } } diff --git a/src/algo/mod.rs b/src/algo/mod.rs index daedeaa..9b88a8d 100644 --- a/src/algo/mod.rs +++ b/src/algo/mod.rs @@ -2,10 +2,14 @@ pub mod connectivity; pub mod merge; pub mod normals; +pub mod partition; +pub mod split; pub use self::connectivity::*; pub use self::merge::*; pub use self::normals::*; +pub use self::partition::*; +pub use self::split::*; /// Useful utilities for testing algorithms in this module. #[cfg(test)] diff --git a/src/algo/partition.rs b/src/algo/partition.rs new file mode 100644 index 0000000..a4987ff --- /dev/null +++ b/src/algo/partition.rs @@ -0,0 +1,264 @@ +/*! + * This module defines routines for dealing with meshes composed of multiple connected components. + * # Implementation Notes + * + * Currently due to the limitations of the Rust language, it is not straightforward (or impossible) + * to generalize the partition function over types that don't already implement `Eq` and `Hash`, if + * we wanted to use a `HashMap`. That is we can't generalize the implementation to work with + * floats or types that contain floats (e.g. arrays and tuples. 
+ * + * For this reason there exist two variations of partition functions that work with different + * assumptions on types. + */ + +use crate::mesh::attrib::*; +use hashbrown::HashMap; +use std::hash::Hash; + +/* + * Partition function implementations + */ + +/// Partition a given slice by unique values. +/// +/// It may be more efficient to implement this function by hand, especially when the number of +/// partitions is known ahead of time. +pub fn partition<'a, T: Hash + Eq + 'a>(iter: impl Iterator) -> (Vec, usize) { + let mut partition = Vec::new(); + let mut map: HashMap<&'a T, usize> = HashMap::default(); + + let mut part_counter = 0; + for val in iter { + let part = map.entry(val.into()).or_insert_with(|| { + let part = part_counter; + part_counter += 1; + part + }); + partition.push(*part); + } + (partition, part_counter) +} + +/// Partition a given slice by unique values. +/// +/// This version of `partition` is useful when `T` doesn't implement `Eq` and `Hash` or +/// `PartitionHashEq` but has `PartialOrd`. For the majority of use cases it is better to use +/// `partition`. +pub fn partition_slice(slice: &[T]) -> (Vec, usize) { + use std::cmp::Ordering; + + // Sort attrib via an explicit permutation. + // The permutation then acts as a map from sorted back to unsorted attribs. + let mut permutation: Vec<_> = (0..slice.len()).collect(); + + // SAFETY: permutation indices are guaranteed to be below slice.len(); + permutation.sort_by(|&i, &j| unsafe { + slice + .get_unchecked(i) + .partial_cmp(slice.get_unchecked(j)) + .unwrap_or(Ordering::Less) + }); + + let mut permutation_iter = permutation + .iter() + .map(|&i| (i, unsafe { slice.get_unchecked(i) })) + .peekable(); + let mut partition = vec![0; slice.len()]; + let mut part_counter = 0; + + while let Some((pi, val)) = permutation_iter.next() { + unsafe { *partition.get_unchecked_mut(pi) = part_counter }; + if permutation_iter.peek().map_or(true, |next| val != next.1) { + part_counter += 1; + } + } + + (partition, part_counter) +} + +pub trait Partition +where + Self: Sized, +{ + /// Returns a partitioning by unique values of the given attribute. + /// + /// The returned vector consists of a ID assigned to each `Src` element identifying which + /// partition it belongs to along with the total number of partitions. + /// + /// The attribute values must have type `T`. + /// + /// If `attrib` doesn't exist at the `Src` topology, the returned vector will consist of all + /// zeros and the number of partitions will be 1. + fn partition_by_attrib>( + &self, + attrib: &str, + ) -> (Vec, usize); + + /// Returns a partitioning by unique values of the given attribute. + /// + /// The returned vector consists of a ID assigned to each `Src` element identifying which + /// partition it belongs to along with the total number of partitions. + /// + /// The attribute values must have type `T`. + /// + /// If `attrib` doesn't exist at the `Src` topology, the returned vector will consist of all + /// zeros and the number of partitions will be 1. + /// + /// This version of `partition_by_attrib` uses sorting to determine unique values instead of a + /// `HashMap`, and therefore only relies on `T` being `PartialOrd` but not `Eq` and `Hash`. 
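A hedged sketch of the free `partition` function defined above (module path assumed). The same call shape applies to any `Hash + Eq` label type:

```rust
use gut::algo::partition::partition;

// Assign each element the ID of its group of equal values and count the groups,
// e.g. [5, 5, 9, 5, 7] -> ([0, 0, 1, 0, 2], 3).
fn label_groups(labels: &[u32]) -> (Vec<usize>, usize) {
    partition(labels.iter())
}
```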
+ fn partition_by_attrib_by_sort>( + &self, + attrib: &str, + ) -> (Vec, usize); +} + +impl Partition for M +where + Self: Attrib + Sized, +{ + #[inline] + fn partition_by_attrib>( + &self, + attrib: &str, + ) -> (Vec, usize) { + if let Ok(attrib_iter) = self.attrib_iter::(attrib) { + partition(attrib_iter) + } else { + (vec![0; self.attrib_size::()], 1) + } + } + + #[inline] + fn partition_by_attrib_by_sort>( + &self, + attrib: &str, + ) -> (Vec, usize) { + match self.attrib_as_slice::(attrib) { + Ok(attrib) => partition_slice(attrib), + Err(_) => (vec![0; self.attrib_size::()], 1), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mesh::topology::*; + use crate::mesh::{PointCloud, TetMeshExt}; + + #[test] + fn tetmesh_partition_by_attrib() { + // The vertex positions are actually unimportant here. + let verts = vec![[0.0; 3]; 12]; + + // Topology is also unimportant for partitioning by attributes. + let indices = vec![[0, 1, 2, 3], [1, 2, 3, 4], [5, 6, 7, 8], [8, 9, 10, 11]]; + + let mut tetmesh = TetMeshExt::new(verts, indices); + + // Add an attribute that partitions out the first and last 2 vertices, which correspond to a whole + // tet. + tetmesh + .add_attrib_data::( + "attrib", + vec![1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1], + ) + .unwrap(); + + // The resulting mesh must be identical + assert_eq!( + tetmesh.partition_by_attrib::("attrib"), + (vec![0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], 2) + ); + } + + fn partition_data(size: usize, nbins: usize) -> Vec<[usize; 3]> { + use rand::distributions::Uniform; + use rand::prelude::*; + + let seed = [3u8; 32]; + let mut rng = StdRng::from_seed(seed); + + let index_bins = Uniform::from(0..nbins); + + let bins: Vec<[usize; 3]> = (0..nbins) + .map(|_| [rng.gen(), rng.gen(), rng.gen()]) + .collect(); + + (0..size) + .map(|_| bins[index_bins.sample(&mut rng)]) + .collect() + } + + #[test] + fn partition_by_attrib_complex() { + use rand::prelude::*; + let size = 10; + let seed = [3u8; 32]; + let mut rng = StdRng::from_seed(seed); + + // The vertex positions are actually unimportant here. + let verts: Vec<[f64; 3]> = (0..size) + .map(|_| [rng.gen(), rng.gen(), rng.gen()]) + .collect(); + let mut ptcld = PointCloud::new(verts); + + let data = partition_data(size, 10); + ptcld + .add_attrib_data::<_, VertexIndex>("attrib", data) + .unwrap(); + + let (_, num_parts1) = ptcld.partition_by_attrib::<[usize; 3], VertexIndex>("attrib"); + let (_, num_parts2) = + ptcld.partition_by_attrib_by_sort::<[usize; 3], VertexIndex>("attrib"); + assert_eq!(num_parts1, num_parts2); + } + + #[test] + fn partition_by_attrib_by_hash() { + use rand::prelude::*; + let size = 100_000; + let seed = [3u8; 32]; + let mut rng = StdRng::from_seed(seed); + + // The vertex positions are actually unimportant here. + let verts: Vec<[f64; 3]> = (0..size) + .map(|_| [rng.gen(), rng.gen(), rng.gen()]) + .collect(); + let mut ptcld = PointCloud::new(verts); + + let data = partition_data(size, 100); + ptcld + .add_attrib_data::<_, VertexIndex>("attrib", data) + .unwrap(); + + let now = std::time::Instant::now(); + let (_, num_parts) = ptcld.partition_by_attrib::<[usize; 3], VertexIndex>("attrib"); + eprintln!("hash time = {}", now.elapsed().as_millis()); + eprintln!("{}", num_parts); + } + + #[test] + fn partition_by_attrib_by_sort() { + use rand::prelude::*; + let size = 100_000; + let seed = [3u8; 32]; + let mut rng = StdRng::from_seed(seed); + + // The vertex positions are actually unimportant here. 
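As the module-level notes explain, floating-point attributes cannot take the hash-based path, which is what the sort-based variant is for. A sketch with an assumed "weight" vertex attribute and assumed crate paths:

```rust
use gut::algo::partition::Partition;
use gut::mesh::topology::VertexIndex;
use gut::mesh::PointCloud;

// f64 is PartialOrd but not Eq + Hash, so only the sort-based variant applies.
fn weight_groups(ptcld: &PointCloud<f64>) -> (Vec<usize>, usize) {
    ptcld.partition_by_attrib_by_sort::<f64, VertexIndex>("weight")
}
```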
+ let verts: Vec<[f64; 3]> = (0..size) + .map(|_| [rng.gen(), rng.gen(), rng.gen()]) + .collect(); + let mut ptcld = PointCloud::new(verts); + + let data = partition_data(size, 100); + ptcld + .add_attrib_data::<_, VertexIndex>("attrib", data) + .unwrap(); + + let now = std::time::Instant::now(); + let (_, num_parts) = ptcld.partition_by_attrib_by_sort::<[usize; 3], VertexIndex>("attrib"); + eprintln!("sort time = {}", now.elapsed().as_millis()); + eprintln!("{}", num_parts); + } +} diff --git a/src/algo/split.rs b/src/algo/split.rs new file mode 100644 index 0000000..cf19be0 --- /dev/null +++ b/src/algo/split.rs @@ -0,0 +1,1294 @@ +/*! + * Traits and algorithms for splitting generic meshes. + */ + +use super::connectivity::*; +use crate::index::*; +use crate::mesh::attrib::AttribValueCache; +use crate::mesh::topology::*; +use crate::mesh::{attrib::*, PolyMesh, TetMesh, TetMeshExt, TriMesh}; +use crate::Real; + +/// Helper to split attributes based on the given connectivity info. +fn split_attributes>>( + src_dict: &AttribDict, + num_components: usize, + connectivity: impl Iterator + Clone, + caches: &mut [AttribValueCache], +) -> Vec> { + split_attributes_with(src_dict, num_components, |attrib, num_components| { + let mut new_attribs = vec![attrib.duplicate_empty(); num_components]; + // Get an iterator of typeless values for this attribute. + match &attrib.data { + AttributeData::Direct(d) => { + connectivity + .clone() + .zip(d.data_ref().iter()) + .filter_map(|(comp_id, val_ref)| { + comp_id.into().map(|comp_id| (comp_id, val_ref)) + }) + .for_each(|(valid_idx, val_ref)| { + new_attribs[valid_idx] + .data + .direct_data_mut() + .unwrap() + .push_cloned(val_ref) + .unwrap(); + }); + } + AttributeData::Indirect(i) => { + for (valid_comp_id, val_ref) in + connectivity.clone().zip(i.data_ref().iter()).filter_map( + |(comp_id, val_ref)| comp_id.into().map(|comp_id| (comp_id, val_ref)), + ) + { + new_attribs[valid_comp_id] + .data + .indirect_data_mut() + .unwrap() + .push_cloned(val_ref, &mut caches[valid_comp_id]) + .unwrap(); + } + } + } + + new_attribs + }) +} + +/// Helper to split attributes using a given closure to transfer data from each source attribute to +/// the destination collection of individual empty component attributes. +fn split_attributes_with( + src_dict: &AttribDict, + num_components: usize, + mut split_attribute: impl FnMut(&Attribute, usize) -> Vec>, +) -> Vec> { + let mut comp_attributes = vec![AttribDict::new(); num_components]; + for (name, attrib) in src_dict.iter() { + // Split the given attribute into one attribute per component. + let new_attribs = split_attribute(&attrib, num_components); + assert_eq!(new_attribs.len(), num_components); + + // Save the new attributes to their corresponding attribute dictionaries. + for (attrib_dict, new_attrib) in comp_attributes.iter_mut().zip(new_attribs.into_iter()) { + attrib_dict.insert(name.to_string(), new_attrib); + } + } + comp_attributes +} + +/// Split the object at the `Src` topology (e.g. vertices) into multiple objects of the same type. +pub trait Split +where + Self: Sized, +{ + fn split(self, partition: &[usize], num_parts: usize) -> Vec; +} + +// TODO: Refactor the below implementations by extracting common patterns. This can also be +// combined with implementations conversions between meshes. 
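+// Usage sketch (illustrative only): a typical pipeline first computes a partition of the source
+// elements, e.g. with `Partition::partition_by_attrib`, and then hands it to `Split::split`.
+// The generic parameters and the "region" attribute name below are assumptions, not part of the
+// API above.
+//
+//     let (partition, num_parts) = mesh.partition_by_attrib::<i32, VertexIndex>("region");
+//     let pieces = mesh.split(&partition, num_parts);
+//     assert_eq!(pieces.len(), num_parts);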
+ +impl Split for TetMesh { + #[inline] + fn split(self, partition: &[usize], num_parts: usize) -> Vec { + self.split_by_vertex_partition(partition, num_parts).0 + } +} + +impl TetMesh { + /// Split the mesh by the given partition. + /// + /// Returns a vector of tetmeshes and the mapping from old cell index to new cell index. + fn split_by_vertex_partition( + self, + vertex_partition: &[usize], + num_parts: usize, + ) -> (Vec, Vec) { + // Fast path, when everything is connected. + if num_parts == 1 { + return (vec![self], vec![]); + } + + // Deconstruct the original mesh. + let TetMesh { + vertex_positions, + indices, + vertex_attributes, + cell_attributes, + cell_vertex_attributes, + cell_face_attributes, + .. + } = self; + + // Record where the new vertices end up (just the index within their respective + // components). The component ids themselves are recorded separately. + let mut new_vertex_indices = vec![Index::INVALID; vertex_positions.len()]; + + // Transfer vertex positions + let mut comp_vertex_positions = vec![Vec::new(); num_parts]; + for (vidx, &comp_id) in vertex_partition.iter().enumerate() { + new_vertex_indices[vidx] = comp_vertex_positions[comp_id].len().into(); + comp_vertex_positions[comp_id].push(vertex_positions[vidx]); + } + + // Validate that all vertices have been properly mapped. + debug_assert!(new_vertex_indices.iter().all(|&idx| idx.is_valid())); + let new_vertex_index_slice: &[usize] = bytemuck::cast_slice(new_vertex_indices.as_slice()); + + // Record cell connectivity. Note that if cells have vertices on different components, + // they will be ignored in the output and their connectivity will be "invalid". + let mut cell_connectivity = vec![Index::INVALID; indices.len()]; + let mut new_cell_indices = vec![Index::INVALID; indices.len()]; + + // Transfer cells + let mut comp_vertex_indices = vec![Vec::new(); num_parts]; + for (cell_idx, &cell) in indices.iter().enumerate() { + let comp_id = vertex_partition[cell[0]]; + if cell.iter().all(|&i| vertex_partition[i] == comp_id) { + let new_cell = [ + new_vertex_index_slice[cell[0]], + new_vertex_index_slice[cell[1]], + new_vertex_index_slice[cell[2]], + new_vertex_index_slice[cell[3]], + ]; + new_cell_indices[cell_idx] = comp_vertex_indices[comp_id].len().into(); + comp_vertex_indices[comp_id].push(new_cell); + cell_connectivity[cell_idx] = Index::from(comp_id); + } + } + + // Initialize attribute value caches for indirect attributes. + let mut comp_attribute_value_caches = vec![AttribValueCache::default(); num_parts]; + + // Transfer vertex attributes + let comp_vertex_attributes = split_attributes( + &vertex_attributes, + num_parts, + vertex_partition.iter().cloned(), + &mut comp_attribute_value_caches, + ); + + // Transfer cell attributes + let comp_cell_attributes = split_attributes( + &cell_attributes, + num_parts, + cell_connectivity.iter().cloned(), + &mut comp_attribute_value_caches, + ); + + // Transfer cell vertex attributes + let comp_cell_vertex_attributes = split_attributes( + &cell_vertex_attributes, + num_parts, + cell_connectivity + .iter() + .flat_map(|c| std::iter::repeat(c).take(4).cloned()), + &mut comp_attribute_value_caches, + ); + + // Transfer cell face attributes + let comp_cell_face_attributes = split_attributes( + &cell_face_attributes, + num_parts, + cell_connectivity + .iter() + .flat_map(|c| std::iter::repeat(c).take(4).cloned()), + &mut comp_attribute_value_caches, + ); + + // Generate a Vec of meshes. 
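+        // Each of the per-component vectors gathered above has exactly `num_parts` entries
+        // indexed by component id, so zipping them positionally pairs up the pieces that belong
+        // to the same output mesh.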
+ ( + comp_vertex_positions + .into_iter() + .zip(comp_vertex_indices.into_iter()) + .zip(comp_vertex_attributes.into_iter()) + .zip(comp_cell_attributes.into_iter()) + .zip(comp_cell_vertex_attributes.into_iter()) + .zip(comp_cell_face_attributes.into_iter()) + .zip(comp_attribute_value_caches.into_iter()) + .map(|((((((vp, vi), va), ca), cva), cfa), avc)| TetMesh { + vertex_positions: vp.into(), + indices: vi.into(), + vertex_attributes: va, + cell_attributes: ca, + cell_vertex_attributes: cva, + cell_face_attributes: cfa, + attribute_value_cache: avc, + }) + .collect(), + new_cell_indices, + ) + } +} + +impl Split for TetMeshExt { + fn split(self, vertex_partition: &[usize], num_parts: usize) -> Vec { + // Fast path, when everything is connected. + if num_parts == 1 { + return vec![self]; + } + + // Deconstruct the original mesh. + let TetMeshExt { + tetmesh, + cell_offsets, + cell_indices, + vertex_cell_attributes, + .. + } = self; + + let (mut comp_tetmesh, new_cell_indices) = + tetmesh.split_by_vertex_partition(vertex_partition, num_parts); + + // Transfer vertex to cell topology + let mut comp_cell_indices = vec![Vec::new(); num_parts]; + let mut comp_cell_offsets = vec![vec![0]; num_parts]; + for (vidx, &comp_id) in vertex_partition.iter().enumerate() { + let off = cell_offsets[vidx]; + for &cell_idx in &cell_indices[off..cell_offsets[vidx + 1]] { + new_cell_indices[cell_idx] + .if_valid(|new_cidx| comp_cell_indices[comp_id].push(new_cidx)); + } + comp_cell_offsets[comp_id].push(comp_cell_indices[comp_id].len()); + } + + // Transfer vertex-cell attributes + + // A helper closure to map a given attribute value to the corresponding component id if any + // `i` is the index of the original attribute value. + let transfer_comp_id = |vtx_idx: &mut usize, i| -> Option { + // Determine the vertex index here using offsets + let off = cell_offsets[*vtx_idx + 1]; + if i == off { + *vtx_idx += 1; + } + let comp_id = vertex_partition[*vtx_idx]; + let cell_idx = cell_indices[i]; + + // Add value for this vertex to the appropriate component data. + let idx: Index = new_cell_indices[cell_idx]; + idx.map(|_| comp_id).into() + }; + + let comp_vertex_cell_attributes = + split_attributes_with(&vertex_cell_attributes, num_parts, |attrib, num_parts| { + let mut new_attribs = vec![attrib.duplicate_empty(); num_parts]; + + let mut vtx_idx = 0; + + match &attrib.data { + AttributeData::Direct(direct) => { + for (i, val_ref) in direct.data_ref().iter().enumerate() { + if let Some(comp_id) = transfer_comp_id(&mut vtx_idx, i) { + new_attribs[comp_id] + .data + .direct_data_mut() + .unwrap() + .push_cloned(val_ref) + .unwrap(); + } + } + } + AttributeData::Indirect(indirect) => { + for (i, val_ref) in indirect.data_ref().iter().enumerate() { + if let Some(comp_id) = transfer_comp_id(&mut vtx_idx, i) { + new_attribs[comp_id] + .data + .indirect_data_mut() + .unwrap() + .push_cloned( + val_ref, + &mut comp_tetmesh[comp_id].attribute_value_cache, + ) + .unwrap(); + } + } + } + }; + + new_attribs + }); + + // Generate a Vec of meshes. + comp_tetmesh + .into_iter() + .zip(comp_cell_indices.into_iter()) + .zip(comp_cell_offsets.into_iter()) + .zip(comp_vertex_cell_attributes.into_iter()) + .map(|(((tm, ci), co), vca)| TetMeshExt { + tetmesh: tm, + cell_indices: ci, + cell_offsets: co, + vertex_cell_attributes: vca, + }) + .collect() + } +} + +impl Split for PolyMesh { + fn split(self, vertex_partition: &[usize], num_parts: usize) -> Vec { + // Fast path, when everything is connected. 
+ if num_parts == 1 { + return vec![self]; + } + + // Record where the new vertices end up (just the index within their respective + // components). The component ids themselves are recorded separately. + let mut new_vertex_indices = vec![Index::INVALID; self.vertex_positions.len()]; + + // Transfer vertex positions + let mut comp_vertex_positions = vec![Vec::new(); num_parts]; + for (vidx, &comp_id) in vertex_partition.iter().enumerate() { + new_vertex_indices[vidx] = comp_vertex_positions[comp_id].len().into(); + comp_vertex_positions[comp_id].push(self.vertex_positions[vidx]); + } + + // Validate that all vertices have been properly mapped. + debug_assert!(new_vertex_indices.iter().all(|&idx| idx.is_valid())); + let new_vertex_index_slice: &[usize] = bytemuck::cast_slice(new_vertex_indices.as_slice()); + + // Record face connectivity. Note that if faces have vertices on different components, + // they will be ignored in the output and their connectivity will be "invalid". + let mut face_connectivity = vec![Index::INVALID; self.num_faces()]; + + // Transfer faces + let mut comp_indices = vec![Vec::new(); num_parts]; + let mut comp_offsets = vec![vec![0]; num_parts]; + for (face, face_comp_id) in self.face_iter().zip(face_connectivity.iter_mut()) { + let comp_id = vertex_partition[face[0]]; + if face.iter().all(|&i| vertex_partition[i] == comp_id) { + let new_face_vtx_iter = face.iter().map(|&vi| new_vertex_index_slice[vi]); + comp_indices[comp_id].extend(new_face_vtx_iter); + comp_offsets[comp_id].push(comp_indices[comp_id].len()); + *face_comp_id = Index::from(comp_id); + } + } + + // Initialize attribute value caches for indirect attributes. + let mut comp_attribute_value_caches = + vec![AttribValueCache::with_hasher(Default::default()); num_parts]; + + // Transfer vertex attributes + let comp_vertex_attributes = split_attributes( + &self.vertex_attributes, + num_parts, + vertex_partition.iter().cloned(), + &mut comp_attribute_value_caches, + ); + + // Transfer face attributes + let comp_face_attributes = split_attributes( + &self.face_attributes, + num_parts, + face_connectivity.iter().cloned(), + &mut comp_attribute_value_caches, + ); + + // Transfer face vertex attributes + let comp_face_vertex_attributes = split_attributes( + &self.face_vertex_attributes, + num_parts, + face_connectivity.iter().enumerate().flat_map(|(fi, c)| { + std::iter::repeat(c) + .take(self.num_vertices_at_face(fi)) + .cloned() + }), + &mut comp_attribute_value_caches, + ); + + // Transfer face edge attributes + let comp_face_edge_attributes = split_attributes( + &self.face_edge_attributes, + num_parts, + face_connectivity.iter().enumerate().flat_map(|(fi, c)| { + std::iter::repeat(c) + .take(self.num_edges_at_face(fi)) + .cloned() + }), + &mut comp_attribute_value_caches, + ); + + // Generate a Vec of meshes. 
+ comp_vertex_positions + .into_iter() + .zip(comp_indices.into_iter()) + .zip(comp_offsets.into_iter()) + .zip(comp_vertex_attributes.into_iter()) + .zip(comp_face_attributes.into_iter()) + .zip(comp_face_vertex_attributes.into_iter()) + .zip(comp_face_edge_attributes.into_iter()) + .zip(comp_attribute_value_caches.into_iter()) + .map(|(((((((vp, i), o), va), fa), fva), fea), avc)| PolyMesh { + vertex_positions: vp.into(), + indices: i, + offsets: o, + vertex_attributes: va, + face_attributes: fa, + face_vertex_attributes: fva, + face_edge_attributes: fea, + attribute_value_cache: avc, + }) + .collect() + } +} + +pub trait SplitIntoConnectedComponents +where + Src: ElementIndex, + Via: ElementIndex, + Self: Sized, +{ + fn split_into_connected_components(self) -> Vec; +} + +impl SplitIntoConnectedComponents for TetMesh { + fn split_into_connected_components(self) -> Vec { + let tetmesh_ext = TetMeshExt::from(self); + tetmesh_ext + .split_into_connected_components() + .into_iter() + .map(TetMesh::from) + .collect() + } +} + +impl SplitIntoConnectedComponents for TetMeshExt { + fn split_into_connected_components(self) -> Vec { + let (vertex_connectivity, num_components) = self.connectivity(); + self.split(&vertex_connectivity, num_components) + } +} + +impl SplitIntoConnectedComponents for PolyMesh { + fn split_into_connected_components(self) -> Vec { + // First we partition the vertices. + let (vertex_connectivity, num_components) = + Connectivity::::connectivity(&self); + self.split(&vertex_connectivity, num_components) + } +} + +// TODO: Generalize split_vertices_by_face_vertex_attrib between the meshes. +// This will involve converging on how to represent/access indices for rewiring meshes +// through a trait. + +impl TriMesh { + /// Split vertices by a given face-vertex attribute. + /// + /// If a pair of face-vertices have different values for the same vertex, then they will be + /// split into distinct vertices. New vertex positions are appended at the end of the vertex + /// position array. + /// + /// If the given attribute doesn't exist, then nothing is changed. + pub fn split_vertices_by_face_vertex_attrib(&mut self, attrib_name: &str) { + // For each vertex, topo contains a set of face-vertex indices. + let (fv_indices, fv_offsets) = self.reverse_source_topo(); + + // This function doesn't affect the number of faces or face-vertex topology. + let TriMesh { + vertex_positions, + indices, + vertex_attributes, + face_vertex_attributes, + // Other attributes remain unchanged. + .. + } = self; + + if let Some(attrib) = face_vertex_attributes.get(attrib_name) { + let attrib_values = attrib.data_slice(); + + // The partitioning of unique values in the neighbourhood of one vertex. + let mut local_partition = Vec::new(); + let mut unique_values = Vec::new(); + + // Remember which vertices were newly created so we can transfer vertex attributes. 
+ let mut new_vertices = Vec::new(); + + for vtx_idx in 0..vertex_positions.len() { + local_partition.clear(); + unique_values.clear(); + + for face_vertex in + (fv_offsets[vtx_idx]..fv_offsets[vtx_idx + 1]).map(|i| fv_indices[i]) + { + let val = attrib_values.get(face_vertex); + if let Some(idx) = unique_values.iter().position(|uv| uv == &val) { + local_partition.push((idx, face_vertex)); + } else { + local_partition.push((unique_values.len(), face_vertex)); + unique_values.push(val); + } + } + + local_partition.sort_by_key(|a| a.0); + let mut partition_iter = local_partition.iter(); + if let Some(mut prev) = partition_iter.next() { + // First element will have a unique vertex by definition. + for next in partition_iter { + if next.0 != prev.0 { + // Found a different face-vertex attribute. Split the vertex. + // Rewire appropriate vertex index to the new vertex. + let pos = vertex_positions[vtx_idx]; + indices[next.1 / 3][next.1 % 3] = vertex_positions.len(); + vertex_positions.as_mut_vec().push(pos); + new_vertices.push(vtx_idx); + prev = next; + } else { + // Same bucket but new vertices may have been created, so we must still + // rewire to the last newly created vertex. + indices[next.1 / 3][next.1 % 3] = indices[prev.1 / 3][prev.1 % 3]; + } + } + } + } + + // Duplicate vertex attributes for newly created vertices. + for (_, attrib) in vertex_attributes.iter_mut() { + let num = attrib.len(); + attrib.extend_by(new_vertices.len()); + + // Split the extended attribute into original byte slice and and newly extended + // uninitialized slice. + let mut data_slice = attrib.data_mut_slice(); + let (old, mut new) = data_slice.split_at(num); + for (&vtx_idx, mut new_val) in new_vertices.iter().zip(new.iter()) { + // Initialize the extended part. + //let bytes = &old[vtx_idx * element_size..(vtx_idx + 1) * element_size]; + //new[i * element_size..(i + 1) * element_size].copy_from_slice(bytes); + new_val.clone_from_other(old.get(vtx_idx)).unwrap(); + } + } + } + } +} + +impl PolyMesh { + /// Split vertices by a given face-vertex attribute. + /// + /// If a pair of face-vertices have different values for the same vertex, then they will be + /// split into distinct vertices. New vertex positions are appended at the end of the vertex + /// position array. + /// + /// If the given attribute doesn't exist, then nothing is changed. + pub fn split_vertices_by_face_vertex_attrib( + &mut self, + attrib: &str, + ) { + // For each vertex, topo contains a set of face-vertex indices. + let (fv_indices, fv_offsets) = self.reverse_source_topo(); + + // This function doesn't affect the number of faces or face-vertex topology. + let PolyMesh { + vertex_positions, + indices, + vertex_attributes, + face_vertex_attributes, + // Other attributes remain unchanged. + .. + } = self; + + if let Some(attrib) = face_vertex_attributes + .get(attrib) + .and_then(|a| a.as_slice::().ok()) + { + // The partitioning of unique values in the neighbourhood of one vertex. + let mut local_partition = Vec::new(); + + // Remember which vertices were newly created so we can transfer vertex attributes. 
+ let mut new_vertices = Vec::new(); + + for vtx_idx in 0..vertex_positions.len() { + local_partition.clear(); + for face_vertex in + (fv_offsets[vtx_idx]..fv_offsets[vtx_idx + 1]).map(|i| fv_indices[i]) + { + local_partition.push((face_vertex, &attrib[face_vertex])); + } + local_partition + .sort_by(|a, b| a.1.partial_cmp(b.1).unwrap_or(std::cmp::Ordering::Less)); + let mut partition_iter = local_partition.iter(); + if let Some(mut prev) = partition_iter.next() { + // First element will have a unique vertex by definition. + for next in partition_iter { + if next.1 != prev.1 { + // Found a different face-vertex attribute. Split the vertex. + // Rewire appropriate vertex index to the new vertex. + let pos = vertex_positions[vtx_idx]; + indices[next.0] = vertex_positions.len(); + vertex_positions.as_mut_vec().push(pos); + new_vertices.push(vtx_idx); + prev = next; + } else { + // Same bucket but new vertices may have been created, so we must still + // rewire to the last newly created vertex. + indices[next.0] = indices[prev.0]; + } + } + } + } + + // Duplicate vertex attributes for newly created vertices. + for (_, attrib) in vertex_attributes.iter_mut() { + let num = attrib.len(); + attrib.extend_by(new_vertices.len()); + + // Split the extended attribute into original byte slice and newly extended + // uninitialized slice. + let mut data_slice = attrib.data_mut_slice(); + let (old, mut new) = data_slice.split_at(num); + + for (&vtx_idx, mut new_val) in new_vertices.iter().zip(new.iter()) { + // Initialize the extended part. + //let bytes = &old[vtx_idx * element_size..(vtx_idx + 1) * element_size]; + //new[i * element_size..(i + 1) * element_size].copy_from_slice(bytes); + new_val.clone_from_other(old.get(vtx_idx)).unwrap(); + } + } + } + } +} + +impl SplitIntoConnectedComponents for TriMesh { + fn split_into_connected_components(self) -> Vec { + // First we partition the vertices. + let (vertex_connectivity, num_components) = + Connectivity::::connectivity(&self); + + // Fast path, when everything is connected. + if num_components == 1 { + return vec![self]; + } + + // Record where the new vertices end up (just the index within their respective + // components). The component ids themselves are recorded separately. + let mut new_vertex_indices = vec![Index::INVALID; self.vertex_positions.len()]; + + // Transfer vertex positions + let mut comp_vertex_positions = vec![Vec::new(); num_components]; + for (vidx, &comp_id) in vertex_connectivity.iter().enumerate() { + new_vertex_indices[vidx] = comp_vertex_positions[comp_id].len().into(); + comp_vertex_positions[comp_id].push(self.vertex_positions[vidx]); + } + + // Validate that all vertices have been properly mapped. + debug_assert!(new_vertex_indices.iter().all(|&idx| idx.is_valid())); + let new_vertex_index_slice: &[usize] = bytemuck::cast_slice(new_vertex_indices.as_slice()); + + // Record face connectivity. Note that if faces have vertices on different components, + // they will be ignored in the output and their connectivity will be "invalid". 
+ let mut face_connectivity = vec![Index::INVALID; self.num_faces()]; + + // Transfer faces + let mut comp_vertex_indices = vec![Vec::new(); num_components]; + for (face, face_comp_id) in self.face_iter().zip(face_connectivity.iter_mut()) { + let comp_id = vertex_connectivity[face[0]]; + if face.iter().all(|&i| vertex_connectivity[i] == comp_id) { + let new_face = [ + new_vertex_index_slice[face[0]], + new_vertex_index_slice[face[1]], + new_vertex_index_slice[face[2]], + ]; + comp_vertex_indices[comp_id].push(new_face); + *face_comp_id = Index::from(comp_id); + } + } + + // Initialize attribute value caches for indirect attributes. + let mut comp_attribute_value_caches = + vec![AttribValueCache::with_hasher(Default::default()); num_components]; + + // Transfer vertex attributes + let comp_vertex_attributes = split_attributes( + &self.vertex_attributes, + num_components, + vertex_connectivity.iter().cloned(), + &mut comp_attribute_value_caches, + ); + + // Transfer face attributes + let comp_face_attributes = split_attributes( + &self.face_attributes, + num_components, + face_connectivity.iter().cloned(), + &mut comp_attribute_value_caches, + ); + + // Transfer face vertex attributes + let comp_face_vertex_attributes = split_attributes( + &self.face_vertex_attributes, + num_components, + face_connectivity + .iter() + .flat_map(|f| std::iter::repeat(f).take(3).cloned()), + &mut comp_attribute_value_caches, + ); + + // Transfer face edge attributes + let comp_face_edge_attributes = split_attributes( + &self.face_edge_attributes, + num_components, + face_connectivity + .iter() + .flat_map(|f| std::iter::repeat(f).take(3).cloned()), + &mut comp_attribute_value_caches, + ); + + // Generate a Vec of meshes. + comp_vertex_positions + .into_iter() + .zip(comp_vertex_indices.into_iter()) + .zip(comp_vertex_attributes.into_iter()) + .zip(comp_face_attributes.into_iter()) + .zip(comp_face_vertex_attributes.into_iter()) + .zip(comp_face_edge_attributes.into_iter()) + .map(|(((((vp, i), va), fa), fva), fea)| TriMesh { + vertex_positions: vp.into(), + indices: i.into(), + vertex_attributes: va, + face_attributes: fa, + face_vertex_attributes: fva, + face_edge_attributes: fea, + }) + .collect() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::algo::test_utils::*; + use crate::mesh::{TetMeshExt, TriMesh}; + + fn build_tetmesh_sample() -> (TetMeshExt, TetMeshExt, TetMeshExt) { + let verts = vec![ + [0.0, 0.0, 0.0], + [0.0, 0.0, 1.0], + [0.0, 1.0, 0.0], + [0.0, 1.0, 1.0], + [1.0, 0.0, 0.0], + [1.0, 0.0, 1.0], + [1.0, 1.0, 0.0], + [1.0, 1.0, 1.0], + [0.5, 0.0, 0.5], + ]; + + // One connected component consisting of two tets connected at a face, and another + // consisting of a single tet. + let indices = vec![[7, 6, 2, 4], [5, 7, 2, 4], [0, 1, 3, 8]]; + + let tetmesh = TetMeshExt::new(verts, indices); + let comp1 = TetMeshExt::new( + vec![ + [0.0, 0.0, 0.0], + [0.0, 0.0, 1.0], + [0.0, 1.0, 1.0], + [0.5, 0.0, 0.5], + ], + vec![[0, 1, 2, 3]], + ); + let comp2 = TetMeshExt::new( + vec![ + [0.0, 1.0, 0.0], + [1.0, 0.0, 0.0], + [1.0, 0.0, 1.0], + [1.0, 1.0, 0.0], + [1.0, 1.0, 1.0], + ], + vec![[4, 3, 0, 1], [2, 4, 0, 1]], + ); + (tetmesh, comp1, comp2) + } + + #[test] + fn tetmesh_split() { + let (tetmesh, comp1, comp2) = build_tetmesh_sample(); + + // First lets verify the vertex partitioning. 
+ assert_eq!(tetmesh.connectivity(), (vec![0, 0, 1, 0, 1, 1, 1, 1, 0], 2)); + + let res = tetmesh.split_into_connected_components(); + assert_eq!(res, vec![comp1, comp2]); + } + + #[test] + fn tetmesh_split_with_vertex_attributes() { + let (mut tetmesh, mut comp1, mut comp2) = build_tetmesh_sample(); + tetmesh + .add_attrib_data::("v", (0..tetmesh.num_vertices()).collect()) + .unwrap(); + comp1 + .add_attrib_data::("v", vec![0, 1, 3, 8]) + .unwrap(); + comp2 + .add_attrib_data::("v", vec![2, 4, 5, 6, 7]) + .unwrap(); + let res = tetmesh.split_into_connected_components(); + assert_eq!(res, vec![comp1, comp2]); + } + + #[test] + fn tetmesh_split_with_cell_attributes() { + let (mut tetmesh, mut comp1, mut comp2) = build_tetmesh_sample(); + tetmesh + .add_attrib_data::("c", (0..tetmesh.num_cells()).collect()) + .unwrap(); + comp1 + .add_attrib_data::("c", vec![2]) + .unwrap(); + comp2 + .add_attrib_data::("c", vec![0, 1]) + .unwrap(); + let res = tetmesh.split_into_connected_components(); + assert_eq!(res, vec![comp1, comp2]); + } + + #[test] + fn tetmesh_split_with_cell_vertex_attributes() { + let (mut tetmesh, mut comp1, mut comp2) = build_tetmesh_sample(); + tetmesh + .add_attrib_data::("cv", (0..tetmesh.num_cells() * 4).collect()) + .unwrap(); + + comp1 + .add_attrib_data::("cv", vec![8, 9, 10, 11]) + .unwrap(); + comp2 + .add_attrib_data::("cv", vec![0, 1, 2, 3, 4, 5, 6, 7]) + .unwrap(); + let res = tetmesh.split_into_connected_components(); + assert_eq!(res, vec![comp1, comp2]); + } + + #[test] + fn tetmesh_split_with_cell_face_attributes() { + let (mut tetmesh, mut comp1, mut comp2) = build_tetmesh_sample(); + tetmesh + .add_attrib_data::("cf", (0..tetmesh.num_cells() * 4).collect()) + .unwrap(); + + comp1 + .add_attrib_data::("cf", vec![8, 9, 10, 11]) + .unwrap(); + comp2 + .add_attrib_data::("cf", vec![0, 1, 2, 3, 4, 5, 6, 7]) + .unwrap(); + let res = tetmesh.split_into_connected_components(); + assert_eq!(res, vec![comp1, comp2]); + } + + #[test] + fn tetmesh_split_with_vertex_cell_attributes() { + let (mut tetmesh, mut comp1, mut comp2) = build_tetmesh_sample(); + tetmesh + .add_attrib_data::("vc", (0..tetmesh.num_cells() * 4).collect()) + .unwrap(); + + comp1 + .add_attrib_data::("vc", vec![0, 1, 4, 11]) + .unwrap(); + comp2 + .add_attrib_data::("vc", vec![2, 3, 5, 6, 7, 8, 9, 10]) + .unwrap(); + let res = tetmesh.split_into_connected_components(); + assert_eq!(res, vec![comp1, comp2]); + } + + #[test] + fn tetmesh_split_with_all_attributes() { + let (mut tetmesh, mut comp1, mut comp2) = build_tetmesh_sample(); + tetmesh + .add_attrib_data::("v", (0..tetmesh.num_vertices()).collect()) + .unwrap(); + tetmesh + .add_attrib_data::("c", (0..tetmesh.num_cells()).collect()) + .unwrap(); + tetmesh + .add_attrib_data::("cv", (0..tetmesh.num_cells() * 4).collect()) + .unwrap(); + tetmesh + .add_attrib_data::("cf", (0..tetmesh.num_cells() * 4).collect()) + .unwrap(); + tetmesh + .add_attrib_data::("vc", (0..tetmesh.num_cells() * 4).collect()) + .unwrap(); + comp1 + .add_attrib_data::("v", vec![0, 1, 3, 8]) + .unwrap(); + comp1 + .add_attrib_data::("c", vec![2]) + .unwrap(); + comp1 + .add_attrib_data::("cv", vec![8, 9, 10, 11]) + .unwrap(); + comp1 + .add_attrib_data::("cf", vec![8, 9, 10, 11]) + .unwrap(); + comp1 + .add_attrib_data::("vc", vec![0, 1, 4, 11]) + .unwrap(); + + comp2 + .add_attrib_data::("v", vec![2, 4, 5, 6, 7]) + .unwrap(); + comp2 + .add_attrib_data::("c", vec![0, 1]) + .unwrap(); + comp2 + .add_attrib_data::("cv", vec![0, 1, 2, 3, 4, 5, 6, 7]) + .unwrap(); + comp2 + 
.add_attrib_data::("cf", vec![0, 1, 2, 3, 4, 5, 6, 7]) + .unwrap(); + comp2 + .add_attrib_data::("vc", vec![2, 3, 5, 6, 7, 8, 9, 10]) + .unwrap(); + let res = tetmesh.split_into_connected_components(); + assert_eq!(res, vec![comp1, comp2]); + } + + #[test] + fn polymesh_split() { + let (mesh, comp1, comp2) = build_polymesh_sample(); + + // First lets verify the vertex partitioning. + assert_eq!( + mesh.vertex_connectivity(), + (vec![0, 0, 0, 0, 1, 1, 1, 1], 2) + ); + + let res = mesh.split_into_connected_components(); + assert_eq!(res, vec![comp1, comp2]); + } + + #[test] + fn polymesh_split_with_attributes() { + let mut sample = build_polymesh_sample(); + add_attribs_to_polymeshes(&mut sample); + let (mesh, comp1, comp2) = sample; + let res = mesh.split_into_connected_components(); + assert_eq!(res, vec![comp1, comp2]); + } + + #[test] + fn polymesh_split_vertices_by_face_vertex_attrib() { + let verts = vec![ + [0.0, 0.0, 0.0], + [0.0, 0.0, 1.0], + [0.0, 1.0, 0.0], + [0.0, 1.0, 1.0], + [1.0, 0.0, 0.0], + [1.0, 0.0, 1.0], + [1.0, 1.0, 0.0], + [1.0, 1.0, 1.0], + ]; + + // Two triangles connected at an edge, a quad, and two triangles connecting these + // inbetweeen. + let indices = vec![ + 3, 0, 1, 2, 3, 2, 1, 3, 4, 4, 5, 7, 6, 3, 0, 1, 4, 3, 1, 5, 4, + ]; + + let mut polymesh = PolyMesh::new(verts, &indices); + + // Add an arbitrary vertex attribute + polymesh + .add_attrib_data::("v", (0..polymesh.num_vertices()).collect()) + .unwrap(); + + polymesh + .add_attrib_data::( + "no_split", + vec![0, 1, 2, 2, 1, 3, 4, 5, 7, 6, 0, 1, 4, 1, 5, 4], + ) + .unwrap(); + + let mut no_split = polymesh.clone(); + no_split.split_vertices_by_face_vertex_attrib::("no_split"); + assert_eq!(no_split, polymesh); + + polymesh + .add_attrib_data::( + "vertex1_split", + vec![0, 10, 2, 2, 11, 3, 4, 5, 7, 6, 0, 12, 4, 13, 5, 4], + ) + .unwrap(); + + let mut vertex1_split = polymesh.clone(); + vertex1_split.split_vertices_by_face_vertex_attrib::("vertex1_split"); + assert_eq!(vertex1_split.num_vertices(), polymesh.num_vertices() + 3); + assert_eq!( + vertex1_split.num_face_vertices(), + polymesh.num_face_vertices() + ); + assert_eq!( + vertex1_split.attrib::("vertex1_split"), + polymesh.attrib::("vertex1_split") + ); + assert_eq!( + vertex1_split.attrib_as_slice::("v"), + Ok(&[0, 1, 2, 3, 4, 5, 6, 7, 1, 1, 1][..]) + ); + + polymesh + .add_attrib_data::( + "full_split", + (0..polymesh.num_face_vertices()).collect(), + ) + .unwrap(); + + let mut full_split = polymesh.clone(); + full_split.split_vertices_by_face_vertex_attrib::("full_split"); + assert_eq!(full_split.num_vertices(), polymesh.num_face_vertices()); + assert_eq!(full_split.num_face_vertices(), polymesh.num_face_vertices()); + assert_eq!( + full_split.attrib_as_slice::("v"), + Ok(&[0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 1, 1, 2, 4, 4, 5][..]) + ); + } + + #[test] + fn trimesh_split_vertices_by_face_vertex_attrib() { + let verts = vec![ + [0.0, 0.0, 0.0], + [0.0, 0.0, 1.0], + [0.0, 1.0, 0.0], + [0.0, 1.0, 1.0], + [1.0, 0.0, 0.0], + [1.0, 0.0, 1.0], + [1.0, 1.0, 0.0], + [1.0, 1.0, 1.0], + ]; + + let indices = vec![ + [0, 1, 2], + [2, 1, 3], + [4, 5, 6], + [6, 5, 7], + [0, 1, 4], + [1, 5, 4], + ]; + + let mut mesh = TriMesh::new(verts, indices); + + // Add an arbitrary vertex attribute + mesh.add_attrib_data::("v", (0..mesh.num_vertices()).collect()) + .unwrap(); + + mesh.add_attrib_data::( + "no_split", + vec![0, 1, 2, 2, 1, 3, 4, 5, 6, 6, 5, 7, 0, 1, 4, 1, 5, 4], + ) + .unwrap(); + + let mut no_split = mesh.clone(); + 
no_split.split_vertices_by_face_vertex_attrib("no_split"); + assert_eq!(no_split, mesh); + + mesh.add_attrib_data::( + "vertex1_split", + vec![ + 0.0f32, + 10.0 / 3.0, + 2.0, + 2.0, + 11.0, + 3.0, + 4.0, + 5.0, + 6.0 / 4.0, + 6.0 / 4.0, + 5.0, + 7.0, + 0.0, + 12.0, + 4.0, + 13.0, + 5.0, + 4.0, + ], + ) + .unwrap(); + + let mut vertex1_split = mesh.clone(); + vertex1_split.split_vertices_by_face_vertex_attrib("vertex1_split"); + assert_eq!(vertex1_split.num_vertices(), mesh.num_vertices() + 3); + assert_eq!(vertex1_split.num_face_vertices(), mesh.num_face_vertices()); + assert_eq!( + vertex1_split.attrib::("vertex1_split"), + mesh.attrib::("vertex1_split") + ); + assert_eq!( + vertex1_split.attrib_as_slice::("v"), + Ok(&[0, 1, 2, 3, 4, 5, 6, 7, 1, 1, 1][..]) + ); + + mesh.add_attrib_data::( + "full_split", + (0..mesh.num_face_vertices()).collect(), + ) + .unwrap(); + + let mut full_split = mesh.clone(); + full_split.split_vertices_by_face_vertex_attrib("full_split"); + assert_eq!(full_split.num_vertices(), mesh.num_face_vertices()); + assert_eq!(full_split.num_face_vertices(), mesh.num_face_vertices()); + assert_eq!( + full_split.attrib_as_slice::("v"), + Ok(&[0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 1, 1, 2, 4, 4, 5, 5, 6][..]) + ); + } + + /// This is a more complex regression test for splitting vertices. + #[test] + fn trimesh_split_vertices_by_face_vertex_attrib_and_promote_complex() { + let verts = vec![ + [-0.520833, -0.5, 0.5], + [-0.520833, 0.5, 0.5], + [-0.520833, -0.5, -0.5], + [-0.520833, 0.5, -0.5], + [0.520833, -0.5, 0.5], + [0.520833, 0.5, 0.5], + [0.520833, -0.5, -0.5], + [0.520833, 0.5, -0.5], + ]; + + #[rustfmt::skip] + let indices = vec![ + [0, 1, 3], + [4, 5, 7], + [6, 7, 2], + [5, 4, 1], + [5, 0, 2], + [1, 4, 6], + [6, 3, 1], + [2, 7, 5], + [1, 0, 5], + [2, 3, 6], + [7, 6, 4], + [3, 2, 0], + ]; + + let mut mesh = TriMesh::new(verts, indices); + + // We split the vertices according to the following attribute and then test that + // there are no more collisions. + // This tests both functions: split_vertices_by_face_vertex_attrib and attrib_promote. + + mesh.add_attrib_data::<[f32; 2], FaceVertexIndex>( + "uv", + vec![ + [0.630043, 0.00107052], + [0.370129, 0.00107052], + [0.370129, 0.250588], + [0.370129, 0.749623], + [0.630043, 0.749623], + [0.630043, 0.500105], + [0.370129, 0.500105], + [0.630043, 0.500105], + [0.630043, 0.250588], + [0.630043, 0.749623], + [0.370129, 0.749623], + [0.370129, 0.99914], + [0.879561, 0.500105], + [0.879561, 0.250588], + [0.630043, 0.250588], + [0.120612, 0.250588], + [0.120612, 0.500105], + [0.370129, 0.500105], + [0.370129, 0.500105], + [0.370129, 0.250588], + [0.120612, 0.250588], + [0.630043, 0.250588], + [0.630043, 0.500105], + [0.879561, 0.500105], + [0.370129, 0.99914], + [0.630043, 0.99914], + [0.630043, 0.749623], + [0.630043, 0.250588], + [0.370129, 0.250588], + [0.370129, 0.500105], + [0.630043, 0.500105], + [0.370129, 0.500105], + [0.370129, 0.749623], + [0.370129, 0.250588], + [0.630043, 0.250588], + [0.630043, 0.00107052], + ], + ) + .unwrap(); + + mesh.split_vertices_by_face_vertex_attrib("uv"); + + mesh.attrib_promote::<[f32; 2], _>("uv", |a, b| assert_eq!(a, b)) + .unwrap(); + } + + /// The same test for polymeshes. 
+ #[test] + fn polymesh_split_vertices_by_face_vertex_attrib_and_promote_complex() { + let verts = vec![ + [-0.520833, -0.5, 0.5], + [-0.520833, 0.5, 0.5], + [-0.520833, -0.5, -0.5], + [-0.520833, 0.5, -0.5], + [0.520833, -0.5, 0.5], + [0.520833, 0.5, 0.5], + [0.520833, -0.5, -0.5], + [0.520833, 0.5, -0.5], + ]; + + #[rustfmt::skip] + let indices = vec![ + [0, 1, 3], + [2, 4, 5], + [7, 6, 6], + [7, 2, 3], + [5, 4, 1], + [0, 5, 0], + [2, 7, 1], + [4, 6, 3], + ]; + + let mut mesh = TriMesh::new(verts, indices); + + // We split the vertices according to the following attribute and then test that + // there are no more collisions. + // This tests both functions: split_vertices_by_face_vertex_attrib and attrib_promote. + + mesh.add_attrib_data::<[f32; 2], FaceVertexIndex>( + "uv", + vec![ + [0.630043, 0.00107052], + [0.370129, 0.00107052], + [0.370129, 0.250588], + [0.630043, 0.250588], + [0.370129, 0.749623], + [0.630043, 0.749623], + [0.630043, 0.500105], + [0.370129, 0.500105], + [0.370129, 0.500105], + [0.630043, 0.500105], + [0.630043, 0.250588], + [0.370129, 0.250588], + [0.630043, 0.749623], + [0.370129, 0.749623], + [0.370129, 0.99914], + [0.630043, 0.99914], + [0.879561, 0.500105], + [0.879561, 0.250588], + [0.630043, 0.250588], + [0.630043, 0.500105], + [0.120612, 0.250588], + [0.120612, 0.500105], + [0.370129, 0.500105], + [0.370129, 0.250588], + ], + ) + .unwrap(); + + mesh.split_vertices_by_face_vertex_attrib("uv"); + + mesh.attrib_promote::<[f32; 2], _>("uv", |a, b| assert_eq!(a, b)) + .unwrap(); + } +} diff --git a/src/index.rs b/src/index.rs index c5a750d..fb981ab 100644 --- a/src/index.rs +++ b/src/index.rs @@ -16,8 +16,13 @@ pub use self::checked::*; /// debug builds. #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[repr(transparent)] pub struct Index(usize); +// SAFETY: Index is transparent and usize is Pod and Zeroable. +unsafe impl bytemuck::Pod for Index {} +unsafe impl bytemuck::Zeroable for Index {} + impl Index { /// Invalid index instance. pub const INVALID: Index = Index(std::usize::MAX); diff --git a/src/lib.rs b/src/lib.rs index 7822c64..6df69fd 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -49,13 +49,16 @@ impl Real for T where // Temporary utils to wrap unsafe reinterpret casts into a safe API. pub(crate) fn index_vec_into_usize(vec: Vec) -> Vec { + // SAFETY: Index is transparent and has exactly the same size and data representation as usize. unsafe { reinterpret::reinterpret_vec(vec) } } -pub(crate) fn into_flat_vec3(vec: Vec<[usize; 3]>) -> Vec { +pub(crate) fn into_flat_vec3(mut vec: Vec<[usize; 3]>) -> Vec { + // SAFETY: A continguous vec of [usize; 3] can be reinterpreted as a contiguous vec of usize unsafe { reinterpret::reinterpret_vec(vec) } } -pub(crate) fn into_flat_vec4(vec: Vec<[usize; 4]>) -> Vec { +pub(crate) fn into_flat_vec4(mut vec: Vec<[usize; 4]>) -> Vec { + // SAFETY: A continguous vec of [usize; 4] can be reinterpreted as a contiguous vec of usize unsafe { reinterpret::reinterpret_vec(vec) } } diff --git a/src/mesh/attrib.rs b/src/mesh/attrib.rs index 2bd67b4..0dcf0b8 100644 --- a/src/mesh/attrib.rs +++ b/src/mesh/attrib.rs @@ -21,7 +21,8 @@ //! implementations of `TetMesh` and `TriMesh` for more details. 
use std::any::{Any, TypeId}; -use std::collections::HashMap; +//use std::collections::HashMap; +use hashbrown::HashMap; use std::slice; use dync::{traits::HasDrop, VecDrop}; @@ -33,7 +34,8 @@ mod bytes; mod index; // Expose the entry API for our AttribDict type. -pub use std::collections::hash_map::Entry; +//pub use std::collections::hash_map::Entry; +pub use hashbrown::hash_map::Entry; pub use attribute::*; pub use bytes::*; @@ -513,6 +515,9 @@ where } /// Clone attribute data into a `Vec`. + /// + /// This works for direct and indirect attributes. Note that indirect attributes can be + /// expensive to clone. fn attrib_clone_into_vec<'a, 'b, T, I: 'b + AttribIndex>( &'b self, name: &'a str, diff --git a/src/mesh/attrib/attribute.rs b/src/mesh/attrib/attribute.rs index e3c41cc..d4d055a 100644 --- a/src/mesh/attrib/attribute.rs +++ b/src/mesh/attrib/attribute.rs @@ -3,7 +3,8 @@ use std::marker::PhantomData; use std::slice; use dync::{dync_mod, from_dyn, into_dyn, BoxValue, SliceDrop, SliceDropMut, SmallValue, VecDrop}; -use fnv::FnvHashSet as HashSet; +//use fnv::FnvHashSet as HashSet; +use hashbrown::HashSet; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -1595,7 +1596,7 @@ mod tests { #[test] fn indirect_set_value_at() { - let mut cache = AttribValueCache::with_hasher(Default::default()); + let mut cache = AttribValueCache::default(); let mut data = IndirectData::with_size(3, String::from("default")); let val = HValue::new(Irc::new(String::from("default"))); assert_eq!(&data.default_element, &val); diff --git a/src/mesh/polymesh.rs b/src/mesh/polymesh.rs index e19c1dc..856b4f1 100644 --- a/src/mesh/polymesh.rs +++ b/src/mesh/polymesh.rs @@ -364,7 +364,7 @@ impl From> for PolyMesh { face_attributes, face_vertex_attributes, face_edge_attributes, - attribute_value_cache: AttribValueCache::with_hasher(Default::default()), + attribute_value_cache: AttribValueCache::default(), } } } @@ -392,7 +392,7 @@ impl From> for PolyMesh { face_attributes, face_vertex_attributes, face_edge_attributes, - attribute_value_cache: AttribValueCache::with_hasher(Default::default()), + attribute_value_cache: AttribValueCache::default(), } } } diff --git a/src/mesh/tetmesh.rs b/src/mesh/tetmesh.rs index c333a6d..60534d2 100644 --- a/src/mesh/tetmesh.rs +++ b/src/mesh/tetmesh.rs @@ -50,6 +50,8 @@ pub struct TetMesh { pub cell_vertex_attributes: AttribDict, /// Cell face Attributes. pub cell_face_attributes: AttribDict, + /// Indirect attribute value cache + pub attribute_value_cache: AttribValueCache, } impl TetMesh { @@ -65,6 +67,7 @@ impl TetMesh { cell_attributes: AttribDict::new(), cell_vertex_attributes: AttribDict::new(), cell_face_attributes: AttribDict::new(), + attribute_value_cache: AttribValueCache::default(), } } diff --git a/src/mesh/tetmesh/surface.rs b/src/mesh/tetmesh/surface.rs index 9ad1779..f51318d 100644 --- a/src/mesh/tetmesh/surface.rs +++ b/src/mesh/tetmesh/surface.rs @@ -6,7 +6,7 @@ use crate::Real; use super::TetMesh; -type HashMap = fnv::FnvHashMap; +type HashMap = hashbrown::HashMap; /// A triangle with sorted vertices #[derive(Copy, Clone, PartialOrd, Ord, PartialEq, Eq, Hash, Debug)] diff --git a/src/mesh/uniform_poly_mesh.rs b/src/mesh/uniform_poly_mesh.rs index 671085c..05c6a04 100644 --- a/src/mesh/uniform_poly_mesh.rs +++ b/src/mesh/uniform_poly_mesh.rs @@ -111,6 +111,7 @@ macro_rules! impl_uniform_surface_mesh { } /// Sort vertices by the given key values, and return the reulting order (permutation). + /// /// This function assumes we have at least one vertex. 
pub(crate) fn sort_vertices_by_key_impl(&mut self, mut f: F) -> Vec where @@ -138,16 +139,16 @@ macro_rules! impl_uniform_surface_mesh { let mut seen = vec![false; vertex_positions.len()]; // Apply the order permutation to vertex_positions in place - apply_permutation(&order, vertex_positions.as_mut_slice(), &mut seen); + apply_permutation_with_seen(&order, vertex_positions.as_mut_slice(), &mut seen); // Apply permutation to each vertex attribute for (_, attrib) in vertex_attributes.iter_mut() { let mut data_slice = attrib.data_mut_slice(); - //let stride = buf_mut.element_size(); - // This is safe because a permutation should not affect data integrity. - //let data = unsafe { buf_mut.as_bytes_mut() }; - apply_permutation(&order, &mut data_slice, &mut seen); + // Clear seen + seen.iter_mut().for_each(|b| *b = false); + + apply_permutation_with_seen(&order, &mut data_slice, &mut seen); } // Build a reverse mapping for convenience. diff --git a/src/utils/slice.rs b/src/utils/slice.rs index 91dde10..8a240fc 100644 --- a/src/utils/slice.rs +++ b/src/utils/slice.rs @@ -39,36 +39,47 @@ impl<'a, V: HasDrop> Swap for SliceDropMut<'a, V> { } } +#[allow(dead_code)] +#[inline] +pub(crate) fn apply_permutation(permutation: &[usize], array: &mut A) { + let mut seen = vec![false; array.len()]; + apply_permutation_with_stride_and_seen(permutation, array, 1, &mut seen); +} + // Utility functions -pub(crate) fn apply_permutation( +#[inline] +pub(crate) fn apply_permutation_with_seen( permutation: &[usize], array: &mut A, seen: &mut [bool], ) { - apply_permutation_with_stride(permutation, array, 1, seen); + apply_permutation_with_stride_and_seen(permutation, array, 1, seen); } -pub(crate) fn apply_permutation_with_stride( +pub(crate) fn apply_permutation_with_stride_and_seen( permutation: &[usize], array: &mut A, stride: usize, seen: &mut [bool], ) { - debug_assert_eq!(permutation.len() * stride, array.len()); - debug_assert_eq!(seen.len() * stride, array.len()); + // Total number of elements being tracked. + let nelem = seen.len(); - // Clear seen - seen.iter_mut().for_each(|x| *x = false); + assert!(permutation.iter().all(|&i| i < nelem)); + assert_eq!(permutation.len(), nelem); + debug_assert_eq!(nelem * stride, array.len()); - for unseen_i in 0..seen.len() { - if seen[unseen_i] { + for unseen_i in 0..nelem { + // SAFETY: unseen_i is explicitly between 0 and seen.len() + if unsafe { *seen.get_unchecked(unseen_i) } { continue; } let mut i = unseen_i; loop { - let idx = permutation[i]; - if seen[idx] { + let idx = unsafe { *permutation.get_unchecked(i) }; + // SAFETY: checked permutation element bounds in the assert above. 
+ if unsafe { *seen.get_unchecked(idx) } { break; } @@ -76,7 +87,8 @@ pub(crate) fn apply_permutation_with_stride( array.swap(off + stride * i, off + stride * idx); } - seen[i] = true; + // SAFETY: i is guaranteed to be < nelem + unsafe { *seen.get_unchecked_mut(i) = true }; i = idx; } } @@ -91,15 +103,14 @@ mod tests { // Checks inner loop in apply_permutation let perm = vec![7, 8, 2, 3, 4, 1, 6, 5, 0]; let mut values = String::from("tightsemi"); - let mut seen = vec![false; 9]; - apply_permutation(&perm, unsafe { values.as_bytes_mut() }, &mut seen); + apply_permutation(&perm, unsafe { values.as_bytes_mut() }); assert_eq!(values, "mightiest"); // Checks the outer loop in apply_permutation let perm = vec![7, 8, 4, 3, 2, 1, 6, 5, 0]; let mut values = String::from("tightsemi"); let mut seen = vec![false; 9]; - apply_permutation(&perm, unsafe { values.as_bytes_mut() }, &mut seen); + apply_permutation_with_seen(&perm, unsafe { values.as_bytes_mut() }, &mut seen); assert_eq!(values, "mithgiest"); let mut pts = vec![ @@ -110,8 +121,9 @@ mod tests { [1.0, 1.0, 1.0], ]; seen.resize(5, false); + seen.iter_mut().for_each(|b| *b = false); let order = [3, 2, 1, 4, 0]; - apply_permutation(&order, &mut pts, &mut seen); + apply_permutation_with_seen(&order, &mut pts, &mut seen); assert_eq!( pts.as_slice(), &[ -- GitLab From f07ed336187b134c5287e6f1fe5329de764fdb08 Mon Sep 17 00:00:00 2001 From: Egor Larionov Date: Sun, 26 Jul 2020 00:18:33 -0700 Subject: [PATCH 03/19] Add split to uniform meshes + refactor attrib cache --- src/algo/merge.rs | 18 ++- src/algo/split.rs | 266 ++++++++++++++++++++-------------- src/lib.rs | 4 +- src/mesh/attrib/attribute.rs | 35 ++--- src/mesh/tetmesh/surface.rs | 1 + src/mesh/uniform_poly_mesh.rs | 7 + 6 files changed, 198 insertions(+), 133 deletions(-) diff --git a/src/algo/merge.rs b/src/algo/merge.rs index 7820aa4..4f12c46 100644 --- a/src/algo/merge.rs +++ b/src/algo/merge.rs @@ -212,7 +212,7 @@ impl Merge for TetMesh { cell_attributes: other_cell_attributes, cell_vertex_attributes: other_cell_vertex_attributes, cell_face_attributes: other_cell_face_attributes, - attribute_value_cache, + attribute_value_cache: other_attribute_value_cache, } = other; self.vertex_positions @@ -249,7 +249,7 @@ impl Merge for TetMesh { other_cell_face_attributes, other_num_cell_faces, ); - for value in attribute_value_cache.into_iter() { + for value in other_attribute_value_cache.into_iter() { self.attribute_value_cache.insert(value); } self @@ -488,7 +488,7 @@ impl Merge for PolyMesh { face_attributes: other_face_attributes, face_vertex_attributes: other_face_vertex_attributes, face_edge_attributes: other_face_edge_attributes, - attribute_value_cache, + attribute_value_cache: other_attribute_value_cache, } = other; self.vertex_positions @@ -529,7 +529,7 @@ impl Merge for PolyMesh { other_num_face_edges, ); - for value in attribute_value_cache.into_iter() { + for value in other_attribute_value_cache.into_iter() { self.attribute_value_cache.insert(value); } self @@ -657,6 +657,7 @@ macro_rules! impl_merge_for_uniform_mesh { face_attributes: other_face_attributes, face_vertex_attributes: other_face_vertex_attributes, face_edge_attributes: other_face_edge_attributes, + attribute_value_cache: other_attribute_value_cache, } = other; self.vertex_positions @@ -693,6 +694,9 @@ macro_rules! 
impl_merge_for_uniform_mesh { other_face_edge_attributes, other_num_face_edges, ); + for value in other_attribute_value_cache.into_iter() { + self.attribute_value_cache.insert(value); + } self } } @@ -767,6 +771,7 @@ macro_rules! impl_merge_for_uniform_mesh { let mut face_attributes = AttribDict::new(); let mut face_vertex_attributes = AttribDict::new(); let mut face_edge_attributes = AttribDict::new(); + let mut attribute_value_cache = AttribValueCache::default(); let mut num_vertices = 0; for mesh in mesh_iter { @@ -810,6 +815,10 @@ macro_rules! impl_merge_for_uniform_mesh { mesh.num_face_edges(), ); + for value in mesh.attribute_value_cache.iter() { + attribute_value_cache.insert(value.clone()); + } + // Extend the indices AFTER attributes are transfered since // `merge_attribute_dicts` expects num_elements to be the number before // the merge. @@ -825,6 +834,7 @@ macro_rules! impl_merge_for_uniform_mesh { face_attributes, face_vertex_attributes, face_edge_attributes, + attribute_value_cache, }) } } diff --git a/src/algo/split.rs b/src/algo/split.rs index cf19be0..b822ee6 100644 --- a/src/algo/split.rs +++ b/src/algo/split.rs @@ -6,7 +6,7 @@ use super::connectivity::*; use crate::index::*; use crate::mesh::attrib::AttribValueCache; use crate::mesh::topology::*; -use crate::mesh::{attrib::*, PolyMesh, TetMesh, TetMeshExt, TriMesh}; +use crate::mesh::{attrib::*, PolyMesh, QuadMesh, TetMesh, TetMeshExt, TriMesh}; use crate::Real; /// Helper to split attributes based on the given connectivity info. @@ -221,6 +221,145 @@ impl TetMesh { } } +macro_rules! impl_split_for_uniform_mesh { + ($mesh_type:ident; $n:expr; $($ns:expr),*) => { + impl Split for $mesh_type { + #[inline] + fn split(self, partition: &[usize], num_parts: usize) -> Vec { + self.split_by_vertex_partition(partition, num_parts).0 + } + } + + impl $mesh_type { + /// Split the mesh by the given partition. + /// + /// Returns a vector of meshes and the mapping from old cell index to new cell index. + fn split_by_vertex_partition( + self, + vertex_partition: &[usize], + num_parts: usize, + ) -> (Vec, Vec) { + // Fast path, when everything is connected. + if num_parts == 1 { + return (vec![self], vec![]); + } + + // Deconstruct the original mesh. + let $mesh_type { + vertex_positions, + indices, + vertex_attributes, + face_attributes, + face_vertex_attributes, + face_edge_attributes, + .. + } = self; + + // Record where the new vertices end up (just the index within their respective + // components). The component ids themselves are recorded separately. + let mut new_vertex_indices = vec![Index::INVALID; vertex_positions.len()]; + + // Transfer vertex positions + let mut comp_vertex_positions = vec![Vec::new(); num_parts]; + for (vidx, &comp_id) in vertex_partition.iter().enumerate() { + new_vertex_indices[vidx] = comp_vertex_positions[comp_id].len().into(); + comp_vertex_positions[comp_id].push(vertex_positions[vidx]); + } + + // Validate that all vertices have been properly mapped. + debug_assert!(new_vertex_indices.iter().all(|&idx| idx.is_valid())); + let new_vertex_index_slice: &[usize] = bytemuck::cast_slice(new_vertex_indices.as_slice()); + + // Record face connectivity. Note that if faces have vertices on different components, + // they will be ignored in the output and their connectivity will be "invalid". 
+ let mut face_connectivity = vec![Index::INVALID; indices.len()]; + let mut new_face_indices = vec![Index::INVALID; indices.len()]; + + // Transfer faces + let mut comp_vertex_indices = vec![Vec::new(); num_parts]; + for (&face, (face_comp_id, new_face_idx)) in indices.iter().zip(face_connectivity.iter_mut().zip(new_face_indices.iter_mut())) { + let comp_id = vertex_partition[face[0]]; + if face.iter().all(|&i| vertex_partition[i] == comp_id) { + let new_face = [ + $( + new_vertex_index_slice[face[$ns]], + )* + ]; + *new_face_idx = comp_vertex_indices[comp_id].len().into(); + comp_vertex_indices[comp_id].push(new_face); + *face_comp_id = Index::from(comp_id); + } + } + + // Initialize attribute value caches for indirect attributes. + let mut comp_attribute_value_caches = vec![AttribValueCache::default(); num_parts]; + + // Transfer vertex attributes + let comp_vertex_attributes = split_attributes( + &vertex_attributes, + num_parts, + vertex_partition.iter().cloned(), + &mut comp_attribute_value_caches, + ); + + // Transfer face attributes + let comp_face_attributes = split_attributes( + &face_attributes, + num_parts, + face_connectivity.iter().cloned(), + &mut comp_attribute_value_caches, + ); + + // Transfer face vertex attributes + let comp_face_vertex_attributes = split_attributes( + &face_vertex_attributes, + num_parts, + face_connectivity + .iter() + .flat_map(|c| std::iter::repeat(c).take($n).cloned()), + &mut comp_attribute_value_caches, + ); + + // Transfer face edge attributes + let comp_face_edge_attributes = split_attributes( + &face_edge_attributes, + num_parts, + face_connectivity + .iter() + .flat_map(|c| std::iter::repeat(c).take($n).cloned()), + &mut comp_attribute_value_caches, + ); + + // Generate a Vec of meshes. + ( + comp_vertex_positions + .into_iter() + .zip(comp_vertex_indices.into_iter()) + .zip(comp_vertex_attributes.into_iter()) + .zip(comp_face_attributes.into_iter()) + .zip(comp_face_vertex_attributes.into_iter()) + .zip(comp_face_edge_attributes.into_iter()) + .zip(comp_attribute_value_caches.into_iter()) + .map(|((((((vp, vi), va), ca), cva), cfa), avc)| Self { + vertex_positions: vp.into(), + indices: vi.into(), + vertex_attributes: va, + face_attributes: ca, + face_vertex_attributes: cva, + face_edge_attributes: cfa, + attribute_value_cache: avc, + }) + .collect(), + new_face_indices, + ) + } + } + } +} + +impl_split_for_uniform_mesh!(TriMesh; 3; 0, 1, 2); +impl_split_for_uniform_mesh!(QuadMesh; 4; 0, 1, 2, 3); + impl Split for TetMeshExt { fn split(self, vertex_partition: &[usize], num_parts: usize) -> Vec { // Fast path, when everything is connected. @@ -468,6 +607,26 @@ impl SplitIntoConnectedComponents for PolyMesh< } } +impl SplitIntoConnectedComponents for TriMesh { + fn split_into_connected_components(self) -> Vec { + // First we partition the vertices. + let (vertex_connectivity, num_components) = + Connectivity::::connectivity(&self); + + self.split(&vertex_connectivity, num_components) + } +} + +impl SplitIntoConnectedComponents for QuadMesh { + fn split_into_connected_components(self) -> Vec { + // First we partition the vertices. + let (vertex_connectivity, num_components) = + Connectivity::::connectivity(&self); + + self.split(&vertex_connectivity, num_components) + } +} + // TODO: Generalize split_vertices_by_face_vertex_attrib between the meshes. // This will involve converging on how to represent/access indices for rewiring meshes // through a trait. 
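+// Usage sketch (illustrative only; the concrete mesh type is an assumption): splitting into
+// connected components consumes the mesh and returns one mesh per vertex-connected component.
+//
+//     let pieces: Vec<TriMesh<f64>> = trimesh.split_into_connected_components();
+//     // Each returned mesh contains only the vertices and faces of one component.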
@@ -648,111 +807,6 @@ impl PolyMesh { } } -impl SplitIntoConnectedComponents for TriMesh { - fn split_into_connected_components(self) -> Vec { - // First we partition the vertices. - let (vertex_connectivity, num_components) = - Connectivity::::connectivity(&self); - - // Fast path, when everything is connected. - if num_components == 1 { - return vec![self]; - } - - // Record where the new vertices end up (just the index within their respective - // components). The component ids themselves are recorded separately. - let mut new_vertex_indices = vec![Index::INVALID; self.vertex_positions.len()]; - - // Transfer vertex positions - let mut comp_vertex_positions = vec![Vec::new(); num_components]; - for (vidx, &comp_id) in vertex_connectivity.iter().enumerate() { - new_vertex_indices[vidx] = comp_vertex_positions[comp_id].len().into(); - comp_vertex_positions[comp_id].push(self.vertex_positions[vidx]); - } - - // Validate that all vertices have been properly mapped. - debug_assert!(new_vertex_indices.iter().all(|&idx| idx.is_valid())); - let new_vertex_index_slice: &[usize] = bytemuck::cast_slice(new_vertex_indices.as_slice()); - - // Record face connectivity. Note that if faces have vertices on different components, - // they will be ignored in the output and their connectivity will be "invalid". - let mut face_connectivity = vec![Index::INVALID; self.num_faces()]; - - // Transfer faces - let mut comp_vertex_indices = vec![Vec::new(); num_components]; - for (face, face_comp_id) in self.face_iter().zip(face_connectivity.iter_mut()) { - let comp_id = vertex_connectivity[face[0]]; - if face.iter().all(|&i| vertex_connectivity[i] == comp_id) { - let new_face = [ - new_vertex_index_slice[face[0]], - new_vertex_index_slice[face[1]], - new_vertex_index_slice[face[2]], - ]; - comp_vertex_indices[comp_id].push(new_face); - *face_comp_id = Index::from(comp_id); - } - } - - // Initialize attribute value caches for indirect attributes. - let mut comp_attribute_value_caches = - vec![AttribValueCache::with_hasher(Default::default()); num_components]; - - // Transfer vertex attributes - let comp_vertex_attributes = split_attributes( - &self.vertex_attributes, - num_components, - vertex_connectivity.iter().cloned(), - &mut comp_attribute_value_caches, - ); - - // Transfer face attributes - let comp_face_attributes = split_attributes( - &self.face_attributes, - num_components, - face_connectivity.iter().cloned(), - &mut comp_attribute_value_caches, - ); - - // Transfer face vertex attributes - let comp_face_vertex_attributes = split_attributes( - &self.face_vertex_attributes, - num_components, - face_connectivity - .iter() - .flat_map(|f| std::iter::repeat(f).take(3).cloned()), - &mut comp_attribute_value_caches, - ); - - // Transfer face edge attributes - let comp_face_edge_attributes = split_attributes( - &self.face_edge_attributes, - num_components, - face_connectivity - .iter() - .flat_map(|f| std::iter::repeat(f).take(3).cloned()), - &mut comp_attribute_value_caches, - ); - - // Generate a Vec of meshes. 
- comp_vertex_positions - .into_iter() - .zip(comp_vertex_indices.into_iter()) - .zip(comp_vertex_attributes.into_iter()) - .zip(comp_face_attributes.into_iter()) - .zip(comp_face_vertex_attributes.into_iter()) - .zip(comp_face_edge_attributes.into_iter()) - .map(|(((((vp, i), va), fa), fva), fea)| TriMesh { - vertex_positions: vp.into(), - indices: i.into(), - vertex_attributes: va, - face_attributes: fa, - face_vertex_attributes: fva, - face_edge_attributes: fea, - }) - .collect() - } -} - #[cfg(test)] mod tests { use super::*; diff --git a/src/lib.rs b/src/lib.rs index 6df69fd..d3e996b 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -53,12 +53,12 @@ pub(crate) fn index_vec_into_usize(vec: Vec) -> Vec { unsafe { reinterpret::reinterpret_vec(vec) } } -pub(crate) fn into_flat_vec3(mut vec: Vec<[usize; 3]>) -> Vec { +pub(crate) fn into_flat_vec3(vec: Vec<[usize; 3]>) -> Vec { // SAFETY: A continguous vec of [usize; 3] can be reinterpreted as a contiguous vec of usize unsafe { reinterpret::reinterpret_vec(vec) } } -pub(crate) fn into_flat_vec4(mut vec: Vec<[usize; 4]>) -> Vec { +pub(crate) fn into_flat_vec4(vec: Vec<[usize; 4]>) -> Vec { // SAFETY: A continguous vec of [usize; 4] can be reinterpreted as a contiguous vec of usize unsafe { reinterpret::reinterpret_vec(vec) } } diff --git a/src/mesh/attrib/attribute.rs b/src/mesh/attrib/attribute.rs index d4d055a..1ffee03 100644 --- a/src/mesh/attrib/attribute.rs +++ b/src/mesh/attrib/attribute.rs @@ -238,7 +238,7 @@ impl IndirectData { /// Get the value pointer from the value set corresponding to the given value and insert it in /// to the values set if it doesn't already exist. - fn get_or_insert(set: &mut HashSet, elem: T) -> Irc { + fn get_or_insert(set: &mut AttribValueCache, elem: T) -> Irc { let elem = HValue::new(Irc::new(elem)); if let Some(elem) = set.get(&elem) { Irc::clone(elem.as_ref().downcast().unwrap()) @@ -251,7 +251,7 @@ impl IndirectData { /// Construct an attribute from a given `Vec` of data. pub fn from_vec( vec: Vec, - cache: &mut HashSet, + cache: &mut AttribValueCache, ) -> Self { let default_element = Irc::new(T::default()); let buf: Vec<_> = vec @@ -269,7 +269,7 @@ impl IndirectData { #[inline] pub fn from_slice( buf: &[T], - cache: &mut HashSet, + cache: &mut AttribValueCache, ) -> Self { Self::from_vec(buf.to_vec(), cache) } @@ -348,7 +348,7 @@ impl IndirectData { pub fn update_with( &mut self, mut f: F, - cache: &mut HashSet, + cache: &mut AttribValueCache, ) -> Result<&mut Self, Error> where T: AttributeValueHash, @@ -379,7 +379,7 @@ impl IndirectData { &'a mut self, i: usize, new_value: T, - cache: &'a mut HashSet, + cache: &'a mut AttribValueCache, ) -> Result<&'a mut Self, Error> where T: AttributeValueHash, @@ -392,7 +392,7 @@ impl IndirectData { &'a mut self, i: usize, new_value: &HValue, - cache: &'a mut HashSet, + cache: &'a mut AttribValueCache, ) -> Result<&'a mut Self, Error> { let mut value_out = self.buf.get_mut(i); if let Some(existing) = cache.get(new_value) { @@ -410,7 +410,7 @@ impl IndirectData { pub fn push_cloned( &mut self, new_value_ref: HValueRef, - cache: &mut HashSet, + cache: &mut AttribValueCache, ) -> Result<&mut Self, Error> { let expected = self.buf.element_type_id(); let actual = new_value_ref.value_type_id(); @@ -659,7 +659,7 @@ impl AttributeData { /// values in the given cache. 
pub fn indirect_from_vec( vec: Vec, - cache: &mut HashSet, + cache: &mut AttribValueCache, ) -> Self { AttributeData::Indirect(IndirectData::from_vec(vec, cache)) } @@ -680,7 +680,7 @@ impl AttributeData { #[inline] pub fn indirect_from_slice( data: &[T], - cache: &mut HashSet, + cache: &mut AttribValueCache, ) -> Self { Self::indirect_from_vec(data.to_vec(), cache) } @@ -746,10 +746,6 @@ impl AttributeData { /// Construct a new attribute with the same buffer type, default element and topology type as /// `self`. - /// - /// The data within the newly created attribute is expected to be initialized with the given - /// function `init`, which takes the output `DataSliceMut` for the new attribute and the existing - /// `DataSlice` from `self`. pub fn duplicate_with( &self, dup_data: impl FnOnce(&mut VecDrop, SliceDrop), @@ -869,7 +865,7 @@ impl AttributeData { pub fn indirect_update_with( &mut self, f: impl FnMut(usize, &Irc) -> Option>, - cache: &mut HashSet, + cache: &mut AttribValueCache, ) -> Result<&mut Self, Error> { match self { AttributeData::Indirect(i) => match i.update_with::(f, cache) { @@ -1046,7 +1042,7 @@ impl Attribute { /// values in the given cache. pub fn indirect_from_vec( vec: Vec, - cache: &mut HashSet, + cache: &mut AttribValueCache, ) -> Self { Attribute { data: AttributeData::indirect_from_vec(vec, cache), @@ -1126,10 +1122,6 @@ impl Attribute { } /// Construct a new attribute with the same buffer type and default element as `self`. - /// - /// The data within the newly created attribute is expected to be initialized with the given - /// function `init`, which takes the output `DataVec` for the new attribute and the existing - /// `DataSlice` from `self`. #[inline] pub fn promote_with( &self, @@ -1142,6 +1134,7 @@ impl Attribute { } /// Construct a new attribute with the same buffer type and default element as `self`. + /// /// The attribute is first initialized with the default value by allocating `len` default /// elements. Then the newly created buffer is expected to be modified by the `init` function. pub fn promote_with_len( @@ -1165,7 +1158,7 @@ impl Attribute { #[inline] pub fn indirect_from_slice( data: &[T], - cache: &mut HashSet, + cache: &mut AttribValueCache, ) -> Self { Self::indirect_from_vec(data.to_vec(), cache) } @@ -1218,7 +1211,7 @@ impl Attribute { pub fn indirect_update_with( &mut self, f: F, - cache: &mut HashSet, + cache: &mut AttribValueCache, ) -> Result<&mut Self, Error> where T: AttributeValueHash, diff --git a/src/mesh/tetmesh/surface.rs b/src/mesh/tetmesh/surface.rs index f51318d..dc53e9f 100644 --- a/src/mesh/tetmesh/surface.rs +++ b/src/mesh/tetmesh/surface.rs @@ -315,6 +315,7 @@ impl TetMesh { face_attributes, face_vertex_attributes, face_edge_attributes: AttribDict::new(), // TetMeshes don't have edge attributes (yet) + attribute_value_cache: self.attribute_value_cache.clone(), }; // Add the mapping to the original tetmesh. Overwrite any existing attributes. diff --git a/src/mesh/uniform_poly_mesh.rs b/src/mesh/uniform_poly_mesh.rs index 05c6a04..1a755fa 100644 --- a/src/mesh/uniform_poly_mesh.rs +++ b/src/mesh/uniform_poly_mesh.rs @@ -26,6 +26,7 @@ macro_rules! impl_uniform_surface_mesh { face_attributes: AttribDict::new(), face_vertex_attributes: AttribDict::new(), face_edge_attributes: AttribDict::new(), + attribute_value_cache: AttribValueCache::default(), } } @@ -282,6 +283,8 @@ pub struct TriMesh { pub face_vertex_attributes: AttribDict, /// Triangle edge attributes. 
pub face_edge_attributes: AttribDict, + /// Indirect attribute value cache + pub attribute_value_cache: AttribValueCache, } #[derive(Clone, Debug, PartialEq, Attrib, Intrinsic)] @@ -299,6 +302,8 @@ pub struct QuadMesh { pub face_vertex_attributes: AttribDict, /// Quad edge attributes. pub face_edge_attributes: AttribDict, + /// Indirect attribute value cache + pub attribute_value_cache: AttribValueCache, } impl_uniform_surface_mesh!(TriMesh, 3); @@ -416,6 +421,7 @@ impl From> for TriMesh { let PolyMesh { vertex_positions, vertex_attributes, + attribute_value_cache, .. } = mesh; @@ -426,6 +432,7 @@ impl From> for TriMesh { face_attributes: tri_face_attributes, face_vertex_attributes: tri_face_vertex_attributes, face_edge_attributes: tri_face_edge_attributes, + attribute_value_cache, } } } -- GitLab From 869d9de87ea51d6efcae631329a16fca3ac80fc3 Mon Sep 17 00:00:00 2001 From: Egor Larionov Date: Mon, 27 Jul 2020 23:25:40 -0700 Subject: [PATCH 04/19] Set seeds for hashing in tests This makes tests deterministic. --- src/mesh/tetmesh/surface.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/mesh/tetmesh/surface.rs b/src/mesh/tetmesh/surface.rs index dc53e9f..a06d9f4 100644 --- a/src/mesh/tetmesh/surface.rs +++ b/src/mesh/tetmesh/surface.rs @@ -106,6 +106,14 @@ impl TetMesh { /// /// This function assumes that the given tetmesh is a manifold. fn surface_triangle_set(&self) -> HashMap { + #[cfg(test)] + let mut triangles: HashMap = { + // This will make tests deterministic. + let hash_builder = hashbrown::hash_map::DefaultHashBuilder::with_seeds(7, 47); + HashMap::with_capacity_and_hasher(self.num_cells() * 4, hash_builder) + }; + + #[cfg(not(test))] let mut triangles: HashMap = HashMap::with_capacity_and_hasher(self.num_cells() * 4, Default::default()); -- GitLab From 85fe378c8c70269877e91a8ec6bb363ebe67f59a Mon Sep 17 00:00:00 2001 From: Egor Larionov Date: Sat, 26 Sep 2020 22:14:55 -0700 Subject: [PATCH 05/19] Add IntoIterator impl for index --- src/index.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/index.rs b/src/index.rs index fb981ab..2cc409a 100644 --- a/src/index.rs +++ b/src/index.rs @@ -45,7 +45,7 @@ impl Index { /// used in `map`. Use this to opt out of automatic index checking. #[inline] pub fn map_inner usize>(self, f: F) -> Index { - Index(f(self.0)) + Index::new(f(self.0)) } /// Checked `and_then` over inner index. This allows operations on valid indices only. @@ -152,6 +152,14 @@ impl From> for Index { } } +impl IntoIterator for Index { + type Item = usize; + type IntoIter = std::option::IntoIter; + fn into_iter(self) -> Self::IntoIter { + self.into_option().into_iter() + } +} + impl Add for Index { type Output = Index; -- GitLab From 01549391b76e7a881844414e09fcac183772b87f Mon Sep 17 00:00:00 2001 From: Egor Larionov Date: Sat, 28 Nov 2020 16:25:00 -0800 Subject: [PATCH 06/19] Impl Merge for PointCloud + add merge_iter Implemented the Merge trait for the PointCloud type. Added a merge_iter function to the Merge trait. This can probably be used from the FromIterator trait. 
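As an illustrative sketch (not part of this patch) of how the new `merge_iter` might be used from inside the crate, e.g. in a unit test: the import paths, the `PointCloud::new` signature, and the `NumVertices` trait are inferred from the surrounding diffs and should be treated as assumptions rather than a definitive API.

    use crate::algo::merge::Merge;
    use crate::mesh::topology::NumVertices; // assumed location of the NumVertices trait
    use crate::mesh::PointCloud;

    fn merge_two_clouds() {
        let a = PointCloud::new(vec![[0.0f64; 3], [1.0; 3]]);
        let b = PointCloud::new(vec![[2.0f64; 3]]);

        // `merge_iter` folds any iterator of point clouds into a single one,
        // falling back to `PointCloud::default()` (empty) when the iterator is empty.
        let merged = PointCloud::merge_iter(vec![a, b]);
        assert_eq!(merged.num_vertices(), 3);
    }

Since `merge_iter` only requires `Self: Default` and an iterator of values, `merge_vec` and `merge_slice` reduce to thin wrappers over it, as the diff below shows.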
--- src/algo/merge.rs | 60 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 41 insertions(+), 19 deletions(-) diff --git a/src/algo/merge.rs b/src/algo/merge.rs index 4f12c46..2d3bd18 100644 --- a/src/algo/merge.rs +++ b/src/algo/merge.rs @@ -5,6 +5,7 @@ use crate::mesh::attrib::AttribValueCache; use crate::mesh::attrib::*; +use crate::mesh::pointcloud::PointCloud; use crate::mesh::polymesh::PolyMesh; use crate::mesh::tetmesh::{TetMesh, TetMeshExt}; use crate::mesh::topology::*; @@ -20,27 +21,14 @@ pub trait Merge { /// reference. fn merge(&mut self, other: Self) -> &mut Self; - /// Merge a `Vec` of objecs into one of the same type. - fn merge_vec(vec: Vec) -> Self + /// Merge an iterator of objects into one of the same type. + fn merge_iter(iterable: impl IntoIterator) -> Self where Self: Default, { - let n = vec.len(); - let mut iter = vec.into_iter(); - - // Handle trivial cases first. - if n == 0 { - // If no meshes are given on the input, simply return an empty mesh. - return Default::default(); - } + let mut iter = iterable.into_iter(); - // At this point we know there's at least one object. We will merge onto that. - let mut obj = iter.next().unwrap(); - - if n == 1 { - // If only one mesh is given, just return it. - return obj; - } + let mut obj = iter.next().unwrap_or_else(Default::default); for other in iter { obj.merge(other); @@ -49,14 +37,21 @@ pub trait Merge { obj } + /// Merge a `Vec` of objects into one of the same type. + fn merge_vec(vec: Vec) -> Self + where + Self: Default, + { + Self::merge_iter(vec) + } + /// In contrast to `merge_vec`, this function takes an immutable reference to a collection of /// meshes, and creates a brand new mesh that is a union of all the given meshes. fn merge_slice(slice: &[Self]) -> Self where Self: Clone + Default, { - let vec = slice.to_vec(); - Self::merge_vec(vec) + Self::merge_iter(slice.into_iter().cloned()) } } @@ -256,6 +251,33 @@ impl Merge for TetMesh { } } +impl Merge for PointCloud { + fn merge(&mut self, other: Self) -> &mut Self { + let self_num_vertices = self.num_vertices(); + let other_num_vertices = other.num_vertices(); + + // Deconstruct the other mesh explicitly since it will not be valid as soon as we start to + // canibalize its contents. + let PointCloud { + vertex_positions: mut other_vertex_positions, + vertex_attributes: other_vertex_attributes, + } = other; + + self.vertex_positions + .as_mut_vec() + .append(other_vertex_positions.as_mut_vec()); + + // Transfer attributes + merge_attribute_dicts( + &mut self.vertex_attributes, + self_num_vertices, + other_vertex_attributes, + other_num_vertices, + ); + self + } +} + impl TetMesh { /// Merge a iterator of meshes into a single distinct mesh. /// -- GitLab From a56316f7cc4feee081ee455fd3b3dffcd865c33d Mon Sep 17 00:00:00 2001 From: Egor Larionov Date: Sat, 28 Nov 2020 16:28:21 -0800 Subject: [PATCH 07/19] Implemented Default for PointCloud --- src/mesh/pointcloud.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/mesh/pointcloud.rs b/src/mesh/pointcloud.rs index 33a4d14..c08396c 100644 --- a/src/mesh/pointcloud.rs +++ b/src/mesh/pointcloud.rs @@ -47,6 +47,16 @@ impl PointCloud { } } +impl Default for PointCloud { + /// Produce an empty `PointCloud`. + /// + /// This is not particularly useful on its own, however it can be + /// used as a null case for various mesh algorithms. 
+ fn default() -> Self { + PointCloud::new(vec![]) + } +} + impl NumVertices for PointCloud { #[inline] fn num_vertices(&self) -> usize { -- GitLab From 2da90b53937962fd65e84beb05997989e106e080 Mon Sep 17 00:00:00 2001 From: Egor Larionov Date: Sat, 28 Nov 2020 16:28:52 -0800 Subject: [PATCH 08/19] Doc formatting for TetMesh::default --- src/mesh/tetmesh.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/mesh/tetmesh.rs b/src/mesh/tetmesh.rs index 60534d2..1b7021b 100644 --- a/src/mesh/tetmesh.rs +++ b/src/mesh/tetmesh.rs @@ -230,7 +230,9 @@ impl TetMesh { } impl Default for TetMesh { - /// Produce an empty `TetMesh`. This is not particularly useful on its own, however it can be + /// Produce an empty `TetMesh`. + /// + /// This is not particularly useful on its own, however it can be /// used as a null case for various mesh algorithms. fn default() -> Self { TetMesh::new(vec![], vec![]) -- GitLab From b27283ed335afb05e4d579531873970abc1ecc29 Mon Sep 17 00:00:00 2001 From: Egor Larionov Date: Sat, 28 Nov 2020 16:29:32 -0800 Subject: [PATCH 09/19] Upgraded vtkio to v0.4 which supports xml formats --- Cargo.toml | 3 +- src/io.rs | 10 +- src/io/vtk.rs | 931 ++++++++++++++++++++++++-------------------------- 3 files changed, 453 insertions(+), 491 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index ccd6e00..0c8ce5f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,12 +20,13 @@ dync = { version = "0.4", features = ['numeric'] } reinterpret = "0.2" bytemuck = "1.2" autodiff = { version = "0.2", features = ["cgmath"] } -vtkio = { version = "0.3", optional = true } +vtkio = { path = "../vtkio", version = "0.4", optional = true } serde = { version = "1.0", features = ["derive"], optional = true } chashmap = { version = "2", optional = true } rayon = { version = "1", optional = true } hashbrown = { version = "0.8" } math = { package = "cgmath", git = "https://github.com/elrnv/cgmath.git", version = "0.17" } +flatk = "0.3" [dependencies.objio] package = "obj" diff --git a/src/io.rs b/src/io.rs index 02e1598..aedf911 100644 --- a/src/io.rs +++ b/src/io.rs @@ -6,11 +6,13 @@ pub use vtkio::{ use crate::mesh::attrib; use crate::mesh::{PointCloud, PolyMesh, TetMesh}; -use crate::Real; pub mod obj; pub mod vtk; +pub trait Real: vtkio::model::Scalar + crate::Real {} +impl Real for T where T: vtkio::model::Scalar + crate::Real {} + // These names are chosen to be rather short to reduce the const of comparisons. // Although code that relies on this is not idiomatic, it can sometimes be simpler. const UV_ATTRIB_NAME: &str = "uv"; @@ -248,6 +250,7 @@ pub enum Error { UnsupportedFileFormat, UnsupportedDataFormat, MeshTypeMismatch, + MissingMeshData, } impl std::error::Error for Error { @@ -258,9 +261,7 @@ impl std::error::Error for Error { Error::Attrib { .. } => { None // Implement when attrib::Error implements std::error::Error. 
} - Error::UnsupportedFileFormat => None, - Error::UnsupportedDataFormat => None, - Error::MeshTypeMismatch => None, + _ => None, } } } @@ -274,6 +275,7 @@ impl std::fmt::Display for Error { Error::UnsupportedFileFormat => write!(f, "Unsupported file format specified"), Error::UnsupportedDataFormat => write!(f, "Unsupported data format specified"), Error::MeshTypeMismatch => write!(f, "Mesh type doesn't match expected type"), + Error::MissingMeshData => write!(f, "Missing mesh data"), } } } diff --git a/src/io/vtk.rs b/src/io/vtk.rs index 4522ccf..c71a3df 100644 --- a/src/io/vtk.rs +++ b/src/io/vtk.rs @@ -1,14 +1,15 @@ -use vtkio::call_numeric_buffer_fn; +use vtkio::model::Scalar as VtkScalar; +use crate::algo::merge::Merge; use crate::mesh::attrib::{Attrib, AttribDict, AttribIndex, Attribute, AttributeValue}; use crate::mesh::topology::*; use crate::mesh::{PointCloud, PolyMesh, TetMesh, VertexPositions}; -use crate::Real; +use super::Real; use super::{NORMAL_ATTRIB_NAME, UV_ATTRIB_NAME}; pub use vtkio::Error as VtkError; -pub use vtkio::{model, parser, writer, IOBuffer}; +pub use vtkio::{match_buf, model, parser, writer, IOBuffer}; pub use super::Error; @@ -63,126 +64,134 @@ pub fn convert_polymesh_to_vtk_format(mesh: &PolyMesh) -> Result( +pub fn convert_vtk_dataset_to_polymesh( dataset: model::DataSet, ) -> Result, Error> { match dataset { - model::DataSet::UnstructuredGrid { - points, - cells, - cell_types, - data, - } => { - // Get points. - let pt_coords: Vec = points.cast_into_vec(); - let mut pts = Vec::with_capacity(pt_coords.len() / 3); - for coords in pt_coords.chunks(3) { - pts.push([coords[0], coords[1], coords[2]]); - } - - // We use this counter to determine if the vtk file should actually be parsed as a - // different type of mesh. - let mut count_non_polymesh_faces = 0; - - let mut faces = Vec::new(); - let mut i = 0usize; - for c in 0..cells.num_cells { - if i >= cells.vertices.len() { - break; - } - - let n = cells.vertices[i] as usize; - // Skip geometry we can't represent as a polygon mesh. - if cell_types[c as usize] == model::CellType::Triangle { - if n != 3 { - count_non_polymesh_faces += 1; - i += n + 1; - continue; - } - } else if cell_types[c as usize] == model::CellType::Quad { - if n != 4 { - count_non_polymesh_faces += 1; - i += n + 1; - continue; + model::DataSet::UnstructuredGrid { pieces, .. } => { + Ok(PolyMesh::merge_iter(pieces.into_iter().filter_map( + |piece| { + let model::UnstructuredGridPiece { + points, + cells: model::Cells { cell_verts, types }, + data, + } = piece.load_piece_data().ok()?; + // Get points. + let pt_coords: Vec = points.cast_into()?; // None is returned in case of overflow. + let mut pts = Vec::with_capacity(pt_coords.len() / 3); + for coords in pt_coords.chunks(3) { + pts.push([coords[0], coords[1], coords[2]]); } - } else if cell_types[c as usize] != model::CellType::Polygon { - i += n + 1; - count_non_polymesh_faces += 1; - continue; - } - for _ in 0..=n { - faces.push(cells.vertices[i] as usize); - i += 1; - } - } + // We use this counter to determine if the vtk file should actually be parsed as a + // different type of mesh. + let mut count_non_polymesh_faces = 0; + + let num_cells = cell_verts.num_cells(); + let (connectivity, offsets) = cell_verts.into_xml(); + + let mut begin = 0usize; + let mut faces = Vec::new(); + for c in 0..num_cells { + let end = offsets[c] as usize; + let n = end - begin; + + // Skip geometry we can't represent as a polygon mesh. 
+ if (types[c] == model::CellType::Triangle && n != 3) + || (types[c] == model::CellType::Quad && n != 4) + || (types[c] != model::CellType::Polygon) + { + count_non_polymesh_faces += 1; + begin = end; + continue; + } + + faces.push(n); + for i in begin..end { + faces.push(connectivity[i] as usize); + } + + begin = end; + } - if faces.is_empty() && count_non_polymesh_faces > 0 { - // This should be parsed as another type of mesh. - // We opt to not interpret it as a point cloud mesh, which would be the case if - // there were no other types of faces. - return Err(Error::MeshTypeMismatch); - } // Otherwise we output what we have found, whether or not some other faces were ignored. + if faces.is_empty() && count_non_polymesh_faces > 0 { + // This should be parsed as another type of mesh. + // We opt to not interpret it as a point cloud mesh, which would be the case if + // there were no other types of faces. + return None; + } // Otherwise we output what we have found, whether or not some other faces were ignored. - let mut polymesh = PolyMesh::new(pts, &faces); + let mut polymesh = PolyMesh::new(pts, &faces); - // Populate point attributes. - vtk_to_mesh_attrib::<_, VertexIndex>(data.point, &mut polymesh); + // Populate point attributes. + vtk_to_mesh_attrib::<_, VertexIndex>(data.point, &mut polymesh); - // Populate face attributes. - let remainder = vtk_to_mesh_attrib::<_, FaceIndex>(data.cell, &mut polymesh); + // Populate face attributes. + let remainder = vtk_to_mesh_attrib::<_, FaceIndex>(data.cell, &mut polymesh); - // Populate face vertex attributes. - vtk_field_to_mesh_attrib(remainder, &mut polymesh); + // Populate face vertex attributes. + vtk_field_to_mesh_attrib(remainder, &mut polymesh); - Ok(polymesh) + Some(polymesh) + }, + ))) } - model::DataSet::PolyData { points, topo, data } => { - // Get points. - let pt_coords: Vec = points.cast_into_vec(); - let mut pts = Vec::with_capacity(pt_coords.len() / 3); - for coords in pt_coords.chunks(3) { - pts.push([coords[0], coords[1], coords[2]]); - } + model::DataSet::PolyData { pieces, .. } => { + Ok(PolyMesh::merge_iter(pieces.into_iter().filter_map( + |piece| { + let model::PolyDataPiece { points, topo, data } = + piece.load_piece_data().ok()?; + // Get points. + let pt_coords: Vec = points.cast_into()?; // None is returned in case of overflow. + let mut pts = Vec::with_capacity(pt_coords.len() / 3); + for coords in pt_coords.chunks(3) { + pts.push([coords[0], coords[1], coords[2]]); + } - let mut faces = Vec::new(); - for poly in topo.into_iter() { - match poly { - model::PolyDataTopology::Polygons(cells) - | model::PolyDataTopology::TriangleStrips(cells) => { - faces.extend(cells.vertices.into_iter().map(|x| x as usize)); + let mut faces = Vec::new(); + for poly in topo.into_iter() { + match poly { + model::PolyDataTopology::Polygons(cells) + | model::PolyDataTopology::TriangleStrips(cells) => { + let (_, vertices) = cells.into_legacy(); + faces.extend(vertices.into_iter().map(|x| x as usize)); + } + _ => return None, + } } - _ => return Err(Error::MeshTypeMismatch), - } - } - let mut polymesh = PolyMesh::new(pts, &faces); + let mut polymesh = PolyMesh::new(pts, &faces); - // Populate point attributes. - vtk_to_mesh_attrib::<_, VertexIndex>(data.point, &mut polymesh); + // Populate point attributes. 
+ vtk_to_mesh_attrib::<_, VertexIndex>(data.point, &mut polymesh); - // Populate face attributes - let remainder = vtk_to_mesh_attrib::<_, FaceIndex>(data.cell, &mut polymesh); + // Populate face attributes + let remainder = vtk_to_mesh_attrib::<_, FaceIndex>(data.cell, &mut polymesh); - // Populate face vertex attributes. - vtk_field_to_mesh_attrib(remainder, &mut polymesh); + // Populate face vertex attributes. + vtk_field_to_mesh_attrib(remainder, &mut polymesh); - Ok(polymesh) + Some(polymesh) + }, + ))) } _ => Err(Error::UnsupportedDataFormat), } @@ -217,18 +226,21 @@ pub fn convert_tetmesh_to_vtk_format(tetmesh: &TetMesh) -> Result( dataset: model::DataSet, ) -> Result, Error> { match dataset { - model::DataSet::UnstructuredGrid { - points, - cells, - cell_types, - data, - } => { - // Get points. - let pt_coords: Vec = points.cast_into_vec(); - let mut pts = Vec::with_capacity(pt_coords.len() / 3); - for coords in pt_coords.chunks(3) { - pts.push([coords[0], coords[1], coords[2]]); - } - - // Get contiguous indices (4 vertex indices for each tet). - let mut indices = Vec::new(); - let mut i = 0usize; - for c in 0..cells.num_cells { - if i >= cells.vertices.len() { - break; - } - - let n = cells.vertices[i] as usize; - - if n != 4 || cell_types[c as usize] != model::CellType::Tetra { - i += n; - continue; - } + model::DataSet::UnstructuredGrid { pieces, .. } => { + Ok(TetMesh::merge_iter(pieces.into_iter().filter_map( + |piece| { + let model::UnstructuredGridPiece { + points, + cells: model::Cells { cell_verts, types }, + data, + } = piece.load_piece_data().ok()?; + // Get points. + let pt_coords: Vec = points.cast_into()?; + let mut pts = Vec::with_capacity(pt_coords.len() / 3); + for coords in pt_coords.chunks(3) { + pts.push([coords[0], coords[1], coords[2]]); + } - indices.push([ - cells.vertices[i + 1] as usize, - cells.vertices[i + 2] as usize, - cells.vertices[i + 3] as usize, - cells.vertices[i + 4] as usize, - ]); - i += 5; - } + let num_cells = cell_verts.num_cells(); + let (connectivity, offsets) = cell_verts.into_xml(); + + // Get contiguous indices (4 vertex indices for each tet). + let mut begin = 0usize; + let mut indices = Vec::new(); + for c in 0..num_cells { + let end = offsets[c] as usize; + let n = end - begin; + + if n != 4 || types[c] != model::CellType::Tetra { + // Not a tetrahedron, skip it. + begin = end; + continue; + } + + indices.push([ + connectivity[begin + 0] as usize, + connectivity[begin + 1] as usize, + connectivity[begin + 2] as usize, + connectivity[begin + 3] as usize, + ]); + begin = end; + } - let mut tetmesh = TetMesh::new(pts, indices); + let mut tetmesh = TetMesh::new(pts, indices); - // Don't bother transferring attributes if there are no vertices or cells. - // This supresses some needless size mismatch warnings when the dataset has an - // unstructuredgrid representing something other than a tetmesh. + // Don't bother transferring attributes if there are no vertices or cells. + // This supresses some needless size mismatch warnings when the dataset has an + // unstructuredgrid representing something other than a tetmesh. - if tetmesh.num_vertices() > 0 { - // Populate point attributes. - vtk_to_mesh_attrib::<_, VertexIndex>(data.point, &mut tetmesh); - } + if tetmesh.num_vertices() > 0 { + // Populate point attributes. 
+ vtk_to_mesh_attrib::<_, VertexIndex>(data.point, &mut tetmesh); + } - if tetmesh.num_cells() > 0 { - // Populate tet attributes - vtk_to_mesh_attrib::<_, CellIndex>(data.cell, &mut tetmesh); - } + if tetmesh.num_cells() > 0 { + // Populate tet attributes + vtk_to_mesh_attrib::<_, CellIndex>(data.cell, &mut tetmesh); + } - Ok(tetmesh) + Some(tetmesh) + }, + ))) } _ => Err(Error::UnsupportedDataFormat), } @@ -314,11 +331,12 @@ pub fn convert_pointcloud_to_vtk_format( Ok(model::Vtk { version: model::Version::new((4, 2)), title: String::from("Point Cloud"), - data: model::DataSet::PolyData { - points: points.into(), + byte_order: model::ByteOrder::BigEndian, + data: model::DataSet::inline(model::PolyDataPiece { + points: IOBuffer::new(points), topo: vec![ // A single VERTICES entry containing all points - model::PolyDataTopology::Vertices(model::Cells { + model::PolyDataTopology::Vertices(model::VertexNumbers::Legacy { num_cells: 1, vertices: std::iter::once(num_verts) .chain(0..num_verts) @@ -329,100 +347,120 @@ pub fn convert_pointcloud_to_vtk_format( point: point_attribs, cell: Vec::new(), }, - }, + }), }) } pub fn convert_vtk_dataset_to_pointcloud( dataset: model::DataSet, ) -> Result, Error> { + let mut pts = Vec::new(); + let mut vertices = Vec::new(); match dataset { - model::DataSet::UnstructuredGrid { - points, - cells, - cell_types, - data, - } => { - // Get points. - let pt_coords: Vec = points.cast_into_vec(); - let mut pts = Vec::with_capacity(pt_coords.len() / 3); - for coords in pt_coords.chunks(3) { - pts.push([coords[0], coords[1], coords[2]]); - } - - // We use this counter to determine if the vtk file should actually be parsed as a - // different type of mesh. - let mut count_non_vertex_cells = 0; - - let mut vertices = Vec::new(); - let mut i = 0usize; - for c in 0..cells.num_cells { - if i >= cells.vertices.len() { - break; - } - - let n = cells.vertices[i] as usize; - // Skip geometry we can't represent as a point cloud. - if cell_types[c as usize] == model::CellType::Vertex { - if n != 1 { - i += n + 1; - count_non_vertex_cells += 1; - continue; + model::DataSet::UnstructuredGrid { pieces, .. } => { + Ok(PointCloud::merge_iter(pieces.into_iter().filter_map( + |piece| { + let model::UnstructuredGridPiece { + points, + cells: model::Cells { cell_verts, types }, + data, + } = piece.load_piece_data().ok()?; + + pts.clear(); + vertices.clear(); + + // Get points. + let pt_coords: Vec = points.cast_into()?; + pts.reserve(pt_coords.len() / 3); + for coords in pt_coords.chunks(3) { + pts.push([coords[0], coords[1], coords[2]]); } - } else if cell_types[c as usize] != model::CellType::PolyVertex { - i += n + 1; - count_non_vertex_cells += 1; - continue; - } - - i += 1; // Skipping the size of the cell - for _ in 0..=n { - vertices.push(cells.vertices[i] as usize); - i += 1; - } - } - - if vertices.is_empty() && count_non_vertex_cells > 0 { - // This should be parsed as another type of mesh. - // We opt to not interpret it as a point cloud mesh, which would be the case if - // there were no other types of faces. - return Err(Error::MeshTypeMismatch); - } // Otherwise we output what we have found, whether or not some other faces were ignored. - - let referenced_points: Vec<_> = vertices.iter().map(|&vtx| pts[vtx]).collect(); - let mut pointcloud = PointCloud::new(referenced_points); - - // Populate point attributes. 
- vtk_to_mesh_attrib::<_, VertexIndex>(data.point, &mut pointcloud); + // We use this counter to determine if the vtk file should actually be parsed as a + // different type of mesh. + let mut count_non_vertex_cells = 0; + + let (num_cells, cell_vertices) = cell_verts.into_legacy(); + + let mut i = 0usize; + for c in 0..num_cells { + if i >= cell_vertices.len() { + break; + } + + let n = cell_vertices[i] as usize; + // Skip geometry we can't represent as a point cloud. + if types[c as usize] == model::CellType::Vertex { + if n != 1 { + i += n + 1; + count_non_vertex_cells += 1; + continue; + } + } else if types[c as usize] != model::CellType::PolyVertex { + i += n + 1; + count_non_vertex_cells += 1; + continue; + } + + i += 1; // Skipping the size of the cell + + for _ in 0..=n { + vertices.push(cell_vertices[i] as usize); + i += 1; + } + } - Ok(pointcloud) + if vertices.is_empty() && count_non_vertex_cells > 0 { + // This should be parsed as another type of mesh. + // We opt to not interpret it as a point cloud mesh, which would be the case if + // there were no other types of faces. + return None; + } // Otherwise we output what we have found, whether or not some other faces were ignored. + let referenced_points: Vec<_> = vertices.iter().map(|&vtx| pts[vtx]).collect(); + let mut pointcloud = PointCloud::new(referenced_points); + + // Populate point attributes. + vtk_to_mesh_attrib::<_, VertexIndex>(data.point, &mut pointcloud); + + Some(pointcloud) + }, + ))) } - model::DataSet::PolyData { points, topo, data } => { - // Get points. - let pt_coords: Vec = points.cast_into_vec(); - let mut pts = Vec::with_capacity(pt_coords.len() / 3); - for coords in pt_coords.chunks(3) { - pts.push([coords[0], coords[1], coords[2]]); - } + model::DataSet::PolyData { pieces, .. } => { + Ok(PointCloud::merge_iter(pieces.into_iter().filter_map( + |piece| { + let model::PolyDataPiece { points, topo, data } = + piece.load_piece_data().ok()?; + pts.clear(); + vertices.clear(); + + // Get points. + let pt_coords: Vec = points.cast_into()?; + pts.reserve(pt_coords.len() / 3); + for coords in pt_coords.chunks(3) { + pts.push([coords[0], coords[1], coords[2]]); + } - let mut vertices = Vec::new(); - for cell in topo.into_iter() { - match cell { - model::PolyDataTopology::Vertices(cells) => { - vertices.extend(cells.vertices.into_iter().skip(1).map(|x| x as usize)); + for cell in topo.into_iter() { + match cell { + model::PolyDataTopology::Vertices(cells) => { + let (_, cell_vertices) = cells.into_legacy(); + vertices + .extend(cell_vertices.into_iter().skip(1).map(|x| x as usize)); + } + _ => return None, + } } - _ => return Err(Error::MeshTypeMismatch), - } - } - let referenced_points: Vec<_> = vertices.iter().map(|&vtx| pts[vtx]).collect(); - let mut ptcloud = PointCloud::new(referenced_points); + let referenced_points: Vec<_> = vertices.iter().map(|&vtx| pts[vtx]).collect(); + let mut ptcloud = PointCloud::new(referenced_points); - // Populate point attributes. - vtk_to_mesh_attrib::<_, VertexIndex>(data.point, &mut ptcloud); + // Populate point attributes. + vtk_to_mesh_attrib::<_, VertexIndex>(data.point, &mut ptcloud); - Ok(ptcloud) + Some(ptcloud) + }, + ))) } _ => Err(Error::UnsupportedDataFormat), } @@ -444,20 +482,20 @@ fn flatten33(vec: Vec<[[T; 3]; 3]>) -> Vec { } /// Transfer a `uv` attribute from this attribute to the `vtk` model. 
-fn into_vtk_attrib_uv(attrib: &Attribute) -> Option { +fn into_vtk_attrib_uv(name: &str, attrib: &Attribute) -> Option { // Try 2d texture coordinates let mut maybe_iobuf = attrib .direct_clone_into_vec::<[f32; 2]>() - .map(|y| flatten2(y).into()); + .map(|y| IOBuffer::from(flatten2(y))); if maybe_iobuf.is_err() { // try with f64 maybe_iobuf = attrib .direct_clone_into_vec::<[f64; 2]>() - .map(|y| flatten2(y).into()); + .map(|y| IOBuffer::from(flatten2(y))); } if let Ok(data) = maybe_iobuf { - return Some(model::Attribute::TextureCoordinates { dim: 2, data }); + return Some(model::Attribute::tcoords(name, 2).with_data(data)); } // Try 3d texture coordinates @@ -472,13 +510,13 @@ fn into_vtk_attrib_uv(attrib: &Attribute) -> Option { } if let Ok(data) = maybe_iobuf { - return Some(model::Attribute::TextureCoordinates { dim: 3, data }); + return Some(model::Attribute::tcoords(name, 3).with_data(data)); } None } -fn mesh_to_vtk_attrib(attrib: &Attribute) -> Option { +fn mesh_to_vtk_attrib_impl(name: &str, attrib: &Attribute) -> Option { // Try to match a scalar field. let maybe_iobuf = attrib .direct_clone_into_vec::() @@ -494,11 +532,14 @@ fn mesh_to_vtk_attrib(attrib: &Attribute) -> Option { .or_else(|_| attrib.direct_clone_into_vec::().map(|x| x.into())); if let Ok(data) = maybe_iobuf { - return Some(model::Attribute::Scalars { - num_comp: 1, - lookup_table: None, + return Some(model::Attribute::DataArray(model::DataArray { + name: name.to_string(), + elem: model::ElementType::Scalars { + num_comp: 1, + lookup_table: None, + }, data, - }); + })); } // Try to match a vector field. @@ -552,11 +593,14 @@ fn mesh_to_vtk_attrib(attrib: &Attribute) -> Option { }); if let Ok(data) = maybe_iobuf { - return Some(model::Attribute::Scalars { - num_comp: 2, - lookup_table: None, + return Some(model::Attribute::DataArray(model::DataArray { + name: name.to_string(), + elem: model::ElementType::Scalars { + num_comp: 2, + lookup_table: None, + }, data, - }); + })); } // Try to match a vector field. @@ -610,7 +654,11 @@ fn mesh_to_vtk_attrib(attrib: &Attribute) -> Option { }); if let Ok(data) = maybe_iobuf { - return Some(model::Attribute::Vectors { data }); + return Some(model::Attribute::DataArray(model::DataArray { + name: name.to_string(), + elem: model::ElementType::Vectors, + data, + })); } // Try to match a vector field. @@ -664,15 +712,18 @@ fn mesh_to_vtk_attrib(attrib: &Attribute) -> Option { }); if let Ok(data) = maybe_iobuf { - return Some(model::Attribute::Scalars { - num_comp: 4, - lookup_table: None, + return Some(model::Attribute::DataArray(model::DataArray { + name: name.to_string(), + elem: model::ElementType::Scalars { + num_comp: 4, + lookup_table: None, + }, data, - }); + })); } // Try to match a tensor field. 
- let maybe_iobuf = attrib + let maybe_iobuf: Result = attrib .direct_clone_into_vec::<[[u8; 3]; 3]>() .map(|y| flatten33(y).into()) .or_else(|_| { @@ -722,23 +773,21 @@ fn mesh_to_vtk_attrib(attrib: &Attribute) -> Option { }); if let Ok(data) = maybe_iobuf { - return Some(model::Attribute::Tensors { data }); + return Some(model::Attribute::tensors(name).with_data(data)); } None } -fn mesh_to_vtk_named_attrib( - name: &str, - attrib: &Attribute, -) -> Option<(String, model::Attribute)> { +fn mesh_to_vtk_named_attrib(name: &str, attrib: &Attribute) -> Option { // Try to match special attributes if name == UV_ATTRIB_NAME { - if let Some(attrib) = into_vtk_attrib_uv(attrib) { - return Some((name.to_string(), attrib)); + let attrib = into_vtk_attrib_uv(name, attrib); + if attrib.is_some() { + return attrib; } } else if name == NORMAL_ATTRIB_NAME { - let mut maybe_iobuf = attrib + let mut maybe_iobuf: Result = attrib .direct_clone_into_vec::<[f32; 3]>() .map(|y| flatten3(y).into()); if maybe_iobuf.is_err() { @@ -749,12 +798,12 @@ fn mesh_to_vtk_named_attrib( } if let Ok(data) = maybe_iobuf { - return Some((name.to_string(), model::Attribute::Normals { data })); + return Some(model::Attribute::normals(name).with_data(data)); } } // Match with other vtk attributes. - mesh_to_vtk_attrib(attrib).map(|attrib| (name.to_string(), attrib)) + mesh_to_vtk_attrib_impl(name, attrib) } /// Transfer attribute data from `attrib_dict` to a vtk FIELD attribute. This is useful for storing @@ -763,7 +812,7 @@ fn mesh_to_vtk_named_attrib( fn mesh_to_vtk_named_field_attribs( field_data_name: &str, attrib_dict: &AttribDict, -) -> Option<(String, model::Attribute)> { +) -> Option { let data_array: Vec<_> = attrib_dict .iter() .filter_map(|(name, attrib)| { @@ -784,7 +833,7 @@ fn mesh_to_vtk_named_field_attribs( if let Ok(data) = maybe_iobuf { return Some(model::FieldArray { name: name.to_string(), - num_comp: 1, + elem: 1, data, }); } @@ -842,7 +891,7 @@ fn mesh_to_vtk_named_field_attribs( if let Ok(data) = maybe_iobuf { return Some(model::FieldArray { name: name.to_string(), - num_comp: 2, + elem: 2, data, }); } @@ -900,7 +949,7 @@ fn mesh_to_vtk_named_field_attribs( if let Ok(data) = maybe_iobuf { return Some(model::FieldArray { name: name.to_string(), - num_comp: 3, + elem: 3, data, }); } @@ -910,20 +959,13 @@ fn mesh_to_vtk_named_field_attribs( .collect(); if !data_array.is_empty() { - Some(( - field_data_name.to_string(), - model::Attribute::Field { data_array }, - )) + Some(model::Attribute::field(field_data_name).with_field_data(data_array)) } else { None } } -fn add_2d_array_attrib<'a, T, M, I>( - buf: IOBuffer, - name: &'a str, - tetmesh: &mut M, -) -> Result<(), Error> +fn add_2d_array_attrib<'a, T, M, I>(buf: &[T], name: &'a str, tetmesh: &mut M) -> Result<(), Error> where T: AttributeValue + Copy + Default, I: AttribIndex, @@ -933,43 +975,33 @@ where let mut vecs = Vec::with_capacity(buf.len() / n); let mut count_comp = 0; let mut cur = [[T::default(); 3]; 3]; - if let Some(iter) = buf.iter::() { - for &val in iter { - cur[count_comp / 3][count_comp % 3] = val; // row-major -> col-major - count_comp += 1; - if count_comp == n { - vecs.push(cur); - count_comp = 0; - } + for &val in buf.iter() { + cur[count_comp / 3][count_comp % 3] = val; // row-major -> col-major + count_comp += 1; + if count_comp == n { + vecs.push(cur); + count_comp = 0; } - tetmesh.add_attrib_data::<_, I>(name, vecs)?; - Ok(()) - } else { - // This error should mean that there is a bug in the buffer crate. 
- Err(Error::UnsupportedDataFormat) } + tetmesh.add_attrib_data::<_, I>(name, vecs)?; + Ok(()) } // Simple attrib -fn add_array_attrib<'a, T, M, I>(buf: IOBuffer, name: &'a str, mesh: &mut M) -> Result<(), Error> +fn add_array_attrib<'a, T, M, I>(buf: &[T], name: &'a str, mesh: &mut M) -> Result<(), Error> where T: AttributeValue + Default, I: AttribIndex, M: Attrib, { - if let Some(data_vec) = buf.into_vec::() { - mesh.add_attrib_data::<_, I>(name, data_vec)?; - Ok(()) - } else { - // This error should mean that there is a bug in the buffer crate. - Err(Error::UnsupportedDataFormat) - } + mesh.add_attrib_data::<_, I>(name, buf.to_vec())?; + Ok(()) } macro_rules! impl_add_array_attrib { ($fn_name:ident, $size:expr) => { fn $fn_name<'a, T, M, I: AttribIndex>( - buf: IOBuffer, + buf: &[T], name: &'a str, mesh: &mut M, ) -> Result<(), Error> @@ -981,38 +1013,31 @@ macro_rules! impl_add_array_attrib { let mut vecs = Vec::with_capacity(buf.len() / n); let mut count_comp = 0; let mut cur = [T::default(); $size]; - if let Some(iter) = buf.iter::() { - for &val in iter { - cur[count_comp] = val; - count_comp += 1; - if count_comp == n { - vecs.push(cur); - count_comp = 0; - } + for &val in buf.iter() { + cur[count_comp] = val; + count_comp += 1; + if count_comp == n { + vecs.push(cur); + count_comp = 0; } - mesh.add_attrib_data::<_, I>(name, vecs)?; - Ok(()) - } else { - // This error should mean that there is a bug in the buffer crate. - Err(Error::UnsupportedDataFormat) } + mesh.add_attrib_data::<_, I>(name, vecs)?; + Ok(()) } }; } -impl_add_array_attrib!(add_array_attrib1, 1); impl_add_array_attrib!(add_array_attrib2, 2); impl_add_array_attrib!(add_array_attrib3, 3); impl_add_array_attrib!(add_array_attrib4, 4); -/// Add vtk attributes to the given mesh, and return any unprocessed attributes that can be -/// processed further. In other words, if the reason an attribute is not processed is because it +/// Adds VTK attributes to the given mesh, and returns any unprocessed attributes that can be +/// processed further. +/// +/// If the reason an attribute is not processed is because it /// has an unsupported type, we leave it out of the remainder. #[allow(clippy::cognitive_complexity)] -fn vtk_to_mesh_attrib( - attribs: Vec<(String, model::Attribute)>, - mesh: &mut M, -) -> Vec<(String, model::Attribute)> +fn vtk_to_mesh_attrib(attribs: Vec, mesh: &mut M) -> Vec where M: Attrib, I: AttribIndex, @@ -1020,95 +1045,74 @@ where // We populate another vector instead of using filter_map to allow for errors to propagate. let mut remainder = Vec::with_capacity(attribs.len()); - for (name, attrib) in attribs { + for attrib in attribs { match attrib { - model::Attribute::Scalars { num_comp, data, .. } => { - if num_comp == 1 { - // Note that only the first found attribute with the same name and location - // will be inserted. 
- call_numeric_buffer_fn!( add_array_attrib::<_, M, I>(data, name.as_str(), mesh) - or { Err(Error::UnsupportedDataFormat) } ) - } else if num_comp == 2 { - call_numeric_buffer_fn!( add_array_attrib2::<_, M, I>(data, name.as_str(), mesh) - or { Err(Error::UnsupportedDataFormat) } ) - } else if num_comp == 3 { - call_numeric_buffer_fn!( add_array_attrib3::<_, M, I>(data, name.as_str(), mesh) - or { Err(Error::UnsupportedDataFormat) } ) - } else if num_comp == 4 { - call_numeric_buffer_fn!( add_array_attrib4::<_, M, I>(data, name.as_str(), mesh) - or { Err(Error::UnsupportedDataFormat) } ) - } else { - // Other values for num_comp are not supported by the vtk standard - // at the time of this writing. - continue; + model::Attribute::DataArray(model::DataArray { name, elem, data }) => { + match elem { + model::ElementType::Scalars { num_comp: dim, .. } | model::ElementType::TCoords(dim) => { + if dim == 1 { + // Note that only the first found attribute with the same name and location + // will be inserted. + match_buf!( &data, v => add_array_attrib::<_,M,I>(&v, name.as_str(), mesh) ) + } else if dim == 2 { + match_buf!( &data, v => add_array_attrib2::<_,M,I>(&v, name.as_str(), mesh) ) + } else if dim == 3 { + match_buf!( &data, v => add_array_attrib3::<_,M,I>(&v, name.as_str(), mesh) ) + } else if dim == 4 { + match_buf!( &data, v => add_array_attrib4::<_,M,I>(&v, name.as_str(), mesh) ) + } else { + // Other values for dim are not supported by the vtk standard + // at the time of this writing. + continue; + } + } + model::ElementType::Vectors | model::ElementType::Normals => { + match_buf!( &data, v => add_array_attrib3::<_,M,I>(&v, name.as_str(), mesh) ) + } + model::ElementType::Tensors => { + match_buf!( &data, v => add_2d_array_attrib::<_,M,I>(&v, name.as_str(), mesh) ) + } + _ => { continue; } // LookupTable and ColorScalars attributes ignored } } - model::Attribute::Vectors { data } | model::Attribute::Normals { data } => { - call_numeric_buffer_fn!( add_array_attrib3::<_, M, I>(data, name.as_str(), mesh) - or { Err(Error::UnsupportedDataFormat) } ) - } - model::Attribute::TextureCoordinates { dim, data } => { - if dim == 1 { - call_numeric_buffer_fn!( add_array_attrib1::<_, M, I>(data, name.as_str(), mesh) - or { Err(Error::UnsupportedDataFormat) } ) - } else if dim == 2 { - call_numeric_buffer_fn!( add_array_attrib2::<_, M, I>(data, name.as_str(), mesh) - or { Err(Error::UnsupportedDataFormat) } ) - } else if dim == 3 { - call_numeric_buffer_fn!( add_array_attrib3::<_, M, I>(data, name.as_str(), mesh) - or { Err(Error::UnsupportedDataFormat) } ) - } else if dim == 4 { - call_numeric_buffer_fn!( add_array_attrib4::<_, M, I>(data, name.as_str(), mesh) - or { Err(Error::UnsupportedDataFormat) } ) - } else { continue; } - } - model::Attribute::Tensors { data } => { - call_numeric_buffer_fn!( add_2d_array_attrib::<_, M, I>(data, name.as_str(), mesh) - or { Err(Error::UnsupportedDataFormat) } ) - } - model::Attribute::Field { data_array } => { + model::Attribute::Field { data_array, name } => { if special_field_attributes().contains(&name.as_str()) { - remainder.push((name, model::Attribute::Field { data_array })); + remainder.push(model::Attribute::Field { name, data_array }); continue; } for model::FieldArray { name, - num_comp, + elem, data, } in data_array { // Field attributes dont necessarily have the right size. We check it here. - if num_comp == 1 { + if elem == 1 { // Note that only the first found attribute with the same name and location // will be inserted. 
- call_numeric_buffer_fn!( add_array_attrib::<_, M, I>(data, name.as_str(), mesh) - or { Err(Error::UnsupportedDataFormat) } ) - } else if num_comp == 2 { - call_numeric_buffer_fn!( add_array_attrib2::<_, M, I>(data, name.as_str(), mesh) - or { Err(Error::UnsupportedDataFormat) } ) - } else if num_comp == 3 { - call_numeric_buffer_fn!( add_array_attrib3::<_, M, I>(data, name.as_str(), mesh) - or { Err(Error::UnsupportedDataFormat) } ) - } else if num_comp == 4 { - call_numeric_buffer_fn!( add_array_attrib4::<_, M, I>(data, name.as_str(), mesh) - or { Err(Error::UnsupportedDataFormat) } ) + match_buf!( &data, v => add_array_attrib::<_,M,I>(&v, name.as_str(), mesh) ) + } else if elem == 2 { + match_buf!( &data, v => add_array_attrib2::<_,M,I>(&v, name.as_str(), mesh) ) + } else if elem == 3 { + match_buf!( &data, v => add_array_attrib3::<_,M,I>(&v, name.as_str(), mesh) ) + } else if elem == 4 { + match_buf!( &data, v => add_array_attrib4::<_,M,I>(&v, name.as_str(), mesh) ) } else { continue; } .unwrap_or_else(|err| eprintln!("WARNING: Field attribute transfer error: {}", err)); } continue; } - _ => { continue; } // LookupTable and ColorScalars attributes ignored } // Attribute transfer might fail, but we shouldn't stop trying the rest of the attributes. // Simply issue a warning and continue; .unwrap_or_else(|err| { #[cfg(feature = "unstable")] { - eprintln!("WARNING: Attribute transfer error for \"{}\" at {}: {}", name, std::intrinsics::type_name::(), err) + eprintln!("WARNING: Attribute transfer error at {}: {}", std::intrinsics::type_name::(), err) } #[cfg(not(feature = "unstable"))] { - eprintln!("WARNING: Attribute transfer error for \"{}\": {}", name, err) + eprintln!("WARNING: Attribute transfer error: {}", err) } }) } @@ -1117,36 +1121,27 @@ where /// Populate face vertex attributes from field attributes. #[allow(clippy::cognitive_complexity)] -fn vtk_field_to_mesh_attrib(attribs: Vec<(String, model::Attribute)>, mesh: &mut M) +fn vtk_field_to_mesh_attrib(attribs: Vec, mesh: &mut M) where M: Attrib + FaceVertex, FaceVertexIndex: AttribIndex, { - for (name, attrib) in attribs { - if !special_field_attributes().contains(&name.as_str()) { + for attrib in attribs { + if !special_field_attributes().contains(&attrib.name()) { continue; } - if let model::Attribute::Field { data_array } = attrib { - for model::FieldArray { - name, - num_comp, - data, - } in data_array - { - if num_comp == 1 { + if let model::Attribute::Field { data_array, .. } = attrib { + for model::FieldArray { name, elem, data } in data_array { + if elem == 1 { // Note that only the first found attribute with the same name and location // will be inserted. 
- call_numeric_buffer_fn!( add_array_attrib::<_, M, FaceVertexIndex>(data, name.as_str(), mesh) - or { Err(Error::UnsupportedDataFormat) } ) - } else if num_comp == 2 { - call_numeric_buffer_fn!( add_array_attrib2::<_, M, FaceVertexIndex>(data, name.as_str(), mesh) - or { Err(Error::UnsupportedDataFormat) } ) - } else if num_comp == 3 { - call_numeric_buffer_fn!( add_array_attrib3::<_, M, FaceVertexIndex>(data, name.as_str(), mesh) - or { Err(Error::UnsupportedDataFormat) } ) - } else if num_comp == 4 { - call_numeric_buffer_fn!( add_array_attrib4::<_, M, FaceVertexIndex>(data, name.as_str(), mesh) - or { Err(Error::UnsupportedDataFormat) } ) + match_buf!( &data, v => add_array_attrib::<_, _, FaceVertexIndex>(&v, name.as_str(), mesh) ) + } else if elem == 2 { + match_buf!( &data, v => add_array_attrib2::<_, _, FaceVertexIndex>(&v, name.as_str(), mesh) ) + } else if elem == 3 { + match_buf!( &data, v => add_array_attrib3::<_, _, FaceVertexIndex>(&v, name.as_str(), mesh) ) + } else if elem == 4 { + match_buf!( &data, v => add_array_attrib4::<_, _, FaceVertexIndex>(&v, name.as_str(), mesh) ) } else { continue; } .unwrap_or_else(|err| eprintln!("WARNING: Face Vertex Attribute transfer error for \"{}\": {}", name, err)) } @@ -1160,60 +1155,40 @@ mod tests { #[test] fn basic_test() { - let vtk = model::DataSet::UnstructuredGrid { + let vtk = model::DataSet::inline(model::UnstructuredGridPiece { points: vec![ 2., 1., 0., 0., 0., 1., 0., 1., 1., 1., 1., 1., 2., 1., 1., 0., 1., 2., 2., 1., 2., 1., 1., 4., 2., 1., 4., 1., 1., 5., 2., 1., 5., ] .into(), cells: model::Cells { - num_cells: 3, - vertices: vec![4, 1, 3, 2, 5, 4, 0, 4, 3, 6, 4, 9, 10, 8, 7], + cell_verts: model::VertexNumbers::Legacy { + num_cells: 3, + vertices: vec![4, 1, 3, 2, 5, 4, 0, 4, 3, 6, 4, 9, 10, 8, 7], + }, + types: vec![model::CellType::Tetra; 3], }, - cell_types: vec![ - model::CellType::Tetra, - model::CellType::Tetra, - model::CellType::Tetra, - ], data: model::Attributes { point: vec![ - ( - String::from("scalars"), - model::Attribute::Scalars { - num_comp: 1, - lookup_table: None, - data: vec![0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0f32] - .into(), - }, - ), - ( - String::from("vectors"), - model::Attribute::Vectors { - data: vec![ - 1., 0., 0., 1., 1., 0., 0., 2., 0., 1., 0., 0., 1., 1., 0., 0., 2., - 0., 1., 0., 0., 1., 1., 0., 0., 2., 0., 0., 0., 1., 0., 0., 1., - ] - .into(), - }, - ), - ( - String::from("tensors"), - model::Attribute::Tensors { - data: vec![ - 1., 0., 0., 1., 1., 0., 0., 2., 0., 1., 0., 0., 1., 1., 0., 0., 2., - 0., 1., 0., 0., 1., 1., 0., 0., 2., 0., 0., 0., 1., 0., 0., 1., 1., - 0., 0., 1., 1., 0., 0., 2., 0., 1., 0., 0., 1., 1., 0., 0., 2., 0., - 1., 0., 0., 1., 1., 0., 0., 2., 0., 0., 0., 1., 0., 0., 1., 1., 0., - 0., 1., 1., 0., 0., 2., 0., 1., 0., 0., 1., 1., 0., 0., 2., 0., 1., - 0., 0., 1., 1., 0., 0., 2., 0., 0., 0., 1., 0., 0., 1., - ] - .into(), - }, - ), + model::Attribute::scalars("scalars", 1).with_data(vec![ + 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0f32, + ]), + model::Attribute::vectors("vectors").with_data(vec![ + 1., 0., 0., 1., 1., 0., 0., 2., 0., 1., 0., 0., 1., 1., 0., 0., 2., 0., 1., + 0., 0., 1., 1., 0., 0., 2., 0., 0., 0., 1., 0., 0., 1., + ]), + model::Attribute::tensors("tensors").with_data(vec![ + 1., 0., 0., 1., 1., 0., 0., 2., 0., 1., 0., 0., 1., 1., 0., 0., 2., 0., 1., + 0., 0., 1., 1., 0., 0., 2., 0., 0., 0., 1., 0., 0., 1., 1., 0., 0., 1., 1., + 0., 0., 2., 0., 1., 0., 0., 1., 1., 0., 0., 2., 0., 1., 0., 0., 1., 1., 0., + 0., 2., 0., 0., 
0., 1., 0., 0., 1., 1., 0., 0., 1., 1., 0., 0., 2., 0., 1., + 0., 0., 1., 1., 0., 0., 2., 0., 1., 0., 0., 1., 1., 0., 0., 2., 0., 0., 0., + 1., 0., 0., 1., + ]), ], cell: vec![], }, - }; + }); let vtktetmesh = convert_vtk_dataset_to_tetmesh(vtk.clone()).unwrap(); @@ -1284,14 +1259,14 @@ mod tests { assert_eq!(vtktetmesh, tetmesh); } - fn vtk_polymesh_example() -> (IOBuffer, model::Cells, model::Attributes) { + fn vtk_polymesh_example() -> (IOBuffer, model::VertexNumbers, model::Attributes) { let (buf, attrib) = vtk_pointcloud_example(); - let cells = model::Cells { + let cell_verts = model::VertexNumbers::Legacy { num_cells: 3, vertices: vec![4, 1, 3, 2, 5, 4, 0, 4, 3, 6, 4, 9, 10, 8, 7], }; - (buf, cells, attrib) + (buf, cell_verts, attrib) } /// Produce an example polymesh for testing that corresponds to the vtk model returned by @@ -1360,13 +1335,15 @@ mod tests { #[test] fn unstructured_data_polymesh_test() { - let (points, cells, data) = vtk_polymesh_example(); - let vtk = model::DataSet::UnstructuredGrid { + let (points, cell_verts, data) = vtk_polymesh_example(); + let vtk = model::DataSet::inline(model::UnstructuredGridPiece { points, - cells: cells.clone(), - cell_types: vec![model::CellType::Polygon; cells.num_cells as usize], + cells: model::Cells { + cell_verts: cell_verts.clone(), + types: vec![model::CellType::Polygon; cell_verts.num_cells() as usize], + }, data, - }; + }); let vtkpolymesh = convert_vtk_dataset_to_polymesh(vtk.clone()).unwrap(); let polymesh = polymesh_example(); @@ -1383,11 +1360,11 @@ mod tests { #[test] fn poly_data_polymesh_test() { let (points, cells, data) = vtk_polymesh_example(); - let vtk = model::DataSet::PolyData { + let vtk = model::DataSet::inline(model::PolyDataPiece { points, topo: vec![model::PolyDataTopology::Polygons(cells)], data, - }; + }); let vtkpolymesh = convert_vtk_dataset_to_polymesh(vtk.clone()).unwrap(); let polymesh = polymesh_example(); @@ -1410,39 +1387,21 @@ mod tests { .into(), model::Attributes { point: vec![ - ( - String::from("scalars"), - model::Attribute::Scalars { - num_comp: 1, - lookup_table: None, - data: vec![0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0f32] - .into(), - }, - ), - ( - String::from("vectors"), - model::Attribute::Vectors { - data: vec![ - 1., 0., 0., 1., 1., 0., 0., 2., 0., 1., 0., 0., 1., 1., 0., 0., 2., - 0., 1., 0., 0., 1., 1., 0., 0., 2., 0., 0., 0., 1., 0., 0., 1., - ] - .into(), - }, - ), - ( - String::from("tensors"), - model::Attribute::Tensors { - data: vec![ - 1., 0., 0., 1., 1., 0., 0., 2., 0., 1., 0., 0., 1., 1., 0., 0., 2., - 0., 1., 0., 0., 1., 1., 0., 0., 2., 0., 0., 0., 1., 0., 0., 1., 1., - 0., 0., 1., 1., 0., 0., 2., 0., 1., 0., 0., 1., 1., 0., 0., 2., 0., - 1., 0., 0., 1., 1., 0., 0., 2., 0., 0., 0., 1., 0., 0., 1., 1., 0., - 0., 1., 1., 0., 0., 2., 0., 1., 0., 0., 1., 1., 0., 0., 2., 0., 1., - 0., 0., 1., 1., 0., 0., 2., 0., 0., 0., 1., 0., 0., 1., - ] - .into(), - }, - ), + model::Attribute::scalars("scalars", 1).with_data(vec![ + 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0f32, + ]), + model::Attribute::vectors("vectors").with_data(vec![ + 1., 0., 0., 1., 1., 0., 0., 2., 0., 1., 0., 0., 1., 1., 0., 0., 2., 0., 1., + 0., 0., 1., 1., 0., 0., 2., 0., 0., 0., 1., 0., 0., 1., + ]), + model::Attribute::tensors("tensors").with_data(vec![ + 1., 0., 0., 1., 1., 0., 0., 2., 0., 1., 0., 0., 1., 1., 0., 0., 2., 0., 1., + 0., 0., 1., 1., 0., 0., 2., 0., 0., 0., 1., 0., 0., 1., 1., 0., 0., 1., 1., + 0., 0., 2., 0., 1., 0., 0., 1., 1., 0., 0., 2., 0., 1., 0., 0., 1., 1., 0., 
+ 0., 2., 0., 0., 0., 1., 0., 0., 1., 1., 0., 0., 1., 1., 0., 0., 2., 0., 1., + 0., 0., 1., 1., 0., 0., 2., 0., 1., 0., 0., 1., 1., 0., 0., 2., 0., 0., 0., + 1., 0., 0., 1., + ]), ], cell: vec![], }, @@ -1516,18 +1475,18 @@ mod tests { fn poly_data_pointcloud_test() { let (points, data) = vtk_pointcloud_example(); let num_vertices = (points.len() / 3) as u32; - let cells = model::Cells { + let cell_verts = model::VertexNumbers::Legacy { num_cells: 1, vertices: std::iter::once(num_vertices) .chain(0..num_vertices) .collect(), }; - let vtk = model::DataSet::PolyData { + let vtk = model::DataSet::inline(model::PolyDataPiece { points, - topo: vec![model::PolyDataTopology::Vertices(cells)], + topo: vec![model::PolyDataTopology::Vertices(cell_verts)], data, - }; + }); let vtkpointcloud = convert_vtk_dataset_to_pointcloud(vtk.clone()).unwrap(); let pointcloud = pointcloud_example(); -- GitLab From 78e6b17c99e28ee737ed806f505e1c835cbbd534 Mon Sep 17 00:00:00 2001 From: Egor Larionov Date: Sun, 29 Nov 2020 10:13:11 -0500 Subject: [PATCH 10/19] Update vtkio dep to point to git --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 0c8ce5f..7c914e6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,7 +20,7 @@ dync = { version = "0.4", features = ['numeric'] } reinterpret = "0.2" bytemuck = "1.2" autodiff = { version = "0.2", features = ["cgmath"] } -vtkio = { path = "../vtkio", version = "0.4", optional = true } +vtkio = { version = "0.4", git = "https://github.com/elrnv/vtkio.git", optional = true } serde = { version = "1.0", features = ["derive"], optional = true } chashmap = { version = "2", optional = true } rayon = { version = "1", optional = true } -- GitLab From 816ee5903caf469215b4b5317e46ad21f5e353b4 Mon Sep 17 00:00:00 2001 From: Egor Larionov Date: Tue, 1 Dec 2020 22:50:18 -0500 Subject: [PATCH 11/19] Upgrade to vtkio v0.5 --- Cargo.toml | 2 +- src/io/vtk.rs | 62 +++++++++++++++++++++++---------------------------- 2 files changed, 29 insertions(+), 35 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 7c914e6..b60cbc1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,7 +20,7 @@ dync = { version = "0.4", features = ['numeric'] } reinterpret = "0.2" bytemuck = "1.2" autodiff = { version = "0.2", features = ["cgmath"] } -vtkio = { version = "0.4", git = "https://github.com/elrnv/vtkio.git", optional = true } +vtkio = { version = "0.5", git = "https://github.com/elrnv/vtkio.git", optional = true } serde = { version = "1.0", features = ["derive"], optional = true } chashmap = { version = "2", optional = true } rayon = { version = "1", optional = true } diff --git a/src/io/vtk.rs b/src/io/vtk.rs index c71a3df..7445c95 100644 --- a/src/io/vtk.rs +++ b/src/io/vtk.rs @@ -157,7 +157,7 @@ pub fn convert_vtk_dataset_to_polymesh( model::DataSet::PolyData { pieces, .. } => { Ok(PolyMesh::merge_iter(pieces.into_iter().filter_map( |piece| { - let model::PolyDataPiece { points, topo, data } = + let model::PolyDataPiece { points, polys, strips, data, .. } = piece.load_piece_data().ok()?; // Get points. let pt_coords: Vec = points.cast_into()?; // None is returned in case of overflow. 
@@ -167,15 +167,13 @@ pub fn convert_vtk_dataset_to_polymesh( } let mut faces = Vec::new(); - for poly in topo.into_iter() { - match poly { - model::PolyDataTopology::Polygons(cells) - | model::PolyDataTopology::TriangleStrips(cells) => { - let (_, vertices) = cells.into_legacy(); - faces.extend(vertices.into_iter().map(|x| x as usize)); - } - _ => return None, - } + if let Some(topo) = polys { + let (_, vertices) = topo.into_legacy(); + faces.extend(vertices.into_iter().map(|x| x as usize)); + } + if let Some(topo) = strips { + let (_, vertices) = topo.into_legacy(); + faces.extend(vertices.into_iter().map(|x| x as usize)); } let mut polymesh = PolyMesh::new(pts, &faces); @@ -334,19 +332,18 @@ pub fn convert_pointcloud_to_vtk_format( byte_order: model::ByteOrder::BigEndian, data: model::DataSet::inline(model::PolyDataPiece { points: IOBuffer::new(points), - topo: vec![ - // A single VERTICES entry containing all points - model::PolyDataTopology::Vertices(model::VertexNumbers::Legacy { - num_cells: 1, - vertices: std::iter::once(num_verts) - .chain(0..num_verts) - .collect::>(), - }), - ], + // A single VERTICES entry containing all points + verts: Some(model::VertexNumbers::Legacy { + num_cells: 1, + vertices: std::iter::once(num_verts) + .chain(0..num_verts) + .collect::>(), + }), data: model::Attributes { point: point_attribs, cell: Vec::new(), }, + ..Default::default() }), }) } @@ -429,7 +426,7 @@ pub fn convert_vtk_dataset_to_pointcloud( model::DataSet::PolyData { pieces, .. } => { Ok(PointCloud::merge_iter(pieces.into_iter().filter_map( |piece| { - let model::PolyDataPiece { points, topo, data } = + let model::PolyDataPiece { points, verts, data, .. } = piece.load_piece_data().ok()?; pts.clear(); vertices.clear(); @@ -441,15 +438,10 @@ pub fn convert_vtk_dataset_to_pointcloud( pts.push([coords[0], coords[1], coords[2]]); } - for cell in topo.into_iter() { - match cell { - model::PolyDataTopology::Vertices(cells) => { - let (_, cell_vertices) = cells.into_legacy(); - vertices - .extend(cell_vertices.into_iter().skip(1).map(|x| x as usize)); - } - _ => return None, - } + if let Some(topo) = verts { + let (_, cell_vertices) = topo.into_legacy(); + vertices + .extend(cell_vertices.into_iter().skip(1).map(|x| x as usize)); } let referenced_points: Vec<_> = vertices.iter().map(|&vtx| pts[vtx]).collect(); @@ -1359,11 +1351,12 @@ mod tests { #[test] fn poly_data_polymesh_test() { - let (points, cells, data) = vtk_polymesh_example(); + let (points, polys, data) = vtk_polymesh_example(); let vtk = model::DataSet::inline(model::PolyDataPiece { points, - topo: vec![model::PolyDataTopology::Polygons(cells)], + polys: Some(polys), data, + ..Default::default() }); let vtkpolymesh = convert_vtk_dataset_to_polymesh(vtk.clone()).unwrap(); @@ -1475,17 +1468,18 @@ mod tests { fn poly_data_pointcloud_test() { let (points, data) = vtk_pointcloud_example(); let num_vertices = (points.len() / 3) as u32; - let cell_verts = model::VertexNumbers::Legacy { + let verts = Some(model::VertexNumbers::Legacy { num_cells: 1, vertices: std::iter::once(num_vertices) .chain(0..num_vertices) .collect(), - }; + }); let vtk = model::DataSet::inline(model::PolyDataPiece { points, - topo: vec![model::PolyDataTopology::Vertices(cell_verts)], + verts, data, + ..Default::default() }); let vtkpointcloud = convert_vtk_dataset_to_pointcloud(vtk.clone()).unwrap(); -- GitLab From c2e8fc466b90ef8b8c4522b8b0388bac0d298b6d Mon Sep 17 00:00:00 2001 From: Egor Larionov Date: Tue, 1 Dec 2020 22:55:14 -0500 Subject: [PATCH 12/19] Re-export 
all of vtkio API in the io module --- src/io/vtk.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/io/vtk.rs b/src/io/vtk.rs index 7445c95..414908d 100644 --- a/src/io/vtk.rs +++ b/src/io/vtk.rs @@ -9,7 +9,7 @@ use super::Real; use super::{NORMAL_ATTRIB_NAME, UV_ATTRIB_NAME}; pub use vtkio::Error as VtkError; -pub use vtkio::{match_buf, model, parser, writer, IOBuffer}; +pub use vtkio::*; pub use super::Error; -- GitLab From 6c455773ebae93237ccf81362b23fdbf7570b180 Mon Sep 17 00:00:00 2001 From: Egor Larionov Date: Tue, 1 Dec 2020 23:40:33 -0500 Subject: [PATCH 13/19] Enabled polymesh export using PolyData vtk type --- src/io.rs | 4 ++-- src/io/vtk.rs | 65 +++++++++++++++++++++++++++++++++++++-------------- 2 files changed, 50 insertions(+), 19 deletions(-) diff --git a/src/io.rs b/src/io.rs index aedf911..7ccac99 100644 --- a/src/io.rs +++ b/src/io.rs @@ -106,7 +106,7 @@ pub fn save_polymesh>( fn save_polymesh_impl(polymesh: &PolyMesh, file: &Path) -> Result<(), Error> { match file.extension().and_then(|ext| ext.to_str()) { Some("vtk") => { - let vtk = vtk::convert_polymesh_to_vtk_format(polymesh)?; + let vtk = vtk::convert_polymesh_to_vtk_format(polymesh, vtk::VTKPolyExportStyle::PolyData)?; export_vtk(vtk, file)?; Ok(()) } @@ -130,7 +130,7 @@ pub fn save_polymesh_ascii>( fn save_polymesh_ascii_impl(polymesh: &PolyMesh, file: &Path) -> Result<(), Error> { match file.extension().and_then(|ext| ext.to_str()) { Some("vtk") => { - let vtk = vtk::convert_polymesh_to_vtk_format(polymesh)?; + let vtk = vtk::convert_polymesh_to_vtk_format(polymesh, vtk::VTKPolyExportStyle::PolyData)?; export_vtk_ascii(vtk, file)?; Ok(()) } diff --git a/src/io/vtk.rs b/src/io/vtk.rs index 414908d..fc0d989 100644 --- a/src/io/vtk.rs +++ b/src/io/vtk.rs @@ -27,7 +27,20 @@ fn special_field_attributes() -> &'static [&'static str] { &[FACE_VERTEX_ATTRIBUTES_FIELD] } -pub fn convert_polymesh_to_vtk_format(mesh: &PolyMesh) -> Result { +/// An enum indicating how polygon data should be exported in VTK format. +/// +/// Polygon data can be represented by the designated `PolyData` VTK data set type or the more +/// general `UnstructuredGrid` type. +/// +/// Note that both styles are supported in XML as well as Legacy VTK files. +pub enum VTKPolyExportStyle { + /// Use `PolyData` VTK type for exporting polygons. + PolyData, + /// Use `UnstructuredGrid` VTK type for exporting polygons. 
+ UnstructuredGrid, +} + +pub fn convert_polymesh_to_vtk_format(mesh: &PolyMesh, style: VTKPolyExportStyle) -> Result { let points: Vec = mesh .vertex_positions() .iter() @@ -65,20 +78,38 @@ pub fn convert_polymesh_to_vtk_format(mesh: &PolyMesh) -> Result { + model::DataSet::inline(model::UnstructuredGridPiece { + points: IOBuffer::new(points), + cells: model::Cells { + cell_verts: model::VertexNumbers::Legacy { + num_cells: mesh.num_faces() as u32, + vertices, + }, + types: vec![model::CellType::Polygon; mesh.num_faces()], + }, + data: model::Attributes { + point: point_attribs, + cell: face_attribs, + }, + }) + } + VTKPolyExportStyle::PolyData => { + model::DataSet::inline(model::PolyDataPiece { + points: IOBuffer::new(points), + polys: Some(model::VertexNumbers::Legacy { + num_cells: mesh.num_faces() as u32, + vertices, + }), + data: model::Attributes { + point: point_attribs, + cell: face_attribs, + }, + ..Default::default() + }) + } + } }) } @@ -1344,7 +1375,7 @@ mod tests { assert_eq!(vtkpolymesh, polymesh); // polymesh -> vtk test - let polymeshvtk = convert_polymesh_to_vtk_format(&polymesh).unwrap(); + let polymeshvtk = convert_polymesh_to_vtk_format(&polymesh, VTKPolyExportStyle::UnstructuredGrid).unwrap(); let vtkpolymesh = convert_vtk_dataset_to_polymesh(polymeshvtk.data).unwrap(); assert_eq!(vtkpolymesh, polymesh); } @@ -1366,7 +1397,7 @@ mod tests { assert_eq!(vtkpolymesh, polymesh); // polymesh -> vtk test - let polymeshvtk = convert_polymesh_to_vtk_format(&polymesh).unwrap(); + let polymeshvtk = convert_polymesh_to_vtk_format(&polymesh, VTKPolyExportStyle::PolyData).unwrap(); let vtkpolymesh = convert_vtk_dataset_to_polymesh(polymeshvtk.data).unwrap(); assert_eq!(vtkpolymesh, polymesh); } -- GitLab From 3dda771a9a7932b39157bd4ec576d665be28ba8e Mon Sep 17 00:00:00 2001 From: Egor Larionov Date: Tue, 1 Dec 2020 23:50:41 -0500 Subject: [PATCH 14/19] Enable unstructured grid export for pointclouds --- src/io.rs | 4 ++-- src/io/vtk.rs | 55 ++++++++++++++++++++++++++++++++++++--------------- 2 files changed, 41 insertions(+), 18 deletions(-) diff --git a/src/io.rs b/src/io.rs index 7ccac99..a264335 100644 --- a/src/io.rs +++ b/src/io.rs @@ -177,7 +177,7 @@ pub fn save_pointcloud>( pub fn save_pointcloud_impl(ptcloud: &PointCloud, file: &Path) -> Result<(), Error> { match file.extension().and_then(|ext| ext.to_str()) { Some("vtk") => { - let vtk = vtk::convert_pointcloud_to_vtk_format(ptcloud)?; + let vtk = vtk::convert_pointcloud_to_vtk_format(ptcloud, vtk::VTKPolyExportStyle::PolyData)?; export_vtk(vtk, file)?; Ok(()) } @@ -201,7 +201,7 @@ pub fn save_pointcloud_ascii>( fn save_pointcloud_ascii_impl(ptcloud: &PointCloud, file: &Path) -> Result<(), Error> { match file.extension().and_then(|ext| ext.to_str()) { Some("vtk") => { - let vtk = vtk::convert_pointcloud_to_vtk_format(ptcloud)?; + let vtk = vtk::convert_pointcloud_to_vtk_format(ptcloud, vtk::VTKPolyExportStyle::PolyData)?; export_vtk_ascii(vtk, file)?; Ok(()) } diff --git a/src/io/vtk.rs b/src/io/vtk.rs index fc0d989..473a59d 100644 --- a/src/io/vtk.rs +++ b/src/io/vtk.rs @@ -343,6 +343,7 @@ pub fn convert_vtk_dataset_to_tetmesh( pub fn convert_pointcloud_to_vtk_format( ptcloud: &PointCloud, + style: VTKPolyExportStyle ) -> Result { let num_verts = ptcloud.num_vertices() as u32; let points: Vec = ptcloud @@ -361,21 +362,43 @@ pub fn convert_pointcloud_to_vtk_format( version: model::Version::new((4, 2)), title: String::from("Point Cloud"), byte_order: model::ByteOrder::BigEndian, - data: 
model::DataSet::inline(model::PolyDataPiece { - points: IOBuffer::new(points), - // A single VERTICES entry containing all points - verts: Some(model::VertexNumbers::Legacy { - num_cells: 1, - vertices: std::iter::once(num_verts) - .chain(0..num_verts) - .collect::>(), - }), - data: model::Attributes { - point: point_attribs, - cell: Vec::new(), - }, - ..Default::default() - }), + data: match style { + VTKPolyExportStyle::PolyData => { + model::DataSet::inline(model::PolyDataPiece { + points: IOBuffer::new(points), + // A single VERTICES entry containing all points + verts: Some(model::VertexNumbers::Legacy { + num_cells: 1, + vertices: std::iter::once(num_verts) + .chain(0..num_verts) + .collect::>(), + }), + data: model::Attributes { + point: point_attribs, + cell: Vec::new(), + }, + ..Default::default() + }) + } + VTKPolyExportStyle::UnstructuredGrid => { + model::DataSet::inline(model::UnstructuredGridPiece { + points: IOBuffer::new(points), + cells: model::Cells { + cell_verts: model::VertexNumbers::Legacy { + num_cells: 1, + vertices: std::iter::once(num_verts) + .chain(0..num_verts) + .collect::>(), + }, + types: vec![model::CellType::Vertex; ptcloud.num_vertices()], + }, + data: model::Attributes { + point: point_attribs, + cell: Vec::new(), + }, + }) + } + } }) } @@ -1520,7 +1543,7 @@ mod tests { assert_eq!(vtkpointcloud, pointcloud); // pointcloud -> vtk test - let pointcloudvtk = convert_pointcloud_to_vtk_format(&pointcloud).unwrap(); + let pointcloudvtk = convert_pointcloud_to_vtk_format(&pointcloud, VTKPolyExportStyle::PolyData).unwrap(); let vtkpointcloud = convert_vtk_dataset_to_pointcloud(pointcloudvtk.data).unwrap(); assert_eq!(vtkpointcloud, pointcloud); } -- GitLab From d18b72cf0a4c5e35df614b22b7d7c613229d46a3 Mon Sep 17 00:00:00 2001 From: Egor Larionov Date: Thu, 3 Dec 2020 10:14:11 -0500 Subject: [PATCH 15/19] Set VTK version to 0.1 for compat with XML --- src/io/vtk.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/io/vtk.rs b/src/io/vtk.rs index 473a59d..e627daa 100644 --- a/src/io/vtk.rs +++ b/src/io/vtk.rs @@ -75,7 +75,7 @@ pub fn convert_polymesh_to_vtk_format(mesh: &PolyMesh, style: VTKPol .collect(); Ok(model::Vtk { - version: model::Version::new((4, 2)), + version: model::Version::new((0, 1)), title: String::from("Polygonal Mesh"), byte_order: model::ByteOrder::BigEndian, data: match style { @@ -253,7 +253,7 @@ pub fn convert_tetmesh_to_vtk_format(tetmesh: &TetMesh) -> Result( .collect(); Ok(model::Vtk { - version: model::Version::new((4, 2)), + version: model::Version::new((0, 1)), title: String::from("Point Cloud"), byte_order: model::ByteOrder::BigEndian, data: match style { -- GitLab From ce45794e373ff38750051c4e90490e1dea21faf5 Mon Sep 17 00:00:00 2001 From: Egor Larionov Date: Thu, 3 Dec 2020 14:08:26 -0500 Subject: [PATCH 16/19] Refactor repetetive io code into a macro + format --- src/io.rs | 12 +- src/io/vtk.rs | 642 ++++++++++++++------------------------------------ 2 files changed, 189 insertions(+), 465 deletions(-) diff --git a/src/io.rs b/src/io.rs index a264335..2faf76a 100644 --- a/src/io.rs +++ b/src/io.rs @@ -106,7 +106,8 @@ pub fn save_polymesh>( fn save_polymesh_impl(polymesh: &PolyMesh, file: &Path) -> Result<(), Error> { match file.extension().and_then(|ext| ext.to_str()) { Some("vtk") => { - let vtk = vtk::convert_polymesh_to_vtk_format(polymesh, vtk::VTKPolyExportStyle::PolyData)?; + let vtk = + vtk::convert_polymesh_to_vtk_format(polymesh, vtk::VTKPolyExportStyle::PolyData)?; export_vtk(vtk, file)?; Ok(()) 
} @@ -130,7 +131,8 @@ pub fn save_polymesh_ascii>( fn save_polymesh_ascii_impl(polymesh: &PolyMesh, file: &Path) -> Result<(), Error> { match file.extension().and_then(|ext| ext.to_str()) { Some("vtk") => { - let vtk = vtk::convert_polymesh_to_vtk_format(polymesh, vtk::VTKPolyExportStyle::PolyData)?; + let vtk = + vtk::convert_polymesh_to_vtk_format(polymesh, vtk::VTKPolyExportStyle::PolyData)?; export_vtk_ascii(vtk, file)?; Ok(()) } @@ -177,7 +179,8 @@ pub fn save_pointcloud>( pub fn save_pointcloud_impl(ptcloud: &PointCloud, file: &Path) -> Result<(), Error> { match file.extension().and_then(|ext| ext.to_str()) { Some("vtk") => { - let vtk = vtk::convert_pointcloud_to_vtk_format(ptcloud, vtk::VTKPolyExportStyle::PolyData)?; + let vtk = + vtk::convert_pointcloud_to_vtk_format(ptcloud, vtk::VTKPolyExportStyle::PolyData)?; export_vtk(vtk, file)?; Ok(()) } @@ -201,7 +204,8 @@ pub fn save_pointcloud_ascii>( fn save_pointcloud_ascii_impl(ptcloud: &PointCloud, file: &Path) -> Result<(), Error> { match file.extension().and_then(|ext| ext.to_str()) { Some("vtk") => { - let vtk = vtk::convert_pointcloud_to_vtk_format(ptcloud, vtk::VTKPolyExportStyle::PolyData)?; + let vtk = + vtk::convert_pointcloud_to_vtk_format(ptcloud, vtk::VTKPolyExportStyle::PolyData)?; export_vtk_ascii(vtk, file)?; Ok(()) } diff --git a/src/io/vtk.rs b/src/io/vtk.rs index e627daa..69a8b2e 100644 --- a/src/io/vtk.rs +++ b/src/io/vtk.rs @@ -4,6 +4,10 @@ use crate::algo::merge::Merge; use crate::mesh::attrib::{Attrib, AttribDict, AttribIndex, Attribute, AttributeValue}; use crate::mesh::topology::*; use crate::mesh::{PointCloud, PolyMesh, TetMesh, VertexPositions}; +use flatk::{ + consts::{U10, U11, U12, U13, U14, U15, U16, U2, U3, U4, U5, U6, U7, U8, U9}, + U, +}; use super::Real; use super::{NORMAL_ATTRIB_NAME, UV_ATTRIB_NAME}; @@ -40,7 +44,10 @@ pub enum VTKPolyExportStyle { UnstructuredGrid, } -pub fn convert_polymesh_to_vtk_format(mesh: &PolyMesh, style: VTKPolyExportStyle) -> Result { +pub fn convert_polymesh_to_vtk_format( + mesh: &PolyMesh, + style: VTKPolyExportStyle, +) -> Result { let points: Vec = mesh .vertex_positions() .iter() @@ -95,21 +102,19 @@ pub fn convert_polymesh_to_vtk_format(mesh: &PolyMesh, style: VTKPol }, }) } - VTKPolyExportStyle::PolyData => { - model::DataSet::inline(model::PolyDataPiece { - points: IOBuffer::new(points), - polys: Some(model::VertexNumbers::Legacy { - num_cells: mesh.num_faces() as u32, - vertices, - }), - data: model::Attributes { - point: point_attribs, - cell: face_attribs, - }, - ..Default::default() - }) - } - } + VTKPolyExportStyle::PolyData => model::DataSet::inline(model::PolyDataPiece { + points: IOBuffer::new(points), + polys: Some(model::VertexNumbers::Legacy { + num_cells: mesh.num_faces() as u32, + vertices, + }), + data: model::Attributes { + point: point_attribs, + cell: face_attribs, + }, + ..Default::default() + }), + }, }) } @@ -188,8 +193,13 @@ pub fn convert_vtk_dataset_to_polymesh( model::DataSet::PolyData { pieces, .. } => { Ok(PolyMesh::merge_iter(pieces.into_iter().filter_map( |piece| { - let model::PolyDataPiece { points, polys, strips, data, .. } = - piece.load_piece_data().ok()?; + let model::PolyDataPiece { + points, + polys, + strips, + data, + .. + } = piece.load_piece_data().ok()?; // Get points. let pt_coords: Vec = points.cast_into()?; // None is returned in case of overflow. 
let mut pts = Vec::with_capacity(pt_coords.len() / 3); @@ -343,7 +353,7 @@ pub fn convert_vtk_dataset_to_tetmesh( pub fn convert_pointcloud_to_vtk_format( ptcloud: &PointCloud, - style: VTKPolyExportStyle + style: VTKPolyExportStyle, ) -> Result { let num_verts = ptcloud.num_vertices() as u32; let points: Vec = ptcloud @@ -398,7 +408,7 @@ pub fn convert_pointcloud_to_vtk_format( }, }) } - } + }, }) } @@ -480,8 +490,12 @@ pub fn convert_vtk_dataset_to_pointcloud( model::DataSet::PolyData { pieces, .. } => { Ok(PointCloud::merge_iter(pieces.into_iter().filter_map( |piece| { - let model::PolyDataPiece { points, verts, data, .. } = - piece.load_piece_data().ok()?; + let model::PolyDataPiece { + points, + verts, + data, + .. + } = piece.load_piece_data().ok()?; pts.clear(); vertices.clear(); @@ -494,8 +508,7 @@ pub fn convert_vtk_dataset_to_pointcloud( if let Some(topo) = verts { let (_, cell_vertices) = topo.into_legacy(); - vertices - .extend(cell_vertices.into_iter().skip(1).map(|x| x as usize)); + vertices.extend(cell_vertices.into_iter().skip(1).map(|x| x as usize)); } let referenced_points: Vec<_> = vertices.iter().map(|&vtx| pts[vtx]).collect(); @@ -562,266 +575,93 @@ fn into_vtk_attrib_uv(name: &str, attrib: &Attribute) -> Option { + $attrib.direct_clone_into_vec::<$t>().map($f) + }; + (@build $attrib:ident, ($t:ident $($ts:ident)*), $f:expr) => { + $attrib.direct_clone_into_vec::<$t>().map($f) + $( + .or_else(|_| try_interpret_attrib!(@direct $attrib, $ts, $f)) + )* + }; + (@direct $attrib:ident, $n:expr, $t:ident, $f:expr) => { + $attrib.direct_clone_into_vec::<[$t; $n]>().map($f) + }; + (@build $attrib:ident, $n:expr, ($t:ident $($ts:ident)*), $f:expr) => { + $attrib.direct_clone_into_vec::<[$t; $n]>().map($f) + $( + .or_else(|_| try_interpret_attrib!(@direct $attrib, $n, $ts, $f)) + )* + }; + (@direct $attrib:ident, $n:expr, $m:expr, $t:ident, $f:expr) => { + $attrib.direct_clone_into_vec::<[[$t; $n]; $m]>().map($f) + }; + (@build $attrib:ident, $n:expr, $m:expr, ($t:ident $($ts:ident)*), $f:expr) => { + $attrib.direct_clone_into_vec::<[[$t; $n]; $m]>().map($f) + $( + .or_else(|_| try_interpret_attrib!(@direct $attrib, $n, $m, $ts, $f)) + )* + }; + ($attrib:ident, $f:expr) => { + { + try_interpret_attrib!(@build $attrib, (u8 i8 u16 i16 u32 i32 u64 i64 f32 f64), $f) + } + }; + ($attrib:ident, $n:expr, $f:expr) => { + { + try_interpret_attrib!(@build $attrib, $n, (u8 i8 u16 i16 u32 i32 u64 i64 f32 f64), $f) + } + }; + ($attrib:ident, $n:expr, $m:expr, $f:expr) => { + { + try_interpret_attrib!(@build $attrib, $n, $m, (u8 i8 u16 i16 u32 i32 u64 i64 f32 f64), $f) + } + } +} + +macro_rules! try_interpret_generic_attrib { + ($attrib:ident, $name:ident $(,$n:expr)*) => { + { + $( + if let Ok(data) = try_interpret_attrib!($attrib, $n, |x| IOBuffer::from( + x.iter().flat_map(|x| x.iter().cloned()).collect::>() + )) { + return Some(model::Attribute::generic($name, $n).with_data(data)); + } + )* + } + } +} + fn mesh_to_vtk_attrib_impl(name: &str, attrib: &Attribute) -> Option { // Try to match a scalar field. 
- let maybe_iobuf = attrib - .direct_clone_into_vec::() - .map(|x| x.into()) - .or_else(|_| attrib.direct_clone_into_vec::().map(|x| x.into())) - .or_else(|_| attrib.direct_clone_into_vec::().map(|x| x.into())) - .or_else(|_| attrib.direct_clone_into_vec::().map(|x| x.into())) - .or_else(|_| attrib.direct_clone_into_vec::().map(|x| x.into())) - .or_else(|_| attrib.direct_clone_into_vec::().map(|x| x.into())) - .or_else(|_| attrib.direct_clone_into_vec::().map(|x| x.into())) - .or_else(|_| attrib.direct_clone_into_vec::().map(|x| x.into())) - .or_else(|_| attrib.direct_clone_into_vec::().map(|x| x.into())) - .or_else(|_| attrib.direct_clone_into_vec::().map(|x| x.into())); - - if let Ok(data) = maybe_iobuf { - return Some(model::Attribute::DataArray(model::DataArray { - name: name.to_string(), - elem: model::ElementType::Scalars { - num_comp: 1, - lookup_table: None, - }, - data, - })); + if let Ok(data) = try_interpret_attrib!(attrib, IOBuffer::from) { + return Some(model::Attribute::scalars(name, 1).with_data(data)); } // Try to match a vector field. - let maybe_iobuf = attrib - .direct_clone_into_vec::<[u8; 2]>() - .map(|y| flatten2(y).into()) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[i8; 2]>() - .map(|y| flatten2(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[u16; 2]>() - .map(|y| flatten2(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[i16; 2]>() - .map(|y| flatten2(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[u32; 2]>() - .map(|y| flatten2(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[i32; 2]>() - .map(|y| flatten2(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[u64; 2]>() - .map(|y| flatten2(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[i64; 2]>() - .map(|y| flatten2(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[f32; 2]>() - .map(|y| flatten2(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[f64; 2]>() - .map(|y| flatten2(y).into()) - }); - - if let Ok(data) = maybe_iobuf { - return Some(model::Attribute::DataArray(model::DataArray { - name: name.to_string(), - elem: model::ElementType::Scalars { - num_comp: 2, - lookup_table: None, - }, - data, - })); + if let Ok(data) = try_interpret_attrib!(attrib, 2, |x| IOBuffer::from(flatten2(x))) { + return Some(model::Attribute::scalars(name, 2).with_data(data)); } // Try to match a vector field. 
- let maybe_iobuf = attrib - .direct_clone_into_vec::<[u8; 3]>() - .map(|y| flatten3(y).into()) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[i8; 3]>() - .map(|y| flatten3(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[u16; 3]>() - .map(|y| flatten3(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[i16; 3]>() - .map(|y| flatten3(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[u32; 3]>() - .map(|y| flatten3(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[i32; 3]>() - .map(|y| flatten3(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[u64; 3]>() - .map(|y| flatten3(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[i64; 3]>() - .map(|y| flatten3(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[f32; 3]>() - .map(|y| flatten3(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[f64; 3]>() - .map(|y| flatten3(y).into()) - }); - - if let Ok(data) = maybe_iobuf { - return Some(model::Attribute::DataArray(model::DataArray { - name: name.to_string(), - elem: model::ElementType::Vectors, - data, - })); + if let Ok(data) = try_interpret_attrib!(attrib, 3, |x| IOBuffer::from(flatten3(x))) { + return Some(model::Attribute::vectors(name).with_data(data)); } // Try to match a vector field. - let maybe_iobuf = attrib - .direct_clone_into_vec::<[u8; 4]>() - .map(|y| flatten4(y).into()) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[i8; 4]>() - .map(|y| flatten4(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[u16; 4]>() - .map(|y| flatten4(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[i16; 4]>() - .map(|y| flatten4(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[u32; 4]>() - .map(|y| flatten4(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[i32; 4]>() - .map(|y| flatten4(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[u64; 4]>() - .map(|y| flatten4(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[i64; 4]>() - .map(|y| flatten4(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[f32; 4]>() - .map(|y| flatten4(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[f64; 4]>() - .map(|y| flatten4(y).into()) - }); - - if let Ok(data) = maybe_iobuf { - return Some(model::Attribute::DataArray(model::DataArray { - name: name.to_string(), - elem: model::ElementType::Scalars { - num_comp: 4, - lookup_table: None, - }, - data, - })); + if let Ok(data) = try_interpret_attrib!(attrib, 4, |x| IOBuffer::from(flatten4(x))) { + return Some(model::Attribute::scalars(name, 4).with_data(data)); } // Try to match a tensor field. 
- let maybe_iobuf: Result = attrib - .direct_clone_into_vec::<[[u8; 3]; 3]>() - .map(|y| flatten33(y).into()) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[[i8; 3]; 3]>() - .map(|y| flatten33(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[[u16; 3]; 3]>() - .map(|y| flatten33(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[[i16; 3]; 3]>() - .map(|y| flatten33(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[[u32; 3]; 3]>() - .map(|y| flatten33(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[[i32; 3]; 3]>() - .map(|y| flatten33(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[[u64; 3]; 3]>() - .map(|y| flatten33(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[[i64; 3]; 3]>() - .map(|y| flatten33(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[[f32; 3]; 3]>() - .map(|y| flatten33(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[[f64; 3]; 3]>() - .map(|y| flatten33(y).into()) - }); - - if let Ok(data) = maybe_iobuf { + if let Ok(data) = try_interpret_attrib!(attrib, 3, 3, |x| IOBuffer::from(flatten33(x))) { return Some(model::Attribute::tensors(name).with_data(data)); } + // Try to match a generic field for any size up to 16. + try_interpret_generic_attrib!(attrib, name, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); None } @@ -863,141 +703,18 @@ fn mesh_to_vtk_named_field_attribs( .iter() .filter_map(|(name, attrib)| { // Try to match a scalar field. - let maybe_iobuf = attrib - .direct_clone_into_vec::() - .map(|x| x.into()) - .or_else(|_| attrib.direct_clone_into_vec::().map(|x| x.into())) - .or_else(|_| attrib.direct_clone_into_vec::().map(|x| x.into())) - .or_else(|_| attrib.direct_clone_into_vec::().map(|x| x.into())) - .or_else(|_| attrib.direct_clone_into_vec::().map(|x| x.into())) - .or_else(|_| attrib.direct_clone_into_vec::().map(|x| x.into())) - .or_else(|_| attrib.direct_clone_into_vec::().map(|x| x.into())) - .or_else(|_| attrib.direct_clone_into_vec::().map(|x| x.into())) - .or_else(|_| attrib.direct_clone_into_vec::().map(|x| x.into())) - .or_else(|_| attrib.direct_clone_into_vec::().map(|x| x.into())); - - if let Ok(data) = maybe_iobuf { - return Some(model::FieldArray { - name: name.to_string(), - elem: 1, - data, - }); + if let Ok(data) = try_interpret_attrib!(attrib, IOBuffer::from) { + return Some(model::FieldArray::new(name, 1).with_data(data)); } // Try to match a 2D vector field. 
- let maybe_iobuf = attrib - .direct_clone_into_vec::<[u8; 2]>() - .map(|y| flatten2(y).into()) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[i8; 2]>() - .map(|y| flatten2(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[u16; 2]>() - .map(|y| flatten2(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[i16; 2]>() - .map(|y| flatten2(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[u32; 2]>() - .map(|y| flatten2(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[i32; 2]>() - .map(|y| flatten2(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[u64; 2]>() - .map(|y| flatten2(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[i64; 2]>() - .map(|y| flatten2(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[f32; 2]>() - .map(|y| flatten2(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[f64; 2]>() - .map(|y| flatten2(y).into()) - }); - - if let Ok(data) = maybe_iobuf { - return Some(model::FieldArray { - name: name.to_string(), - elem: 2, - data, - }); + if let Ok(data) = try_interpret_attrib!(attrib, 2, |x| IOBuffer::from(flatten2(x))) { + return Some(model::FieldArray::new(name, 2).with_data(data)); } // Try to match a 3D vector field. - let maybe_iobuf = attrib - .direct_clone_into_vec::<[u8; 3]>() - .map(|y| flatten3(y).into()) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[i8; 3]>() - .map(|y| flatten3(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[u16; 3]>() - .map(|y| flatten3(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[i16; 3]>() - .map(|y| flatten3(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[u32; 3]>() - .map(|y| flatten3(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[i32; 3]>() - .map(|y| flatten3(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[u64; 3]>() - .map(|y| flatten3(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[i64; 3]>() - .map(|y| flatten3(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[f32; 3]>() - .map(|y| flatten3(y).into()) - }) - .or_else(|_| { - attrib - .direct_clone_into_vec::<[f64; 3]>() - .map(|y| flatten3(y).into()) - }); - - if let Ok(data) = maybe_iobuf { - return Some(model::FieldArray { - name: name.to_string(), - elem: 3, - data, - }); + if let Ok(data) = try_interpret_attrib!(attrib, 3, |x| IOBuffer::from(flatten3(x))) { + return Some(model::FieldArray::new(name, 3).with_data(data)); } None @@ -1044,39 +761,22 @@ where Ok(()) } -macro_rules! 
impl_add_array_attrib { - ($fn_name:ident, $size:expr) => { - fn $fn_name<'a, T, M, I: AttribIndex>( - buf: &[T], - name: &'a str, - mesh: &mut M, - ) -> Result<(), Error> - where - T: AttributeValue + Copy + Default, - M: Attrib, - { - let n = $size; - let mut vecs = Vec::with_capacity(buf.len() / n); - let mut count_comp = 0; - let mut cur = [T::default(); $size]; - for &val in buf.iter() { - cur[count_comp] = val; - count_comp += 1; - if count_comp == n { - vecs.push(cur); - count_comp = 0; - } - } - mesh.add_attrib_data::<_, I>(name, vecs)?; - Ok(()) - } - }; +fn add_array_attrib_n<'a, T: bytemuck::Pod, M, I: AttribIndex, N>( + buf: &[T], + name: &'a str, + mesh: &mut M, +) -> Result<(), Error> +where + T: AttributeValue + Copy + Default, + M: Attrib, + N: flatk::Unsigned + Default + flatk::Array, + >::Array: Default + PartialEq + std::fmt::Debug, +{ + let chunked = flatk::UniChunked::<_, U>::from_flat(buf.to_vec()); + mesh.add_attrib_data::<_, I>(name, chunked.into_arrays())?; + Ok(()) } -impl_add_array_attrib!(add_array_attrib2, 2); -impl_add_array_attrib!(add_array_attrib3, 3); -impl_add_array_attrib!(add_array_attrib4, 4); - /// Adds VTK attributes to the given mesh, and returns any unprocessed attributes that can be /// processed further. /// @@ -1094,31 +794,49 @@ where for attrib in attribs { match attrib { model::Attribute::DataArray(model::DataArray { name, elem, data }) => { + let name = name.as_str(); match elem { model::ElementType::Scalars { num_comp: dim, .. } | model::ElementType::TCoords(dim) => { - if dim == 1 { + match dim { // Note that only the first found attribute with the same name and location // will be inserted. - match_buf!( &data, v => add_array_attrib::<_,M,I>(&v, name.as_str(), mesh) ) - } else if dim == 2 { - match_buf!( &data, v => add_array_attrib2::<_,M,I>(&v, name.as_str(), mesh) ) - } else if dim == 3 { - match_buf!( &data, v => add_array_attrib3::<_,M,I>(&v, name.as_str(), mesh) ) - } else if dim == 4 { - match_buf!( &data, v => add_array_attrib4::<_,M,I>(&v, name.as_str(), mesh) ) - } else { + 1 => match_buf!( &data, v => add_array_attrib::<_,M,I>(&v, name, mesh) ), + 2 => match_buf!( &data, v => add_array_attrib_n::<_,M,I,U2>(&v, name, mesh) ), + 3 => match_buf!( &data, v => add_array_attrib_n::<_,M,I,U3>(&v, name, mesh) ), + 4 => match_buf!( &data, v => add_array_attrib_n::<_,M,I,U4>(&v, name, mesh) ), // Other values for dim are not supported by the vtk standard // at the time of this writing. 
- continue; + _ => continue, } } model::ElementType::Vectors | model::ElementType::Normals => { - match_buf!( &data, v => add_array_attrib3::<_,M,I>(&v, name.as_str(), mesh) ) + match_buf!( &data, v => add_array_attrib_n::<_,M,I,U3>(&v, name, mesh) ) } model::ElementType::Tensors => { - match_buf!( &data, v => add_2d_array_attrib::<_,M,I>(&v, name.as_str(), mesh) ) + match_buf!( &data, v => add_2d_array_attrib::<_,M,I>(&v, name, mesh) ) + } + model::ElementType::Generic(dim) => { + match dim { + 1 => match_buf!( &data, v => add_array_attrib::<_,M,I>(&v, name, mesh) ), + 2 => match_buf!( &data, v => add_array_attrib_n::<_,M,I,U2>(&v, name, mesh) ), + 3 => match_buf!( &data, v => add_array_attrib_n::<_,M,I,U3>(&v, name, mesh) ), + 4 => match_buf!( &data, v => add_array_attrib_n::<_,M,I,U4>(&v, name, mesh) ), + 5 => match_buf!( &data, v => add_array_attrib_n::<_,M,I,U5>(&v, name, mesh) ), + 6 => match_buf!( &data, v => add_array_attrib_n::<_,M,I,U6>(&v, name, mesh) ), + 7 => match_buf!( &data, v => add_array_attrib_n::<_,M,I,U7>(&v, name, mesh) ), + 8 => match_buf!( &data, v => add_array_attrib_n::<_,M,I,U8>(&v, name, mesh) ), + 9 => match_buf!( &data, v => add_array_attrib_n::<_,M,I,U9>(&v, name, mesh) ), + 10 => match_buf!( &data, v => add_array_attrib_n::<_,M,I,U10>(&v, name, mesh) ), + 11 => match_buf!( &data, v => add_array_attrib_n::<_,M,I,U11>(&v, name, mesh) ), + 12 => match_buf!( &data, v => add_array_attrib_n::<_,M,I,U12>(&v, name, mesh) ), + 13 => match_buf!( &data, v => add_array_attrib_n::<_,M,I,U13>(&v, name, mesh) ), + 14 => match_buf!( &data, v => add_array_attrib_n::<_,M,I,U14>(&v, name, mesh) ), + 15 => match_buf!( &data, v => add_array_attrib_n::<_,M,I,U15>(&v, name, mesh) ), + 16 => match_buf!( &data, v => add_array_attrib_n::<_,M,I,U16>(&v, name, mesh) ), + _ => continue, + } } - _ => { continue; } // LookupTable and ColorScalars attributes ignored + _ => continue, // LookupTable and ColorScalars attributes ignored } } model::Attribute::Field { data_array, name } => { @@ -1132,18 +850,17 @@ where data, } in data_array { + let name = name.as_str(); // Field attributes dont necessarily have the right size. We check it here. - if elem == 1 { + match elem { // Note that only the first found attribute with the same name and location // will be inserted. - match_buf!( &data, v => add_array_attrib::<_,M,I>(&v, name.as_str(), mesh) ) - } else if elem == 2 { - match_buf!( &data, v => add_array_attrib2::<_,M,I>(&v, name.as_str(), mesh) ) - } else if elem == 3 { - match_buf!( &data, v => add_array_attrib3::<_,M,I>(&v, name.as_str(), mesh) ) - } else if elem == 4 { - match_buf!( &data, v => add_array_attrib4::<_,M,I>(&v, name.as_str(), mesh) ) - } else { continue; } + 1 => match_buf!( &data, v => add_array_attrib::<_,M,I>(&v, name, mesh) ), + 2 => match_buf!( &data, v => add_array_attrib_n::<_,M,I,U2>(&v, name, mesh) ), + 3 => match_buf!( &data, v => add_array_attrib_n::<_,M,I,U3>(&v, name, mesh) ), + 4 => match_buf!( &data, v => add_array_attrib_n::<_,M,I,U4>(&v, name, mesh) ), + _ => continue, + } .unwrap_or_else(|err| eprintln!("WARNING: Field attribute transfer error: {}", err)); } continue; @@ -1178,17 +895,16 @@ where } if let model::Attribute::Field { data_array, .. } = attrib { for model::FieldArray { name, elem, data } in data_array { - if elem == 1 { + let name = name.as_str(); + match elem { // Note that only the first found attribute with the same name and location // will be inserted. 
- match_buf!( &data, v => add_array_attrib::<_, _, FaceVertexIndex>(&v, name.as_str(), mesh) ) - } else if elem == 2 { - match_buf!( &data, v => add_array_attrib2::<_, _, FaceVertexIndex>(&v, name.as_str(), mesh) ) - } else if elem == 3 { - match_buf!( &data, v => add_array_attrib3::<_, _, FaceVertexIndex>(&v, name.as_str(), mesh) ) - } else if elem == 4 { - match_buf!( &data, v => add_array_attrib4::<_, _, FaceVertexIndex>(&v, name.as_str(), mesh) ) - } else { continue; } + 1 => match_buf!( &data, v => add_array_attrib::<_, _, FaceVertexIndex>(&v, name, mesh) ), + 2 => match_buf!( &data, v => add_array_attrib_n::<_, _, FaceVertexIndex,U2>(&v, name, mesh) ), + 3 => match_buf!( &data, v => add_array_attrib_n::<_, _, FaceVertexIndex,U3>(&v, name, mesh) ), + 4 => match_buf!( &data, v => add_array_attrib_n::<_, _, FaceVertexIndex,U4>(&v, name, mesh) ), + _ => continue, + } .unwrap_or_else(|err| eprintln!("WARNING: Face Vertex Attribute transfer error for \"{}\": {}", name, err)) } } // Ignore all other attributes @@ -1398,7 +1114,9 @@ mod tests { assert_eq!(vtkpolymesh, polymesh); // polymesh -> vtk test - let polymeshvtk = convert_polymesh_to_vtk_format(&polymesh, VTKPolyExportStyle::UnstructuredGrid).unwrap(); + let polymeshvtk = + convert_polymesh_to_vtk_format(&polymesh, VTKPolyExportStyle::UnstructuredGrid) + .unwrap(); let vtkpolymesh = convert_vtk_dataset_to_polymesh(polymeshvtk.data).unwrap(); assert_eq!(vtkpolymesh, polymesh); } @@ -1420,7 +1138,8 @@ mod tests { assert_eq!(vtkpolymesh, polymesh); // polymesh -> vtk test - let polymeshvtk = convert_polymesh_to_vtk_format(&polymesh, VTKPolyExportStyle::PolyData).unwrap(); + let polymeshvtk = + convert_polymesh_to_vtk_format(&polymesh, VTKPolyExportStyle::PolyData).unwrap(); let vtkpolymesh = convert_vtk_dataset_to_polymesh(polymeshvtk.data).unwrap(); assert_eq!(vtkpolymesh, polymesh); } @@ -1543,7 +1262,8 @@ mod tests { assert_eq!(vtkpointcloud, pointcloud); // pointcloud -> vtk test - let pointcloudvtk = convert_pointcloud_to_vtk_format(&pointcloud, VTKPolyExportStyle::PolyData).unwrap(); + let pointcloudvtk = + convert_pointcloud_to_vtk_format(&pointcloud, VTKPolyExportStyle::PolyData).unwrap(); let vtkpointcloud = convert_vtk_dataset_to_pointcloud(pointcloudvtk.data).unwrap(); assert_eq!(vtkpointcloud, pointcloud); } -- GitLab From 4fcfbe3d6d751d101fea62d23be12cf903dba0ba Mon Sep 17 00:00:00 2001 From: Egor Larionov Date: Thu, 3 Dec 2020 14:17:08 -0500 Subject: [PATCH 17/19] Remove non-additive async-attribs feature + fix Removed the async-attribs feature which was non additive. The thinking is that in the majority of cases, this is what you would want anyways. Until a benchmark shows otherwise we will use Arcs. The fix involves adding Send and Sync bounds when converting vtk types. --- Cargo.toml | 3 --- src/io/vtk.rs | 2 +- src/mesh/attrib/attribute.rs | 20 -------------------- 3 files changed, 1 insertion(+), 24 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index b60cbc1..00a404b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -45,9 +45,6 @@ io = ["vtkio", "objio"] unstable = ["parallel"] bench = ["criterion/real_blackbox", "unstable"] serde_all = ["serde", "math/serde"] -# Implements Send and Sync for attribute values (and hence attributes) -# This has a performance cost on indirect attributes, so it's exposed here as a feature. 
-async-attribs = [] [[bench]] name = "storage" diff --git a/src/io/vtk.rs b/src/io/vtk.rs index 69a8b2e..68458e6 100644 --- a/src/io/vtk.rs +++ b/src/io/vtk.rs @@ -770,7 +770,7 @@ where T: AttributeValue + Copy + Default, M: Attrib, N: flatk::Unsigned + Default + flatk::Array, - >::Array: Default + PartialEq + std::fmt::Debug, + >::Array: Default + PartialEq + std::fmt::Debug + Send + Sync, { let chunked = flatk::UniChunked::<_, U>::from_flat(buf.to_vec()); mesh.add_attrib_data::<_, I>(name, chunked.into_arrays())?; diff --git a/src/mesh/attrib/attribute.rs b/src/mesh/attrib/attribute.rs index 1ffee03..0cbe7e9 100644 --- a/src/mesh/attrib/attribute.rs +++ b/src/mesh/attrib/attribute.rs @@ -13,13 +13,9 @@ use crate::mesh::topology::*; use super::Error; -#[cfg(not(feature = "async-attribs"))] -pub use std::rc::Rc as Irc; -#[cfg(feature = "async-attribs")] pub use std::sync::Arc as Irc; /// A module defining traits for values stored at a mesh attribute. -#[cfg(feature = "async-attribs")] #[allow(missing_docs)] #[dync_mod] mod value_traits { @@ -35,22 +31,6 @@ mod value_traits { impl AttributeValueHash for T where T: AttributeValue + Eq + std::hash::Hash {} } -#[cfg(not(feature = "async-attribs"))] -#[allow(missing_docs)] -#[dync_mod] -mod value_traits { - /// A basic value that can be stored as an attribute in a mesh type. - pub trait AttributeValue: Clone + PartialEq + std::fmt::Debug + 'static {} - impl AttributeValue for T where T: Clone + PartialEq + std::fmt::Debug + 'static {} - - /// A value that can be stored as an indirect attribute in a mesh type. - /// - /// This value is cached inside a `HashSet`, so it requires additional constraints beyond - /// those imposed on `AttributeValue`. - pub trait AttributeValueHash: AttributeValue + Eq + std::hash::Hash {} - impl AttributeValueHash for T where T: AttributeValue + Eq + std::hash::Hash {} -} - pub use self::value_traits::*; /// A slice of attribute values belonging to a particular attribute. 
-- GitLab From 406556569c922620316a3a7f1c5becc1ed40d377 Mon Sep 17 00:00:00 2001 From: Egor Larionov Date: Fri, 15 Jan 2021 22:44:14 -0800 Subject: [PATCH 18/19] Update vtkio to v0.5 --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 00a404b..de98af1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,7 +25,7 @@ serde = { version = "1.0", features = ["derive"], optional = true } chashmap = { version = "2", optional = true } rayon = { version = "1", optional = true } hashbrown = { version = "0.8" } -math = { package = "cgmath", git = "https://github.com/elrnv/cgmath.git", version = "0.17" } +math = { package = "cgmath", version = "0.18" } flatk = "0.3" [dependencies.objio] -- GitLab From eeae01c2c8f8eca6f3642f07fccc5dda0374b9d2 Mon Sep 17 00:00:00 2001 From: Egor Larionov Date: Sat, 16 Jan 2021 12:35:22 -0800 Subject: [PATCH 19/19] Update autodiff, vtkio, rand, approx --- Cargo.toml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index de98af1..85c8057 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,8 +19,8 @@ num-traits = "0.2" dync = { version = "0.4", features = ['numeric'] } reinterpret = "0.2" bytemuck = "1.2" -autodiff = { version = "0.2", features = ["cgmath"] } -vtkio = { version = "0.5", git = "https://github.com/elrnv/vtkio.git", optional = true } +autodiff = { version = "0.3", features = ["cgmath"] } +vtkio = { version = "0.6", git = "https://github.com/elrnv/vtkio.git", optional = true } serde = { version = "1.0", features = ["derive"], optional = true } chashmap = { version = "2", optional = true } rayon = { version = "1", optional = true } @@ -34,8 +34,8 @@ version = "0.10" optional = true [dev-dependencies] -rand = "0.7" -approx = "0.3" +rand = "0.8" +approx = "0.4" criterion = { version = "0.3" } [features] -- GitLab
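
For reference, the export-style parameter introduced in patches 13, 14 and 16 can be exercised roughly as follows. This is a minimal sketch rather than code from the series: it assumes the sketch lives inside the crate with the io/vtkio feature enabled, an f64 PolyMesh, and a made-up one-triangle mesh; the round-trip assertions mirror what unstructured_data_polymesh_test and poly_data_polymesh_test already check.

    use crate::io::vtk::{
        convert_polymesh_to_vtk_format, convert_vtk_dataset_to_polymesh, VTKPolyExportStyle,
    };
    use crate::mesh::PolyMesh;

    fn polydata_vs_unstructured_sketch() {
        // A single triangle; face arrays are count-prefixed index lists, as in the tests.
        let pts = vec![[0.0f64, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]];
        let mesh = PolyMesh::new(pts, &[3, 0, 1, 2]);

        // PolyData is what save_polymesh now picks by default; UnstructuredGrid remains
        // available for tools that expect the unstructured grid layout.
        let as_poly_data =
            convert_polymesh_to_vtk_format(&mesh, VTKPolyExportStyle::PolyData).unwrap();
        let as_unstructured =
            convert_polymesh_to_vtk_format(&mesh, VTKPolyExportStyle::UnstructuredGrid).unwrap();

        // Both styles describe the same surface, so converting back recovers the input mesh.
        assert_eq!(convert_vtk_dataset_to_polymesh(as_poly_data.data).unwrap(), mesh);
        assert_eq!(convert_vtk_dataset_to_polymesh(as_unstructured.data).unwrap(), mesh);
    }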
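
The attribute refactor in patch 16 leans on flatk's UniChunked to turn a flat scalar buffer into fixed-size arrays; that is the step add_array_attrib_n performs before calling add_attrib_data. Below is a tiny standalone sketch of that step with made-up data, assuming (as the imports in the patch suggest) that flatk 0.3 exposes UniChunked, U and the consts sizes at the crate root.

    use flatk::{consts::U3, UniChunked, U};

    fn chunk_flat_attribute_sketch() {
        // Two 3-vectors stored as a flat buffer of six scalars, e.g. a VTK vector attribute.
        let flat = vec![1.0f32, 0.0, 0.0, 0.0, 1.0, 0.0];

        // Reinterpret the flat buffer as uniformly sized chunks of three elements each.
        let chunked = UniChunked::<_, U<U3>>::from_flat(flat);

        // into_arrays yields Vec<[f32; 3]>, ready to be handed to add_attrib_data.
        let arrays: Vec<[f32; 3]> = chunked.into_arrays();
        assert_eq!(arrays, vec![[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]);
    }

Unlike the removed impl_add_array_attrib! macro, which had to be instantiated once per size, this works for any chunk size expressible as a flatk::consts type, which is what lets the Generic(dim) element types up to 16 components be handled uniformly in insert_attribs.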