From a49e392a3f1ededf56a41cc8443fa5e64521952f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Tue, 26 Aug 2025 22:10:49 +0200 Subject: [PATCH 01/63] Remove local lockfile --- Cargo.lock | 44 +- pysplashsurf/Cargo.lock | 1920 --------------------------------------- 2 files changed, 22 insertions(+), 1942 deletions(-) delete mode 100644 pysplashsurf/Cargo.lock diff --git a/Cargo.lock b/Cargo.lock index 121b6a1..f65e822 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -135,9 +135,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "bitflags" -version = "2.9.2" +version = "2.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a65b545ab31d687cff52899d4890855fec459eb6afe0da6417b8a18da87aa29" +checksum = "34efbcccd345379ca2868b2b2c9d3782e9cc58ba87bc7d79d5b53d9c9ae6f25d" [[package]] name = "bumpalo" @@ -213,9 +213,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.33" +version = "1.2.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ee0f8803222ba5a7e2777dd72ca451868909b1ac410621b676adf07280e9b5f" +checksum = "42bc4aea80032b7bf409b0bc7ccad88853858911b7713a8062fdc0623867bedc" dependencies = [ "shlex", ] @@ -269,9 +269,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.45" +version = "4.5.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc0e74a703892159f5ae7d3aac52c8e6c392f5ae5f359c70b5881d60aaac318" +checksum = "2c5e4fcf9c21d2e544ca1ee9d8552de13019a42aa7dbf32747fa7aaf1df76e57" dependencies = [ "clap_builder", "clap_derive", @@ -279,9 +279,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.44" +version = "4.5.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3e7f4214277f3c7aa526a59dd3fbe306a370daee1f8b7b8c987069cd8e888a8" +checksum = "fecb53a0e6fcfb055f686001bc2e2592fa527efaf38dbe81a6a9563562e57d41" dependencies = [ "anstream", "anstyle", @@ -677,9 +677,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" +checksum = "f2481980430f9f78649238835720ddccc57e52df14ffce1c6f37391d61b563e9" dependencies = [ "equivalent", "hashbrown 0.15.5", @@ -1409,9 +1409,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.11.1" +version = "1.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912" dependencies = [ "aho-corasick", "memchr 2.7.5", @@ -1421,9 +1421,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +checksum = "6b9458fa0bfeeac22b5ca447c63aaf45f28439a709ccd244698632f9aa6394d6" dependencies = [ "aho-corasick", "memchr 2.7.5", @@ -1432,9 +1432,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" +checksum = "caf4aa5b0f434c91fe5c7f1ecb6a5ece2130b02ad2a590589dda5146df959001" 
[[package]] name = "rstar" @@ -1480,9 +1480,9 @@ checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" [[package]] name = "safe_arch" -version = "0.7.4" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b02de82ddbe1b636e6170c21be622223aea188ef2e139be0a5b219ec215323" +checksum = "3fb5032219cc30e5bb98749b19a18ceb2cf15e24ba8d517a7e64dff4f1f1eca5" dependencies = [ "bytemuck", ] @@ -1965,9 +1965,9 @@ dependencies = [ [[package]] name = "wide" -version = "0.7.33" +version = "0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ce5da8ecb62bcd8ec8b7ea19f69a51275e91299be594ea5cc6ef7819e16cd03" +checksum = "5a0ab08c041f0cbb00a12fd091b2877dcec2311f90f87a88391d4b0961ffb4fe" dependencies = [ "bytemuck", "safe_arch", @@ -2181,9 +2181,9 @@ checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" [[package]] name = "winnow" -version = "0.7.12" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3edebf492c8125044983378ecb5766203ad3b4c2f7a922bd7dd207f6d443e95" +checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" dependencies = [ "memchr 2.7.5", ] diff --git a/pysplashsurf/Cargo.lock b/pysplashsurf/Cargo.lock deleted file mode 100644 index 773be72..0000000 --- a/pysplashsurf/Cargo.lock +++ /dev/null @@ -1,1920 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -version = 4 - -[[package]] -name = "adler2" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" - -[[package]] -name = "aho-corasick" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" -dependencies = [ - "memchr 2.7.5", -] - -[[package]] -name = "android-tzdata" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" - -[[package]] -name = "android_system_properties" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" -dependencies = [ - "libc", -] - -[[package]] -name = "anstream" -version = "0.6.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "301af1932e46185686725e0fad2f8f2aa7da69dd70bf6ecc44d6b703844a3933" -dependencies = [ - "anstyle", - "anstyle-parse", - "anstyle-query", - "anstyle-wincon", - "colorchoice", - "is_terminal_polyfill", - "utf8parse", -] - -[[package]] -name = "anstyle" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" - -[[package]] -name = "anstyle-parse" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" -dependencies = [ - "utf8parse", -] - -[[package]] -name = "anstyle-query" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8bdeb6047d8983be085bab0ba1472e6dc604e7041dbf6fcd5e71523014fae9" -dependencies = [ - "windows-sys", -] - -[[package]] -name = "anstyle-wincon" -version = "3.0.9" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "403f75924867bb1033c59fbf0797484329750cfbe3c4325cd33127941fabc882" -dependencies = [ - "anstyle", - "once_cell_polyfill", - "windows-sys", -] - -[[package]] -name = "any_ascii" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70033777eb8b5124a81a1889416543dddef2de240019b674c81285a2635a7e1e" - -[[package]] -name = "anyhow" -version = "1.0.98" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" - -[[package]] -name = "approx" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cab112f0a86d568ea0e627cc1d6be74a1e9cd55214684db5561995f6dad897c6" -dependencies = [ - "num-traits", -] - -[[package]] -name = "arrayvec" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" - -[[package]] -name = "autocfg" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" - -[[package]] -name = "base64" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" - -[[package]] -name = "bitflags" -version = "2.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" - -[[package]] -name = "bumpalo" -version = "3.18.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793db76d6187cd04dff33004d8e6c9cc4e05cd330500379d2394209271b4aeee" - -[[package]] -name = "bytecount" -version = "0.6.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "175812e0be2bccb6abe50bb8d566126198344f707e304f45c648fd8f2cc0365e" - -[[package]] -name = "bytemuck" -version = "1.23.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c76a5792e44e4abe34d3abf15636779261d45a7450612059293d1d2cfc63422" - -[[package]] -name = "bytemuck_derive" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ecc273b49b3205b83d648f0690daa588925572cc5063745bfe547fe7ec8e1a1" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.102", -] - -[[package]] -name = "byteorder" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" - -[[package]] -name = "camino" -version = "1.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0da45bc31171d8d6960122e222a67740df867c1dd53b4d51caa297084c185cab" -dependencies = [ - "serde", -] - -[[package]] -name = "cargo-platform" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e35af189006b9c0f00a064685c727031e3ed2d8020f7ba284d78cc2671bd36ea" -dependencies = [ - "serde", -] - -[[package]] -name = "cargo_metadata" -version = "0.14.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa" -dependencies = [ - "camino", - "cargo-platform", - "semver", - "serde", - "serde_json", -] - -[[package]] -name = "cargo_metadata" -version = "0.19.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" -dependencies = [ - "camino", - "cargo-platform", - "semver", - "serde", - "serde_json", - "thiserror", -] - -[[package]] -name = "cc" -version = "1.2.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "956a5e21988b87f372569b66183b78babf23ebc2e744b733e4350a752c4dafac" -dependencies = [ - "shlex", -] - -[[package]] -name = "cfg-if" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" - -[[package]] -name = "chrono" -version = "0.4.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" -dependencies = [ - "android-tzdata", - "iana-time-zone", - "js-sys", - "num-traits", - "wasm-bindgen", - "windows-link", -] - -[[package]] -name = "clap" -version = "4.5.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40b6887a1d8685cebccf115538db5c0efe625ccac9696ad45c409d96566e910f" -dependencies = [ - "clap_builder", - "clap_derive", -] - -[[package]] -name = "clap_builder" -version = "4.5.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0c66c08ce9f0c698cbce5c0279d0bb6ac936d8674174fe48f736533b964f59e" -dependencies = [ - "anstream", - "anstyle", - "clap_lex", - "strsim", -] - -[[package]] -name = "clap_derive" -version = "4.5.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2c7947ae4cc3d851207c1adb5b5e260ff0cca11446b1d6d1423788e442257ce" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn 2.0.102", -] - -[[package]] -name = "clap_lex" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" - -[[package]] -name = "colorchoice" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" - -[[package]] -name = "console" -version = "0.15.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "054ccb5b10f9f2cbf51eb355ca1d05c2d279ce1804688d0db74b4733a5aeafd8" -dependencies = [ - "encode_unicode", - "libc", - "once_cell", - "unicode-width", - "windows-sys", -] - -[[package]] -name = "core-foundation-sys" -version = "0.8.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" - -[[package]] -name = "crc32fast" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "crossbeam-deque" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" -dependencies = [ - "crossbeam-epoch", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.9.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" -dependencies = [ - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.21" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" - -[[package]] -name = "dashmap" -version = "6.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" -dependencies = [ - "cfg-if", - "crossbeam-utils", - "hashbrown 0.14.5", - "lock_api", - "once_cell", - "parking_lot_core", -] - -[[package]] -name = "either" -version = "1.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" - -[[package]] -name = "encode_unicode" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" - -[[package]] -name = "equivalent" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" - -[[package]] -name = "errno" -version = "0.3.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cea14ef9355e3beab063703aa9dab15afd25f0667c341310c1e5274bb1d0da18" -dependencies = [ - "libc", - "windows-sys", -] - -[[package]] -name = "error-chain" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc" -dependencies = [ - "version_check", -] - -[[package]] -name = "fastrand" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" - -[[package]] -name = "fern" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4316185f709b23713e41e3195f90edef7fb00c3ed4adc79769cf09cc762a3b29" -dependencies = [ - "log", -] - -[[package]] -name = "flate2" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d" -dependencies = [ - "crc32fast", - "miniz_oxide", -] - -[[package]] -name = "fxhash" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" -dependencies = [ - "byteorder", -] - -[[package]] -name = "getrandom" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" -dependencies = [ - "cfg-if", - "libc", - "wasi 0.11.1+wasi-snapshot-preview1", -] - -[[package]] -name = "getrandom" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" -dependencies = [ - "cfg-if", - "libc", - "r-efi", - "wasi 0.14.2+wasi-0.2.4", -] - -[[package]] -name = "glob" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" - -[[package]] -name = "hash32" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47d60b12902ba28e2730cd37e95b8c9223af2808df9e902d4df49588d1470606" -dependencies = [ - "byteorder", -] - -[[package]] -name = "hashbrown" -version = "0.14.5" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" - -[[package]] -name = "hashbrown" -version = "0.15.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" - -[[package]] -name = "heapless" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bfb9eb618601c89945a70e254898da93b13be0388091d42117462b265bb3fad" -dependencies = [ - "hash32", - "stable_deref_trait", -] - -[[package]] -name = "heck" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" - -[[package]] -name = "iana-time-zone" -version = "0.1.63" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" -dependencies = [ - "android_system_properties", - "core-foundation-sys", - "iana-time-zone-haiku", - "js-sys", - "log", - "wasm-bindgen", - "windows-core", -] - -[[package]] -name = "iana-time-zone-haiku" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" -dependencies = [ - "cc", -] - -[[package]] -name = "indexmap" -version = "2.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" -dependencies = [ - "equivalent", - "hashbrown 0.15.4", -] - -[[package]] -name = "indicatif" -version = "0.17.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "183b3088984b400f4cfac3620d5e076c84da5364016b4f49473de574b2586235" -dependencies = [ - "console", - "number_prefix", - "portable-atomic", - "unicode-width", - "web-time", -] - -[[package]] -name = "indoc" -version = "2.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c7245a08504955605670dbf141fceab975f15ca21570696aebe9d2e71576bd" - -[[package]] -name = "inventory" -version = "0.3.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab08d7cd2c5897f2c949e5383ea7c7db03fb19130ffcfbf7eda795137ae3cb83" -dependencies = [ - "rustversion", -] - -[[package]] -name = "is_terminal_polyfill" -version = "1.70.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" - -[[package]] -name = "itertools" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" -dependencies = [ - "either", -] - -[[package]] -name = "itertools" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" -dependencies = [ - "either", -] - -[[package]] -name = "itoa" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" - -[[package]] -name = "js-sys" -version = "0.3.77" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" -dependencies = [ - "once_cell", - "wasm-bindgen", -] - -[[package]] -name = "lexical-sort" 
-version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c09e4591611e231daf4d4c685a66cb0410cc1e502027a20ae55f2bb9e997207a" -dependencies = [ - "any_ascii", -] - -[[package]] -name = "libc" -version = "0.2.172" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" - -[[package]] -name = "libm" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" - -[[package]] -name = "linked-hash-map" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" - -[[package]] -name = "linux-raw-sys" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" - -[[package]] -name = "lock_api" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" -dependencies = [ - "autocfg", - "scopeguard", -] - -[[package]] -name = "log" -version = "0.4.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" - -[[package]] -name = "lz4_flex" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05304f8e67dfc93d1b4b990137fd1a7a4c6ad44b60a9c486c8c4486f9d2027ae" - -[[package]] -name = "lzma-sys" -version = "0.1.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fda04ab3764e6cde78b9974eec4f779acaba7c4e84b36eca3cf77c581b85d27" -dependencies = [ - "cc", - "libc", - "pkg-config", -] - -[[package]] -name = "maplit" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" - -[[package]] -name = "matrixmultiply" -version = "0.3.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06de3016e9fae57a36fd14dba131fccf49f74b40b7fbdb472f96e361ec71a08" -dependencies = [ - "autocfg", - "rawpointer", -] - -[[package]] -name = "memchr" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "148fab2e51b4f1cfc66da2a7c32981d1d3c083a803978268bb11fe4b86925e7a" -dependencies = [ - "libc", -] - -[[package]] -name = "memchr" -version = "2.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" - -[[package]] -name = "memoffset" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" -dependencies = [ - "autocfg", -] - -[[package]] -name = "miniz_oxide" -version = "0.8.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" -dependencies = [ - "adler2", -] - -[[package]] -name = "nalgebra" -version = "0.33.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26aecdf64b707efd1310e3544d709c5c0ac61c13756046aaaba41be5c4f66a3b" -dependencies = [ - "approx", - "bytemuck", - "matrixmultiply", - "nalgebra-macros", - "num-complex", - "num-rational", 
- "num-traits", - "rand", - "rand_distr", - "simba", - "typenum", -] - -[[package]] -name = "nalgebra-macros" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "254a5372af8fc138e36684761d3c0cdb758a4410e938babcff1c860ce14ddbfc" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.102", -] - -[[package]] -name = "ndarray" -version = "0.16.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "882ed72dce9365842bf196bdeedf5055305f11fc8c03dee7bb0194a6cad34841" -dependencies = [ - "matrixmultiply", - "num-complex", - "num-integer", - "num-traits", - "portable-atomic", - "portable-atomic-util", - "rawpointer", -] - -[[package]] -name = "nom" -version = "3.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05aec50c70fd288702bcd93284a8444607f3292dbdf2a30de5ea5dcdbe72287b" -dependencies = [ - "memchr 1.0.2", -] - -[[package]] -name = "nom" -version = "8.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df9761775871bdef83bee530e60050f7e54b1105350d6884eb0fb4f46c2f9405" -dependencies = [ - "memchr 2.7.5", -] - -[[package]] -name = "num-bigint" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" -dependencies = [ - "num-integer", - "num-traits", -] - -[[package]] -name = "num-complex" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" -dependencies = [ - "num-traits", -] - -[[package]] -name = "num-derive" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "num-integer" -version = "0.1.46" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" -dependencies = [ - "num-traits", -] - -[[package]] -name = "num-rational" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" -dependencies = [ - "num-bigint", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-traits" -version = "0.2.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" -dependencies = [ - "autocfg", - "libm", -] - -[[package]] -name = "number_prefix" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" - -[[package]] -name = "numeric_literals" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "095aa67b0b9f2081746998f4f17106bdb51d56dc8c211afca5531b92b83bf98a" -dependencies = [ - "quote", - "syn 1.0.109", -] - -[[package]] -name = "numpy" -version = "0.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29f1dee9aa8d3f6f8e8b9af3803006101bb3653866ef056d530d53ae68587191" -dependencies = [ - "libc", - "ndarray", - "num-complex", - "num-integer", - "num-traits", - "pyo3", - "pyo3-build-config", - "rustc-hash", -] - -[[package]] -name = "once_cell" -version = "1.21.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" - -[[package]] -name = "once_cell_polyfill" -version = "1.70.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" - -[[package]] -name = "parking_lot" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" -dependencies = [ - "lock_api", - "parking_lot_core", -] - -[[package]] -name = "parking_lot_core" -version = "0.9.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" -dependencies = [ - "cfg-if", - "libc", - "redox_syscall", - "smallvec", - "windows-targets", -] - -[[package]] -name = "paste" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" - -[[package]] -name = "peg" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f76678828272f177ac33b7e2ac2e3e73cc6c1cd1e3e387928aa69562fa51367" -dependencies = [ - "peg-macros", - "peg-runtime", -] - -[[package]] -name = "peg-macros" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "636d60acf97633e48d266d7415a9355d4389cea327a193f87df395d88cd2b14d" -dependencies = [ - "peg-runtime", - "proc-macro2", - "quote", -] - -[[package]] -name = "peg-runtime" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9555b1514d2d99d78150d3c799d4c357a3e2c2a8062cd108e93a06d9057629c5" - -[[package]] -name = "pkg-config" -version = "0.3.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" - -[[package]] -name = "ply-rs" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbadf9cb4a79d516de4c64806fe64ffbd8161d1ac685d000be789fb628b88963" -dependencies = [ - "byteorder", - "linked-hash-map", - "peg", - "skeptic", -] - -[[package]] -name = "portable-atomic" -version = "1.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" - -[[package]] -name = "portable-atomic-util" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507" -dependencies = [ - "portable-atomic", -] - -[[package]] -name = "ppv-lite86" -version = "0.2.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" -dependencies = [ - "zerocopy", -] - -[[package]] -name = "proc-macro2" -version = "1.0.95" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "pulldown-cmark" -version = "0.9.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57206b407293d2bcd3af849ce869d52068623f19e1b5ff8e8778e3309439682b" -dependencies = [ - "bitflags", - "memchr 2.7.5", - "unicase", -] - -[[package]] -name = "pyo3" -version = "0.25.0" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f239d656363bcee73afef85277f1b281e8ac6212a1d42aa90e55b90ed43c47a4" -dependencies = [ - "anyhow", - "indoc", - "libc", - "memoffset", - "once_cell", - "portable-atomic", - "pyo3-build-config", - "pyo3-ffi", - "pyo3-macros", - "unindent", -] - -[[package]] -name = "pyo3-build-config" -version = "0.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "755ea671a1c34044fa165247aaf6f419ca39caa6003aee791a0df2713d8f1b6d" -dependencies = [ - "once_cell", - "target-lexicon", -] - -[[package]] -name = "pyo3-ffi" -version = "0.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc95a2e67091e44791d4ea300ff744be5293f394f1bafd9f78c080814d35956e" -dependencies = [ - "libc", - "pyo3-build-config", -] - -[[package]] -name = "pyo3-macros" -version = "0.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a179641d1b93920829a62f15e87c0ed791b6c8db2271ba0fd7c2686090510214" -dependencies = [ - "proc-macro2", - "pyo3-macros-backend", - "quote", - "syn 2.0.102", -] - -[[package]] -name = "pyo3-macros-backend" -version = "0.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dff85ebcaab8c441b0e3f7ae40a6963ecea8a9f5e74f647e33fcf5ec9a1e89e" -dependencies = [ - "heck", - "proc-macro2", - "pyo3-build-config", - "quote", - "syn 2.0.102", -] - -[[package]] -name = "pyo3-stub-gen" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2da99110990aded329ea6e5e6567bcab1577a2109253cebbd54d23cd61951752" -dependencies = [ - "anyhow", - "cargo_metadata 0.19.2", - "chrono", - "either", - "indexmap", - "inventory", - "itertools 0.13.0", - "log", - "maplit", - "num-complex", - "numpy", - "pyo3", - "pyo3-build-config", - "pyo3-stub-gen-derive", - "semver", - "serde", - "toml", -] - -[[package]] -name = "pyo3-stub-gen-derive" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9a036cb01c21f3014989614036a69f1467bfbfde608a37f98eaefb016b1abfe" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn 2.0.102", -] - -[[package]] -name = "pysplashsurf" -version = "0.11.0" -dependencies = [ - "anyhow", - "bytemuck", - "fxhash", - "log", - "ndarray", - "numpy", - "pyo3", - "pyo3-stub-gen", - "rayon", - "splashsurf", - "splashsurf_lib", -] - -[[package]] -name = "quick-xml" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8533f14c8382aaad0d592c812ac3b826162128b65662331e1127b45c3d18536b" -dependencies = [ - "memchr 2.7.5", - "serde", -] - -[[package]] -name = "quote" -version = "1.0.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "r-efi" -version = "5.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" - -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "libc", - "rand_chacha", - "rand_core", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core", -] - -[[package]] -name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom 0.2.16", -] - -[[package]] -name = "rand_distr" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31" -dependencies = [ - "num-traits", - "rand", -] - -[[package]] -name = "rawpointer" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" - -[[package]] -name = "rayon" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" -dependencies = [ - "either", - "rayon-core", -] - -[[package]] -name = "rayon-core" -version = "1.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" -dependencies = [ - "crossbeam-deque", - "crossbeam-utils", -] - -[[package]] -name = "redox_syscall" -version = "0.5.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "928fca9cf2aa042393a8325b9ead81d2f0df4cb12e1e24cef072922ccd99c5af" -dependencies = [ - "bitflags", -] - -[[package]] -name = "regex" -version = "1.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" -dependencies = [ - "aho-corasick", - "memchr 2.7.5", - "regex-automata", - "regex-syntax", -] - -[[package]] -name = "regex-automata" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" -dependencies = [ - "aho-corasick", - "memchr 2.7.5", - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" - -[[package]] -name = "rstar" -version = "0.12.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "421400d13ccfd26dfa5858199c30a5d76f9c54e0dba7575273025b43c5175dbb" -dependencies = [ - "heapless", - "num-traits", - "smallvec", -] - -[[package]] -name = "rustc-hash" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" - -[[package]] -name = "rustix" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" -dependencies = [ - "bitflags", - "errno", - "libc", - "linux-raw-sys", - "windows-sys", -] - -[[package]] -name = "rustversion" -version = "1.0.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" - -[[package]] -name = "ryu" -version = "1.0.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" - -[[package]] -name = "safe_arch" -version = "0.7.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b02de82ddbe1b636e6170c21be622223aea188ef2e139be0a5b219ec215323" -dependencies = [ - "bytemuck", -] - -[[package]] -name = "same-file" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "scopeguard" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" - -[[package]] -name = "semver" -version = "1.0.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" -dependencies = [ - "serde", -] - -[[package]] -name = "serde" -version = "1.0.219" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.219" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.102", -] - -[[package]] -name = "serde_json" -version = "1.0.140" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" -dependencies = [ - "itoa", - "memchr 2.7.5", - "ryu", - "serde", -] - -[[package]] -name = "serde_spanned" -version = "0.6.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" -dependencies = [ - "serde", -] - -[[package]] -name = "shlex" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" - -[[package]] -name = "simba" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3a386a501cd104797982c15ae17aafe8b9261315b5d07e3ec803f2ea26be0fa" -dependencies = [ - "approx", - "num-complex", - "num-traits", - "paste", - "wide", -] - -[[package]] -name = "skeptic" -version = "0.13.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16d23b015676c90a0f01c197bfdc786c20342c73a0afdda9025adb0bc42940a8" -dependencies = [ - "bytecount", - "cargo_metadata 0.14.2", - "error-chain", - "glob", - "pulldown-cmark", - "tempfile", - "walkdir", -] - -[[package]] -name = "smallvec" -version = "1.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" - -[[package]] -name = "splashsurf" -version = "0.11.0" -dependencies = [ - "anyhow", - "bytemuck", - "chrono", - "clap", - "fern", - "indicatif", - "lexical-sort", - "log", - "once_cell", - "parking_lot", - "rayon", - "regex", - "splashsurf_lib", - "walkdir", -] - -[[package]] -name = "splashsurf_lib" -version = "0.11.0" -dependencies = [ - "anyhow", - "arrayvec", - "bitflags", - "bytemuck", - "bytemuck_derive", - "chrono", - "clap", - "dashmap", - "fern", - "flate2", - "fxhash", - "itertools 0.14.0", - "log", - "nalgebra", - "nom 8.0.0", - "num-integer", - "num-traits", - "numeric_literals", - "parking_lot", - "ply-rs", - "rayon", - "rstar", - "serde_json", - "simba", - "thiserror", - 
"thread_local", - "vtkio", -] - -[[package]] -name = "stable_deref_trait" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" - -[[package]] -name = "strsim" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" - -[[package]] -name = "syn" -version = "1.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "syn" -version = "2.0.102" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6397daf94fa90f058bd0fd88429dd9e5738999cca8d701813c80723add80462" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "target-lexicon" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e502f78cdbb8ba4718f566c418c52bc729126ffd16baee5baa718cf25dd5a69a" - -[[package]] -name = "tempfile" -version = "3.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" -dependencies = [ - "fastrand", - "getrandom 0.3.3", - "once_cell", - "rustix", - "windows-sys", -] - -[[package]] -name = "thiserror" -version = "2.0.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "2.0.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.102", -] - -[[package]] -name = "thread_local" -version = "1.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" -dependencies = [ - "cfg-if", - "once_cell", -] - -[[package]] -name = "toml" -version = "0.8.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" -dependencies = [ - "serde", - "serde_spanned", - "toml_datetime", - "toml_edit", -] - -[[package]] -name = "toml_datetime" -version = "0.6.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" -dependencies = [ - "serde", -] - -[[package]] -name = "toml_edit" -version = "0.22.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" -dependencies = [ - "indexmap", - "serde", - "serde_spanned", - "toml_datetime", - "toml_write", - "winnow", -] - -[[package]] -name = "toml_write" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" - -[[package]] -name = "typenum" -version = "1.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" - -[[package]] -name = "unicase" -version = "2.8.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" - -[[package]] -name = "unicode-ident" -version = "1.0.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" - -[[package]] -name = "unicode-width" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a1a07cc7db3810833284e8d372ccdc6da29741639ecc70c9ec107df0fa6154c" - -[[package]] -name = "unindent" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7264e107f553ccae879d21fbea1d6724ac785e8c3bfc762137959b5802826ef3" - -[[package]] -name = "utf8parse" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" - -[[package]] -name = "version_check" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" - -[[package]] -name = "vtkio" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abbe89e5b97b472d57abeb02755a06d75b28d2df7d1fe3df5baf032281a65c16" -dependencies = [ - "base64", - "bytemuck", - "byteorder", - "flate2", - "lz4_flex", - "nom 3.2.1", - "num-derive", - "num-traits", - "quick-xml", - "serde", - "xz2", -] - -[[package]] -name = "walkdir" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" -dependencies = [ - "same-file", - "winapi-util", -] - -[[package]] -name = "wasi" -version = "0.11.1+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" - -[[package]] -name = "wasi" -version = "0.14.2+wasi-0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" -dependencies = [ - "wit-bindgen-rt", -] - -[[package]] -name = "wasm-bindgen" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" -dependencies = [ - "cfg-if", - "once_cell", - "rustversion", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" -dependencies = [ - "bumpalo", - "log", - "proc-macro2", - "quote", - "syn 2.0.102", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.102", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.100" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "web-time" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "wide" -version = "0.7.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41b5576b9a81633f3e8df296ce0063042a73507636cbe956c61133dd7034ab22" -dependencies = [ - "bytemuck", - "safe_arch", -] - -[[package]] -name = "winapi-util" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" -dependencies = [ - "windows-sys", -] - -[[package]] -name = "windows-core" -version = "0.61.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" -dependencies = [ - "windows-implement", - "windows-interface", - "windows-link", - "windows-result", - "windows-strings", -] - -[[package]] -name = "windows-implement" -version = "0.60.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.102", -] - -[[package]] -name = "windows-interface" -version = "0.59.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.102", -] - -[[package]] -name = "windows-link" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" - -[[package]] -name = "windows-result" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" -dependencies = [ - "windows-link", -] - -[[package]] -name = "windows-strings" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" -dependencies = [ - "windows-link", -] - -[[package]] -name = "windows-sys" -version = "0.59.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" -dependencies = [ - "windows-targets", -] - -[[package]] -name = "windows-targets" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" -dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_gnullvm", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" - -[[package]] -name = "windows_i686_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" - -[[package]] -name = "windows_i686_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" - -[[package]] -name = "windows_i686_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" - -[[package]] -name = "winnow" -version = "0.7.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74c7b26e3480b707944fc872477815d29a8e429d2f93a1ce000f5fa84a15cbcd" -dependencies = [ - "memchr 2.7.5", -] - -[[package]] -name = "wit-bindgen-rt" -version = "0.39.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" -dependencies = [ - "bitflags", -] - -[[package]] -name = "xz2" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388c44dc09d76f1536602ead6d325eb532f5c122f17782bd57fb47baeeb767e2" -dependencies = [ - "lzma-sys", -] - -[[package]] -name = "zerocopy" -version = "0.8.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" -dependencies = [ - "zerocopy-derive", -] - -[[package]] -name = "zerocopy-derive" -version = "0.8.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.102", -] From 820871b247d37418b47e409b8cd0cfd49f59db76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Tue, 26 Aug 2025 22:11:54 +0200 Subject: [PATCH 02/63] Py: Fix local build on macOS --- Cargo.lock | 1 + pysplashsurf/Cargo.toml | 5 ++++- pysplashsurf/README.md | 2 +- pysplashsurf/build.rs | 11 +++++++++++ 4 files changed, 17 insertions(+), 2 deletions(-) create mode 100644 pysplashsurf/build.rs diff --git a/Cargo.lock b/Cargo.lock index f65e822..56a91ce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1303,6 +1303,7 @@ dependencies = [ "ndarray", "numpy", "pyo3", + "pyo3-build-config", "pyo3-stub-gen", "splashsurf", "splashsurf_lib", diff --git a/pysplashsurf/Cargo.toml b/pysplashsurf/Cargo.toml index f63b184..5dc8b28 100644 --- a/pysplashsurf/Cargo.toml +++ b/pysplashsurf/Cargo.toml @@ -13,7 +13,10 @@ numpy = "0.25.0" ndarray = "0.16.1" bytemuck = { version = "1.23.0", features = ["extern_crate_alloc"] } anyhow = "1.0.98" -pyo3-stub-gen = "0.12.0" +pyo3-stub-gen = "0.12.2" + 
+[build-dependencies]
+pyo3-build-config = "0.25.1"
 [features]
 extension-module = ["pyo3/extension-module", "pyo3/abi3-py37"]
diff --git a/pysplashsurf/README.md b/pysplashsurf/README.md
index a0504d2..e2e6215 100644
--- a/pysplashsurf/README.md
+++ b/pysplashsurf/README.md
@@ -85,4 +85,4 @@ To generate the Sphinx documentation, make sure that the package is installed th
 The resulting HTML files will be in `pysplashsurf/pysplashsurf/docs/build/html`.
 
 ### Stub File Generation
-To automatically generate a stub file for the package, run `cargo run --bin stub_gen` from the root project folder (from `pysplashsurf/`).
+To automatically generate a stub file for the package, run `cargo run --bin stub_gen --no-default-features` from the root project folder (from `pysplashsurf/`).
diff --git a/pysplashsurf/build.rs b/pysplashsurf/build.rs
new file mode 100644
index 0000000..b5c01d6
--- /dev/null
+++ b/pysplashsurf/build.rs
@@ -0,0 +1,11 @@
+fn main() {
+    if std::env::var("CARGO_CFG_TARGET_OS").as_deref() == Ok("macos") {
+        println!(
+            "cargo:rustc-link-arg=-Wl,-rpath,{}",
+            pyo3_build_config::get()
+                .lib_dir
+                .clone()
+                .expect("Python lib dir not found")
+        );
+    }
+}

From c6aeda6a7c1459544f543cf7bbca5bdb7efee7f0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabian=20L=C3=B6schner?=
Date: Tue, 26 Aug 2025 19:33:33 +0200
Subject: [PATCH 03/63] Py: WIP: Combine f32 & f64 pipeline and mesh types

---
 pysplashsurf/pysplashsurf/pysplashsurf.pyi |  32 +++
 pysplashsurf/src/lib.rs                    |   5 +
 pysplashsurf/src/mesh.rs                   | 247 ++++++++++++++++++++-
 pysplashsurf/src/pipeline.rs               | 231 ++++++++++++++++++-
 4 files changed, 513 insertions(+), 2 deletions(-)

diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi
index d9a8f7d..165b8c5 100644
--- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi
+++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi
@@ -324,6 +324,36 @@ class MixedTriQuadMeshWithDataF64:
         Get all registered cell attribute names
         """
 
+class PyMeshWithData:
+    @property
+    def dtype(self) -> numpy.dtype:
+        r"""
+        Returns the numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`)
+        """
+    def copy_mesh(self) -> PyTriMesh3d:
+        r"""
+        Returns a copy of the contained mesh without associated data and attributes
+        """
+    def copy_vertices(self) -> numpy.typing.NDArray[typing.Any]:
+        r"""
+        Returns a copy of the `Nx3` array of vertex positions
+        """
+
+class PyTriMesh3d:
+    @property
+    def dtype(self) -> numpy.dtype:
+        r"""
+        Returns the numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`)
+        """
+    def copy_vertices(self) -> numpy.typing.NDArray[typing.Any]:
+        r"""
+        Returns a copy of the `Nx3` array of vertex positions
+        """
+    def copy_triangles(self) -> numpy.typing.NDArray[numpy.uint64]:
+        r"""
+        Returns a copy of the `Mx3` array of the vertex indices that make up the triangles
+        """
+
 class SphInterpolatorF32:
     r"""
     SphInterpolator wrapper
@@ -596,3 +626,5 @@ class UniformGridF64:
         """
         ...
 
+def reconstruction_pipeline_multi(particles:numpy.typing.NDArray[typing.Any], *, attributes_to_interpolate:typing.Optional[dict]=None, particle_radius:builtins.float, rest_density:builtins.float=1000.0, smoothing_length:builtins.float, cube_size:builtins.float, iso_surface_threshold:builtins.float=0.6, aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None, multi_threading:builtins.bool=True, subdomain_grid:builtins.bool=True, subdomain_grid_auto_disable:builtins.bool=True, subdomain_num_cubes_per_dim:builtins.int=64, check_mesh_closed:builtins.bool=False, check_mesh_manifold:builtins.bool=False, check_mesh_orientation:builtins.bool=False, check_mesh_debug:builtins.bool=False, mesh_cleanup:builtins.bool=False, mesh_cleanup_snap_dist:typing.Optional[builtins.float]=None, decimate_barnacles:builtins.bool=False, keep_vertices:builtins.bool=False, compute_normals:builtins.bool=False, sph_normals:builtins.bool=False, normals_smoothing_iters:typing.Optional[builtins.int]=None, mesh_smoothing_iters:typing.Optional[builtins.int]=None, mesh_smoothing_weights:builtins.bool=True, mesh_smoothing_weights_normalization:builtins.float=13.0, generate_quads:builtins.bool=False, quad_max_edge_diag_ratio:builtins.float=1.75, quad_max_normal_angle:builtins.float=10.0, quad_max_interior_angle:builtins.float=135.0, output_mesh_smoothing_weights:builtins.bool=False, output_raw_normals:builtins.bool=False, output_raw_mesh:builtins.bool=False, mesh_aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, mesh_aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None, mesh_aabb_clamp_vertices:builtins.bool=True, dtype:typing.Optional[numpy.dtype]=None) -> typing.Optional[PyMeshWithData]: ... 
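+# A minimal usage sketch (values are hypothetical; `particles` is an Nx3 float32 or
+# float64 array, `smoothing_length` and `cube_size` in multiples of the particle radius):
+#
+#     mesh = reconstruction_pipeline_multi(
+#         particles,
+#         particle_radius=0.025,
+#         smoothing_length=2.0,
+#         cube_size=0.5,
+#     )
+#     if mesh is not None:
+#         vertices = mesh.copy_vertices()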
+ diff --git a/pysplashsurf/src/lib.rs b/pysplashsurf/src/lib.rs index 6ab5589..eb0e1e8 100644 --- a/pysplashsurf/src/lib.rs +++ b/pysplashsurf/src/lib.rs @@ -107,6 +107,11 @@ fn pysplashsurf(m: &Bound<'_, PyModule>) -> PyResult<()> { m )?); + let _ = m.add_function(wrap_pyfunction!( + pipeline::reconstruction_pipeline_multi, + m + )?); + let _ = m.add_function(wrap_pyfunction!( pipeline::reconstruction_pipeline_py_f32, m diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index 07e9ae8..8b3285d 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -1,5 +1,10 @@ +use anyhow::anyhow; use ndarray::{Array2, ArrayView, ArrayView2}; -use numpy::{Element, IntoPyArray, PyArray, PyArray2, PyArrayMethods, PyReadonlyArray2, ToPyArray}; +use numpy as np; +use numpy::{ + Element, IntoPyArray, PyArray, PyArray2, PyArrayDescr, PyArrayMethods, PyReadonlyArray2, + PyUntypedArray, ToPyArray, +}; use pyo3::{ IntoPyObjectExt, exceptions::PyValueError, @@ -15,6 +20,7 @@ use splashsurf_lib::{ }, nalgebra::{Unit, Vector3}, }; +use std::marker::PhantomData; use crate::aabb::{Aabb3dF32, Aabb3dF64}; @@ -62,6 +68,107 @@ fn add_attribute_with_name<'py, R: Real + Element>( } } +struct TriMeshInterface { + phantom_data: PhantomData, +} + +impl TriMeshInterface { + /// Returns a copy of the `Nx3` array of vertex positions + fn get_vertices<'py>( + mesh: &TriMesh3d, + py: Python<'py>, + ) -> PyResult>> { + let points: &[R] = bytemuck::cast_slice(&mesh.vertices); + let vertices: ArrayView2 = + ArrayView::from_shape((mesh.vertices.len(), 3), points).map_err(anyhow::Error::new)?; + Ok(vertices.to_pyarray(py)) // seems like at least one copy is necessary here (to_pyarray copies the data) + } + + /// Returns a copy of the `Mx3` array of the vertex indices that make up a triangle + fn get_triangles<'py>( + mesh: &TriMesh3d, + py: Python<'py>, + ) -> PyResult>> { + let tris: &[u64] = bytemuck::cast_slice(&mesh.triangles); + let triangles: ArrayView2 = + ArrayView::from_shape((mesh.triangles.len(), 3), tris).map_err(anyhow::Error::new)?; + Ok(triangles.to_pyarray(py)) + } + + /// Alias for `get_triangles` + fn get_cells<'py>(mesh: &TriMesh3d, py: Python<'py>) -> PyResult>> { + Self::get_triangles(mesh, py) + } + + /// Returns the `Nx3` array of vertex positions by moving it out of the mesh (zero copy) + fn take_vertices<'py>( + mesh: &mut TriMesh3d, + py: Python<'py>, + ) -> PyResult>> { + let vertices = std::mem::take(&mut mesh.vertices); + let n = vertices.len(); + let vertices_scalar: Vec = bytemuck::cast_vec(vertices); + let vertices_array = PyArray::from_vec(py, vertices_scalar) + .reshape([n, 3]) + .map_err(anyhow::Error::new)?; + Ok(vertices_array) + } + + /// Returns the `Mx3` array of the vertex indices that make up the triangles by moving it out of the mesh (zero copy) + fn take_triangles<'py>( + mesh: &mut TriMesh3d, + py: Python<'py>, + ) -> PyResult>> { + let triangles = std::mem::take(&mut mesh.triangles); + let m = triangles.len(); + let triangles_scalar: Vec = bytemuck::cast_vec(triangles); + let triangles_array = PyArray::from_vec(py, triangles_scalar) + .reshape([m, 3]) + .map_err(anyhow::Error::new)?; + Ok(triangles_array) + } + + /// Alias for `take_triangles` + fn take_cells<'py>( + mesh: &mut TriMesh3d, + py: Python<'py>, + ) -> PyResult>> { + Self::take_triangles(mesh, py) + } + + /// Returns a tuple containing the vertices and triangles of the mesh by moving them out of the mesh (zero copy) + fn take_vertices_and_triangles<'py>( + mesh: &mut TriMesh3d, + py: Python<'py>, + 
) -> PyResult> { + let tup = ( + Self::take_vertices(mesh, py)?, + Self::take_triangles(mesh, py)?, + ); + tup.into_pyobject(py) + } + + /// Computes the mesh's vertex normals using an area weighted average of the adjacent triangle faces (parallelized version) + fn par_vertex_normals<'py>( + mesh: &TriMesh3d, + py: Python<'py>, + ) -> PyResult>> { + let normals_vec = mesh.par_vertex_normals(); + let normals_vec = bytemuck::allocation::cast_vec::>, R>(normals_vec); + + let normals: &[R] = normals_vec.as_slice(); + let normals: ArrayView2 = + ArrayView::from_shape((normals.len() / 3, 3), normals).map_err(anyhow::Error::new)?; + + Ok(normals.to_pyarray(py)) + } + + /// Returns a mapping of all mesh vertices to the set of their connected neighbor vertices + fn vertex_vertex_connectivity(mesh: &TriMesh3d) -> Vec> { + mesh.vertex_vertex_connectivity() + } +} + macro_rules! create_mesh_data_interface { ($name: ident, $type: ident, $mesh_class: ident, $pymesh_class: ident, $aabb_class: ident) => { /// MeshWithData wrapper @@ -436,6 +543,144 @@ macro_rules! create_tri_quad_mesh_interface { }; } +enum PyTriMesh3dData { + F32(TriMesh3d), + F64(TriMesh3d), +} + +#[gen_stub_pyclass] +#[pyclass] +pub struct PyTriMesh3d { + inner: PyTriMesh3dData, +} + +impl TryFrom> for PyTriMesh3d { + type Error = &'static str; + + fn try_from(mesh: TriMesh3d) -> Result { + if std::any::TypeId::of::() == std::any::TypeId::of::() { + let mesh = unsafe { std::mem::transmute::, TriMesh3d>(mesh) }; + Ok(Self { + inner: PyTriMesh3dData::F32(mesh), + }) + } else if std::any::TypeId::of::() == std::any::TypeId::of::() { + let mesh = unsafe { std::mem::transmute::, TriMesh3d>(mesh) }; + Ok(Self { + inner: PyTriMesh3dData::F64(mesh), + }) + } else { + Err("Unsupported scalar type for TriMesh3d. 
Only f32 and f64 are supported.") + } + } +} + +#[gen_stub_pymethods] +#[pymethods] +impl PyTriMesh3d { + /// Returns the numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) + #[getter] + pub fn dtype<'py>(&self, py: Python<'py>) -> Bound<'py, PyArrayDescr> { + match &self.inner { + PyTriMesh3dData::F32(_) => np::dtype::(py), + PyTriMesh3dData::F64(_) => np::dtype::(py), + } + } + + /// Returns a copy of the `Nx3` array of vertex positions + pub fn copy_vertices<'py>(&self, py: Python<'py>) -> PyResult> { + match &self.inner { + PyTriMesh3dData::F32(mesh) => TriMeshInterface::get_vertices(mesh, py) + .map(|v| v.into_any().downcast_into::().unwrap()), + PyTriMesh3dData::F64(mesh) => TriMeshInterface::get_vertices(mesh, py) + .map(|v| v.into_any().downcast_into::().unwrap()), + } + } + + /// Returns a copy of the `Nx3` array of vertex positions + pub fn copy_triangles<'py>(&self, py: Python<'py>) -> PyResult>> { + match &self.inner { + PyTriMesh3dData::F32(mesh) => TriMeshInterface::get_triangles(mesh, py), + PyTriMesh3dData::F64(mesh) => TriMeshInterface::get_triangles(mesh, py), + } + } +} + +enum PyMeshWithDataData { + Tri3dF32(MeshWithData>), + Tri3dF64(MeshWithData>), +} + +#[gen_stub_pyclass] +#[pyclass] +pub struct PyMeshWithData { + inner: PyMeshWithDataData, +} + +impl TryFrom>> for PyMeshWithData { + type Error = &'static str; + + fn try_from(mesh: MeshWithData>) -> Result { + if std::any::TypeId::of::() == std::any::TypeId::of::() { + let mesh = unsafe { + std::mem::transmute::< + MeshWithData>, + MeshWithData>, + >(mesh) + }; + Ok(Self { + inner: PyMeshWithDataData::Tri3dF32(mesh), + }) + } else if std::any::TypeId::of::() == std::any::TypeId::of::() { + let mesh = unsafe { + std::mem::transmute::< + MeshWithData>, + MeshWithData>, + >(mesh) + }; + Ok(Self { + inner: PyMeshWithDataData::Tri3dF64(mesh), + }) + } else { + Err("Unsupported scalar type for MeshWithData. 
Only f32 and f64 are supported.") + } + } +} + +#[gen_stub_pymethods] +#[pymethods] +impl PyMeshWithData { + /// Returns the numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) + #[getter] + pub fn dtype<'py>(&self, py: Python<'py>) -> Bound<'py, PyArrayDescr> { + match &self.inner { + PyMeshWithDataData::Tri3dF32(_) => np::dtype::(py), + PyMeshWithDataData::Tri3dF64(_) => np::dtype::(py), + } + } + + /// Returns a copy of the contained mesh without associated data and attributes + pub fn copy_mesh(&self) -> PyResult { + match &self.inner { + PyMeshWithDataData::Tri3dF32(mesh) => { + PyTriMesh3d::try_from(mesh.mesh.clone()).map_err(|e| anyhow!(e).into()) + } + PyMeshWithDataData::Tri3dF64(mesh) => { + PyTriMesh3d::try_from(mesh.mesh.clone()).map_err(|e| anyhow!(e).into()) + } + } + } + + /// Returns a copy of the `Nx3` array of vertex positions + pub fn copy_vertices<'py>(&self, py: Python<'py>) -> PyResult> { + match &self.inner { + PyMeshWithDataData::Tri3dF32(mesh) => TriMeshInterface::get_vertices(&mesh.mesh, py) + .map(|v| v.into_any().downcast_into::().unwrap()), + PyMeshWithDataData::Tri3dF64(mesh) => TriMeshInterface::get_vertices(&mesh.mesh, py) + .map(|v| v.into_any().downcast_into::().unwrap()), + } + } +} + create_tri_mesh_interface!(TriMesh3dF64, f64); create_tri_mesh_interface!(TriMesh3dF32, f32); diff --git a/pysplashsurf/src/pipeline.rs b/pysplashsurf/src/pipeline.rs index 1ec9470..5c5ea8d 100644 --- a/pysplashsurf/src/pipeline.rs +++ b/pysplashsurf/src/pipeline.rs @@ -1,3 +1,4 @@ +use crate::mesh::PyMeshWithData; use crate::{ mesh::{ MixedTriQuadMeshWithDataF32, MixedTriQuadMeshWithDataF64, TriMeshWithDataF32, @@ -5,11 +6,18 @@ use crate::{ }, reconstruction::{SurfaceReconstructionF32, SurfaceReconstructionF64}, }; -use numpy::{Element, PyArray1, PyArray2, PyArrayMethods, PyReadonlyArray1, PyReadonlyArray2}; +use anyhow::anyhow; +use numpy as np; +use numpy::{ + Element, PyArray1, PyArray2, PyArrayDescr, PyArrayDescrMethods, PyArrayMethods, + PyReadonlyArray1, PyReadonlyArray2, PyUntypedArray, PyUntypedArrayMethods, +}; +use pyo3::exceptions::PyTypeError; use pyo3::{ prelude::*, types::{PyDict, PyString}, }; +use pyo3_stub_gen::derive::gen_stub_pyfunction; use splashsurf_lib::{ Aabb3d, GridDecompositionParameters, Index, Real, SpatialDecomposition, mesh::{AttributeData, MeshAttribute}, @@ -17,6 +25,227 @@ use splashsurf_lib::{ }; use std::borrow::Cow; +#[gen_stub_pyfunction] +#[pyfunction] +#[pyo3(name = "reconstruction_pipeline_multi")] +#[pyo3(signature = (particles, *, attributes_to_interpolate = None, + particle_radius, rest_density = 1000.0, smoothing_length, cube_size, iso_surface_threshold = 0.6, + aabb_min = None, aabb_max = None, multi_threading = true, + subdomain_grid = true, subdomain_grid_auto_disable = true, subdomain_num_cubes_per_dim = 64, + check_mesh_closed = false, check_mesh_manifold = false, check_mesh_orientation = false, check_mesh_debug = false, + mesh_cleanup = false, mesh_cleanup_snap_dist = None, decimate_barnacles = false, keep_vertices = false, compute_normals = false, sph_normals = false, + normals_smoothing_iters = None, mesh_smoothing_iters = None, mesh_smoothing_weights = true, mesh_smoothing_weights_normalization = 13.0, + generate_quads = false, quad_max_edge_diag_ratio = 1.75, quad_max_normal_angle = 10.0, quad_max_interior_angle = 135.0, + output_mesh_smoothing_weights = false, output_raw_normals = false, output_raw_mesh = false, + mesh_aabb_min = None, mesh_aabb_max = None, mesh_aabb_clamp_vertices = true, 
dtype = None +))] +pub fn reconstruction_pipeline_multi<'py>( + particles: &Bound<'py, PyUntypedArray>, + attributes_to_interpolate: Option>, + particle_radius: f64, + rest_density: f64, + smoothing_length: f64, + cube_size: f64, + iso_surface_threshold: f64, + aabb_min: Option<[f64; 3]>, + aabb_max: Option<[f64; 3]>, + multi_threading: bool, + subdomain_grid: bool, + subdomain_grid_auto_disable: bool, + subdomain_num_cubes_per_dim: u32, + check_mesh_closed: bool, + check_mesh_manifold: bool, + check_mesh_orientation: bool, + check_mesh_debug: bool, + mesh_cleanup: bool, + mesh_cleanup_snap_dist: Option, + decimate_barnacles: bool, + keep_vertices: bool, + compute_normals: bool, + sph_normals: bool, + normals_smoothing_iters: Option, + mesh_smoothing_iters: Option, + mesh_smoothing_weights: bool, + mesh_smoothing_weights_normalization: f64, + generate_quads: bool, + quad_max_edge_diag_ratio: f64, + quad_max_normal_angle: f64, + quad_max_interior_angle: f64, + output_mesh_smoothing_weights: bool, + output_raw_normals: bool, + output_raw_mesh: bool, + mesh_aabb_min: Option<[f64; 3]>, + mesh_aabb_max: Option<[f64; 3]>, + mesh_aabb_clamp_vertices: bool, + dtype: Option>, +) -> PyResult> { + let py = particles.py(); + let element_type = dtype.unwrap_or_else(|| particles.dtype()); + + let particle_aabb = aabb_min + .zip(aabb_max) + .map(|(min, max)| Aabb3d::new(Vector3::from(min), Vector3::from(max))); + + let mesh_aabb = mesh_aabb_min + .zip(mesh_aabb_max) + .map(|(min, max)| Aabb3d::new(Vector3::from(min), Vector3::from(max))); + + let spatial_decomposition = if subdomain_grid { + SpatialDecomposition::UniformGrid(GridDecompositionParameters { + subdomain_num_cubes_per_dim, + auto_disable: subdomain_grid_auto_disable, + }) + } else { + SpatialDecomposition::None + }; + + let parameters = splashsurf_lib::Parameters { + particle_radius, + rest_density, + compact_support_radius: 2.0 * smoothing_length * particle_radius, + cube_size: cube_size * particle_radius, + iso_surface_threshold, + particle_aabb, + enable_multi_threading: multi_threading, + spatial_decomposition, + global_neighborhood_list: mesh_smoothing_weights, + }; + + let postprocessing_args = splashsurf::reconstruct::ReconstructionPostprocessingParameters { + check_mesh_closed, + check_mesh_manifold, + check_mesh_orientation, + check_mesh_debug, + mesh_cleanup, + mesh_cleanup_snap_dist, + decimate_barnacles, + keep_vertices, + compute_normals, + sph_normals, + normals_smoothing_iters, + interpolate_attributes: None, + mesh_smoothing_iters, + mesh_smoothing_weights, + mesh_smoothing_weights_normalization, + generate_quads, + quad_max_edge_diag_ratio, + quad_max_normal_angle, + quad_max_interior_angle, + output_mesh_smoothing_weights, + output_raw_normals, + output_raw_mesh, + mesh_aabb, + mesh_aabb_clamp_vertices, + }; + + if element_type.is_equiv_to(&np::dtype::(py)) { + println!("Detected f32 particle array"); + let particles = particles.downcast::>()?; + let reconstruction = reconstruction_pipeline_generic_impl::( + particles, + attributes_to_interpolate, + ¶meters + .try_convert() + .expect("failed to convert reconstruction parameters to f32"), + &postprocessing_args, + )?; + let mesh = reconstruction + .tri_mesh + .map(|mesh_with_data| PyMeshWithData::try_from(mesh_with_data)); + mesh.transpose().map_err(|err| anyhow!(err).into()) + } else if element_type.is_equiv_to(&np::dtype::(py)) { + println!("Detected f64 particle array"); + let particles = particles.downcast::>()?; + let reconstruction = 
reconstruction_pipeline_generic_impl::( + particles, + attributes_to_interpolate, + ¶meters, + &postprocessing_args, + )?; + let mesh = reconstruction + .tri_mesh + .map(|mesh_with_data| PyMeshWithData::try_from(mesh_with_data)); + mesh.transpose().map_err(|err| anyhow!(err).into()) + } else { + Err(PyTypeError::new_err(format!( + "Unsupported element type: {}", + element_type + ))) + } +} + +fn reconstruction_pipeline_generic_impl<'py, I: Index, R: Real + Element>( + particles: &Bound<'py, PyArray2>, + attributes_to_interpolate: Option>, + parameters: &splashsurf_lib::Parameters, + postprocessing_args: &splashsurf::reconstruct::ReconstructionPostprocessingParameters, +) -> Result, anyhow::Error> { + let particles: PyReadonlyArray2 = particles.readonly(); + let particle_positions: &[Vector3] = bytemuck::cast_slice(particles.as_slice()?); + + enum AttributePyView<'a, R: Real + Element> { + U64(PyReadonlyArray1<'a, u64>), + Float(PyReadonlyArray1<'a, R>), + FloatVec3(PyReadonlyArray2<'a, R>), + } + + let mut attr_names = Vec::new(); + let mut attr_views = Vec::new(); + + // Collect readonly views of all attribute arrays + for (key, value) in attributes_to_interpolate.iter().flatten() { + let key_str: String = key + .downcast::() + .expect("attribute key has to be a string") + .extract()?; + + if let Ok(value) = value.downcast::>() { + attr_views.push(AttributePyView::U64(value.readonly())); + attr_names.push(key_str); + } else if let Ok(value) = value.downcast::>() { + attr_views.push(AttributePyView::Float(value.readonly())); + attr_names.push(key_str); + } else if let Ok(value) = value.downcast::>() { + attr_views.push(AttributePyView::FloatVec3(value.readonly())); + attr_names.push(key_str); + } else { + println!("Failed to downcast attribute {} to valid type", &key_str); + } + } + + // Get slices from attribute views and construct borrowed MeshAttributes + let attributes = attr_names + .into_iter() + .zip(attr_views.iter()) + .map(|(name, view)| -> Result, anyhow::Error> { + let data = match view { + AttributePyView::U64(view) => { + AttributeData::ScalarU64(Cow::Borrowed(view.as_slice()?.into())) + } + AttributePyView::Float(view) => { + AttributeData::ScalarReal(Cow::Borrowed(view.as_slice()?.into())) + } + AttributePyView::FloatVec3(view) => { + let vec3_slice: &[Vector3] = bytemuck::cast_slice(view.as_slice()?); + AttributeData::Vector3Real(Cow::Borrowed(vec3_slice.into())) + } + }; + Ok(MeshAttribute::new(name, data)) + }) + .collect::, _>>()?; + + let mut postprocessing_args = postprocessing_args.clone(); + postprocessing_args.interpolate_attributes = + (!attributes.is_empty()).then(|| attributes.iter().map(|a| a.name.clone()).collect()); + + splashsurf::reconstruct::reconstruction_pipeline( + particle_positions, + &attributes, + ¶meters, + &postprocessing_args, + ) +} + fn reconstruction_pipeline_generic<'py, I: Index, R: Real + Element>( particles: &Bound<'py, PyArray2>, attributes_to_interpolate: Bound<'py, PyDict>, From af70b2377de72b27f16e1ffc959202c76183a96a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Wed, 27 Aug 2025 14:17:32 +0200 Subject: [PATCH 04/63] Py: Fix cast of usize for Numpy arrays on 32 bit architectures --- pysplashsurf/src/lib.rs | 7 +++++++ pysplashsurf/src/mesh.rs | 31 ++++++++++++++++--------------- 2 files changed, 23 insertions(+), 15 deletions(-) diff --git a/pysplashsurf/src/lib.rs b/pysplashsurf/src/lib.rs index eb0e1e8..603783f 100644 --- a/pysplashsurf/src/lib.rs +++ b/pysplashsurf/src/lib.rs @@ -4,6 +4,13 @@ use 
pyo3_stub_gen::define_stub_info_gatherer; use splashsurf::cli; +#[cfg(target_pointer_width = "64")] +pub(crate) use u64 as NumpyUsize; +#[cfg(target_pointer_width = "32")] +pub(crate) use u32 as NumpyUsize; +#[cfg(not(any(target_pointer_width = "64", target_pointer_width = "32")))] +compile_error!("Unsupported target pointer width, only 32 and 64 bit are supported."); + mod aabb; mod mesh; mod sph_interpolation; diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index 8b3285d..1248770 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -1,4 +1,5 @@ use anyhow::anyhow; +use crate::NumpyUsize; use ndarray::{Array2, ArrayView, ArrayView2}; use numpy as np; use numpy::{ @@ -88,15 +89,15 @@ impl TriMeshInterface { fn get_triangles<'py>( mesh: &TriMesh3d, py: Python<'py>, - ) -> PyResult>> { - let tris: &[u64] = bytemuck::cast_slice(&mesh.triangles); - let triangles: ArrayView2 = + ) -> PyResult>> { + let tris: &[NumpyUsize] = bytemuck::cast_slice(&mesh.triangles); + let triangles: ArrayView2 = ArrayView::from_shape((mesh.triangles.len(), 3), tris).map_err(anyhow::Error::new)?; Ok(triangles.to_pyarray(py)) } /// Alias for `get_triangles` - fn get_cells<'py>(mesh: &TriMesh3d, py: Python<'py>) -> PyResult>> { + fn get_cells<'py>(mesh: &TriMesh3d, py: Python<'py>) -> PyResult>> { Self::get_triangles(mesh, py) } @@ -118,10 +119,10 @@ impl TriMeshInterface { fn take_triangles<'py>( mesh: &mut TriMesh3d, py: Python<'py>, - ) -> PyResult>> { + ) -> PyResult>> { let triangles = std::mem::take(&mut mesh.triangles); let m = triangles.len(); - let triangles_scalar: Vec = bytemuck::cast_vec(triangles); + let triangles_scalar: Vec = bytemuck::cast_vec(triangles); let triangles_array = PyArray::from_vec(py, triangles_scalar) .reshape([m, 3]) .map_err(anyhow::Error::new)?; @@ -132,7 +133,7 @@ impl TriMeshInterface { fn take_cells<'py>( mesh: &mut TriMesh3d, py: Python<'py>, - ) -> PyResult>> { + ) -> PyResult>> { Self::take_triangles(mesh, py) } @@ -405,16 +406,16 @@ macro_rules! create_tri_mesh_interface { } /// Returns a copy of the `Mx3` array of the vertex indices that make up a triangle - fn get_triangles<'py>(&self, py: Python<'py>) -> PyResult>> { - let tris: &[u64] = bytemuck::cast_slice(&self.inner.triangles); - let triangles: ArrayView2 = + fn get_triangles<'py>(&self, py: Python<'py>) -> PyResult>> { + let tris: &[NumpyUsize] = bytemuck::cast_slice(&self.inner.triangles); + let triangles: ArrayView2 = ArrayView::from_shape((self.inner.triangles.len(), 3), tris) .map_err(anyhow::Error::new)?; Ok(triangles.to_pyarray(py)) } /// Alias for `get_triangles` - fn get_cells<'py>(&self, py: Python<'py>) -> PyResult>> { + fn get_cells<'py>(&self, py: Python<'py>) -> PyResult>> { self.get_triangles(py) } @@ -436,10 +437,10 @@ macro_rules! create_tri_mesh_interface { fn take_triangles<'py>( &mut self, py: Python<'py>, - ) -> PyResult>> { + ) -> PyResult>> { let triangles = std::mem::take(&mut self.inner.triangles); let m = triangles.len(); - let triangles_scalar: Vec = bytemuck::cast_vec(triangles); + let triangles_scalar: Vec = bytemuck::cast_vec(triangles); let triangles_array = PyArray::from_vec(py, triangles_scalar) .reshape([m, 3]) .map_err(anyhow::Error::new)?; @@ -447,7 +448,7 @@ macro_rules! 
create_tri_mesh_interface { } /// Alias for `take_triangles` - fn take_cells<'py>(&mut self, py: Python<'py>) -> PyResult>> { + fn take_cells<'py>(&mut self, py: Python<'py>) -> PyResult>> { self.take_triangles(py) } @@ -597,7 +598,7 @@ impl PyTriMesh3d { } /// Returns a copy of the `Nx3` array of vertex positions - pub fn copy_triangles<'py>(&self, py: Python<'py>) -> PyResult>> { + pub fn copy_triangles<'py>(&self, py: Python<'py>) -> PyResult>> { match &self.inner { PyTriMesh3dData::F32(mesh) => TriMeshInterface::get_triangles(mesh, py), PyTriMesh3dData::F64(mesh) => TriMeshInterface::get_triangles(mesh, py), From e8b0c6452091bfa8f103898cf162450d0b8fba3b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Wed, 27 Aug 2025 14:45:40 +0200 Subject: [PATCH 05/63] Py: WIP: Further mesh refactoring --- pysplashsurf/pysplashsurf/pysplashsurf.pyi | 17 +- pysplashsurf/src/lib.rs | 4 +- pysplashsurf/src/mesh.rs | 334 ++++++++++++--------- pysplashsurf/src/pipeline.rs | 32 +- 4 files changed, 224 insertions(+), 163 deletions(-) diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index 165b8c5..45fc9c8 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -330,7 +330,7 @@ class PyMeshWithData: r""" Returns the numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) """ - def copy_mesh(self) -> PyTriMesh3d: + def copy_mesh(self) -> typing.Union[PyTriMesh3d, PyMixedTriQuadMesh3d]: r""" Returns a copy of the contained mesh without associated data and attributes """ @@ -339,6 +339,17 @@ class PyMeshWithData: Returns a copy of the `Nx3` array of vertex positions """ +class PyMixedTriQuadMesh3d: + @property + def dtype(self) -> numpy.dtype: + r""" + Returns the numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) + """ + def copy_vertices(self) -> numpy.typing.NDArray[typing.Any]: + r""" + Returns a copy of the `Nx3` array of vertex positions + """ + class PyTriMesh3d: @property def dtype(self) -> numpy.dtype: @@ -351,7 +362,7 @@ class PyTriMesh3d: """ def copy_triangles(self) -> numpy.typing.NDArray[numpy.uint64]: r""" - Returns a copy of the `Nx3` array of vertex positions + Returns a copy of the `Mx3` array of vertex indices per triangle """ class SphInterpolatorF32: @@ -626,5 +637,5 @@ class UniformGridF64: """ ... 
-def reconstruction_pipeline_multi(particles:numpy.typing.NDArray[typing.Any], *, attributes_to_interpolate:typing.Optional[dict]=None, particle_radius:builtins.float, rest_density:builtins.float=1000.0, smoothing_length:builtins.float, cube_size:builtins.float, iso_surface_threshold:builtins.float=0.6, aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None, multi_threading:builtins.bool=True, subdomain_grid:builtins.bool=True, subdomain_grid_auto_disable:builtins.bool=True, subdomain_num_cubes_per_dim:builtins.int=64, check_mesh_closed:builtins.bool=False, check_mesh_manifold:builtins.bool=False, check_mesh_orientation:builtins.bool=False, check_mesh_debug:builtins.bool=False, mesh_cleanup:builtins.bool=False, mesh_cleanup_snap_dist:typing.Optional[builtins.float]=None, decimate_barnacles:builtins.bool=False, keep_vertices:builtins.bool=False, compute_normals:builtins.bool=False, sph_normals:builtins.bool=False, normals_smoothing_iters:typing.Optional[builtins.int]=None, mesh_smoothing_iters:typing.Optional[builtins.int]=None, mesh_smoothing_weights:builtins.bool=True, mesh_smoothing_weights_normalization:builtins.float=13.0, generate_quads:builtins.bool=False, quad_max_edge_diag_ratio:builtins.float=1.75, quad_max_normal_angle:builtins.float=10.0, quad_max_interior_angle:builtins.float=135.0, output_mesh_smoothing_weights:builtins.bool=False, output_raw_normals:builtins.bool=False, output_raw_mesh:builtins.bool=False, mesh_aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, mesh_aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None, mesh_aabb_clamp_vertices:builtins.bool=True, dtype:typing.Optional[numpy.dtype]=None) -> typing.Optional[PyMeshWithData]: ... +def reconstruction_pipeline_multi(particles:numpy.typing.NDArray[typing.Any], *, attributes_to_interpolate:typing.Optional[dict]=None, particle_radius:builtins.float, rest_density:builtins.float=1000.0, smoothing_length:builtins.float, cube_size:builtins.float, iso_surface_threshold:builtins.float=0.6, aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None, multi_threading:builtins.bool=True, subdomain_grid:builtins.bool=True, subdomain_grid_auto_disable:builtins.bool=True, subdomain_num_cubes_per_dim:builtins.int=64, check_mesh_closed:builtins.bool=False, check_mesh_manifold:builtins.bool=False, check_mesh_orientation:builtins.bool=False, check_mesh_debug:builtins.bool=False, mesh_cleanup:builtins.bool=False, mesh_cleanup_snap_dist:typing.Optional[builtins.float]=None, decimate_barnacles:builtins.bool=False, keep_vertices:builtins.bool=False, compute_normals:builtins.bool=False, sph_normals:builtins.bool=False, normals_smoothing_iters:typing.Optional[builtins.int]=None, mesh_smoothing_iters:typing.Optional[builtins.int]=None, mesh_smoothing_weights:builtins.bool=True, mesh_smoothing_weights_normalization:builtins.float=13.0, generate_quads:builtins.bool=False, quad_max_edge_diag_ratio:builtins.float=1.75, quad_max_normal_angle:builtins.float=10.0, quad_max_interior_angle:builtins.float=135.0, output_mesh_smoothing_weights:builtins.bool=False, output_raw_normals:builtins.bool=False, output_raw_mesh:builtins.bool=False, mesh_aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, mesh_aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None, mesh_aabb_clamp_vertices:builtins.bool=True, dtype:typing.Optional[numpy.dtype]=None) -> PyMeshWithData: ... 
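+# Sketch of inspecting the returned mesh (names per the stubs above; values hypothetical):
+#
+#     mesh = reconstruction_pipeline_multi(particles, particle_radius=0.025,
+#                                          smoothing_length=2.0, cube_size=0.5)
+#     mesh.dtype              # numpy dtype matching `particles` (float32 or float64)
+#     tri = mesh.copy_mesh()  # PyTriMesh3d (PyMixedTriQuadMesh3d when generate_quads=True)
+#     tri.copy_triangles()    # Mx3 array of vertex indices (triangle meshes only)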
diff --git a/pysplashsurf/src/lib.rs b/pysplashsurf/src/lib.rs index 603783f..ead15c0 100644 --- a/pysplashsurf/src/lib.rs +++ b/pysplashsurf/src/lib.rs @@ -4,10 +4,10 @@ use pyo3_stub_gen::define_stub_info_gatherer; use splashsurf::cli; -#[cfg(target_pointer_width = "64")] -pub(crate) use u64 as NumpyUsize; #[cfg(target_pointer_width = "32")] pub(crate) use u32 as NumpyUsize; +#[cfg(target_pointer_width = "64")] +pub(crate) use u64 as NumpyUsize; #[cfg(not(any(target_pointer_width = "64", target_pointer_width = "32")))] compile_error!("Unsupported target pointer width, only 32 and 64 bit are supported."); diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index 1248770..5b27dc3 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -1,11 +1,12 @@ -use anyhow::anyhow; use crate::NumpyUsize; +use crate::aabb::{Aabb3dF32, Aabb3dF64}; use ndarray::{Array2, ArrayView, ArrayView2}; use numpy as np; use numpy::{ Element, IntoPyArray, PyArray, PyArray2, PyArrayDescr, PyArrayMethods, PyReadonlyArray2, PyUntypedArray, ToPyArray, }; +use pyo3::exceptions::PyTypeError; use pyo3::{ IntoPyObjectExt, exceptions::PyValueError, @@ -13,6 +14,7 @@ use pyo3::{ types::{PyDict, PyList, PyTuple}, }; use pyo3_stub_gen::derive::*; +use splashsurf_lib::mesh::TriangleCell; use splashsurf_lib::{ Real, mesh::{ @@ -21,9 +23,7 @@ use splashsurf_lib::{ }, nalgebra::{Unit, Vector3}, }; -use std::marker::PhantomData; - -use crate::aabb::{Aabb3dF32, Aabb3dF64}; +use std::any::TypeId; fn get_attribute_with_name<'py, R: Real + Element>( py: Python<'py>, @@ -69,105 +69,25 @@ fn add_attribute_with_name<'py, R: Real + Element>( } } -struct TriMeshInterface { - phantom_data: PhantomData, +fn get_vertices<'py, R: Real + Element>( + py: Python<'py>, + vertices: &[Vector3], +) -> PyResult>> { + let coordinates: &[R] = bytemuck::cast_slice(vertices); + let vertices: ArrayView2 = + ArrayView::from_shape((vertices.len(), 3), coordinates).map_err(anyhow::Error::new)?; + // Seems like at least one copy is necessary here (to_pyarray copies the data) + Ok(vertices.to_pyarray(py)) } -impl TriMeshInterface { - /// Returns a copy of the `Nx3` array of vertex positions - fn get_vertices<'py>( - mesh: &TriMesh3d, - py: Python<'py>, - ) -> PyResult>> { - let points: &[R] = bytemuck::cast_slice(&mesh.vertices); - let vertices: ArrayView2 = - ArrayView::from_shape((mesh.vertices.len(), 3), points).map_err(anyhow::Error::new)?; - Ok(vertices.to_pyarray(py)) // seems like at least one copy is necessary here (to_pyarray copies the data) - } - - /// Returns a copy of the `Mx3` array of the vertex indices that make up a triangle - fn get_triangles<'py>( - mesh: &TriMesh3d, - py: Python<'py>, - ) -> PyResult>> { - let tris: &[NumpyUsize] = bytemuck::cast_slice(&mesh.triangles); - let triangles: ArrayView2 = - ArrayView::from_shape((mesh.triangles.len(), 3), tris).map_err(anyhow::Error::new)?; - Ok(triangles.to_pyarray(py)) - } - - /// Alias for `get_triangles` - fn get_cells<'py>(mesh: &TriMesh3d, py: Python<'py>) -> PyResult>> { - Self::get_triangles(mesh, py) - } - - /// Returns the `Nx3` array of vertex positions by moving it out of the mesh (zero copy) - fn take_vertices<'py>( - mesh: &mut TriMesh3d, - py: Python<'py>, - ) -> PyResult>> { - let vertices = std::mem::take(&mut mesh.vertices); - let n = vertices.len(); - let vertices_scalar: Vec = bytemuck::cast_vec(vertices); - let vertices_array = PyArray::from_vec(py, vertices_scalar) - .reshape([n, 3]) - .map_err(anyhow::Error::new)?; - Ok(vertices_array) - } - - /// 
Returns the `Mx3` array of the vertex indices that make up the triangles by moving it out of the mesh (zero copy) - fn take_triangles<'py>( - mesh: &mut TriMesh3d, - py: Python<'py>, - ) -> PyResult>> { - let triangles = std::mem::take(&mut mesh.triangles); - let m = triangles.len(); - let triangles_scalar: Vec = bytemuck::cast_vec(triangles); - let triangles_array = PyArray::from_vec(py, triangles_scalar) - .reshape([m, 3]) - .map_err(anyhow::Error::new)?; - Ok(triangles_array) - } - - /// Alias for `take_triangles` - fn take_cells<'py>( - mesh: &mut TriMesh3d, - py: Python<'py>, - ) -> PyResult>> { - Self::take_triangles(mesh, py) - } - - /// Returns a tuple containing the vertices and triangles of the mesh by moving them out of the mesh (zero copy) - fn take_vertices_and_triangles<'py>( - mesh: &mut TriMesh3d, - py: Python<'py>, - ) -> PyResult> { - let tup = ( - Self::take_vertices(mesh, py)?, - Self::take_triangles(mesh, py)?, - ); - tup.into_pyobject(py) - } - - /// Computes the mesh's vertex normals using an area weighted average of the adjacent triangle faces (parallelized version) - fn par_vertex_normals<'py>( - mesh: &TriMesh3d, - py: Python<'py>, - ) -> PyResult>> { - let normals_vec = mesh.par_vertex_normals(); - let normals_vec = bytemuck::allocation::cast_vec::>, R>(normals_vec); - - let normals: &[R] = normals_vec.as_slice(); - let normals: ArrayView2 = - ArrayView::from_shape((normals.len() / 3, 3), normals).map_err(anyhow::Error::new)?; - - Ok(normals.to_pyarray(py)) - } - - /// Returns a mapping of all mesh vertices to the set of their connected neighbor vertices - fn vertex_vertex_connectivity(mesh: &TriMesh3d) -> Vec> { - mesh.vertex_vertex_connectivity() - } +fn get_triangles<'py>( + py: Python<'py>, + triangles: &[TriangleCell], +) -> PyResult>> { + let vertex_indices: &[NumpyUsize] = bytemuck::cast_slice(triangles); + let triangles: ArrayView2 = + ArrayView::from_shape((triangles.len(), 3), vertex_indices).map_err(anyhow::Error::new)?; + Ok(triangles.to_pyarray(py)) } macro_rules! create_mesh_data_interface { @@ -406,7 +326,10 @@ macro_rules! create_tri_mesh_interface { } /// Returns a copy of the `Mx3` array of the vertex indices that make up a triangle - fn get_triangles<'py>(&self, py: Python<'py>) -> PyResult>> { + fn get_triangles<'py>( + &self, + py: Python<'py>, + ) -> PyResult>> { let tris: &[NumpyUsize] = bytemuck::cast_slice(&self.inner.triangles); let triangles: ArrayView2 = ArrayView::from_shape((self.inner.triangles.len(), 3), tris) @@ -415,7 +338,10 @@ macro_rules! create_tri_mesh_interface { } /// Alias for `get_triangles` - fn get_cells<'py>(&self, py: Python<'py>) -> PyResult>> { + fn get_cells<'py>( + &self, + py: Python<'py>, + ) -> PyResult>> { self.get_triangles(py) } @@ -448,7 +374,10 @@ macro_rules! create_tri_mesh_interface { } /// Alias for `take_triangles` - fn take_cells<'py>(&mut self, py: Python<'py>) -> PyResult>> { + fn take_cells<'py>( + &mut self, + py: Python<'py>, + ) -> PyResult>> { self.take_triangles(py) } @@ -544,6 +473,18 @@ macro_rules! create_tri_quad_mesh_interface { }; } +macro_rules! 
impl_from_mesh { + ($pyclass:ident, $mesh:ty => $target_enum:path) => { + impl From<$mesh> for $pyclass { + fn from(mesh: $mesh) -> Self { + Self { + inner: $target_enum(mesh), + } + } + } + }; +} + enum PyTriMesh3dData { F32(TriMesh3d), F64(TriMesh3d), @@ -555,22 +496,21 @@ pub struct PyTriMesh3d { inner: PyTriMesh3dData, } -impl TryFrom> for PyTriMesh3d { - type Error = &'static str; +impl_from_mesh!(PyTriMesh3d, TriMesh3d => PyTriMesh3dData::F32); +impl_from_mesh!(PyTriMesh3d, TriMesh3d => PyTriMesh3dData::F64); - fn try_from(mesh: TriMesh3d) -> Result { - if std::any::TypeId::of::() == std::any::TypeId::of::() { +impl PyTriMesh3d { + pub fn try_from_generic(mesh: TriMesh3d) -> PyResult { + if TypeId::of::() == TypeId::of::() { let mesh = unsafe { std::mem::transmute::, TriMesh3d>(mesh) }; - Ok(Self { - inner: PyTriMesh3dData::F32(mesh), - }) - } else if std::any::TypeId::of::() == std::any::TypeId::of::() { + Ok(Self::from(mesh)) + } else if TypeId::of::() == TypeId::of::() { let mesh = unsafe { std::mem::transmute::, TriMesh3d>(mesh) }; - Ok(Self { - inner: PyTriMesh3dData::F64(mesh), - }) + Ok(Self::from(mesh)) } else { - Err("Unsupported scalar type for TriMesh3d. Only f32 and f64 are supported.") + Err(PyTypeError::new_err( + "Unsupported scalar type for TriMesh3d. Only f32 and f64 are supported.", + )) } } } @@ -590,18 +530,78 @@ impl PyTriMesh3d { /// Returns a copy of the `Nx3` array of vertex positions pub fn copy_vertices<'py>(&self, py: Python<'py>) -> PyResult> { match &self.inner { - PyTriMesh3dData::F32(mesh) => TriMeshInterface::get_vertices(mesh, py) + PyTriMesh3dData::F32(mesh) => get_vertices(py, mesh.vertices()) .map(|v| v.into_any().downcast_into::().unwrap()), - PyTriMesh3dData::F64(mesh) => TriMeshInterface::get_vertices(mesh, py) + PyTriMesh3dData::F64(mesh) => get_vertices(py, mesh.vertices()) .map(|v| v.into_any().downcast_into::().unwrap()), } } + /// Returns a copy of the `Mx3` array of vertex indices per triangle + pub fn copy_triangles<'py>( + &self, + py: Python<'py>, + ) -> PyResult>> { + match &self.inner { + PyTriMesh3dData::F32(mesh) => get_triangles(py, mesh.cells()), + PyTriMesh3dData::F64(mesh) => get_triangles(py, mesh.cells()), + } + } +} + +pub enum PyMixedTriQuadMesh3dData { + F32(MixedTriQuadMesh3d), + F64(MixedTriQuadMesh3d), +} + +#[gen_stub_pyclass] +#[pyclass] +pub struct PyMixedTriQuadMesh3d { + inner: PyMixedTriQuadMesh3dData, +} + +impl_from_mesh!(PyMixedTriQuadMesh3d, MixedTriQuadMesh3d => PyMixedTriQuadMesh3dData::F32); +impl_from_mesh!(PyMixedTriQuadMesh3d, MixedTriQuadMesh3d => PyMixedTriQuadMesh3dData::F64); + +impl PyMixedTriQuadMesh3d { + pub fn try_from_generic(mesh: MixedTriQuadMesh3d) -> PyResult { + if TypeId::of::() == TypeId::of::() { + let mesh = unsafe { + std::mem::transmute::, MixedTriQuadMesh3d>(mesh) + }; + Ok(Self::from(mesh)) + } else if TypeId::of::() == TypeId::of::() { + let mesh = unsafe { + std::mem::transmute::, MixedTriQuadMesh3d>(mesh) + }; + Ok(Self::from(mesh)) + } else { + Err(PyTypeError::new_err( + "Unsupported scalar type for MixedTriQuadMesh3d. 
Only f32 and f64 are supported.", + )) + } + } +} + +#[gen_stub_pymethods] +#[pymethods] +impl PyMixedTriQuadMesh3d { + /// Returns the numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) + #[getter] + pub fn dtype<'py>(&self, py: Python<'py>) -> Bound<'py, PyArrayDescr> { + match &self.inner { + PyMixedTriQuadMesh3dData::F32(_) => np::dtype::(py), + PyMixedTriQuadMesh3dData::F64(_) => np::dtype::(py), + } + } + /// Returns a copy of the `Nx3` array of vertex positions - pub fn copy_triangles<'py>(&self, py: Python<'py>) -> PyResult>> { + pub fn copy_vertices<'py>(&self, py: Python<'py>) -> PyResult> { match &self.inner { - PyTriMesh3dData::F32(mesh) => TriMeshInterface::get_triangles(mesh, py), - PyTriMesh3dData::F64(mesh) => TriMeshInterface::get_triangles(mesh, py), + PyMixedTriQuadMesh3dData::F32(mesh) => get_vertices(py, mesh.vertices()) + .map(|v| v.into_any().downcast_into::().unwrap()), + PyMixedTriQuadMesh3dData::F64(mesh) => get_vertices(py, mesh.vertices()) + .map(|v| v.into_any().downcast_into::().unwrap()), } } } @@ -609,40 +609,67 @@ impl PyTriMesh3d { enum PyMeshWithDataData { Tri3dF32(MeshWithData>), Tri3dF64(MeshWithData>), + MixedTriQuadF32(MeshWithData>), + MixedTriQuadF64(MeshWithData>), } +impl_from_mesh!(PyMeshWithData, MeshWithData> => PyMeshWithDataData::Tri3dF32); +impl_from_mesh!(PyMeshWithData, MeshWithData> => PyMeshWithDataData::Tri3dF64); +impl_from_mesh!(PyMeshWithData, MeshWithData> => PyMeshWithDataData::MixedTriQuadF32); +impl_from_mesh!(PyMeshWithData, MeshWithData> => PyMeshWithDataData::MixedTriQuadF64); + #[gen_stub_pyclass] #[pyclass] pub struct PyMeshWithData { inner: PyMeshWithDataData, } -impl TryFrom>> for PyMeshWithData { - type Error = &'static str; +//fn transmute_into() - fn try_from(mesh: MeshWithData>) -> Result { - if std::any::TypeId::of::() == std::any::TypeId::of::() { +impl PyMeshWithData { + pub fn try_from_generic + 'static>( + mut mesh: MeshWithData, + ) -> PyResult { + if TypeId::of::>() == TypeId::of::>>() { + let mesh = unsafe { + std::mem::transmute::<&mut MeshWithData, &mut MeshWithData>>( + &mut mesh, + ) + }; + Ok(Self::from(std::mem::take(mesh))) + } else if TypeId::of::>() + == TypeId::of::>>() + { + let mesh = unsafe { + std::mem::transmute::<&mut MeshWithData, &mut MeshWithData>>( + &mut mesh, + ) + }; + Ok(Self::from(std::mem::take(mesh))) + } else if TypeId::of::>() + == TypeId::of::>>() + { let mesh = unsafe { std::mem::transmute::< - MeshWithData>, - MeshWithData>, - >(mesh) + &mut MeshWithData, + &mut MeshWithData>, + >(&mut mesh) }; - Ok(Self { - inner: PyMeshWithDataData::Tri3dF32(mesh), - }) - } else if std::any::TypeId::of::() == std::any::TypeId::of::() { + Ok(Self::from(std::mem::take(mesh))) + } else if TypeId::of::>() + == TypeId::of::>>() + { let mesh = unsafe { std::mem::transmute::< - MeshWithData>, - MeshWithData>, - >(mesh) + &mut MeshWithData, + &mut MeshWithData>, + >(&mut mesh) }; - Ok(Self { - inner: PyMeshWithDataData::Tri3dF64(mesh), - }) + Ok(Self::from(std::mem::take(mesh))) } else { - Err("Unsupported scalar type for MeshWithData. Only f32 and f64 are supported.") + Err(PyTypeError::new_err( + "Unsupported mesh type for MeshWithData. 
Only TriMesh3d and MixedTriQuadMesh3d are supported.", + )) } } } @@ -654,19 +681,30 @@ impl PyMeshWithData { #[getter] pub fn dtype<'py>(&self, py: Python<'py>) -> Bound<'py, PyArrayDescr> { match &self.inner { - PyMeshWithDataData::Tri3dF32(_) => np::dtype::(py), - PyMeshWithDataData::Tri3dF64(_) => np::dtype::(py), + PyMeshWithDataData::Tri3dF32(_) | PyMeshWithDataData::MixedTriQuadF32(_) => { + np::dtype::(py) + } + PyMeshWithDataData::Tri3dF64(_) | PyMeshWithDataData::MixedTriQuadF64(_) => { + np::dtype::(py) + } } } /// Returns a copy of the contained mesh without associated data and attributes - pub fn copy_mesh(&self) -> PyResult { + #[gen_stub(override_return_type(type_repr="typing.Union[PyTriMesh3d, PyMixedTriQuadMesh3d]", imports=()))] + pub fn copy_mesh<'py>(&self, py: Python<'py>) -> PyResult> { match &self.inner { PyMeshWithDataData::Tri3dF32(mesh) => { - PyTriMesh3d::try_from(mesh.mesh.clone()).map_err(|e| anyhow!(e).into()) + PyTriMesh3d::from(mesh.mesh.clone()).into_bound_py_any(py) } PyMeshWithDataData::Tri3dF64(mesh) => { - PyTriMesh3d::try_from(mesh.mesh.clone()).map_err(|e| anyhow!(e).into()) + PyTriMesh3d::from(mesh.mesh.clone()).into_bound_py_any(py) + } + PyMeshWithDataData::MixedTriQuadF32(mesh) => { + PyMixedTriQuadMesh3d::from(mesh.mesh.clone()).into_bound_py_any(py) + } + PyMeshWithDataData::MixedTriQuadF64(mesh) => { + PyMixedTriQuadMesh3d::from(mesh.mesh.clone()).into_bound_py_any(py) } } } @@ -674,9 +712,13 @@ impl PyMeshWithData { /// Returns a copy of the `Nx3` array of vertex positions pub fn copy_vertices<'py>(&self, py: Python<'py>) -> PyResult> { match &self.inner { - PyMeshWithDataData::Tri3dF32(mesh) => TriMeshInterface::get_vertices(&mesh.mesh, py) + PyMeshWithDataData::Tri3dF32(mesh) => get_vertices(py, mesh.mesh.vertices()) + .map(|v| v.into_any().downcast_into::().unwrap()), + PyMeshWithDataData::Tri3dF64(mesh) => get_vertices(py, mesh.mesh.vertices()) + .map(|v| v.into_any().downcast_into::().unwrap()), + PyMeshWithDataData::MixedTriQuadF32(mesh) => get_vertices(py, mesh.mesh.vertices()) .map(|v| v.into_any().downcast_into::().unwrap()), - PyMeshWithDataData::Tri3dF64(mesh) => TriMeshInterface::get_vertices(&mesh.mesh, py) + PyMeshWithDataData::MixedTriQuadF64(mesh) => get_vertices(py, mesh.mesh.vertices()) .map(|v| v.into_any().downcast_into::().unwrap()), } } diff --git a/pysplashsurf/src/pipeline.rs b/pysplashsurf/src/pipeline.rs index 5c5ea8d..ea6355c 100644 --- a/pysplashsurf/src/pipeline.rs +++ b/pysplashsurf/src/pipeline.rs @@ -6,18 +6,18 @@ use crate::{ }, reconstruction::{SurfaceReconstructionF32, SurfaceReconstructionF64}, }; -use anyhow::anyhow; use numpy as np; use numpy::{ Element, PyArray1, PyArray2, PyArrayDescr, PyArrayDescrMethods, PyArrayMethods, PyReadonlyArray1, PyReadonlyArray2, PyUntypedArray, PyUntypedArrayMethods, }; -use pyo3::exceptions::PyTypeError; +use pyo3::exceptions::{PyRuntimeError, PyTypeError}; use pyo3::{ prelude::*, types::{PyDict, PyString}, }; use pyo3_stub_gen::derive::gen_stub_pyfunction; +use splashsurf::reconstruct::ReconstructionResult; use splashsurf_lib::{ Aabb3d, GridDecompositionParameters, Index, Real, SpatialDecomposition, mesh::{AttributeData, MeshAttribute}, @@ -78,7 +78,7 @@ pub fn reconstruction_pipeline_multi<'py>( mesh_aabb_max: Option<[f64; 3]>, mesh_aabb_clamp_vertices: bool, dtype: Option>, -) -> PyResult> { +) -> PyResult { let py = particles.py(); let element_type = dtype.unwrap_or_else(|| particles.dtype()); @@ -138,6 +138,20 @@ pub fn reconstruction_pipeline_multi<'py>( 
mesh_aabb_clamp_vertices, }; + fn reconstruction_to_pymesh( + reconstruction: ReconstructionResult, + ) -> PyResult { + if let Some(tri_mesh) = reconstruction.tri_mesh { + PyMeshWithData::try_from_generic(tri_mesh) + } else if let Some(tri_quad_mesh) = reconstruction.tri_quad_mesh { + PyMeshWithData::try_from_generic(tri_quad_mesh) + } else { + Err(PyRuntimeError::new_err( + "Reconstruction resulted in no mesh", + )) + } + } + if element_type.is_equiv_to(&np::dtype::(py)) { println!("Detected f32 particle array"); let particles = particles.downcast::>()?; @@ -149,10 +163,7 @@ pub fn reconstruction_pipeline_multi<'py>( .expect("failed to convert reconstruction parameters to f32"), &postprocessing_args, )?; - let mesh = reconstruction - .tri_mesh - .map(|mesh_with_data| PyMeshWithData::try_from(mesh_with_data)); - mesh.transpose().map_err(|err| anyhow!(err).into()) + reconstruction_to_pymesh(reconstruction) } else if element_type.is_equiv_to(&np::dtype::(py)) { println!("Detected f64 particle array"); let particles = particles.downcast::>()?; @@ -162,13 +173,10 @@ pub fn reconstruction_pipeline_multi<'py>( ¶meters, &postprocessing_args, )?; - let mesh = reconstruction - .tri_mesh - .map(|mesh_with_data| PyMeshWithData::try_from(mesh_with_data)); - mesh.transpose().map_err(|err| anyhow!(err).into()) + reconstruction_to_pymesh(reconstruction) } else { Err(PyTypeError::new_err(format!( - "Unsupported element type: {}", + "Unsupported scalar type for reconstruction: {}, only float32 and float64 are supported", element_type ))) } From 8066d803a4736d9b5d5f2a93f4189a1da7a54f21 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Wed, 27 Aug 2025 17:58:09 +0200 Subject: [PATCH 06/63] Py: Simplify code --- pysplashsurf/src/lib.rs | 4 ++ pysplashsurf/src/mesh.rs | 113 ++++++++++++++------------------------- 2 files changed, 44 insertions(+), 73 deletions(-) diff --git a/pysplashsurf/src/lib.rs b/pysplashsurf/src/lib.rs index ead15c0..73ac7cf 100644 --- a/pysplashsurf/src/lib.rs +++ b/pysplashsurf/src/lib.rs @@ -36,6 +36,10 @@ fn pysplashsurf(m: &Bound<'_, PyModule>) -> PyResult<()> { let _ = m.add_class::()?; let _ = m.add_class::()?; + let _ = m.add_class::()?; + let _ = m.add_class::()?; + let _ = m.add_class::()?; + let _ = m.add_class::()?; let _ = m.add_class::()?; diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index 5b27dc3..68552d9 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -23,7 +23,6 @@ use splashsurf_lib::{ }, nalgebra::{Unit, Vector3}, }; -use std::any::TypeId; fn get_attribute_with_name<'py, R: Real + Element>( py: Python<'py>, @@ -485,6 +484,22 @@ macro_rules! 
impl_from_mesh { }; } +/// Transmutes from a generic type to a concrete type if they are identical, takes the value and converts it into the target type +fn transmute_take_into< + GenericSrc: 'static, + ConcreteSrc: Default + Into + 'static, + Target, +>( + value: &mut GenericSrc, +) -> Option { + if std::any::TypeId::of::() == std::any::TypeId::of::() { + let value_ref = unsafe { std::mem::transmute::<&mut GenericSrc, &mut ConcreteSrc>(value) }; + Some(std::mem::take(value_ref).into()) + } else { + None + } +} + enum PyTriMesh3dData { F32(TriMesh3d), F64(TriMesh3d), @@ -500,18 +515,14 @@ impl_from_mesh!(PyTriMesh3d, TriMesh3d => PyTriMesh3dData::F32); impl_from_mesh!(PyTriMesh3d, TriMesh3d => PyTriMesh3dData::F64); impl PyTriMesh3d { - pub fn try_from_generic(mesh: TriMesh3d) -> PyResult { - if TypeId::of::() == TypeId::of::() { - let mesh = unsafe { std::mem::transmute::, TriMesh3d>(mesh) }; - Ok(Self::from(mesh)) - } else if TypeId::of::() == TypeId::of::() { - let mesh = unsafe { std::mem::transmute::, TriMesh3d>(mesh) }; - Ok(Self::from(mesh)) - } else { - Err(PyTypeError::new_err( - "Unsupported scalar type for TriMesh3d. Only f32 and f64 are supported.", - )) - } + pub fn try_from_generic(mut mesh: TriMesh3d) -> PyResult { + transmute_take_into::<_, TriMesh3d, _>(&mut mesh) + .or_else(|| transmute_take_into::<_, TriMesh3d, _>(&mut mesh)) + .ok_or_else(|| { + PyTypeError::new_err( + "Unsupported scalar type for TriMesh3d. Only f32 and f64 are supported.", + ) + }) } } @@ -549,7 +560,7 @@ impl PyTriMesh3d { } } -pub enum PyMixedTriQuadMesh3dData { +enum PyMixedTriQuadMesh3dData { F32(MixedTriQuadMesh3d), F64(MixedTriQuadMesh3d), } @@ -564,22 +575,14 @@ impl_from_mesh!(PyMixedTriQuadMesh3d, MixedTriQuadMesh3d => PyMixedTriQuadM impl_from_mesh!(PyMixedTriQuadMesh3d, MixedTriQuadMesh3d => PyMixedTriQuadMesh3dData::F64); impl PyMixedTriQuadMesh3d { - pub fn try_from_generic(mesh: MixedTriQuadMesh3d) -> PyResult { - if TypeId::of::() == TypeId::of::() { - let mesh = unsafe { - std::mem::transmute::, MixedTriQuadMesh3d>(mesh) - }; - Ok(Self::from(mesh)) - } else if TypeId::of::() == TypeId::of::() { - let mesh = unsafe { - std::mem::transmute::, MixedTriQuadMesh3d>(mesh) - }; - Ok(Self::from(mesh)) - } else { - Err(PyTypeError::new_err( - "Unsupported scalar type for MixedTriQuadMesh3d. Only f32 and f64 are supported.", - )) - } + pub fn try_from_generic(mut mesh: MixedTriQuadMesh3d) -> PyResult { + transmute_take_into::<_, MixedTriQuadMesh3d, _>(&mut mesh) + .or_else(|| transmute_take_into::<_, MixedTriQuadMesh3d, _>(&mut mesh)) + .ok_or_else(|| { + PyTypeError::new_err( + "Unsupported scalar type for MixedTriQuadMesh3d. 
Only f32 and f64 are supported.", + ) + }) } } @@ -624,53 +627,17 @@ pub struct PyMeshWithData { inner: PyMeshWithDataData, } -//fn transmute_into() - impl PyMeshWithData { pub fn try_from_generic + 'static>( mut mesh: MeshWithData, ) -> PyResult { - if TypeId::of::>() == TypeId::of::>>() { - let mesh = unsafe { - std::mem::transmute::<&mut MeshWithData, &mut MeshWithData>>( - &mut mesh, - ) - }; - Ok(Self::from(std::mem::take(mesh))) - } else if TypeId::of::>() - == TypeId::of::>>() - { - let mesh = unsafe { - std::mem::transmute::<&mut MeshWithData, &mut MeshWithData>>( - &mut mesh, - ) - }; - Ok(Self::from(std::mem::take(mesh))) - } else if TypeId::of::>() - == TypeId::of::>>() - { - let mesh = unsafe { - std::mem::transmute::< - &mut MeshWithData, - &mut MeshWithData>, - >(&mut mesh) - }; - Ok(Self::from(std::mem::take(mesh))) - } else if TypeId::of::>() - == TypeId::of::>>() - { - let mesh = unsafe { - std::mem::transmute::< - &mut MeshWithData, - &mut MeshWithData>, - >(&mut mesh) - }; - Ok(Self::from(std::mem::take(mesh))) - } else { - Err(PyTypeError::new_err( - "Unsupported mesh type for MeshWithData. Only TriMesh3d and MixedTriQuadMesh3d are supported.", - )) - } + transmute_take_into::<_, MeshWithData>, _>(&mut mesh) + .or_else(|| transmute_take_into::<_, MeshWithData>, _>(&mut mesh)) + .or_else(|| transmute_take_into::<_, MeshWithData>, _>(&mut mesh)) + .or_else(|| transmute_take_into::<_, MeshWithData>, _>(&mut mesh)) + .ok_or_else(|| PyTypeError::new_err( + "Unsupported mesh type for MeshWithData. Only TriMesh3d and MixedTriQuadMesh3d with f32 or f64 scalar types are supported.", + )) } } From 3ef6270b34be8a2b36ffc85f1edb1274bf24616e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Wed, 27 Aug 2025 20:12:31 +0200 Subject: [PATCH 07/63] Automatically set global neighborhood lists in pipeline --- splashsurf/src/reconstruct.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/splashsurf/src/reconstruct.rs b/splashsurf/src/reconstruct.rs index b41119e..0aa25f9 100644 --- a/splashsurf/src/reconstruct.rs +++ b/splashsurf/src/reconstruct.rs @@ -1013,8 +1013,13 @@ pub fn reconstruction_pipeline<'a, I: Index, R: Real>( params: &splashsurf_lib::Parameters, postprocessing: &ReconstructionPostprocessingParameters, ) -> Result, anyhow::Error> { + // Ensure that we get global neighborhood lists if required for post-processing + let mut params = params.clone(); + params.global_neighborhood_list = + params.global_neighborhood_list || postprocessing.mesh_smoothing_weights; + // Perform the surface reconstruction - let reconstruction = splashsurf_lib::reconstruct_surface::(particle_positions, params)?; + let reconstruction = splashsurf_lib::reconstruct_surface::(particle_positions, ¶ms)?; // Filters a particle quantity based on an optional mask of particles inside the reconstruction domain fn filtered_quantity<'a, T: Clone>( From 6001ffb1d557340cca79dec7222cf5dcf8e5d4a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Wed, 27 Aug 2025 20:12:55 +0200 Subject: [PATCH 08/63] Py: Fixes in pipeline --- pysplashsurf/src/pipeline.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/pysplashsurf/src/pipeline.rs b/pysplashsurf/src/pipeline.rs index ea6355c..e3d0cab 100644 --- a/pysplashsurf/src/pipeline.rs +++ b/pysplashsurf/src/pipeline.rs @@ -17,7 +17,6 @@ use pyo3::{ types::{PyDict, PyString}, }; use pyo3_stub_gen::derive::gen_stub_pyfunction; -use splashsurf::reconstruct::ReconstructionResult; use 
splashsurf_lib::{ Aabb3d, GridDecompositionParameters, Index, Real, SpatialDecomposition, mesh::{AttributeData, MeshAttribute}, @@ -80,7 +79,11 @@ pub fn reconstruction_pipeline_multi<'py>( dtype: Option>, ) -> PyResult { let py = particles.py(); - let element_type = dtype.unwrap_or_else(|| particles.dtype()); + let element_type = particles.dtype(); + + if let Some(target_dtype) = dtype && !target_dtype.is_equiv_to(&element_type) { + unimplemented!("Casting to different dtype is not implemented yet"); + } let particle_aabb = aabb_min .zip(aabb_max) @@ -108,7 +111,7 @@ pub fn reconstruction_pipeline_multi<'py>( particle_aabb, enable_multi_threading: multi_threading, spatial_decomposition, - global_neighborhood_list: mesh_smoothing_weights, + global_neighborhood_list: false, }; let postprocessing_args = splashsurf::reconstruct::ReconstructionPostprocessingParameters { @@ -139,7 +142,7 @@ pub fn reconstruction_pipeline_multi<'py>( }; fn reconstruction_to_pymesh( - reconstruction: ReconstructionResult, + reconstruction: splashsurf::reconstruct::ReconstructionResult, ) -> PyResult { if let Some(tri_mesh) = reconstruction.tri_mesh { PyMeshWithData::try_from_generic(tri_mesh) @@ -153,7 +156,6 @@ pub fn reconstruction_pipeline_multi<'py>( } if element_type.is_equiv_to(&np::dtype::(py)) { - println!("Detected f32 particle array"); let particles = particles.downcast::>()?; let reconstruction = reconstruction_pipeline_generic_impl::( particles, @@ -165,7 +167,6 @@ pub fn reconstruction_pipeline_multi<'py>( )?; reconstruction_to_pymesh(reconstruction) } else if element_type.is_equiv_to(&np::dtype::(py)) { - println!("Detected f64 particle array"); let particles = particles.downcast::>()?; let reconstruction = reconstruction_pipeline_generic_impl::( particles, From 6c2118a90bc1ad923afed83ca668dd4b8f7151e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Wed, 27 Aug 2025 21:53:57 +0200 Subject: [PATCH 09/63] Make zero constructor of UniformGrid public for Python API --- splashsurf_lib/src/uniform_grid.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/splashsurf_lib/src/uniform_grid.rs b/splashsurf_lib/src/uniform_grid.rs index 299fd6a..8781628 100644 --- a/splashsurf_lib/src/uniform_grid.rs +++ b/splashsurf_lib/src/uniform_grid.rs @@ -230,7 +230,7 @@ impl UniformCartesianCubeGrid3d { } /// Constructs a degenerate grid with zero extents, zero cells and zero points - pub(crate) fn new_zero() -> Self { + pub fn new_zero() -> Self { Self { aabb: Aabb3d::new(Vector3::zeros(), Vector3::zeros()), cell_size: R::zero(), From 2be1ebbdba0e118925e4500e62c5fd2202fc2d35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Wed, 27 Aug 2025 21:56:24 +0200 Subject: [PATCH 10/63] Py: Implement generic reconstruct surf., remove old pipeline functions --- pysplashsurf/pysplashsurf/__init__.py | 180 --------- pysplashsurf/pysplashsurf/pysplashsurf.pyi | 43 ++- pysplashsurf/src/lib.rs | 21 +- pysplashsurf/src/mesh.rs | 29 +- pysplashsurf/src/pipeline.rs | 404 +-------------------- pysplashsurf/src/reconstruction.rs | 208 ++++++++++- pysplashsurf/src/utils.rs | 45 +++ 7 files changed, 305 insertions(+), 625 deletions(-) create mode 100644 pysplashsurf/src/utils.rs diff --git a/pysplashsurf/pysplashsurf/__init__.py b/pysplashsurf/pysplashsurf/__init__.py index 9b42767..dada9dc 100644 --- a/pysplashsurf/pysplashsurf/__init__.py +++ b/pysplashsurf/pysplashsurf/__init__.py @@ -522,184 +522,4 @@ def convert_tris_to_quads( else: raise ValueError("Invalid mesh type") - 
-def reconstruction_pipeline( - particles, *, attributes_to_interpolate=None, particle_radius, - rest_density=1000.0, smoothing_length=2.0, cube_size, - iso_surface_threshold=0.6, multi_threading=True, - check_mesh_closed=False, check_mesh_manifold=False, - check_mesh_orientation=False, check_mesh_debug=False, - mesh_smoothing_weights=False, sph_normals=False, - mesh_smoothing_weights_normalization=13.0, mesh_smoothing_iters=None, normals_smoothing_iters=None, - mesh_cleanup=False, mesh_cleanup_snap_dist=None, decimate_barnacles=False, keep_vertices=False, - compute_normals=False, output_raw_normals=False, output_raw_mesh=False, output_mesh_smoothing_weights=False, mesh_aabb_clamp_vertices=False, - subdomain_grid=True, subdomain_grid_auto_disable=True, subdomain_num_cubes_per_dim=64, aabb_min=None, aabb_max=None, mesh_aabb_min=None, mesh_aabb_max=None, - generate_quads=False, quad_max_edge_diag_ratio=1.75, quad_max_normal_angle=10.0, quad_max_interior_angle=135.0 -): - """Surface reconstruction based on particle positions with subsequent post-processing - - Parameters - ---------- - particles: np.ndarray - 2-dimensional array containing all particle positions [[ax, ay, az], [bx, by, bz], ...] - - attributes_to_interpolate: dict - Dictionary containing all attributes to interpolate. The keys are the attribute names and the values are the corresponding 1D/2D arrays.\n - The arrays must have the same length as the number of particles. \n - Supported array types are 2D float32/float64 arrays for vector attributes and 1D uint64/float32/float64 arrays for scalar attributes. - - particle_radius: float - Particle radius - - rest_density: float - Rest density of the fluid - - smoothing_length: float - Smoothing length of the fluid in multiples of the particle radius (compact support radius of SPH kernel will be twice the smoothing length) - - cube_size: float - Size of the cubes used for the marching cubes grid in multiples of the particle radius - - iso_surface_threshold: float - Threshold for the iso surface - - multi_threading: bool - Multi-threading flag - - check_mesh_closed: bool - Enable checking the final mesh for holes - - check_mesh_manifold: bool - Enable checking the final mesh for non-manifold edges and vertices - - check_mesh_orientation: bool - Enable checking the final mesh for inverted triangles (compares angle between vertex normals and adjacent face normals) - - check_mesh_debug: bool - Enable additional debug output for the check-mesh operations (has no effect if no other check-mesh option is enabled) - - sph_normals: bool - Flag to compute normals using SPH interpolation instead of geometry-based normals. - - mesh_smoothing_weights: bool - Flag to compute mesh smoothing weights\n - This implements the method from “Weighted Laplacian Smoothing for Surface Reconstruction of Particle-based Fluids” (Löschner, Böttcher, Jeske, Bender; 2023). 
- - mesh_smoothing_weights_normalization: float - Normalization factor for the mesh smoothing weights - - mesh_smoothing_iters: int - Number of iterations for the mesh smoothing - - normals_smoothing_iters: int - Number of iterations for the normal smoothing - - mesh_cleanup: bool - Flag to perform mesh cleanup\n - This implements the method from “Compact isocontours from sampled data” (Moore, Warren; 1992) - - mesh_cleanup_snap_dist: float - If MC mesh cleanup is enabled, vertex snapping can be limited to this distance relative to the MC edge length (should be in range of [0.0,0.5]) - - decimate_barnacles: bool - Flag to perform barnacle decimation\n - For details see “Weighted Laplacian Smoothing for Surface Reconstruction of Particle-based Fluids” (Löschner, Böttcher, Jeske, Bender; 2023). - - keep_vertices: bool - Flag to keep any vertices without connectivity resulting from mesh cleanup or decimation step - - compute_normals: bool - Flag to compute normals\n - If set to True, the normals will be computed and stored in the mesh. - - output_mesh_smoothing_weights: bool - Flag to store the mesh smoothing weights if smoothing weights are computed. - - output_raw_normals: bool - Flag to output the raw normals in addition to smoothed normals if smoothing of normals is enabled - - output_raw_mesh: bool - When true, also return the SurfaceReconstruction object with no post-processing applied - - mesh_aabb_clamp_vertices: bool - Flag to clamp the vertices of the mesh to the AABB - - subdomain_grid: bool - Enable spatial decomposition using by dividing the domain into subdomains with dense marching cube grids for efficient multi-threading - - subdomain_grid_auto_disable: bool - Whether to automatically disable the subdomain grid if the global domain is too small - - subdomain_num_cubes_per_dim: int - Each subdomain will be a cube consisting of this number of MC cube cells along each coordinate axis - - aabb_min: np.ndarray - Smallest corner of the axis-aligned bounding box - - aabb_max: np.ndarray - Largest corner of the axis-aligned bounding box - - mesh_aabb_min: np.ndarray - Smallest corner of the axis-aligned bounding box for the mesh - - mesh_aabb_max: np.ndarray - Largest corner of the axis-aligned bounding box for the mesh - - generate_quads: bool - Enable trying to convert triangles to quads if they meet quality criteria - - quad_max_edge_diag_ratio: float - Maximum allowed ratio of quad edge lengths to its diagonals to merge two triangles to a quad (inverse is used for minimum) - - quad_max_normal_angle: float - Maximum allowed angle (in degrees) between triangle normals to merge them to a quad - - quad_max_interior_angle: float - Maximum allowed vertex interior angle (in degrees) inside a quad to merge two triangles to a quad - - Returns - ------- - tuple[TriMeshWithDataF32 | TriMeshWithDataF64 | MixedTriQuadMeshWithDataF32 | MixedTriQuadMeshWithDataF64, Optional[SurfaceReconstructionF32] | Optional[SurfaceReconstructionF64]] - Mesh with data object and SurfaceReconstruction object containing the reconstructed mesh and used grid - """ - if attributes_to_interpolate is None: - attributes_to_interpolate = {} - - if particles.dtype == 'float32': - tri_mesh, tri_quad_mesh, reconstruction = reconstruction_pipeline_f32(particles, attributes_to_interpolate=attributes_to_interpolate, particle_radius=particle_radius, rest_density=rest_density, - smoothing_length=smoothing_length, cube_size=cube_size, iso_surface_threshold=iso_surface_threshold, - aabb_min=aabb_min, aabb_max=aabb_max, 
multi_threading=multi_threading, - subdomain_grid=subdomain_grid, subdomain_grid_auto_disable=subdomain_grid_auto_disable, subdomain_num_cubes_per_dim=subdomain_num_cubes_per_dim, - check_mesh_closed=check_mesh_closed, check_mesh_manifold=check_mesh_manifold, check_mesh_orientation=check_mesh_orientation, check_mesh_debug=check_mesh_debug, - mesh_cleanup=mesh_cleanup, mesh_cleanup_snap_dist=mesh_cleanup_snap_dist, decimate_barnacles=decimate_barnacles, - keep_vertices=keep_vertices, compute_normals=compute_normals, sph_normals=sph_normals, normals_smoothing_iters=normals_smoothing_iters, - mesh_smoothing_iters=mesh_smoothing_iters, mesh_smoothing_weights=mesh_smoothing_weights, mesh_smoothing_weights_normalization=mesh_smoothing_weights_normalization, - output_mesh_smoothing_weights=output_mesh_smoothing_weights, output_raw_normals=output_raw_normals, output_raw_mesh=output_raw_mesh, - mesh_aabb_min=mesh_aabb_min, mesh_aabb_max=mesh_aabb_max, mesh_aabb_clamp_vertices=mesh_aabb_clamp_vertices, - generate_quads=generate_quads, quad_max_edge_diag_ratio=quad_max_edge_diag_ratio, quad_max_normal_angle=quad_max_normal_angle, quad_max_interior_angle=quad_max_interior_angle) - - if tri_mesh == None: - return (tri_quad_mesh, reconstruction) - else: - return (tri_mesh, reconstruction) - - elif particles.dtype == 'float64': - tri_mesh, tri_quad_mesh, reconstruction = reconstruction_pipeline_f64(particles, attributes_to_interpolate=attributes_to_interpolate, particle_radius=particle_radius, rest_density=rest_density, - smoothing_length=smoothing_length, cube_size=cube_size, iso_surface_threshold=iso_surface_threshold, - aabb_min=aabb_min, aabb_max=aabb_max, multi_threading=multi_threading, - subdomain_grid=subdomain_grid, subdomain_grid_auto_disable=subdomain_grid_auto_disable, subdomain_num_cubes_per_dim=subdomain_num_cubes_per_dim, - check_mesh_closed=check_mesh_closed, check_mesh_manifold=check_mesh_manifold, check_mesh_orientation=check_mesh_orientation, check_mesh_debug=check_mesh_debug, - mesh_cleanup=mesh_cleanup, mesh_cleanup_snap_dist=mesh_cleanup_snap_dist, decimate_barnacles=decimate_barnacles, - keep_vertices=keep_vertices, compute_normals=compute_normals, sph_normals=sph_normals, normals_smoothing_iters=normals_smoothing_iters, - mesh_smoothing_iters=mesh_smoothing_iters, mesh_smoothing_weights=mesh_smoothing_weights, mesh_smoothing_weights_normalization=mesh_smoothing_weights_normalization, - output_mesh_smoothing_weights=output_mesh_smoothing_weights, output_raw_normals=output_raw_normals, output_raw_mesh=output_raw_mesh, - mesh_aabb_min=mesh_aabb_min, mesh_aabb_max=mesh_aabb_max, mesh_aabb_clamp_vertices=mesh_aabb_clamp_vertices, - generate_quads=generate_quads, quad_max_edge_diag_ratio=quad_max_edge_diag_ratio, quad_max_normal_angle=quad_max_normal_angle, quad_max_interior_angle=quad_max_interior_angle) - - if tri_mesh == None: - return (tri_quad_mesh, reconstruction) - else: - return (tri_mesh, reconstruction) - else: - raise ValueError("Invalid data type (only float32 and float64 are supported, consider explicitly specifying the dtype for particles)") diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index 45fc9c8..f5ba284 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -350,6 +350,29 @@ class PyMixedTriQuadMesh3d: Returns a copy of the `Nx3` array of vertex positions """ +class PySurfaceReconstruction: + r""" + Struct containing results of the surface reconstruction 
including the mesh, grid parameters and optional particle data + """ + def copy_mesh(self) -> PyTriMesh3d: + r""" + Returns a copy of the surface mesh of the reconstruction + """ + def copy_grid(self) -> PyUniformGrid: + r""" + Returns a copy of the uniform grid parameters used for the reconstruction + """ + def copy_particle_densities(self) -> typing.Optional[numpy.typing.NDArray[typing.Any]]: + r""" + Returns a copy of the particle densities computed during the reconstruction + """ + def copy_particle_neighbors(self) -> typing.Optional[builtins.list[builtins.list[builtins.int]]]: + r""" + Returns a copy of the per-particle neighborhood lists computed during the reconstruction if available + + The neighborhood lists are only available if the flag for global neighborhood list was set in the reconstruction parameters. + """ + class PyTriMesh3d: @property def dtype(self) -> numpy.dtype: @@ -365,6 +388,12 @@ class PyTriMesh3d: Returns a copy of the `Mx3` array of vertex indices per triangle """ +class PyUniformGrid: + r""" + Struct containing the parameters of a uniform grid used for the surface reconstruction + """ + ... + class SphInterpolatorF32: r""" SphInterpolator wrapper @@ -637,5 +666,17 @@ class UniformGridF64: """ ... -def reconstruction_pipeline_multi(particles:numpy.typing.NDArray[typing.Any], *, attributes_to_interpolate:typing.Optional[dict]=None, particle_radius:builtins.float, rest_density:builtins.float=1000.0, smoothing_length:builtins.float, cube_size:builtins.float, iso_surface_threshold:builtins.float=0.6, aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None, multi_threading:builtins.bool=True, subdomain_grid:builtins.bool=True, subdomain_grid_auto_disable:builtins.bool=True, subdomain_num_cubes_per_dim:builtins.int=64, check_mesh_closed:builtins.bool=False, check_mesh_manifold:builtins.bool=False, check_mesh_orientation:builtins.bool=False, check_mesh_debug:builtins.bool=False, mesh_cleanup:builtins.bool=False, mesh_cleanup_snap_dist:typing.Optional[builtins.float]=None, decimate_barnacles:builtins.bool=False, keep_vertices:builtins.bool=False, compute_normals:builtins.bool=False, sph_normals:builtins.bool=False, normals_smoothing_iters:typing.Optional[builtins.int]=None, mesh_smoothing_iters:typing.Optional[builtins.int]=None, mesh_smoothing_weights:builtins.bool=True, mesh_smoothing_weights_normalization:builtins.float=13.0, generate_quads:builtins.bool=False, quad_max_edge_diag_ratio:builtins.float=1.75, quad_max_normal_angle:builtins.float=10.0, quad_max_interior_angle:builtins.float=135.0, output_mesh_smoothing_weights:builtins.bool=False, output_raw_normals:builtins.bool=False, output_raw_mesh:builtins.bool=False, mesh_aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, mesh_aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None, mesh_aabb_clamp_vertices:builtins.bool=True, dtype:typing.Optional[numpy.dtype]=None) -> PyMeshWithData: ... 
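The accessors above copy data out of the reconstruction result rather than exposing views into it. A minimal usage sketch (values and variable names are illustrative; it assumes the `reconstruct_surface_multi` binding declared just below is re-exported at the `pysplashsurf` package level):

    import numpy as np
    import pysplashsurf

    # Hypothetical particle set; the array dtype selects the f32 or f64 code path
    particles = np.random.rand(10_000, 3).astype(np.float32)
    recon = pysplashsurf.reconstruct_surface_multi(
        particles,
        particle_radius=0.025,
        smoothing_length=2.0,
        cube_size=0.5,
    )

    mesh = recon.copy_mesh()                     # PyTriMesh3d copy of the reconstructed surface
    grid = recon.copy_grid()                     # PyUniformGrid used for marching cubes
    densities = recon.copy_particle_densities()  # NumPy array of per-particle densities, or None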
+def reconstruct_surface_multi(particles:numpy.typing.NDArray[typing.Any], *, particle_radius:builtins.float, rest_density:builtins.float=1000.0, smoothing_length:builtins.float, cube_size:builtins.float, iso_surface_threshold:builtins.float=0.6, multi_threading:builtins.bool=True, global_neighborhood_list:builtins.bool=False, subdomain_grid:builtins.bool=True, subdomain_grid_auto_disable:builtins.bool=True, subdomain_num_cubes_per_dim:builtins.int=64, aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None) -> PySurfaceReconstruction: + r""" + Performs a surface reconstruction of the given particles without post-processing + + Note that all parameters use absolute distance units and are not relative to the particle radius. + """ + +def reconstruction_pipeline(particles:numpy.typing.NDArray[typing.Any], *, attributes_to_interpolate:typing.Optional[dict]=None, particle_radius:builtins.float, rest_density:builtins.float=1000.0, smoothing_length:builtins.float, cube_size:builtins.float, iso_surface_threshold:builtins.float=0.6, aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None, multi_threading:builtins.bool=True, subdomain_grid:builtins.bool=True, subdomain_grid_auto_disable:builtins.bool=True, subdomain_num_cubes_per_dim:builtins.int=64, check_mesh_closed:builtins.bool=False, check_mesh_manifold:builtins.bool=False, check_mesh_orientation:builtins.bool=False, check_mesh_debug:builtins.bool=False, mesh_cleanup:builtins.bool=False, mesh_cleanup_snap_dist:typing.Optional[builtins.float]=None, decimate_barnacles:builtins.bool=False, keep_vertices:builtins.bool=False, compute_normals:builtins.bool=False, sph_normals:builtins.bool=False, normals_smoothing_iters:typing.Optional[builtins.int]=None, mesh_smoothing_iters:typing.Optional[builtins.int]=None, mesh_smoothing_weights:builtins.bool=True, mesh_smoothing_weights_normalization:builtins.float=13.0, generate_quads:builtins.bool=False, quad_max_edge_diag_ratio:builtins.float=1.75, quad_max_normal_angle:builtins.float=10.0, quad_max_interior_angle:builtins.float=135.0, output_mesh_smoothing_weights:builtins.bool=False, output_raw_normals:builtins.bool=False, output_raw_mesh:builtins.bool=False, mesh_aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, mesh_aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None, mesh_aabb_clamp_vertices:builtins.bool=True, dtype:typing.Optional[numpy.dtype]=None) -> PyMeshWithData: + r""" + Runs the surface reconstruction pipeline for the given particle positions with optional post-processing + + Note that smoothing length and cube size are given in multiples of the particle radius. + """ diff --git a/pysplashsurf/src/lib.rs b/pysplashsurf/src/lib.rs index 73ac7cf..2fb6c3e 100644 --- a/pysplashsurf/src/lib.rs +++ b/pysplashsurf/src/lib.rs @@ -22,6 +22,8 @@ mod pipeline; mod post_processing; mod reconstruction; +pub(crate) mod utils; + /// High-Level Bindings of the splashsurf surface reconstruction implementation. /// Support reconstructing Level-Set surfaces from particle clouds or from regular grids. 
#[pymodule] @@ -61,6 +63,11 @@ fn pysplashsurf(m: &Bound<'_, PyModule>) -> PyResult<()> { m )?); + let _ = m.add_function(wrap_pyfunction!( + reconstruction::reconstruct_surface_multi, + m + )?); + let _ = m.add_function(wrap_pyfunction!( post_processing::convert_tris_to_quads_py_f32, m @@ -118,19 +125,7 @@ fn pysplashsurf(m: &Bound<'_, PyModule>) -> PyResult<()> { m )?); - let _ = m.add_function(wrap_pyfunction!( - pipeline::reconstruction_pipeline_multi, - m - )?); - - let _ = m.add_function(wrap_pyfunction!( - pipeline::reconstruction_pipeline_py_f32, - m - )?); - let _ = m.add_function(wrap_pyfunction!( - pipeline::reconstruction_pipeline_py_f64, - m - )?); + let _ = m.add_function(wrap_pyfunction!(pipeline::reconstruction_pipeline, m)?); let _ = m.add_function(wrap_pyfunction!(run_splashsurf_py, m)?); diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index 68552d9..6ad5bc1 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -1,5 +1,6 @@ use crate::NumpyUsize; use crate::aabb::{Aabb3dF32, Aabb3dF64}; +use crate::utils::*; use ndarray::{Array2, ArrayView, ArrayView2}; use numpy as np; use numpy::{ @@ -472,34 +473,6 @@ macro_rules! create_tri_quad_mesh_interface { }; } -macro_rules! impl_from_mesh { - ($pyclass:ident, $mesh:ty => $target_enum:path) => { - impl From<$mesh> for $pyclass { - fn from(mesh: $mesh) -> Self { - Self { - inner: $target_enum(mesh), - } - } - } - }; -} - -/// Transmutes from a generic type to a concrete type if they are identical, takes the value and converts it into the target type -fn transmute_take_into< - GenericSrc: 'static, - ConcreteSrc: Default + Into + 'static, - Target, ->( - value: &mut GenericSrc, -) -> Option { - if std::any::TypeId::of::() == std::any::TypeId::of::() { - let value_ref = unsafe { std::mem::transmute::<&mut GenericSrc, &mut ConcreteSrc>(value) }; - Some(std::mem::take(value_ref).into()) - } else { - None - } -} - enum PyTriMesh3dData { F32(TriMesh3d), F64(TriMesh3d), diff --git a/pysplashsurf/src/pipeline.rs b/pysplashsurf/src/pipeline.rs index e3d0cab..6a5739e 100644 --- a/pysplashsurf/src/pipeline.rs +++ b/pysplashsurf/src/pipeline.rs @@ -1,11 +1,4 @@ use crate::mesh::PyMeshWithData; -use crate::{ - mesh::{ - MixedTriQuadMeshWithDataF32, MixedTriQuadMeshWithDataF64, TriMeshWithDataF32, - TriMeshWithDataF64, - }, - reconstruction::{SurfaceReconstructionF32, SurfaceReconstructionF64}, -}; use numpy as np; use numpy::{ Element, PyArray1, PyArray2, PyArrayDescr, PyArrayDescrMethods, PyArrayMethods, @@ -24,9 +17,12 @@ use splashsurf_lib::{ }; use std::borrow::Cow; +/// Runs the surface reconstruction pipeline for the given particle positions with optional post-processing +/// +/// Note that smoothing length and cube size are given in multiples of the particle radius. 
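A rough sketch of how the renamed entry point is intended to be called from Python (keyword names taken from the signature below; values are illustrative and a package-level re-export of the binding is assumed):

    import numpy as np
    import pysplashsurf

    particles = np.random.rand(10_000, 3).astype(np.float64)
    mesh_with_data = pysplashsurf.reconstruction_pipeline(
        particles,
        particle_radius=0.025,
        smoothing_length=2.0,        # multiples of the particle radius
        cube_size=0.5,               # multiples of the particle radius
        iso_surface_threshold=0.6,
        mesh_smoothing_weights=True,
        mesh_smoothing_iters=25,
        compute_normals=True,
    )
    print(mesh_with_data.dtype)      # matches the input dtype (float64 here)

Passing a `dtype` that differs from the particle array is not supported at this point (the cast branch is `unimplemented!`), so the result always carries the scalar type of the input particles.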
#[gen_stub_pyfunction] #[pyfunction] -#[pyo3(name = "reconstruction_pipeline_multi")] +#[pyo3(name = "reconstruction_pipeline")] #[pyo3(signature = (particles, *, attributes_to_interpolate = None, particle_radius, rest_density = 1000.0, smoothing_length, cube_size, iso_surface_threshold = 0.6, aabb_min = None, aabb_max = None, multi_threading = true, @@ -38,7 +34,7 @@ use std::borrow::Cow; output_mesh_smoothing_weights = false, output_raw_normals = false, output_raw_mesh = false, mesh_aabb_min = None, mesh_aabb_max = None, mesh_aabb_clamp_vertices = true, dtype = None ))] -pub fn reconstruction_pipeline_multi<'py>( +pub fn reconstruction_pipeline<'py>( particles: &Bound<'py, PyUntypedArray>, attributes_to_interpolate: Option>, particle_radius: f64, @@ -81,7 +77,9 @@ pub fn reconstruction_pipeline_multi<'py>( let py = particles.py(); let element_type = particles.dtype(); - if let Some(target_dtype) = dtype && !target_dtype.is_equiv_to(&element_type) { + if let Some(target_dtype) = dtype + && !target_dtype.is_equiv_to(&element_type) + { unimplemented!("Casting to different dtype is not implemented yet"); } @@ -177,7 +175,7 @@ pub fn reconstruction_pipeline_multi<'py>( reconstruction_to_pymesh(reconstruction) } else { Err(PyTypeError::new_err(format!( - "Unsupported scalar type for reconstruction: {}, only float32 and float64 are supported", + "Unsupported scalar type {} for reconstruction, only float32 and float64 are supported", element_type ))) } @@ -254,387 +252,3 @@ fn reconstruction_pipeline_generic_impl<'py, I: Index, R: Real + Element>( &postprocessing_args, ) } - -fn reconstruction_pipeline_generic<'py, I: Index, R: Real + Element>( - particles: &Bound<'py, PyArray2>, - attributes_to_interpolate: Bound<'py, PyDict>, - particle_radius: R, - rest_density: R, - smoothing_length: R, - cube_size: R, - iso_surface_threshold: R, - aabb_min: Option<[R; 3]>, - aabb_max: Option<[R; 3]>, - multi_threading: bool, - subdomain_grid: bool, - subdomain_grid_auto_disable: bool, - subdomain_num_cubes_per_dim: u32, - check_mesh_closed: bool, - check_mesh_manifold: bool, - check_mesh_orientation: bool, - check_mesh_debug: bool, - mesh_cleanup: bool, - mesh_cleanup_snap_dist: Option, - decimate_barnacles: bool, - keep_vertices: bool, - compute_normals: bool, - sph_normals: bool, - normals_smoothing_iters: Option, - mesh_smoothing_iters: Option, - mesh_smoothing_weights: bool, - mesh_smoothing_weights_normalization: f64, - generate_quads: bool, - quad_max_edge_diag_ratio: f64, - quad_max_normal_angle: f64, - quad_max_interior_angle: f64, - output_mesh_smoothing_weights: bool, - output_raw_normals: bool, - output_raw_mesh: bool, - mesh_aabb_min: Option<[f64; 3]>, - mesh_aabb_max: Option<[f64; 3]>, - mesh_aabb_clamp_vertices: bool, -) -> Result, anyhow::Error> { - let particles: PyReadonlyArray2 = particles.readonly(); - let particle_positions: &[Vector3] = bytemuck::cast_slice(particles.as_slice()?); - - enum AttributePyView<'a, R: Real + Element> { - U64(PyReadonlyArray1<'a, u64>), - Float(PyReadonlyArray1<'a, R>), - FloatVec3(PyReadonlyArray2<'a, R>), - } - - let mut attr_names = Vec::new(); - let mut attr_views = Vec::new(); - - // Collect readonly views of all attribute arrays - for (key, value) in attributes_to_interpolate.iter() { - let key_str: String = key - .downcast::() - .expect("Key wasn't a string") - .extract()?; - - if let Ok(value) = value.downcast::>() { - attr_views.push(AttributePyView::U64(value.readonly())); - attr_names.push(key_str); - } else if let Ok(value) = value.downcast::>() { 
- attr_views.push(AttributePyView::Float(value.readonly())); - attr_names.push(key_str); - } else if let Ok(value) = value.downcast::>() { - attr_views.push(AttributePyView::FloatVec3(value.readonly())); - attr_names.push(key_str); - } else { - println!("Couldn't downcast attribute {} to valid type", &key_str); - } - } - - // Get slices from attribute views and construct borrowed MeshAttributes - let attributes = attr_names - .into_iter() - .zip(attr_views.iter()) - .map(|(name, view)| -> Result, anyhow::Error> { - let data = match view { - AttributePyView::U64(view) => { - AttributeData::ScalarU64(Cow::Borrowed(view.as_slice()?.into())) - } - AttributePyView::Float(view) => { - AttributeData::ScalarReal(Cow::Borrowed(view.as_slice()?.into())) - } - AttributePyView::FloatVec3(view) => { - let vec3_slice: &[Vector3] = bytemuck::cast_slice(view.as_slice()?); - AttributeData::Vector3Real(Cow::Borrowed(vec3_slice.into())) - } - }; - Ok(MeshAttribute::new(name, data)) - }) - .collect::, _>>()?; - - let aabb = if let (Some(aabb_min), Some(aabb_max)) = (aabb_min, aabb_max) { - // Convert the min and max arrays to Vector3 - Some(Aabb3d::new( - Vector3::from(aabb_min), - Vector3::from(aabb_max), - )) - } else { - None - }; - - let spatial_decomposition = if subdomain_grid { - SpatialDecomposition::UniformGrid(GridDecompositionParameters { - subdomain_num_cubes_per_dim, - auto_disable: subdomain_grid_auto_disable, - }) - } else { - SpatialDecomposition::None - }; - - let params = splashsurf_lib::Parameters { - particle_radius, - rest_density, - compact_support_radius: R::from_float(2.0) * smoothing_length * particle_radius, - cube_size: cube_size * particle_radius, - iso_surface_threshold, - particle_aabb: aabb, - enable_multi_threading: multi_threading, - spatial_decomposition, - global_neighborhood_list: mesh_smoothing_weights, - }; - - let mesh_aabb = - if let (Some(mesh_aabb_min), Some(mesh_aabb_max)) = (mesh_aabb_min, mesh_aabb_max) { - // Convert the min and max arrays to Vector3 - Some(Aabb3d::new( - Vector3::from(mesh_aabb_min), - Vector3::from(mesh_aabb_max), - )) - } else { - None - }; - - let postprocessing_args = splashsurf::reconstruct::ReconstructionPostprocessingParameters { - check_mesh_closed, - check_mesh_manifold, - check_mesh_orientation, - check_mesh_debug, - mesh_cleanup, - mesh_cleanup_snap_dist, - decimate_barnacles, - keep_vertices, - compute_normals, - sph_normals, - normals_smoothing_iters, - interpolate_attributes: Some(attributes.iter().map(|a| a.name.clone()).collect()), - mesh_smoothing_iters, - mesh_smoothing_weights, - mesh_smoothing_weights_normalization, - generate_quads, - quad_max_edge_diag_ratio, - quad_max_normal_angle, - quad_max_interior_angle, - output_mesh_smoothing_weights, - output_raw_normals, - output_raw_mesh, - mesh_aabb, - mesh_aabb_clamp_vertices, - }; - - splashsurf::reconstruct::reconstruction_pipeline( - particle_positions, - &attributes, - ¶ms, - &postprocessing_args, - ) -} - -#[pyfunction] -#[pyo3(name = "reconstruction_pipeline_f32")] -#[pyo3(signature = (particles, *, attributes_to_interpolate, particle_radius, rest_density, - smoothing_length, cube_size, iso_surface_threshold, - aabb_min = None, aabb_max = None, multi_threading = true, - subdomain_grid = true, subdomain_grid_auto_disable = true, subdomain_num_cubes_per_dim = 64, - check_mesh_closed = false, check_mesh_manifold = false, check_mesh_orientation = false, check_mesh_debug = false, - mesh_cleanup, mesh_cleanup_snap_dist = None, decimate_barnacles, keep_vertices, compute_normals, 
sph_normals, - normals_smoothing_iters, mesh_smoothing_iters, mesh_smoothing_weights, mesh_smoothing_weights_normalization, - generate_quads = false, quad_max_edge_diag_ratio = 1.75, quad_max_normal_angle = 10.0, quad_max_interior_angle = 135.0, - output_mesh_smoothing_weights, output_raw_normals, output_raw_mesh=false, - mesh_aabb_min, mesh_aabb_max, mesh_aabb_clamp_vertices -))] -pub fn reconstruction_pipeline_py_f32<'py>( - particles: &Bound<'py, PyArray2>, - attributes_to_interpolate: Bound<'py, PyDict>, - particle_radius: f32, - rest_density: f32, - smoothing_length: f32, - cube_size: f32, - iso_surface_threshold: f32, - aabb_min: Option<[f32; 3]>, - aabb_max: Option<[f32; 3]>, - multi_threading: bool, - subdomain_grid: bool, - subdomain_grid_auto_disable: bool, - subdomain_num_cubes_per_dim: u32, - check_mesh_closed: bool, - check_mesh_manifold: bool, - check_mesh_orientation: bool, - check_mesh_debug: bool, - mesh_cleanup: bool, - mesh_cleanup_snap_dist: Option, - decimate_barnacles: bool, - keep_vertices: bool, - compute_normals: bool, - sph_normals: bool, - normals_smoothing_iters: Option, - mesh_smoothing_iters: Option, - mesh_smoothing_weights: bool, - mesh_smoothing_weights_normalization: f64, - generate_quads: bool, - quad_max_edge_diag_ratio: f64, - quad_max_normal_angle: f64, - quad_max_interior_angle: f64, - output_mesh_smoothing_weights: bool, - output_raw_normals: bool, - output_raw_mesh: bool, - mesh_aabb_min: Option<[f64; 3]>, - mesh_aabb_max: Option<[f64; 3]>, - mesh_aabb_clamp_vertices: bool, -) -> PyResult<( - Option, - Option, - Option, -)> { - let splashsurf::reconstruct::ReconstructionResult { - tri_mesh, - tri_quad_mesh, - raw_reconstruction: reconstruction, - } = reconstruction_pipeline_generic::( - particles, - attributes_to_interpolate, - particle_radius, - rest_density, - smoothing_length, - cube_size, - iso_surface_threshold, - aabb_min, - aabb_max, - multi_threading, - subdomain_grid, - subdomain_grid_auto_disable, - subdomain_num_cubes_per_dim, - check_mesh_closed, - check_mesh_manifold, - check_mesh_orientation, - check_mesh_debug, - mesh_cleanup, - mesh_cleanup_snap_dist, - decimate_barnacles, - keep_vertices, - compute_normals, - sph_normals, - normals_smoothing_iters, - mesh_smoothing_iters, - mesh_smoothing_weights, - mesh_smoothing_weights_normalization, - generate_quads, - quad_max_edge_diag_ratio, - quad_max_normal_angle, - quad_max_interior_angle, - output_mesh_smoothing_weights, - output_raw_normals, - output_raw_mesh, - mesh_aabb_min, - mesh_aabb_max, - mesh_aabb_clamp_vertices, - )?; - - Ok(( - tri_mesh.map(TriMeshWithDataF32::new), - tri_quad_mesh.map(MixedTriQuadMeshWithDataF32::new), - reconstruction.map(SurfaceReconstructionF32::new), - )) -} - -#[pyfunction] -#[pyo3(name = "reconstruction_pipeline_f64")] -#[pyo3(signature = (particles, *, attributes_to_interpolate, particle_radius, rest_density, - smoothing_length, cube_size, iso_surface_threshold, - aabb_min = None, aabb_max = None, multi_threading = true, - subdomain_grid = true, subdomain_grid_auto_disable = true, subdomain_num_cubes_per_dim = 64, - check_mesh_closed = false, check_mesh_manifold = false, check_mesh_orientation = false, check_mesh_debug = false, - mesh_cleanup, mesh_cleanup_snap_dist = None, decimate_barnacles, keep_vertices, compute_normals, sph_normals, - normals_smoothing_iters, mesh_smoothing_iters, mesh_smoothing_weights, mesh_smoothing_weights_normalization, - generate_quads = false, quad_max_edge_diag_ratio = 1.75, quad_max_normal_angle = 10.0, 
quad_max_interior_angle = 135.0, - output_mesh_smoothing_weights, output_raw_normals, output_raw_mesh=false, - mesh_aabb_min, mesh_aabb_max, mesh_aabb_clamp_vertices -))] -pub fn reconstruction_pipeline_py_f64<'py>( - particles: &Bound<'py, PyArray2>, - attributes_to_interpolate: Bound<'py, PyDict>, - particle_radius: f64, - rest_density: f64, - smoothing_length: f64, - cube_size: f64, - iso_surface_threshold: f64, - aabb_min: Option<[f64; 3]>, - aabb_max: Option<[f64; 3]>, - multi_threading: bool, - subdomain_grid: bool, - subdomain_grid_auto_disable: bool, - subdomain_num_cubes_per_dim: u32, - check_mesh_closed: bool, - check_mesh_manifold: bool, - check_mesh_orientation: bool, - check_mesh_debug: bool, - mesh_cleanup: bool, - mesh_cleanup_snap_dist: Option, - decimate_barnacles: bool, - keep_vertices: bool, - compute_normals: bool, - sph_normals: bool, - normals_smoothing_iters: Option, - mesh_smoothing_iters: Option, - mesh_smoothing_weights: bool, - mesh_smoothing_weights_normalization: f64, - generate_quads: bool, - quad_max_edge_diag_ratio: f64, - quad_max_normal_angle: f64, - quad_max_interior_angle: f64, - output_mesh_smoothing_weights: bool, - output_raw_normals: bool, - output_raw_mesh: bool, - mesh_aabb_min: Option<[f64; 3]>, - mesh_aabb_max: Option<[f64; 3]>, - mesh_aabb_clamp_vertices: bool, -) -> PyResult<( - Option, - Option, - Option, -)> { - let splashsurf::reconstruct::ReconstructionResult { - tri_mesh, - tri_quad_mesh, - raw_reconstruction: reconstruction, - } = reconstruction_pipeline_generic::( - particles, - attributes_to_interpolate, - particle_radius, - rest_density, - smoothing_length, - cube_size, - iso_surface_threshold, - aabb_min, - aabb_max, - multi_threading, - subdomain_grid, - subdomain_grid_auto_disable, - subdomain_num_cubes_per_dim, - check_mesh_closed, - check_mesh_manifold, - check_mesh_orientation, - check_mesh_debug, - mesh_cleanup, - mesh_cleanup_snap_dist, - decimate_barnacles, - keep_vertices, - compute_normals, - sph_normals, - normals_smoothing_iters, - mesh_smoothing_iters, - mesh_smoothing_weights, - mesh_smoothing_weights_normalization, - generate_quads, - quad_max_edge_diag_ratio, - quad_max_normal_angle, - quad_max_interior_angle, - output_mesh_smoothing_weights, - output_raw_normals, - output_raw_mesh, - mesh_aabb_min, - mesh_aabb_max, - mesh_aabb_clamp_vertices, - )?; - - Ok(( - tri_mesh.map(TriMeshWithDataF64::new), - tri_quad_mesh.map(MixedTriQuadMeshWithDataF64::new), - reconstruction.map(SurfaceReconstructionF64::new), - )) -} diff --git a/pysplashsurf/src/reconstruction.rs b/pysplashsurf/src/reconstruction.rs index aabf5aa..e2ba8d5 100644 --- a/pysplashsurf/src/reconstruction.rs +++ b/pysplashsurf/src/reconstruction.rs @@ -1,14 +1,19 @@ -use numpy::{PyArray2, PyReadonlyArray2}; +use crate::mesh::PyTriMesh3d; +use crate::utils::*; +use crate::{ + mesh::{TriMesh3dF32, TriMesh3dF64}, + uniform_grid::{UniformGridF32, UniformGridF64}, +}; +use anyhow::anyhow; +use numpy as np; +use numpy::prelude::*; +use numpy::{Element, PyArray2, PyReadonlyArray2, PyUntypedArray}; +use pyo3::exceptions::PyTypeError; use pyo3::{Bound, prelude::*}; use pyo3_stub_gen::derive::*; use splashsurf_lib::{ Aabb3d, GridDecompositionParameters, Index, Real, SpatialDecomposition, SurfaceReconstruction, - nalgebra::Vector3, reconstruct_surface, -}; - -use crate::{ - mesh::{TriMesh3dF32, TriMesh3dF64}, - uniform_grid::{UniformGridF32, UniformGridF64}, + UniformGrid, nalgebra::Vector3, }; macro_rules! create_reconstruction_interface { @@ -69,6 +74,193 @@ macro_rules! 
create_reconstruction_interface { create_reconstruction_interface!(SurfaceReconstructionF64, f64, TriMesh3dF64, UniformGridF64); create_reconstruction_interface!(SurfaceReconstructionF32, f32, TriMesh3dF32, UniformGridF32); +enum PyUniformGridData { + F32(UniformGrid), + F64(UniformGrid), +} + +/// Struct containing the parameters of a uniform grid used for the surface reconstruction +#[gen_stub_pyclass] +#[pyclass] +pub struct PyUniformGrid { + inner: PyUniformGridData, +} + +impl_from_mesh!(PyUniformGrid, UniformGrid => PyUniformGridData::F32); +impl_from_mesh!(PyUniformGrid, UniformGrid => PyUniformGridData::F64); + +enum PySurfaceReconstructionData { + F32(SurfaceReconstruction), + F64(SurfaceReconstruction), +} + +/// Struct containing results of the surface reconstruction including the mesh, grid parameters and optional particle data +#[gen_stub_pyclass] +#[pyclass] +pub struct PySurfaceReconstruction { + inner: PySurfaceReconstructionData, +} + +impl_from_mesh!(PySurfaceReconstruction, SurfaceReconstruction => PySurfaceReconstructionData::F32); +impl_from_mesh!(PySurfaceReconstruction, SurfaceReconstruction => PySurfaceReconstructionData::F64); + +impl PySurfaceReconstruction { + pub fn try_from_generic( + mut reconstruction: SurfaceReconstruction, + ) -> PyResult { + transmute_take_into::<_, SurfaceReconstruction, _>(&mut reconstruction) + .or_else(|| { + transmute_take_into::<_, SurfaceReconstruction, _>(&mut reconstruction) + }) + .ok_or_else(|| PyTypeError::new_err("unsupported type of reconstruction, only u64 for Index and f32 and f64 for Real type are supported")) + } +} + +#[gen_stub_pymethods] +#[pymethods] +impl PySurfaceReconstruction { + /// Returns a copy of the surface mesh of the reconstruction + fn copy_mesh(&self) -> PyResult { + match &self.inner { + PySurfaceReconstructionData::F32(reconstruction) => { + Ok(PyTriMesh3d::from(reconstruction.mesh().clone())) + } + PySurfaceReconstructionData::F64(reconstruction) => { + Ok(PyTriMesh3d::from(reconstruction.mesh().clone())) + } + } + } + + /// Returns a copy of the uniform grid parameters used for the reconstruction + fn copy_grid(&self) -> PyUniformGrid { + match &self.inner { + PySurfaceReconstructionData::F32(reconstruction) => { + PyUniformGrid::from(reconstruction.grid().clone()) + } + PySurfaceReconstructionData::F64(reconstruction) => { + PyUniformGrid::from(reconstruction.grid().clone()) + } + } + } + + /// Returns a copy of the particle densities computed during the reconstruction + fn copy_particle_densities<'py>(&self, py: Python<'py>) -> Option> { + match &self.inner { + PySurfaceReconstructionData::F32(reconstruction) => Some( + reconstruction + .particle_densities()? + .to_pyarray(py) + .into_any() + .downcast_into::() + .expect("downcasting should not fail"), + ), + PySurfaceReconstructionData::F64(reconstruction) => Some( + reconstruction + .particle_densities()? + .to_pyarray(py) + .into_any() + .downcast_into::() + .expect("downcasting should not fail"), + ), + } + } + + /// Returns a copy of the per-particle neighborhood lists computed during the reconstruction if available + /// + /// The neighborhood lists are only available if the flag for global neighborhood list was set in the reconstruction parameters. 
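In other words, the flag passed at reconstruction time decides whether this accessor can return anything. A two-line sketch under the same assumptions as the earlier examples (illustrative values, package-level re-export, `particles` defined as before):

    recon = pysplashsurf.reconstruct_surface_multi(
        particles, particle_radius=0.025, smoothing_length=2.0, cube_size=0.5,
        global_neighborhood_list=True,   # without this flag the call below returns None
    )
    neighbors = recon.copy_particle_neighbors()  # one list of neighbor indices per particle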
+ fn copy_particle_neighbors(&self) -> Option>> { + match &self.inner { + PySurfaceReconstructionData::F32(reconstruction) => reconstruction + .particle_neighbors() + .map(|neighbors| neighbors.clone()), + PySurfaceReconstructionData::F64(reconstruction) => reconstruction + .particle_neighbors() + .map(|neighbors| neighbors.clone()), + } + } +} + +/// Performs a surface reconstruction from the given particles without additional post-processing +/// +/// Note that all parameters use absolute distance units and are not relative to the particle radius. +#[gen_stub_pyfunction] +#[pyfunction] +#[pyo3(name = "reconstruct_surface_multi")] +#[pyo3(signature = (particles, *, + particle_radius, rest_density = 1000.0, smoothing_length, cube_size, iso_surface_threshold = 0.6, + multi_threading = true, global_neighborhood_list = false, + subdomain_grid = true, subdomain_grid_auto_disable = true, subdomain_num_cubes_per_dim = 64, + aabb_min = None, aabb_max = None +))] +pub fn reconstruct_surface_multi<'py>( + particles: &Bound<'py, PyUntypedArray>, + particle_radius: f64, + rest_density: f64, + smoothing_length: f64, + cube_size: f64, + iso_surface_threshold: f64, + multi_threading: bool, + global_neighborhood_list: bool, + subdomain_grid: bool, + subdomain_grid_auto_disable: bool, + subdomain_num_cubes_per_dim: u32, + aabb_min: Option<[f64; 3]>, + aabb_max: Option<[f64; 3]>, +) -> PyResult { + let py = particles.py(); + + let particle_aabb = aabb_min + .zip(aabb_max) + .map(|(min, max)| Aabb3d::new(Vector3::from(min), Vector3::from(max))); + + let spatial_decomposition = if subdomain_grid { + SpatialDecomposition::UniformGrid(GridDecompositionParameters { + subdomain_num_cubes_per_dim, + auto_disable: subdomain_grid_auto_disable, + }) + } else { + SpatialDecomposition::None + }; + + let parameters = splashsurf_lib::Parameters { + particle_radius, + rest_density, + compact_support_radius: 2.0 * smoothing_length * particle_radius, + cube_size: cube_size * particle_radius, + iso_surface_threshold, + particle_aabb, + enable_multi_threading: multi_threading, + spatial_decomposition, + global_neighborhood_list, + }; + + let element_type = particles.dtype(); + if element_type.is_equiv_to(&np::dtype::(py)) { + let particles = particles.downcast::>()?.readonly(); + let particle_positions: &[Vector3] = bytemuck::cast_slice(particles.as_slice()?); + let reconstruction = splashsurf_lib::reconstruct_surface::( + particle_positions, + ¶meters + .try_convert() + .expect("failed to convert reconstruction parameters to f32"), + ) + .map_err(|e| anyhow!(e))?; + PySurfaceReconstruction::try_from_generic(reconstruction) + } else if element_type.is_equiv_to(&np::dtype::(py)) { + let particles = particles.downcast::>()?.readonly(); + let particle_positions: &[Vector3] = bytemuck::cast_slice(particles.as_slice()?); + let reconstruction = + splashsurf_lib::reconstruct_surface::(particle_positions, ¶meters) + .map_err(|e| anyhow!(e))?; + PySurfaceReconstruction::try_from_generic(reconstruction) + } else { + Err(PyTypeError::new_err(format!( + "unsupported scalar type {} for reconstruction, only float32 and float64 are supported", + element_type + ))) + } +} + /// Reconstruct the surface from only particle positions pub fn reconstruct_surface_py( particles: &[Vector3], @@ -119,7 +311,7 @@ pub fn reconstruct_surface_py( global_neighborhood_list, }; - let surface = reconstruct_surface(&particles, ¶ms).unwrap(); + let surface = splashsurf_lib::reconstruct_surface(&particles, ¶ms).unwrap(); surface } diff --git 
a/pysplashsurf/src/utils.rs b/pysplashsurf/src/utils.rs new file mode 100644 index 0000000..94a93d5 --- /dev/null +++ b/pysplashsurf/src/utils.rs @@ -0,0 +1,45 @@ +macro_rules! impl_from_mesh { + ($pyclass:ident, $mesh:ty => $target_enum:path) => { + impl From<$mesh> for $pyclass { + fn from(mesh: $mesh) -> Self { + Self { + inner: $target_enum(mesh), + } + } + } + }; +} + +pub(crate) use impl_from_mesh; + +/// Transmutes from a generic type to a concrete type if they are identical, takes the value and converts it into the target type +pub fn transmute_take_into< + GenericSrc: 'static, + ConcreteSrc: Default + Into + 'static, + Target, +>( + value: &mut GenericSrc, +) -> Option { + if std::any::TypeId::of::() == std::any::TypeId::of::() { + let value_ref = unsafe { std::mem::transmute::<&mut GenericSrc, &mut ConcreteSrc>(value) }; + Some(std::mem::take(value_ref).into()) + } else { + None + } +} + +pub fn transmute_replace_into< + GenericSrc: 'static, + ConcreteSrc: Into + 'static, + Target, +>( + value: &mut GenericSrc, + replace: ConcreteSrc, +) -> Option { + if std::any::TypeId::of::() == std::any::TypeId::of::() { + let value_ref = unsafe { std::mem::transmute::<&mut GenericSrc, &mut ConcreteSrc>(value) }; + Some(std::mem::replace(value_ref, replace).into()) + } else { + None + } +} From 51c7f7c03440bf41a0a04d113d1367d6d02fdf8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Thu, 28 Aug 2025 00:03:29 +0200 Subject: [PATCH 11/63] Py: Implement check_mesh_consistency, remove old code --- pysplashsurf/pysplashsurf/__init__.py | 118 -------------- pysplashsurf/pysplashsurf/pysplashsurf.pyi | 28 +++- pysplashsurf/src/lib.rs | 26 +--- pysplashsurf/src/marching_cubes.rs | 137 ++++++++--------- pysplashsurf/src/mesh.rs | 60 +++++++- pysplashsurf/src/reconstruction.rs | 171 +++------------------ 6 files changed, 174 insertions(+), 366 deletions(-) diff --git a/pysplashsurf/pysplashsurf/__init__.py b/pysplashsurf/pysplashsurf/__init__.py index dada9dc..c69712d 100644 --- a/pysplashsurf/pysplashsurf/__init__.py +++ b/pysplashsurf/pysplashsurf/__init__.py @@ -200,88 +200,6 @@ def create_aabb_object_from_points(points): else: raise ValueError("Invalid data type (only float32 and float64 are supported, consider explicitly specifying the dtype for points)") -def reconstruct_surface( - particles, *, - particle_radius: float = 0.025, - rest_density: float = 1000.0, - smoothing_length: float = 2.0, - cube_size: float = 0.5, - iso_surface_threshold: float = 0.6, - multi_threading: bool = True, - global_neighborhood_list: bool = False, - subdomain_grid: bool = True, - subdomain_grid_auto_disable: bool = True, - subdomain_num_cubes_per_dim: int = 64, - aabb_min = None, - aabb_max = None, -): - """Reconstruct the surface from only particle positions - - Performs a marching cubes surface construction of the fluid represented by the given particle positions - - Parameters - ---------- - particles: np.ndarray - 2-dimensional array containing all particle positions [[ax, ay, az], [bx, by, bz], ...] 
- - particle_radius: float, optional (default=0.025) - Particle radius - - rest_density: float - Rest density of the fluid - - smoothing_length: float - Smoothing length of the fluid - - cube_size: float - Size of the cubes used in the uniform grid - - iso_surface_threshold: float - Threshold for the iso surface - - multi_threading: bool - Multi-threading flag - - global_neighborhood_list: bool - Global neighborhood list flag - - subdomain_grid: bool - Enable spatial decomposition using by dividing the domain into subdomains with dense marching cube grids for efficient multi-threading - - subdomain_grid_auto_disable: bool - Whether to automatically disable the subdomain grid if the global domain is too small - - subdomain_num_cubes_per_dim: int - Each subdomain will be a cube consisting of this number of MC cube cells along each coordinate axis - - aabb_min: np.ndarray - Smallest corner of the axis-aligned bounding box - - aabb_max: np.ndarray - Largest corner of the axis-aligned bounding box - - Returns - ------- - SurfaceReconstructionF32 | SurfaceReconstructionF64 - SurfaceReconstruction object containing the reconstructed mesh and used grid - - """ - - if particles.dtype == 'float32': - return reconstruct_surface_f32(particles, particle_radius=particle_radius, rest_density=rest_density, - smoothing_length=smoothing_length, cube_size=cube_size, iso_surface_threshold=iso_surface_threshold, - multi_threading=multi_threading, global_neighborhood_list=global_neighborhood_list, - subdomain_grid=subdomain_grid, subdomain_grid_auto_disable=subdomain_grid_auto_disable, subdomain_num_cubes_per_dim=subdomain_num_cubes_per_dim, - aabb_min=aabb_min, aabb_max=aabb_max) - elif particles.dtype == 'float64': - return reconstruct_surface_f64(particles, particle_radius=particle_radius, rest_density=rest_density, - smoothing_length=smoothing_length, cube_size=cube_size, iso_surface_threshold=iso_surface_threshold, - multi_threading=multi_threading, global_neighborhood_list=global_neighborhood_list, - subdomain_grid=subdomain_grid, subdomain_grid_auto_disable=subdomain_grid_auto_disable, subdomain_num_cubes_per_dim=subdomain_num_cubes_per_dim, - aabb_min=aabb_min, aabb_max=aabb_max) - else: - raise ValueError("Invalid data type (only float32 and float64 are supported, consider explicitly specifying the dtype for particles)") - def marching_cubes_cleanup( mesh, grid, @@ -449,42 +367,6 @@ def neighborhood_search_spatial_hashing_parallel( else: raise ValueError("Invalid domain type") -def check_mesh_consistency( - grid, - mesh, *, - check_closed: bool, - check_manifold: bool, - debug: bool, -): - """Checks the consistency of the mesh (currently checks for holes, non-manifold edges and vertices) and returns a string with debug information in case of problems - - Parameters - ---------- - grid: UniformGridF32 | UniformGridF64 - Uniform grid object - - mesh: TriMesh3dF32 | TriMesh3dF64 | TriMeshWithDataF32 | TriMeshWithDataF64 - Triangular mesh object - - check_closed: bool - Flag to check for closed mesh - - check_manifold: bool - Flag to check for manifold mesh - - debug: bool - Flag to enable debug output - """ - - if type(grid) is UniformGridF32 and (type(mesh) is TriMesh3dF32 or type(mesh) is TriMeshWithDataF32): - return check_mesh_consistency_f32(grid, mesh, check_closed=check_closed, check_manifold=check_manifold, debug=debug) - - elif type(grid) is UniformGridF64 and (type(mesh) is TriMesh3dF64 or type(mesh) is TriMeshWithDataF64): - return check_mesh_consistency_f64(grid, mesh, 
check_closed=check_closed, check_manifold=check_manifold, debug=debug) - - else: - raise ValueError("Invalid grid or mesh type") - def convert_tris_to_quads( mesh, *, non_squareness_limit: float, diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index f5ba284..bba7c63 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -5,6 +5,7 @@ import builtins import numpy import numpy.typing import typing +from enum import Enum class Aabb3dF32: r""" @@ -330,6 +331,11 @@ class PyMeshWithData: r""" Returns the numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) """ + @property + def mesh_cell_type(self) -> MeshType: + r""" + Returns the type of the underlying mesh + """ def copy_mesh(self) -> typing.Union[PyTriMesh3d, PyMixedTriQuadMesh3d]: r""" Returns a copy of the contained mesh without associated data and attributes @@ -666,9 +672,27 @@ class UniformGridF64: """ ... -def reconstruct_surface_multi(particles:numpy.typing.NDArray[typing.Any], *, particle_radius:builtins.float, rest_density:builtins.float=1000.0, smoothing_length:builtins.float, cube_size:builtins.float, iso_surface_threshold:builtins.float=0.6, multi_threading:builtins.bool=True, global_neighborhood_list:builtins.bool=False, subdomain_grid:builtins.bool=True, subdomain_grid_auto_disable:builtins.bool=True, subdomain_num_cubes_per_dim:builtins.int=64, aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None) -> PySurfaceReconstruction: +class MeshType(Enum): + r""" + Enum specifying the type of mesh contained in a `MeshWithData` + """ + Tri3d = ... + r""" + 3D triangle mesh + """ + MixedTriQuad3d = ... + r""" + 3D mixed triangle and quad mesh + """ + +def check_mesh_consistency(grid:PyUniformGrid, mesh:typing.Any, *, check_closed:builtins.bool=True, check_manifold:builtins.bool=True, debug:builtins.bool=False) -> typing.Optional[builtins.str]: + r""" + Checks the consistency of a reconstructed surface mesh (watertightness, manifoldness), optionally returns a string with details if problems are found + """ + +def reconstruct_surface(particles:numpy.typing.NDArray[typing.Any], *, particle_radius:builtins.float, rest_density:builtins.float=1000.0, smoothing_length:builtins.float, cube_size:builtins.float, iso_surface_threshold:builtins.float=0.6, multi_threading:builtins.bool=True, global_neighborhood_list:builtins.bool=False, subdomain_grid:builtins.bool=True, subdomain_grid_auto_disable:builtins.bool=True, subdomain_num_cubes_per_dim:builtins.int=64, aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None) -> PySurfaceReconstruction: r""" - Performs a surface reconstruction of the given particles without post-processing + Performs a surface reconstruction from the given particles without additional post-processing Note that all parameters use absolute distance units and are not relative to the particle radius. 
""" diff --git a/pysplashsurf/src/lib.rs b/pysplashsurf/src/lib.rs index 2fb6c3e..b5206e9 100644 --- a/pysplashsurf/src/lib.rs +++ b/pysplashsurf/src/lib.rs @@ -48,25 +48,16 @@ fn pysplashsurf(m: &Bound<'_, PyModule>) -> PyResult<()> { let _ = m.add_class::()?; let _ = m.add_class::()?; + let _ = m.add_class::()?; + let _ = m.add_class::()?; + let _ = m.add_class::()?; let _ = m.add_class::()?; let _ = m.add_class::()?; let _ = m.add_class::()?; - let _ = m.add_function(wrap_pyfunction!( - reconstruction::reconstruct_surface_py_f32, - m - )?); - let _ = m.add_function(wrap_pyfunction!( - reconstruction::reconstruct_surface_py_f64, - m - )?); - - let _ = m.add_function(wrap_pyfunction!( - reconstruction::reconstruct_surface_multi, - m - )?); + let _ = m.add_function(wrap_pyfunction!(reconstruction::reconstruct_surface, m)?); let _ = m.add_function(wrap_pyfunction!( post_processing::convert_tris_to_quads_py_f32, @@ -86,14 +77,7 @@ fn pysplashsurf(m: &Bound<'_, PyModule>) -> PyResult<()> { m )?); - let _ = m.add_function(wrap_pyfunction!( - marching_cubes::check_mesh_consistency_py_f32, - m - )?); - let _ = m.add_function(wrap_pyfunction!( - marching_cubes::check_mesh_consistency_py_f64, - m - )?); + let _ = m.add_function(wrap_pyfunction!(marching_cubes::check_mesh_consistency, m)?); let _ = m.add_function(wrap_pyfunction!(post_processing::decimation_py_f32, m)?); let _ = m.add_function(wrap_pyfunction!(post_processing::decimation_py_f64, m)?); diff --git a/pysplashsurf/src/marching_cubes.rs b/pysplashsurf/src/marching_cubes.rs index 8dedd67..dbab1ca 100644 --- a/pysplashsurf/src/marching_cubes.rs +++ b/pysplashsurf/src/marching_cubes.rs @@ -1,77 +1,76 @@ -use pyo3::{ - exceptions::{PyRuntimeError, PyValueError}, - prelude::*, -}; - -use crate::{ - mesh::{TriMesh3dF32, TriMesh3dF64, TriMeshWithDataF32, TriMeshWithDataF64}, - uniform_grid::{UniformGridF32, UniformGridF64}, -}; - -#[pyfunction] -#[pyo3(name = "check_mesh_consistency_f32")] -#[pyo3(signature = (grid, mesh, *, check_closed, check_manifold, debug))] -pub fn check_mesh_consistency_py_f32<'py>( - py: Python, - grid: &UniformGridF32, - mesh: PyObject, - check_closed: bool, - check_manifold: bool, - debug: bool, -) -> PyResult<()> { - if let Ok(mesh) = mesh.downcast_bound::(py) { - splashsurf_lib::marching_cubes::check_mesh_consistency( - &grid.inner, - &mesh.borrow().inner, - check_closed, - check_manifold, - debug, - ) - .map_err(|x| PyErr::new::(x)) - } else if let Ok(mesh) = mesh.downcast_bound::(py) { - splashsurf_lib::marching_cubes::check_mesh_consistency( - &grid.inner, - &mesh.borrow().inner.mesh, - check_closed, - check_manifold, - debug, - ) - .map_err(|x| PyErr::new::(x)) - } else { - Err(PyErr::new::("Invalid mesh type")) - } -} +use crate::mesh::{MeshType, PyMeshWithData, PyTriMesh3d}; +use crate::reconstruction::PyUniformGrid; +use pyo3::exceptions::PyTypeError; +use pyo3::prelude::*; +use pyo3_stub_gen::derive::*; +/// Checks the consistency of a reconstructed surface mesh (watertightness, manifoldness), optionally returns a string with details if problems are found +#[gen_stub_pyfunction] #[pyfunction] -#[pyo3(name = "check_mesh_consistency_f64")] -#[pyo3(signature = (grid, mesh, *, check_closed, check_manifold, debug))] -pub fn check_mesh_consistency_py_f64<'py>( - py: Python, - grid: &UniformGridF64, - mesh: PyObject, +#[pyo3(name = "check_mesh_consistency")] +#[pyo3(signature = (mesh, grid, *, check_closed = true, check_manifold = true, debug = false))] +pub fn check_mesh_consistency<'py>( + mesh: Bound<'py, 
PyAny>, + grid: &PyUniformGrid, check_closed: bool, check_manifold: bool, debug: bool, -) -> PyResult<()> { - if let Ok(mesh) = mesh.downcast_bound::(py) { - splashsurf_lib::marching_cubes::check_mesh_consistency( - &grid.inner, - &mesh.borrow().inner, - check_closed, - check_manifold, - debug, - ) - .map_err(|x| PyErr::new::(x)) - } else if let Ok(mesh) = mesh.downcast_bound::(py) { - splashsurf_lib::marching_cubes::check_mesh_consistency( - &grid.inner, - &mesh.borrow().inner.mesh, - check_closed, - check_manifold, - debug, - ) - .map_err(|x| PyErr::new::(x)) +) -> PyResult> { + if let Ok(mesh) = mesh.downcast::() { + let mesh = mesh.borrow(); + if let (Some(grid), Some(mesh)) = (grid.as_f32(), mesh.as_f32()) { + Ok(splashsurf_lib::marching_cubes::check_mesh_consistency( + grid, + mesh, + check_closed, + check_manifold, + debug, + ) + .err()) + } else if let (Some(grid), Some(mesh)) = (grid.as_f64(), mesh.as_f64()) { + Ok(splashsurf_lib::marching_cubes::check_mesh_consistency( + grid, + mesh, + check_closed, + check_manifold, + debug, + ) + .err()) + } else { + Err(PyTypeError::new_err( + "invalid combination of grid and mesh scalar data types", + )) + } + } else if let Ok(mesh) = mesh.downcast::() + && let mesh = mesh.borrow() + && mesh.mesh_cell_type() == MeshType::Tri3d + { + if let (Some(grid), Some(mesh)) = (grid.as_f32(), mesh.as_tri_f32()) { + Ok(splashsurf_lib::marching_cubes::check_mesh_consistency( + grid, + &mesh.mesh, + check_closed, + check_manifold, + debug, + ) + .err()) + } else if let (Some(grid), Some(mesh)) = (grid.as_f64(), mesh.as_tri_f64()) { + Ok(splashsurf_lib::marching_cubes::check_mesh_consistency( + grid, + &mesh.mesh, + check_closed, + check_manifold, + debug, + ) + .err()) + } else { + Err(PyTypeError::new_err( + "invalid combination of grid and mesh scalar data types", + )) + } } else { - Err(PyErr::new::("Invalid mesh type")) + Err(PyTypeError::new_err( + "unsupported mesh type for consistency check, only triangle meshes are supported", + )) } } diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index 6ad5bc1..4d60b27 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -497,6 +497,20 @@ impl PyTriMesh3d { ) }) } + + pub fn as_f32(&self) -> Option<&TriMesh3d> { + match &self.inner { + PyTriMesh3dData::F32(mesh) => Some(mesh), + _ => None, + } + } + + pub fn as_f64(&self) -> Option<&TriMesh3d> { + match &self.inner { + PyTriMesh3dData::F64(mesh) => Some(mesh), + _ => None, + } + } } #[gen_stub_pymethods] @@ -589,17 +603,28 @@ enum PyMeshWithDataData { MixedTriQuadF64(MeshWithData>), } -impl_from_mesh!(PyMeshWithData, MeshWithData> => PyMeshWithDataData::Tri3dF32); -impl_from_mesh!(PyMeshWithData, MeshWithData> => PyMeshWithDataData::Tri3dF64); -impl_from_mesh!(PyMeshWithData, MeshWithData> => PyMeshWithDataData::MixedTriQuadF32); -impl_from_mesh!(PyMeshWithData, MeshWithData> => PyMeshWithDataData::MixedTriQuadF64); - #[gen_stub_pyclass] #[pyclass] pub struct PyMeshWithData { inner: PyMeshWithDataData, } +/// Enum specifying the type of mesh contained in a `MeshWithData` +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +#[gen_stub_pyclass_enum] +#[pyclass] +pub enum MeshType { + /// 3D triangle mesh + Tri3d, + /// 3D mixed triangle and quad mesh + MixedTriQuad3d, +} + +impl_from_mesh!(PyMeshWithData, MeshWithData> => PyMeshWithDataData::Tri3dF32); +impl_from_mesh!(PyMeshWithData, MeshWithData> => PyMeshWithDataData::Tri3dF64); +impl_from_mesh!(PyMeshWithData, MeshWithData> => PyMeshWithDataData::MixedTriQuadF32); 
+impl_from_mesh!(PyMeshWithData, MeshWithData> => PyMeshWithDataData::MixedTriQuadF64); + impl PyMeshWithData { pub fn try_from_generic + 'static>( mut mesh: MeshWithData, @@ -612,6 +637,20 @@ impl PyMeshWithData { "Unsupported mesh type for MeshWithData. Only TriMesh3d and MixedTriQuadMesh3d with f32 or f64 scalar types are supported.", )) } + + pub fn as_tri_f32(&self) -> Option<&MeshWithData>> { + match &self.inner { + PyMeshWithDataData::Tri3dF32(mesh) => Some(mesh), + _ => None, + } + } + + pub fn as_tri_f64(&self) -> Option<&MeshWithData>> { + match &self.inner { + PyMeshWithDataData::Tri3dF64(mesh) => Some(mesh), + _ => None, + } + } } #[gen_stub_pymethods] @@ -630,6 +669,17 @@ impl PyMeshWithData { } } + /// Returns the type of the underlying mesh + #[getter] + pub fn mesh_cell_type(&self) -> MeshType { + match &self.inner { + PyMeshWithDataData::Tri3dF32(_) | PyMeshWithDataData::Tri3dF64(_) => MeshType::Tri3d, + PyMeshWithDataData::MixedTriQuadF32(_) | PyMeshWithDataData::MixedTriQuadF64(_) => { + MeshType::MixedTriQuad3d + } + } + } + /// Returns a copy of the contained mesh without associated data and attributes #[gen_stub(override_return_type(type_repr="typing.Union[PyTriMesh3d, PyMixedTriQuadMesh3d]", imports=()))] pub fn copy_mesh<'py>(&self, py: Python<'py>) -> PyResult> { diff --git a/pysplashsurf/src/reconstruction.rs b/pysplashsurf/src/reconstruction.rs index e2ba8d5..6a3e0bd 100644 --- a/pysplashsurf/src/reconstruction.rs +++ b/pysplashsurf/src/reconstruction.rs @@ -7,12 +7,12 @@ use crate::{ use anyhow::anyhow; use numpy as np; use numpy::prelude::*; -use numpy::{Element, PyArray2, PyReadonlyArray2, PyUntypedArray}; +use numpy::{Element, PyArray2, PyUntypedArray}; use pyo3::exceptions::PyTypeError; use pyo3::{Bound, prelude::*}; use pyo3_stub_gen::derive::*; use splashsurf_lib::{ - Aabb3d, GridDecompositionParameters, Index, Real, SpatialDecomposition, SurfaceReconstruction, + Aabb3d, GridDecompositionParameters, Real, SpatialDecomposition, SurfaceReconstruction, UniformGrid, nalgebra::Vector3, }; @@ -89,6 +89,22 @@ pub struct PyUniformGrid { impl_from_mesh!(PyUniformGrid, UniformGrid => PyUniformGridData::F32); impl_from_mesh!(PyUniformGrid, UniformGrid => PyUniformGridData::F64); +impl PyUniformGrid { + pub(crate) fn as_f32(&self) -> Option<&UniformGrid> { + match &self.inner { + PyUniformGridData::F32(grid) => Some(grid), + _ => None, + } + } + + pub(crate) fn as_f64(&self) -> Option<&UniformGrid> { + match &self.inner { + PyUniformGridData::F64(grid) => Some(grid), + _ => None, + } + } +} + enum PySurfaceReconstructionData { F32(SurfaceReconstruction), F64(SurfaceReconstruction), @@ -185,14 +201,14 @@ impl PySurfaceReconstruction { /// Note that all parameters use absolute distance units and are not relative to the particle radius. 
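Putting the pieces of this commit together, a hedged end-to-end sketch (values are illustrative; `smoothing_length` and `cube_size` are given as multiples of the particle radius, matching the `Parameters` construction in the function body; keyword arguments are used for the consistency check because the Rust signature takes `(mesh, grid)` while the generated stub lists `(grid, mesh)`):

    import numpy as np
    import pysplashsurf

    particles = np.random.rand(10_000, 3).astype(np.float64)
    recon = pysplashsurf.reconstruct_surface(
        particles,
        particle_radius=0.025,
        smoothing_length=2.0,
        cube_size=0.5,
        iso_surface_threshold=0.6,
    )

    # Check the reconstructed mesh for holes and non-manifold edges/vertices
    issues = pysplashsurf.check_mesh_consistency(
        mesh=recon.copy_mesh(),
        grid=recon.copy_grid(),
        check_closed=True,
        check_manifold=True,
    )
    if issues is not None:
        print(issues)  # human-readable description of the problems found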
#[gen_stub_pyfunction] #[pyfunction] -#[pyo3(name = "reconstruct_surface_multi")] +#[pyo3(name = "reconstruct_surface")] #[pyo3(signature = (particles, *, particle_radius, rest_density = 1000.0, smoothing_length, cube_size, iso_surface_threshold = 0.6, multi_threading = true, global_neighborhood_list = false, subdomain_grid = true, subdomain_grid_auto_disable = true, subdomain_num_cubes_per_dim = 64, aabb_min = None, aabb_max = None ))] -pub fn reconstruct_surface_multi<'py>( +pub fn reconstruct_surface<'py>( particles: &Bound<'py, PyUntypedArray>, particle_radius: f64, rest_density: f64, @@ -260,150 +276,3 @@ pub fn reconstruct_surface_multi<'py>( ))) } } - -/// Reconstruct the surface from only particle positions -pub fn reconstruct_surface_py( - particles: &[Vector3], - particle_radius: R, - rest_density: R, - smoothing_length: R, - cube_size: R, - iso_surface_threshold: R, - multi_threading: bool, - global_neighborhood_list: bool, - subdomain_grid: bool, - subdomain_grid_auto_disable: bool, - subdomain_num_cubes_per_dim: u32, - aabb_min: Option<[R; 3]>, - aabb_max: Option<[R; 3]>, -) -> SurfaceReconstruction { - let aabb; - if let (Some(aabb_min), Some(aabb_max)) = (aabb_min, aabb_max) { - // Convert the min and max arrays to Vector3 - aabb = Some(Aabb3d::new( - Vector3::from(aabb_min), - Vector3::from(aabb_max), - )); - } else { - aabb = None; - } - - let spatial_decomposition; - if subdomain_grid { - spatial_decomposition = SpatialDecomposition::UniformGrid(GridDecompositionParameters { - subdomain_num_cubes_per_dim, - auto_disable: subdomain_grid_auto_disable, - }); - } else { - spatial_decomposition = SpatialDecomposition::None; - } - - let params = splashsurf_lib::Parameters { - particle_radius, - rest_density, - // Compact support is twice the smoothing length - compact_support_radius: (smoothing_length * particle_radius) * R::from_float(2.0), - cube_size: cube_size * particle_radius, - iso_surface_threshold, - particle_aabb: aabb, - enable_multi_threading: multi_threading, - spatial_decomposition, - global_neighborhood_list, - }; - - let surface = splashsurf_lib::reconstruct_surface(&particles, ¶ms).unwrap(); - - surface -} - -#[pyfunction] -#[pyo3(name = "reconstruct_surface_f32")] -#[pyo3(signature = (particles, *, particle_radius, rest_density, - smoothing_length, cube_size, iso_surface_threshold, multi_threading=true, - global_neighborhood_list=false, subdomain_grid=true, subdomain_grid_auto_disable=true, subdomain_num_cubes_per_dim=64, - aabb_min = None, aabb_max = None -))] -pub fn reconstruct_surface_py_f32<'py>( - particles: &Bound<'py, PyArray2>, - particle_radius: f32, - rest_density: f32, - smoothing_length: f32, - cube_size: f32, - iso_surface_threshold: f32, - multi_threading: bool, - global_neighborhood_list: bool, - subdomain_grid: bool, - subdomain_grid_auto_disable: bool, - subdomain_num_cubes_per_dim: u32, - aabb_min: Option<[f32; 3]>, - aabb_max: Option<[f32; 3]>, -) -> PyResult { - let particles: PyReadonlyArray2 = particles.extract()?; - - let particle_positions = particles.as_slice()?; - let particle_positions: &[Vector3] = bytemuck::cast_slice(particle_positions); - - let reconstruction = reconstruct_surface_py::( - particle_positions, - particle_radius, - rest_density, - smoothing_length, - cube_size, - iso_surface_threshold, - multi_threading, - global_neighborhood_list, - subdomain_grid, - subdomain_grid_auto_disable, - subdomain_num_cubes_per_dim, - aabb_min, - aabb_max, - ); - - Ok(SurfaceReconstructionF32::new(reconstruction.to_owned())) -} - 
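For reference, a minimal usage sketch of the unified `reconstruct_surface` binding introduced above, which replaces the removed `reconstruct_surface_f32`/`reconstruct_surface_f64` wrappers. The particle data and parameter values are placeholders for illustration only; per the docstring above, the distance parameters use absolute units, and the dtype of the particle array (float32 or float64) selects the scalar precision used internally.

    import numpy as np
    import pysplashsurf

    # Placeholder particle positions as an Nx3 array
    particles = np.random.rand(10_000, 3).astype(np.float32)

    rec = pysplashsurf.reconstruct_surface(
        particles,
        particle_radius=0.025,          # illustrative values, absolute distance units
        smoothing_length=0.05,
        cube_size=0.0125,
        iso_surface_threshold=0.6,
        global_neighborhood_list=True,  # needed for copy_particle_neighbors() below
    )

    mesh = rec.copy_mesh()                     # reconstructed triangle mesh
    densities = rec.copy_particle_densities()  # per-particle densities, if computed
    neighbors = rec.copy_particle_neighbors()  # per-particle neighbor lists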
-#[pyfunction] -#[pyo3(name = "reconstruct_surface_f64")] -#[pyo3(signature = (particles, *, particle_radius, rest_density, - smoothing_length, cube_size, iso_surface_threshold, multi_threading=true, - global_neighborhood_list=false, subdomain_grid=true, subdomain_grid_auto_disable=true, subdomain_num_cubes_per_dim=64, - aabb_min = None, aabb_max = None -))] -pub fn reconstruct_surface_py_f64<'py>( - particles: &Bound<'py, PyArray2>, - particle_radius: f64, - rest_density: f64, - smoothing_length: f64, - cube_size: f64, - iso_surface_threshold: f64, - multi_threading: bool, - global_neighborhood_list: bool, - subdomain_grid: bool, - subdomain_grid_auto_disable: bool, - subdomain_num_cubes_per_dim: u32, - aabb_min: Option<[f64; 3]>, - aabb_max: Option<[f64; 3]>, -) -> PyResult { - let particles: PyReadonlyArray2 = particles.extract()?; - - let particle_positions = particles.as_slice()?; - let particle_positions: &[Vector3] = bytemuck::cast_slice(particle_positions); - - let reconstruction = reconstruct_surface_py::( - particle_positions, - particle_radius, - rest_density, - smoothing_length, - cube_size, - iso_surface_threshold, - multi_threading, - global_neighborhood_list, - subdomain_grid, - subdomain_grid_auto_disable, - subdomain_num_cubes_per_dim, - aabb_min, - aabb_max, - ); - - Ok(SurfaceReconstructionF64::new(reconstruction.to_owned())) -} From 3eabe97927efe82733a23042848f3e27dc272f36 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Thu, 28 Aug 2025 00:32:30 +0200 Subject: [PATCH 12/63] Py: Clean up --- pysplashsurf/pysplashsurf/__init__.py | 39 ------- pysplashsurf/pysplashsurf/pysplashsurf.pyi | 7 +- pysplashsurf/src/lib.rs | 6 +- pysplashsurf/src/marching_cubes.rs | 1 + pysplashsurf/src/mesh.rs | 28 ++++++ pysplashsurf/src/post_processing.rs | 112 +++++++++------------ 6 files changed, 86 insertions(+), 107 deletions(-) diff --git a/pysplashsurf/pysplashsurf/__init__.py b/pysplashsurf/pysplashsurf/__init__.py index c69712d..13c2a30 100644 --- a/pysplashsurf/pysplashsurf/__init__.py +++ b/pysplashsurf/pysplashsurf/__init__.py @@ -199,45 +199,6 @@ def create_aabb_object_from_points(points): return Aabb3dF64.from_points(points) else: raise ValueError("Invalid data type (only float32 and float64 are supported, consider explicitly specifying the dtype for points)") - -def marching_cubes_cleanup( - mesh, - grid, - max_iter: int = 5, - keep_vertices: bool = False -): - """Mesh simplification designed for marching cubes surfaces meshes inspired by the "Compact Contouring"/"Mesh displacement" approach by Doug Moore and Joe Warren - - See Moore and Warren: `"Mesh Displacement: An Improved Contouring Method for Trivariate Data" `_ (1991) - or Moore and Warren: "Compact Isocontours from Sampled Data" in "Graphics Gems III" (1992). 
- - Parameters - ---------- - mesh: TriMesh3dF32 | TriMesh3dF64 | TriMeshWithDataF32 | TriMeshWithDataF64 - Mesh object to simplify - - grid: UniformGridF32 | UniformGridF64 - Uniform grid object that was used to construct the mesh - - max_iter: int - Maximum number of iterations - - keep_vertices: bool - Flag to keep vertices - - Returns - ------- - list - vertex connectivity list of the simplified mesh - """ - if type(mesh) is TriMesh3dF32 or type(mesh) is TriMeshWithDataF32: - return marching_cubes_cleanup_f32(mesh, grid, max_iter=max_iter, keep_vertices=keep_vertices) - - elif type(mesh) is TriMesh3dF64 or type(mesh) is TriMeshWithDataF64: - return marching_cubes_cleanup_f64(mesh, grid, max_iter=max_iter, keep_vertices=keep_vertices) - - else: - raise ValueError("Invalid mesh type") def decimation( mesh, diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index bba7c63..e773ca7 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -685,11 +685,16 @@ class MeshType(Enum): 3D mixed triangle and quad mesh """ -def check_mesh_consistency(grid:PyUniformGrid, mesh:typing.Any, *, check_closed:builtins.bool=True, check_manifold:builtins.bool=True, debug:builtins.bool=False) -> typing.Optional[builtins.str]: +def check_mesh_consistency(mesh:typing.Union[PyTriMesh3d, PyMeshWithData], grid:PyUniformGrid, *, check_closed:builtins.bool=True, check_manifold:builtins.bool=True, debug:builtins.bool=False) -> typing.Optional[builtins.str]: r""" Checks the consistency of a reconstructed surface mesh (watertightness, manifoldness), optionally returns a string with details if problems are found """ +def marching_cubes_cleanup(mesh:typing.Union[PyTriMesh3d, PyMeshWithData], grid:PyUniformGrid, *, max_rel_snap_dist:typing.Optional[builtins.float]=None, max_iter:builtins.int=5, keep_vertices:builtins.bool=False) -> None: + r""" + Mesh simplification designed for marching cubes surfaces meshes inspired by the "Compact Contouring"/"Mesh displacement" approach by Doug Moore and Joe Warren + """ + def reconstruct_surface(particles:numpy.typing.NDArray[typing.Any], *, particle_radius:builtins.float, rest_density:builtins.float=1000.0, smoothing_length:builtins.float, cube_size:builtins.float, iso_surface_threshold:builtins.float=0.6, multi_threading:builtins.bool=True, global_neighborhood_list:builtins.bool=False, subdomain_grid:builtins.bool=True, subdomain_grid_auto_disable:builtins.bool=True, subdomain_num_cubes_per_dim:builtins.int=64, aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None) -> PySurfaceReconstruction: r""" Performs a surface reconstruction from the given particles without additional post-processing diff --git a/pysplashsurf/src/lib.rs b/pysplashsurf/src/lib.rs index b5206e9..64142a3 100644 --- a/pysplashsurf/src/lib.rs +++ b/pysplashsurf/src/lib.rs @@ -69,11 +69,7 @@ fn pysplashsurf(m: &Bound<'_, PyModule>) -> PyResult<()> { )?); let _ = m.add_function(wrap_pyfunction!( - post_processing::marching_cubes_cleanup_py_f32, - m - )?); - let _ = m.add_function(wrap_pyfunction!( - post_processing::marching_cubes_cleanup_py_f64, + post_processing::marching_cubes_cleanup, m )?); diff --git a/pysplashsurf/src/marching_cubes.rs b/pysplashsurf/src/marching_cubes.rs index dbab1ca..8350b74 100644 --- a/pysplashsurf/src/marching_cubes.rs +++ b/pysplashsurf/src/marching_cubes.rs @@ -10,6 +10,7 @@ use pyo3_stub_gen::derive::*; 
#[pyo3(name = "check_mesh_consistency")] #[pyo3(signature = (mesh, grid, *, check_closed = true, check_manifold = true, debug = false))] pub fn check_mesh_consistency<'py>( + #[gen_stub(override_type(type_repr="typing.Union[PyTriMesh3d, PyMeshWithData]", imports=()))] mesh: Bound<'py, PyAny>, grid: &PyUniformGrid, check_closed: bool, diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index 4d60b27..664cd99 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -511,6 +511,20 @@ impl PyTriMesh3d { _ => None, } } + + pub fn as_f32_mut(&mut self) -> Option<&mut TriMesh3d> { + match &mut self.inner { + PyTriMesh3dData::F32(mesh) => Some(mesh), + _ => None, + } + } + + pub fn as_f64_mut(&mut self) -> Option<&mut TriMesh3d> { + match &mut self.inner { + PyTriMesh3dData::F64(mesh) => Some(mesh), + _ => None, + } + } } #[gen_stub_pymethods] @@ -651,6 +665,20 @@ impl PyMeshWithData { _ => None, } } + + pub fn as_tri_f32_mut(&mut self) -> Option<&mut MeshWithData>> { + match &mut self.inner { + PyMeshWithDataData::Tri3dF32(mesh) => Some(mesh), + _ => None, + } + } + + pub fn as_tri_f64_mut(&mut self) -> Option<&mut MeshWithData>> { + match &mut self.inner { + PyMeshWithDataData::Tri3dF64(mesh) => Some(mesh), + _ => None, + } + } } #[gen_stub_pymethods] diff --git a/pysplashsurf/src/post_processing.rs b/pysplashsurf/src/post_processing.rs index 9fb3286..347c90e 100644 --- a/pysplashsurf/src/post_processing.rs +++ b/pysplashsurf/src/post_processing.rs @@ -1,16 +1,16 @@ use ndarray::ArrayViewMut2; use numpy::{PyArray2, PyArrayMethods}; -use pyo3::{exceptions::PyValueError, prelude::*}; +use pyo3::exceptions::{PyTypeError, PyValueError}; +use pyo3::prelude::*; +use pyo3_stub_gen::derive::gen_stub_pyfunction; use splashsurf_lib::nalgebra::Vector3; -use crate::{ - mesh::{ - MixedTriQuadMesh3dF32, MixedTriQuadMesh3dF64, MixedTriQuadMeshWithDataF32, - MixedTriQuadMeshWithDataF64, TriMesh3dF32, TriMesh3dF64, TriMeshWithDataF32, - TriMeshWithDataF64, - }, - uniform_grid::{UniformGridF32, UniformGridF64}, +use crate::mesh::{ + MeshType, MixedTriQuadMesh3dF32, MixedTriQuadMesh3dF64, MixedTriQuadMeshWithDataF32, + MixedTriQuadMeshWithDataF64, PyMeshWithData, PyTriMesh3d, TriMesh3dF32, TriMesh3dF64, + TriMeshWithDataF32, TriMeshWithDataF64, }; +use crate::reconstruction::PyUniformGrid; #[pyfunction] #[pyo3(name = "convert_tris_to_quads_f64")] @@ -258,66 +258,54 @@ pub fn decimation_py_f32<'py>( } } +/// Mesh simplification designed for marching cubes surfaces meshes inspired by the "Compact Contouring"/"Mesh displacement" approach by Doug Moore and Joe Warren +#[gen_stub_pyfunction] #[pyfunction] -#[pyo3(name = "marching_cubes_cleanup_f64")] +#[pyo3(name = "marching_cubes_cleanup")] #[pyo3(signature = (mesh, grid, *, max_rel_snap_dist = None, max_iter = 5, keep_vertices = false))] -pub fn marching_cubes_cleanup_py_f64<'py>( - py: Python, - mesh: PyObject, - grid: &UniformGridF64, +pub fn marching_cubes_cleanup<'py>( + #[gen_stub(override_type(type_repr="typing.Union[PyTriMesh3d, PyMeshWithData]", imports=()))] + mesh: Bound<'py, PyAny>, + grid: &PyUniformGrid, max_rel_snap_dist: Option, max_iter: usize, keep_vertices: bool, -) -> PyResult>> { - if let Ok(mesh) = mesh.downcast_bound::(py) { - Ok(splashsurf_lib::postprocessing::marching_cubes_cleanup( - &mut mesh.borrow_mut().inner, - &grid.inner, - max_rel_snap_dist, - max_iter, - keep_vertices, - )) - } else if let Ok(mesh) = mesh.downcast_bound::(py) { - Ok(splashsurf_lib::postprocessing::marching_cubes_cleanup( - &mut 
mesh.borrow_mut().inner.mesh, - &grid.inner, - max_rel_snap_dist, - max_iter, - keep_vertices, - )) - } else { - Err(PyErr::new::("Invalid mesh type")) - } -} +) -> PyResult<()> { + use splashsurf_lib::postprocessing::marching_cubes_cleanup as cleanup; -#[pyfunction] -#[pyo3(name = "marching_cubes_cleanup_f32")] -#[pyo3(signature = (mesh, grid, *, max_rel_snap_dist = None, max_iter = 5, keep_vertices = false))] -pub fn marching_cubes_cleanup_py_f32<'py>( - py: Python, - mesh: PyObject, - grid: &UniformGridF32, - max_rel_snap_dist: Option, - max_iter: usize, - keep_vertices: bool, -) -> PyResult>> { - if let Ok(mesh) = mesh.downcast_bound::(py) { - Ok(splashsurf_lib::postprocessing::marching_cubes_cleanup( - &mut mesh.borrow_mut().inner, - &grid.inner, - max_rel_snap_dist, - max_iter, - keep_vertices, - )) - } else if let Ok(mesh) = mesh.downcast_bound::(py) { - Ok(splashsurf_lib::postprocessing::marching_cubes_cleanup( - &mut mesh.borrow_mut().inner.mesh, - &grid.inner, - max_rel_snap_dist, - max_iter, - keep_vertices, - )) + if let Ok(mesh) = mesh.downcast::() { + let mut mesh = mesh.borrow_mut(); + if let (Some(grid), Some(mesh)) = (grid.as_f32(), mesh.as_f32_mut()) { + let max_rel_snap_dist = max_rel_snap_dist.map(|d| d as f32); + cleanup(mesh, grid, max_rel_snap_dist, max_iter, keep_vertices); + } else if let (Some(grid), Some(mesh)) = (grid.as_f64(), mesh.as_f64_mut()) { + cleanup(mesh, grid, max_rel_snap_dist, max_iter, keep_vertices); + } else { + return Err(PyTypeError::new_err( + "invalid combination of grid and mesh scalar data types", + )); + } + } else if let Ok(mesh) = mesh.downcast::() + && let mut mesh = mesh.borrow_mut() + && mesh.mesh_cell_type() == MeshType::Tri3d + { + if let (Some(grid), Some(mesh)) = (grid.as_f32(), mesh.as_tri_f32_mut()) { + let mesh = &mut mesh.mesh; + let max_rel_snap_dist = max_rel_snap_dist.map(|d| d as f32); + cleanup(mesh, grid, max_rel_snap_dist, max_iter, keep_vertices); + } else if let (Some(grid), Some(mesh)) = (grid.as_f64(), mesh.as_tri_f64_mut()) { + let mesh = &mut mesh.mesh; + cleanup(mesh, grid, max_rel_snap_dist, max_iter, keep_vertices); + } else { + return Err(PyTypeError::new_err( + "invalid combination of grid and mesh scalar data types", + )); + } } else { - Err(PyErr::new::("Invalid mesh type")) + return Err(PyTypeError::new_err( + "unsupported mesh type for marching cubes clean up, only triangle meshes are supported", + )); } + + Ok(()) } From a8c8ebc8311dc088ee1df78bc0032699dc3fb8a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Thu, 28 Aug 2025 00:46:57 +0200 Subject: [PATCH 13/63] Py: Refactor --- pysplashsurf/pysplashsurf/pysplashsurf.pyi | 170 +++++++-------------- pysplashsurf/src/lib.rs | 88 +++++------ pysplashsurf/src/marching_cubes.rs | 4 +- pysplashsurf/src/mesh.rs | 5 +- pysplashsurf/src/post_processing.rs | 11 +- pysplashsurf/src/reconstruction.rs | 97 +----------- pysplashsurf/src/uniform_grid.rs | 43 ++++-- 7 files changed, 139 insertions(+), 279 deletions(-) diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index e773ca7..e81406a 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -195,6 +195,37 @@ class Aabb3dF64: Returns the smallest cubical AABB with the same center that encloses this AABB """ +class MeshWithData: + @property + def dtype(self) -> numpy.dtype: + r""" + Returns the numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) + """ + @property + 
def mesh_cell_type(self) -> MeshType: + r""" + Returns the type of the underlying mesh + """ + def copy_mesh(self) -> typing.Union[TriMesh3d, MixedTriQuadMesh3d]: + r""" + Returns a copy of the contained mesh without associated data and attributes + """ + def copy_vertices(self) -> numpy.typing.NDArray[typing.Any]: + r""" + Returns a copy of the `Nx3` array of vertex positions + """ + +class MixedTriQuadMesh3d: + @property + def dtype(self) -> numpy.dtype: + r""" + Returns the numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) + """ + def copy_vertices(self) -> numpy.typing.NDArray[typing.Any]: + r""" + Returns a copy of the `Nx3` array of vertex positions + """ + class MixedTriQuadMesh3dF32: r""" MixedTriQuadMesh3d wrapper @@ -325,81 +356,6 @@ class MixedTriQuadMeshWithDataF64: Get all registered cell attribute names """ -class PyMeshWithData: - @property - def dtype(self) -> numpy.dtype: - r""" - Returns the numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) - """ - @property - def mesh_cell_type(self) -> MeshType: - r""" - Returns the type of the underlying mesh - """ - def copy_mesh(self) -> typing.Union[PyTriMesh3d, PyMixedTriQuadMesh3d]: - r""" - Returns a copy of the contained mesh without associated data and attributes - """ - def copy_vertices(self) -> numpy.typing.NDArray[typing.Any]: - r""" - Returns a copy of the `Nx3` array of vertex positions - """ - -class PyMixedTriQuadMesh3d: - @property - def dtype(self) -> numpy.dtype: - r""" - Returns the numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) - """ - def copy_vertices(self) -> numpy.typing.NDArray[typing.Any]: - r""" - Returns a copy of the `Nx3` array of vertex positions - """ - -class PySurfaceReconstruction: - r""" - Struct containing results of the surface reconstruction including the mesh, grid parameters and optional particle data - """ - def copy_mesh(self) -> PyTriMesh3d: - r""" - Returns a copy of the surface mesh of the reconstruction - """ - def copy_grid(self) -> PyUniformGrid: - r""" - Returns a copy of the uniform grid parameters used for the reconstruction - """ - def copy_particle_densities(self) -> typing.Optional[numpy.typing.NDArray[typing.Any]]: - r""" - Returns a copy of the particle densities computed during the reconstruction - """ - def copy_particle_neighbors(self) -> typing.Optional[builtins.list[builtins.list[builtins.int]]]: - r""" - Returns a copy of the per-particle neighborhood lists computed during the reconstruction if available - - The neighborhood lists are only available if the flag for global neighborhood list was set in the reconstruction parameters. - """ - -class PyTriMesh3d: - @property - def dtype(self) -> numpy.dtype: - r""" - Returns the numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) - """ - def copy_vertices(self) -> numpy.typing.NDArray[typing.Any]: - r""" - Returns a copy of the `Nx3` array of vertex positions - """ - def copy_triangles(self) -> numpy.typing.NDArray[numpy.uint64]: - r""" - Returns a copy of the `Mx3` array of vertex indices per triangle - """ - -class PyUniformGrid: - r""" - Struct containing the parameters of a uniform grid used for the surface reconstruction - """ - ... 
- class SphInterpolatorF32: r""" SphInterpolator wrapper @@ -436,50 +392,42 @@ class SphInterpolatorF64: Interpolates a vectorial per particle quantity to the given points, panics if the there are less per-particles values than particles """ -class SurfaceReconstructionF32: +class SurfaceReconstruction: r""" - SurfaceReconstruction wrapper + Struct containing results of the surface reconstruction including the mesh, grid parameters and optional particle data """ - @property - def mesh(self) -> TriMesh3dF32: + def copy_mesh(self) -> TriMesh3d: r""" - PyTrimesh3d clone of the contained mesh + Returns a copy of the surface mesh of the reconstruction """ - @property - def grid(self) -> UniformGridF32: + def copy_grid(self) -> UniformGrid: r""" - PyUniformGrid clone of the contained grid + Returns a copy of the uniform grid parameters used for the reconstruction """ - def particle_densities(self) -> builtins.list[builtins.float]: + def copy_particle_densities(self) -> typing.Optional[numpy.typing.NDArray[typing.Any]]: r""" - Returns a reference to the global particle density vector if computed during the reconstruction (currently, all reconstruction approaches return this) + Returns a copy of the particle densities computed during the reconstruction """ - def particle_neighbors(self) -> typing.Optional[builtins.list[builtins.list[builtins.int]]]: + def copy_particle_neighbors(self) -> typing.Optional[builtins.list[builtins.list[builtins.int]]]: r""" - Returns a reference to the global list of per-particle neighborhood lists if computed during the reconstruction (`None` if not specified in the parameters) + Returns a copy of the per-particle neighborhood lists computed during the reconstruction if available + + The neighborhood lists are only available if the flag for global neighborhood list was set in the reconstruction parameters. """ -class SurfaceReconstructionF64: - r""" - SurfaceReconstruction wrapper - """ - @property - def mesh(self) -> TriMesh3dF64: - r""" - PyTrimesh3d clone of the contained mesh - """ +class TriMesh3d: @property - def grid(self) -> UniformGridF64: + def dtype(self) -> numpy.dtype: r""" - PyUniformGrid clone of the contained grid + Returns the numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) """ - def particle_densities(self) -> builtins.list[builtins.float]: + def copy_vertices(self) -> numpy.typing.NDArray[typing.Any]: r""" - Returns a reference to the global particle density vector if computed during the reconstruction (currently, all reconstruction approaches return this) + Returns a copy of the `Nx3` array of vertex positions """ - def particle_neighbors(self) -> typing.Optional[builtins.list[builtins.list[builtins.int]]]: + def copy_triangles(self) -> numpy.typing.NDArray[numpy.uint64]: r""" - Returns a reference to the global list of per-particle neighborhood lists if computed during the reconstruction (`None` if not specified in the parameters) + Returns a copy of the `Mx3` array of vertex indices per triangle """ class TriMesh3dF32: @@ -660,15 +608,9 @@ class TriMeshWithDataF64: Get all registered cell attribute names """ -class UniformGridF32: - r""" - UniformGrid wrapper - """ - ... - -class UniformGridF64: +class UniformGrid: r""" - UniformGrid wrapper + Struct containing the parameters of the uniform grid used for the surface reconstruction """ ... 
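To illustrate how the renamed classes above (`TriMesh3d`, `MeshWithData`, `UniformGrid`, `SurfaceReconstruction`) interact with the free functions declared in the next hunk, a short hypothetical post-processing sequence follows; the reconstruction call and its parameter values are assumptions carried over from the earlier example, not part of this patch.

    import numpy as np
    import pysplashsurf

    particles = np.random.rand(10_000, 3).astype(np.float64)
    rec = pysplashsurf.reconstruct_surface(
        particles, particle_radius=0.025, smoothing_length=0.05, cube_size=0.0125,
    )
    mesh = rec.copy_mesh()  # TriMesh3d with the same scalar type as the particle array
    grid = rec.copy_grid()  # UniformGrid parameters used for the reconstruction

    # In-place simplification of the raw marching cubes output
    pysplashsurf.marching_cubes_cleanup(mesh, grid, max_iter=5, keep_vertices=False)

    # Returns None if the mesh passes the checks, otherwise a string describing the problems
    report = pysplashsurf.check_mesh_consistency(mesh, grid)
    if report is not None:
        print(report)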
@@ -685,24 +627,24 @@ class MeshType(Enum): 3D mixed triangle and quad mesh """ -def check_mesh_consistency(mesh:typing.Union[PyTriMesh3d, PyMeshWithData], grid:PyUniformGrid, *, check_closed:builtins.bool=True, check_manifold:builtins.bool=True, debug:builtins.bool=False) -> typing.Optional[builtins.str]: +def check_mesh_consistency(mesh:typing.Union[TriMesh3d, MeshWithData], grid:UniformGrid, *, check_closed:builtins.bool=True, check_manifold:builtins.bool=True, debug:builtins.bool=False) -> typing.Optional[builtins.str]: r""" Checks the consistency of a reconstructed surface mesh (watertightness, manifoldness), optionally returns a string with details if problems are found """ -def marching_cubes_cleanup(mesh:typing.Union[PyTriMesh3d, PyMeshWithData], grid:PyUniformGrid, *, max_rel_snap_dist:typing.Optional[builtins.float]=None, max_iter:builtins.int=5, keep_vertices:builtins.bool=False) -> None: +def marching_cubes_cleanup(mesh:typing.Union[TriMesh3d, MeshWithData], grid:UniformGrid, *, max_rel_snap_dist:typing.Optional[builtins.float]=None, max_iter:builtins.int=5, keep_vertices:builtins.bool=False) -> None: r""" Mesh simplification designed for marching cubes surfaces meshes inspired by the "Compact Contouring"/"Mesh displacement" approach by Doug Moore and Joe Warren """ -def reconstruct_surface(particles:numpy.typing.NDArray[typing.Any], *, particle_radius:builtins.float, rest_density:builtins.float=1000.0, smoothing_length:builtins.float, cube_size:builtins.float, iso_surface_threshold:builtins.float=0.6, multi_threading:builtins.bool=True, global_neighborhood_list:builtins.bool=False, subdomain_grid:builtins.bool=True, subdomain_grid_auto_disable:builtins.bool=True, subdomain_num_cubes_per_dim:builtins.int=64, aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None) -> PySurfaceReconstruction: +def reconstruct_surface(particles:numpy.typing.NDArray[typing.Any], *, particle_radius:builtins.float, rest_density:builtins.float=1000.0, smoothing_length:builtins.float, cube_size:builtins.float, iso_surface_threshold:builtins.float=0.6, multi_threading:builtins.bool=True, global_neighborhood_list:builtins.bool=False, subdomain_grid:builtins.bool=True, subdomain_grid_auto_disable:builtins.bool=True, subdomain_num_cubes_per_dim:builtins.int=64, aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None) -> SurfaceReconstruction: r""" Performs a surface reconstruction from the given particles without additional post-processing Note that all parameters use absolute distance units and are not relative to the particle radius. 
""" -def reconstruction_pipeline(particles:numpy.typing.NDArray[typing.Any], *, attributes_to_interpolate:typing.Optional[dict]=None, particle_radius:builtins.float, rest_density:builtins.float=1000.0, smoothing_length:builtins.float, cube_size:builtins.float, iso_surface_threshold:builtins.float=0.6, aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None, multi_threading:builtins.bool=True, subdomain_grid:builtins.bool=True, subdomain_grid_auto_disable:builtins.bool=True, subdomain_num_cubes_per_dim:builtins.int=64, check_mesh_closed:builtins.bool=False, check_mesh_manifold:builtins.bool=False, check_mesh_orientation:builtins.bool=False, check_mesh_debug:builtins.bool=False, mesh_cleanup:builtins.bool=False, mesh_cleanup_snap_dist:typing.Optional[builtins.float]=None, decimate_barnacles:builtins.bool=False, keep_vertices:builtins.bool=False, compute_normals:builtins.bool=False, sph_normals:builtins.bool=False, normals_smoothing_iters:typing.Optional[builtins.int]=None, mesh_smoothing_iters:typing.Optional[builtins.int]=None, mesh_smoothing_weights:builtins.bool=True, mesh_smoothing_weights_normalization:builtins.float=13.0, generate_quads:builtins.bool=False, quad_max_edge_diag_ratio:builtins.float=1.75, quad_max_normal_angle:builtins.float=10.0, quad_max_interior_angle:builtins.float=135.0, output_mesh_smoothing_weights:builtins.bool=False, output_raw_normals:builtins.bool=False, output_raw_mesh:builtins.bool=False, mesh_aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, mesh_aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None, mesh_aabb_clamp_vertices:builtins.bool=True, dtype:typing.Optional[numpy.dtype]=None) -> PyMeshWithData: +def reconstruction_pipeline(particles:numpy.typing.NDArray[typing.Any], *, attributes_to_interpolate:typing.Optional[dict]=None, particle_radius:builtins.float, rest_density:builtins.float=1000.0, smoothing_length:builtins.float, cube_size:builtins.float, iso_surface_threshold:builtins.float=0.6, aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None, multi_threading:builtins.bool=True, subdomain_grid:builtins.bool=True, subdomain_grid_auto_disable:builtins.bool=True, subdomain_num_cubes_per_dim:builtins.int=64, check_mesh_closed:builtins.bool=False, check_mesh_manifold:builtins.bool=False, check_mesh_orientation:builtins.bool=False, check_mesh_debug:builtins.bool=False, mesh_cleanup:builtins.bool=False, mesh_cleanup_snap_dist:typing.Optional[builtins.float]=None, decimate_barnacles:builtins.bool=False, keep_vertices:builtins.bool=False, compute_normals:builtins.bool=False, sph_normals:builtins.bool=False, normals_smoothing_iters:typing.Optional[builtins.int]=None, mesh_smoothing_iters:typing.Optional[builtins.int]=None, mesh_smoothing_weights:builtins.bool=True, mesh_smoothing_weights_normalization:builtins.float=13.0, generate_quads:builtins.bool=False, quad_max_edge_diag_ratio:builtins.float=1.75, quad_max_normal_angle:builtins.float=10.0, quad_max_interior_angle:builtins.float=135.0, output_mesh_smoothing_weights:builtins.bool=False, output_raw_normals:builtins.bool=False, output_raw_mesh:builtins.bool=False, mesh_aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, mesh_aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None, mesh_aabb_clamp_vertices:builtins.bool=True, dtype:typing.Optional[numpy.dtype]=None) -> MeshWithData: r""" Runs the surface reconstruction pipeline 
for the given particle positions with optional post-processing diff --git a/pysplashsurf/src/lib.rs b/pysplashsurf/src/lib.rs index 64142a3..5402ed3 100644 --- a/pysplashsurf/src/lib.rs +++ b/pysplashsurf/src/lib.rs @@ -28,86 +28,80 @@ pub(crate) mod utils; /// Support reconstructing Level-Set surfaces from particle clouds or from regular grids. #[pymodule] fn pysplashsurf(m: &Bound<'_, PyModule>) -> PyResult<()> { - let _ = m.add_class::()?; - let _ = m.add_class::()?; - let _ = m.add_class::()?; - let _ = m.add_class::()?; + m.add_class::()?; + m.add_class::()?; + m.add_class::()?; + m.add_class::()?; - let _ = m.add_class::()?; - let _ = m.add_class::()?; - let _ = m.add_class::()?; - let _ = m.add_class::()?; + m.add_class::()?; + m.add_class::()?; + m.add_class::()?; + m.add_class::()?; - let _ = m.add_class::()?; - let _ = m.add_class::()?; - let _ = m.add_class::()?; + m.add_class::()?; + m.add_class::()?; + m.add_class::()?; - let _ = m.add_class::()?; - let _ = m.add_class::()?; + m.add_class::()?; + m.add_class::()?; - let _ = m.add_class::()?; - let _ = m.add_class::()?; + m.add_class::()?; + m.add_class::()?; - let _ = m.add_class::()?; - let _ = m.add_class::()?; + m.add_class::()?; + m.add_class::()?; - let _ = m.add_class::()?; - let _ = m.add_class::()?; + m.add_function(wrap_pyfunction!(reconstruction::reconstruct_surface, m)?)?; - let _ = m.add_class::()?; - let _ = m.add_class::()?; - - let _ = m.add_function(wrap_pyfunction!(reconstruction::reconstruct_surface, m)?); - - let _ = m.add_function(wrap_pyfunction!( + m.add_function(wrap_pyfunction!( post_processing::convert_tris_to_quads_py_f32, m - )?); - let _ = m.add_function(wrap_pyfunction!( + )?)?; + m.add_function(wrap_pyfunction!( post_processing::convert_tris_to_quads_py_f64, m - )?); + )?)?; - let _ = m.add_function(wrap_pyfunction!( + m.add_function(wrap_pyfunction!( post_processing::marching_cubes_cleanup, m - )?); + )?)?; - let _ = m.add_function(wrap_pyfunction!(marching_cubes::check_mesh_consistency, m)?); + m.add_function(wrap_pyfunction!(marching_cubes::check_mesh_consistency, m)?)?; - let _ = m.add_function(wrap_pyfunction!(post_processing::decimation_py_f32, m)?); - let _ = m.add_function(wrap_pyfunction!(post_processing::decimation_py_f64, m)?); + m.add_function(wrap_pyfunction!(post_processing::decimation_py_f32, m)?)?; + m.add_function(wrap_pyfunction!(post_processing::decimation_py_f64, m)?)?; - let _ = m.add_function(wrap_pyfunction!( + m.add_function(wrap_pyfunction!( post_processing::par_laplacian_smoothing_inplace_py_f32, m - )?); - let _ = m.add_function(wrap_pyfunction!( + )?)?; + m.add_function(wrap_pyfunction!( post_processing::par_laplacian_smoothing_inplace_py_f64, m - )?); + )?)?; - let _ = m.add_function(wrap_pyfunction!( + m.add_function(wrap_pyfunction!( post_processing::par_laplacian_smoothing_normals_inplace_py_f32, m - )?); - let _ = m.add_function(wrap_pyfunction!( + )?)?; + m.add_function(wrap_pyfunction!( post_processing::par_laplacian_smoothing_normals_inplace_py_f64, m - )?); + )?)?; - let _ = m.add_function(wrap_pyfunction!( + m.add_function(wrap_pyfunction!( neighborhood_search::neighborhood_search_spatial_hashing_parallel_py_f32, m - )?); - let _ = m.add_function(wrap_pyfunction!( + )?)?; + m.add_function(wrap_pyfunction!( neighborhood_search::neighborhood_search_spatial_hashing_parallel_py_f64, m - )?); + )?)?; - let _ = m.add_function(wrap_pyfunction!(pipeline::reconstruction_pipeline, m)?); + m.add_function(wrap_pyfunction!(pipeline::reconstruction_pipeline, m)?)?; - let _ 
= m.add_function(wrap_pyfunction!(run_splashsurf_py, m)?); + m.add_function(wrap_pyfunction!(run_splashsurf_py, m)?)?; Ok(()) } diff --git a/pysplashsurf/src/marching_cubes.rs b/pysplashsurf/src/marching_cubes.rs index 8350b74..408edeb 100644 --- a/pysplashsurf/src/marching_cubes.rs +++ b/pysplashsurf/src/marching_cubes.rs @@ -1,5 +1,5 @@ use crate::mesh::{MeshType, PyMeshWithData, PyTriMesh3d}; -use crate::reconstruction::PyUniformGrid; +use crate::uniform_grid::PyUniformGrid; use pyo3::exceptions::PyTypeError; use pyo3::prelude::*; use pyo3_stub_gen::derive::*; @@ -10,7 +10,7 @@ use pyo3_stub_gen::derive::*; #[pyo3(name = "check_mesh_consistency")] #[pyo3(signature = (mesh, grid, *, check_closed = true, check_manifold = true, debug = false))] pub fn check_mesh_consistency<'py>( - #[gen_stub(override_type(type_repr="typing.Union[PyTriMesh3d, PyMeshWithData]", imports=()))] + #[gen_stub(override_type(type_repr="typing.Union[TriMesh3d, MeshWithData]", imports=()))] mesh: Bound<'py, PyAny>, grid: &PyUniformGrid, check_closed: bool, diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index 664cd99..131886d 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -480,6 +480,7 @@ enum PyTriMesh3dData { #[gen_stub_pyclass] #[pyclass] +#[pyo3(name = "TriMesh3d")] pub struct PyTriMesh3d { inner: PyTriMesh3dData, } @@ -568,6 +569,7 @@ enum PyMixedTriQuadMesh3dData { #[gen_stub_pyclass] #[pyclass] +#[pyo3(name = "MixedTriQuadMesh3d")] pub struct PyMixedTriQuadMesh3d { inner: PyMixedTriQuadMesh3dData, } @@ -619,6 +621,7 @@ enum PyMeshWithDataData { #[gen_stub_pyclass] #[pyclass] +#[pyo3(name = "MeshWithData")] pub struct PyMeshWithData { inner: PyMeshWithDataData, } @@ -709,7 +712,7 @@ impl PyMeshWithData { } /// Returns a copy of the contained mesh without associated data and attributes - #[gen_stub(override_return_type(type_repr="typing.Union[PyTriMesh3d, PyMixedTriQuadMesh3d]", imports=()))] + #[gen_stub(override_return_type(type_repr="typing.Union[TriMesh3d, MixedTriQuadMesh3d]", imports=()))] pub fn copy_mesh<'py>(&self, py: Python<'py>) -> PyResult> { match &self.inner { PyMeshWithDataData::Tri3dF32(mesh) => { diff --git a/pysplashsurf/src/post_processing.rs b/pysplashsurf/src/post_processing.rs index 347c90e..57e0e35 100644 --- a/pysplashsurf/src/post_processing.rs +++ b/pysplashsurf/src/post_processing.rs @@ -10,7 +10,7 @@ use crate::mesh::{ MixedTriQuadMeshWithDataF64, PyMeshWithData, PyTriMesh3d, TriMesh3dF32, TriMesh3dF64, TriMeshWithDataF32, TriMeshWithDataF64, }; -use crate::reconstruction::PyUniformGrid; +use crate::uniform_grid::PyUniformGrid; #[pyfunction] #[pyo3(name = "convert_tris_to_quads_f64")] @@ -264,20 +264,20 @@ pub fn decimation_py_f32<'py>( #[pyo3(name = "marching_cubes_cleanup")] #[pyo3(signature = (mesh, grid, *, max_rel_snap_dist = None, max_iter = 5, keep_vertices = false))] pub fn marching_cubes_cleanup<'py>( - #[gen_stub(override_type(type_repr="typing.Union[PyTriMesh3d, PyMeshWithData]", imports=()))] + #[gen_stub(override_type(type_repr="typing.Union[TriMesh3d, MeshWithData]", imports=()))] mesh: Bound<'py, PyAny>, grid: &PyUniformGrid, max_rel_snap_dist: Option, max_iter: usize, keep_vertices: bool, ) -> PyResult<()> { + let max_rel_snap_dist_f32 = max_rel_snap_dist.map(|d| d as f32); use splashsurf_lib::postprocessing::marching_cubes_cleanup as cleanup; if let Ok(mesh) = mesh.downcast::() { let mut mesh = mesh.borrow_mut(); if let (Some(grid), Some(mesh)) = (grid.as_f32(), mesh.as_f32_mut()) { - let max_rel_snap_dist = 
max_rel_snap_dist.map(|d| d as f32); - cleanup(mesh, grid, max_rel_snap_dist, max_iter, keep_vertices); + cleanup(mesh, grid, max_rel_snap_dist_f32, max_iter, keep_vertices); } else if let (Some(grid), Some(mesh)) = (grid.as_f64(), mesh.as_f64_mut()) { cleanup(mesh, grid, max_rel_snap_dist, max_iter, keep_vertices); } else { @@ -291,8 +291,7 @@ pub fn marching_cubes_cleanup<'py>( { if let (Some(grid), Some(mesh)) = (grid.as_f32(), mesh.as_tri_f32_mut()) { let mesh = &mut mesh.mesh; - let max_rel_snap_dist = max_rel_snap_dist.map(|d| d as f32); - cleanup(mesh, grid, max_rel_snap_dist, max_iter, keep_vertices); + cleanup(mesh, grid, max_rel_snap_dist_f32, max_iter, keep_vertices); } else if let (Some(grid), Some(mesh)) = (grid.as_f64(), mesh.as_tri_f64_mut()) { let mesh = &mut mesh.mesh; cleanup(mesh, grid, max_rel_snap_dist, max_iter, keep_vertices); diff --git a/pysplashsurf/src/reconstruction.rs b/pysplashsurf/src/reconstruction.rs index 6a3e0bd..075d344 100644 --- a/pysplashsurf/src/reconstruction.rs +++ b/pysplashsurf/src/reconstruction.rs @@ -1,9 +1,6 @@ use crate::mesh::PyTriMesh3d; +use crate::uniform_grid::PyUniformGrid; use crate::utils::*; -use crate::{ - mesh::{TriMesh3dF32, TriMesh3dF64}, - uniform_grid::{UniformGridF32, UniformGridF64}, -}; use anyhow::anyhow; use numpy as np; use numpy::prelude::*; @@ -13,98 +10,9 @@ use pyo3::{Bound, prelude::*}; use pyo3_stub_gen::derive::*; use splashsurf_lib::{ Aabb3d, GridDecompositionParameters, Real, SpatialDecomposition, SurfaceReconstruction, - UniformGrid, nalgebra::Vector3, + nalgebra::Vector3, }; -macro_rules! create_reconstruction_interface { - ($name: ident, $type: ident, $mesh_class: ident, $grid_class: ident) => { - /// SurfaceReconstruction wrapper - #[gen_stub_pyclass] - #[pyclass] - pub struct $name { - pub inner: SurfaceReconstruction, - } - - impl $name { - pub fn new(data: SurfaceReconstruction) -> Self { - Self { inner: data } - } - } - - #[gen_stub_pymethods] - #[pymethods] - impl $name { - /// PyTrimesh3d clone of the contained mesh - #[getter] - fn mesh(&self) -> $mesh_class { - $mesh_class::new(self.inner.mesh().clone()) - } - - /// PyUniformGrid clone of the contained grid - #[getter] - fn grid(&self) -> $grid_class { - $grid_class::new(self.inner.grid().clone()) - } - - // Doesn't work because SurfaceReconstruction.mesh() only returns an immutable reference - // /// Returns PyTrimesh3dF32/F64 without copying the mesh data, removes the mesh from the object - // fn take_mesh(&mut self) -> $mesh_class { - // let mesh = std::mem::take(&mut self.inner.mesh()); - // $mesh_class::new(mesh) - // } - - /// Returns a reference to the global particle density vector if computed during the reconstruction (currently, all reconstruction approaches return this) - fn particle_densities(&self) -> &Vec<$type> { - self.inner - .particle_densities() - .ok_or_else(|| { - anyhow::anyhow!("Surface Reconstruction did not return particle densities") - }) - .unwrap() - } - - /// Returns a reference to the global list of per-particle neighborhood lists if computed during the reconstruction (`None` if not specified in the parameters) - fn particle_neighbors(&self) -> Option<&Vec>> { - self.inner.particle_neighbors() - } - } - }; -} - -create_reconstruction_interface!(SurfaceReconstructionF64, f64, TriMesh3dF64, UniformGridF64); -create_reconstruction_interface!(SurfaceReconstructionF32, f32, TriMesh3dF32, UniformGridF32); - -enum PyUniformGridData { - F32(UniformGrid), - F64(UniformGrid), -} - -/// Struct containing the parameters of a uniform 
grid used for the surface reconstruction -#[gen_stub_pyclass] -#[pyclass] -pub struct PyUniformGrid { - inner: PyUniformGridData, -} - -impl_from_mesh!(PyUniformGrid, UniformGrid => PyUniformGridData::F32); -impl_from_mesh!(PyUniformGrid, UniformGrid => PyUniformGridData::F64); - -impl PyUniformGrid { - pub(crate) fn as_f32(&self) -> Option<&UniformGrid> { - match &self.inner { - PyUniformGridData::F32(grid) => Some(grid), - _ => None, - } - } - - pub(crate) fn as_f64(&self) -> Option<&UniformGrid> { - match &self.inner { - PyUniformGridData::F64(grid) => Some(grid), - _ => None, - } - } -} - enum PySurfaceReconstructionData { F32(SurfaceReconstruction), F64(SurfaceReconstruction), @@ -113,6 +21,7 @@ enum PySurfaceReconstructionData { /// Struct containing results of the surface reconstruction including the mesh, grid parameters and optional particle data #[gen_stub_pyclass] #[pyclass] +#[pyo3(name = "SurfaceReconstruction")] pub struct PySurfaceReconstruction { inner: PySurfaceReconstructionData, } diff --git a/pysplashsurf/src/uniform_grid.rs b/pysplashsurf/src/uniform_grid.rs index b33b116..597fd9f 100644 --- a/pysplashsurf/src/uniform_grid.rs +++ b/pysplashsurf/src/uniform_grid.rs @@ -1,23 +1,36 @@ +use crate::utils::impl_from_mesh; use pyo3::prelude::*; use pyo3_stub_gen::derive::*; use splashsurf_lib::UniformGrid; -macro_rules! create_grid_interface { - ($name: ident, $type: ident) => { - /// UniformGrid wrapper - #[gen_stub_pyclass] - #[pyclass] - pub struct $name { - pub inner: UniformGrid, +enum PyUniformGridData { + F32(UniformGrid), + F64(UniformGrid), +} + +/// Struct containing the parameters of the uniform grid used for the surface reconstruction +#[gen_stub_pyclass] +#[pyclass] +#[pyo3(name = "UniformGrid")] +pub struct PyUniformGrid { + inner: PyUniformGridData, +} + +impl_from_mesh!(PyUniformGrid, UniformGrid => PyUniformGridData::F32); +impl_from_mesh!(PyUniformGrid, UniformGrid => PyUniformGridData::F64); + +impl PyUniformGrid { + pub(crate) fn as_f32(&self) -> Option<&UniformGrid> { + match &self.inner { + PyUniformGridData::F32(grid) => Some(grid), + _ => None, } + } - impl $name { - pub fn new(data: UniformGrid) -> Self { - Self { inner: data } - } + pub(crate) fn as_f64(&self) -> Option<&UniformGrid> { + match &self.inner { + PyUniformGridData::F64(grid) => Some(grid), + _ => None, } - }; + } } - -create_grid_interface!(UniformGridF64, f64); -create_grid_interface!(UniformGridF32, f32); From 2faf6ede0fdfc0b9409997f0361d7bf5e16ef812 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Thu, 28 Aug 2025 08:48:52 +0200 Subject: [PATCH 14/63] Rename argument --- splashsurf_lib/src/postprocessing.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/splashsurf_lib/src/postprocessing.rs b/splashsurf_lib/src/postprocessing.rs index fe2a2c3..e86a162 100644 --- a/splashsurf_lib/src/postprocessing.rs +++ b/splashsurf_lib/src/postprocessing.rs @@ -677,7 +677,7 @@ pub fn merge_double_barnacle_configurations_he(mesh: &mut HalfEdgeTriMe pub fn convert_tris_to_quads( mesh: &TriMesh3d, non_squareness_limit: R, - normal_angle_limit_rad: R, + normal_angle_limit: R, max_interior_angle: R, ) -> MixedTriQuadMesh3d { profile!("tri_to_quad"); @@ -694,7 +694,7 @@ pub fn convert_tris_to_quads( }) .collect::>(); - let min_dot = normal_angle_limit_rad.cos(); + let min_dot = normal_angle_limit.cos(); let max_non_squareness = non_squareness_limit; let sqrt_two = R::from_float(2.0_f64.sqrt()); From 7758de7ff83d87edf3fbe11d7683d4b370829c85 Mon Sep 17 00:00:00 
2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Thu, 28 Aug 2025 08:49:35 +0200 Subject: [PATCH 15/63] Py: Re-Implement tris_to_quads --- pysplashsurf/pysplashsurf/__init__.py | 39 ------ pysplashsurf/pysplashsurf/pysplashsurf.pyi | 5 + pysplashsurf/src/lib.rs | 39 +++--- pysplashsurf/src/post_processing.rs | 139 ++++++++++----------- 4 files changed, 86 insertions(+), 136 deletions(-) diff --git a/pysplashsurf/pysplashsurf/__init__.py b/pysplashsurf/pysplashsurf/__init__.py index 13c2a30..8d0e72e 100644 --- a/pysplashsurf/pysplashsurf/__init__.py +++ b/pysplashsurf/pysplashsurf/__init__.py @@ -327,42 +327,3 @@ def neighborhood_search_spatial_hashing_parallel( else: raise ValueError("Invalid domain type") - -def convert_tris_to_quads( - mesh, *, - non_squareness_limit: float, - normal_angle_limit_rad: float, - max_interior_angle: float, -): - """Merges triangles sharing an edge to quads if they fulfill the given criteria - - Parameters - ---------- - mesh: TriMesh3dF32 | TriMesh3dF64 | TriMeshWithDataF32 | TriMeshWithDataF64 - Triangular mesh object\n - When called with a MeshWithData Object, the resulting MixedTriQuadMeshWithData won't inherit the cell attributes from the input. - - non_squareness_limit: float - Non-squareness limit - - normal_angle_limit_rad: float - Normal angle limit in radians - - max_interior_angle: float - Maximum interior angle in radians - - Returns - ------- - MixedTriQuadMesh3dF32 | MixedTriQuadMesh3dF64 | MixedTriQuadMeshWithDataF32 | MixedTriQuadMeshWithDataF64 - Mixed triangular and quadrilateral mesh object - """ - - if type(mesh) is TriMesh3dF32 or type(mesh) is TriMeshWithDataF32: - return convert_tris_to_quads_f32(mesh, non_squareness_limit=non_squareness_limit, normal_angle_limit_rad=normal_angle_limit_rad, max_interior_angle=max_interior_angle) - - elif type(mesh) is TriMesh3dF64 or type(mesh) is TriMeshWithDataF64: - return convert_tris_to_quads_f64(mesh, non_squareness_limit=non_squareness_limit, normal_angle_limit_rad=normal_angle_limit_rad, max_interior_angle=max_interior_angle) - - else: - raise ValueError("Invalid mesh type") - diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index e81406a..ff040a3 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -632,6 +632,11 @@ def check_mesh_consistency(mesh:typing.Union[TriMesh3d, MeshWithData], grid:Unif Checks the consistency of a reconstructed surface mesh (watertightness, manifoldness), optionally returns a string with details if problems are found """ +def convert_tris_to_quads(mesh:typing.Union[TriMesh3d, MeshWithData], *, non_squareness_limit:builtins.float=1.75, normal_angle_limit:builtins.float=10.0, max_interior_angle:builtins.float=135.0) -> typing.Any: + r""" + Merges triangles sharing an edge to quads if they fulfill the given criteria + """ + def marching_cubes_cleanup(mesh:typing.Union[TriMesh3d, MeshWithData], grid:UniformGrid, *, max_rel_snap_dist:typing.Optional[builtins.float]=None, max_iter:builtins.int=5, keep_vertices:builtins.bool=False) -> None: r""" Mesh simplification designed for marching cubes surfaces meshes inspired by the "Compact Contouring"/"Mesh displacement" approach by Doug Moore and Joe Warren diff --git a/pysplashsurf/src/lib.rs b/pysplashsurf/src/lib.rs index 5402ed3..b7ba084 100644 --- a/pysplashsurf/src/lib.rs +++ b/pysplashsurf/src/lib.rs @@ -51,28 +51,17 @@ fn pysplashsurf(m: &Bound<'_, PyModule>) -> PyResult<()> { m.add_class::()?; m.add_class::()?; - 
m.add_function(wrap_pyfunction!(reconstruction::reconstruct_surface, m)?)?; + use wrap_pyfunction as wrap; - m.add_function(wrap_pyfunction!( - post_processing::convert_tris_to_quads_py_f32, - m - )?)?; - m.add_function(wrap_pyfunction!( - post_processing::convert_tris_to_quads_py_f64, - m - )?)?; - - m.add_function(wrap_pyfunction!( - post_processing::marching_cubes_cleanup, - m - )?)?; + m.add_function(wrap!(reconstruction::reconstruct_surface, m)?)?; + m.add_function(wrap!(marching_cubes::check_mesh_consistency, m)?)?; + m.add_function(wrap!(post_processing::marching_cubes_cleanup, m)?)?; + m.add_function(wrap!(post_processing::convert_tris_to_quads, m)?)?; - m.add_function(wrap_pyfunction!(marching_cubes::check_mesh_consistency, m)?)?; + m.add_function(wrap!(post_processing::decimation_py_f32, m)?)?; + m.add_function(wrap!(post_processing::decimation_py_f64, m)?)?; - m.add_function(wrap_pyfunction!(post_processing::decimation_py_f32, m)?)?; - m.add_function(wrap_pyfunction!(post_processing::decimation_py_f64, m)?)?; - - m.add_function(wrap_pyfunction!( + m.add_function(wrap!( post_processing::par_laplacian_smoothing_inplace_py_f32, m )?)?; @@ -81,27 +70,27 @@ fn pysplashsurf(m: &Bound<'_, PyModule>) -> PyResult<()> { m )?)?; - m.add_function(wrap_pyfunction!( + m.add_function(wrap!( post_processing::par_laplacian_smoothing_normals_inplace_py_f32, m )?)?; - m.add_function(wrap_pyfunction!( + m.add_function(wrap!( post_processing::par_laplacian_smoothing_normals_inplace_py_f64, m )?)?; - m.add_function(wrap_pyfunction!( + m.add_function(wrap!( neighborhood_search::neighborhood_search_spatial_hashing_parallel_py_f32, m )?)?; - m.add_function(wrap_pyfunction!( + m.add_function(wrap!( neighborhood_search::neighborhood_search_spatial_hashing_parallel_py_f64, m )?)?; - m.add_function(wrap_pyfunction!(pipeline::reconstruction_pipeline, m)?)?; + m.add_function(wrap!(pipeline::reconstruction_pipeline, m)?)?; - m.add_function(wrap_pyfunction!(run_splashsurf_py, m)?)?; + m.add_function(wrap!(run_splashsurf_py, m)?)?; Ok(()) } diff --git a/pysplashsurf/src/post_processing.rs b/pysplashsurf/src/post_processing.rs index 57e0e35..7df877b 100644 --- a/pysplashsurf/src/post_processing.rs +++ b/pysplashsurf/src/post_processing.rs @@ -1,94 +1,89 @@ use ndarray::ArrayViewMut2; use numpy::{PyArray2, PyArrayMethods}; +use pyo3::IntoPyObjectExt; use pyo3::exceptions::{PyTypeError, PyValueError}; use pyo3::prelude::*; use pyo3_stub_gen::derive::gen_stub_pyfunction; use splashsurf_lib::nalgebra::Vector3; use crate::mesh::{ - MeshType, MixedTriQuadMesh3dF32, MixedTriQuadMesh3dF64, MixedTriQuadMeshWithDataF32, - MixedTriQuadMeshWithDataF64, PyMeshWithData, PyTriMesh3d, TriMesh3dF32, TriMesh3dF64, + MeshType, PyMeshWithData, PyMixedTriQuadMesh3d, PyTriMesh3d, TriMesh3dF32, TriMesh3dF64, TriMeshWithDataF32, TriMeshWithDataF64, }; use crate::uniform_grid::PyUniformGrid; +/// Merges triangles sharing an edge to quads if they fulfill the given criteria +#[gen_stub_pyfunction] #[pyfunction] -#[pyo3(name = "convert_tris_to_quads_f64")] -#[pyo3(signature = (mesh, *, non_squareness_limit, normal_angle_limit_rad, max_interior_angle))] -pub fn convert_tris_to_quads_py_f64<'py>( - mesh: PyObject, - py: Python<'py>, +#[pyo3(name = "convert_tris_to_quads")] +#[pyo3(signature = (mesh, *, non_squareness_limit = 1.75, normal_angle_limit = 10.0, max_interior_angle = 135.0))] +#[gen_stub(override_return_type(type_repr="typing.Union[TriMesh3d, MixedTriQuadMesh3d]", imports=()))] +pub fn convert_tris_to_quads<'py>( + 
#[gen_stub(override_type(type_repr="typing.Union[MixedTriQuadMesh3d, MeshWithData]", imports=()))] + mesh: Bound<'py, PyAny>, non_squareness_limit: f64, - normal_angle_limit_rad: f64, + normal_angle_limit: f64, max_interior_angle: f64, -) -> PyResult { - if mesh.downcast_bound::(py).is_ok() { - let mesh = mesh.downcast_bound::(py).unwrap(); - let quad_mesh = - MixedTriQuadMesh3dF64::new(splashsurf_lib::postprocessing::convert_tris_to_quads( - &mesh.borrow().inner, - non_squareness_limit, - normal_angle_limit_rad, - max_interior_angle, - )); - Ok(quad_mesh.into_pyobject(py).unwrap().into()) - } else if mesh.downcast_bound::(py).is_ok() { - let mesh = mesh.downcast_bound::(py).unwrap(); - let mut quad_mesh = - MixedTriQuadMeshWithDataF64::new(splashsurf_lib::mesh::MeshWithData::new( - splashsurf_lib::postprocessing::convert_tris_to_quads( - &mesh.borrow().inner.mesh, - non_squareness_limit, - normal_angle_limit_rad, - max_interior_angle, - ), - )); +) -> PyResult> { + let py = mesh.py(); - quad_mesh.inner.point_attributes = mesh.borrow().inner.point_attributes.clone(); + let normal_angle_limit = normal_angle_limit.to_radians(); + let max_interior_angle = max_interior_angle.to_radians(); - Ok(quad_mesh.into_pyobject(py).unwrap().into()) - } else { - Err(PyErr::new::("Invalid mesh type")) - } -} - -#[pyfunction] -#[pyo3(name = "convert_tris_to_quads_f32")] -#[pyo3(signature = (mesh, *, non_squareness_limit, normal_angle_limit_rad, max_interior_angle))] -pub fn convert_tris_to_quads_py_f32<'py>( - py: Python<'py>, - mesh: PyObject, - non_squareness_limit: f32, - normal_angle_limit_rad: f32, - max_interior_angle: f32, -) -> PyResult { - if mesh.downcast_bound::(py).is_ok() { - let mesh = mesh.downcast_bound::(py).unwrap(); - let quad_mesh = - MixedTriQuadMesh3dF32::new(splashsurf_lib::postprocessing::convert_tris_to_quads( - &mesh.borrow().inner, + if let Ok(mesh) = mesh.downcast::() { + let mesh = mesh.borrow(); + if let Some(mesh) = mesh.as_f32() { + let quad_mesh = splashsurf_lib::postprocessing::convert_tris_to_quads( + mesh, + non_squareness_limit as f32, + normal_angle_limit as f32, + max_interior_angle as f32, + ); + PyMixedTriQuadMesh3d::from(quad_mesh).into_bound_py_any(py) + } else if let Some(mesh) = mesh.as_f64() { + let quad_mesh = splashsurf_lib::postprocessing::convert_tris_to_quads( + mesh, non_squareness_limit, - normal_angle_limit_rad, + normal_angle_limit, max_interior_angle, - )); - Ok(quad_mesh.into_pyobject(py).unwrap().into()) - } else if mesh.downcast_bound::(py).is_ok() { - let mesh = mesh.downcast_bound::(py).unwrap(); - let mut quad_mesh = - MixedTriQuadMeshWithDataF32::new(splashsurf_lib::mesh::MeshWithData::new( - splashsurf_lib::postprocessing::convert_tris_to_quads( - &mesh.borrow().inner.mesh, - non_squareness_limit, - normal_angle_limit_rad, - max_interior_angle, - ), - )); - - quad_mesh.inner.point_attributes = mesh.borrow().inner.point_attributes.clone(); - - Ok(quad_mesh.into_pyobject(py).unwrap().into()) + ); + PyMixedTriQuadMesh3d::from(quad_mesh).into_bound_py_any(py) + } else { + Err(PyTypeError::new_err( + "unsupported mesh scalar data type, only f32 and f64 are supported", + )) + } + } else if let Ok(mesh) = mesh.downcast::() { + let mesh = mesh.borrow(); + if let Some(mesh) = mesh.as_tri_f32() { + let quad_mesh = splashsurf_lib::postprocessing::convert_tris_to_quads( + &mesh.mesh, + non_squareness_limit as f32, + normal_angle_limit as f32, + max_interior_angle as f32, + ); + let mut quad_mesh = splashsurf_lib::mesh::MeshWithData::new(quad_mesh); + 
quad_mesh.point_attributes = mesh.point_attributes.clone(); + PyMeshWithData::from(quad_mesh).into_bound_py_any(py) + } else if let Some(mesh) = mesh.as_tri_f64() { + let quad_mesh = splashsurf_lib::postprocessing::convert_tris_to_quads( + &mesh.mesh, + non_squareness_limit, + normal_angle_limit, + max_interior_angle, + ); + let mut quad_mesh = splashsurf_lib::mesh::MeshWithData::new(quad_mesh); + quad_mesh.point_attributes = mesh.point_attributes.clone(); + PyMeshWithData::from(quad_mesh).into_bound_py_any(py) + } else { + Err(PyTypeError::new_err( + "unsupported mesh scalar data type, only f32 and f64 are supported", + )) + } } else { - Err(PyErr::new::("Invalid mesh type")) + Err(PyTypeError::new_err( + "unsupported mesh type, only triangle meshes are supported", + )) } } @@ -302,7 +297,7 @@ pub fn marching_cubes_cleanup<'py>( } } else { return Err(PyTypeError::new_err( - "unsupported mesh type for marching cubes clean up, only triangle meshes are supported", + "unsupported mesh type for, only triangle meshes are supported", )); } From 0de099c7b23b1608ca75712bfb9ab0c0cd2455aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Thu, 28 Aug 2025 09:10:16 +0200 Subject: [PATCH 16/63] Py: Update decimation --- pysplashsurf/pysplashsurf/__init__.py | 30 ------ pysplashsurf/pysplashsurf/pysplashsurf.pyi | 7 +- pysplashsurf/src/lib.rs | 4 +- pysplashsurf/src/post_processing.rs | 120 ++++++++++----------- 4 files changed, 65 insertions(+), 96 deletions(-) diff --git a/pysplashsurf/pysplashsurf/__init__.py b/pysplashsurf/pysplashsurf/__init__.py index 8d0e72e..8996a1e 100644 --- a/pysplashsurf/pysplashsurf/__init__.py +++ b/pysplashsurf/pysplashsurf/__init__.py @@ -199,36 +199,6 @@ def create_aabb_object_from_points(points): return Aabb3dF64.from_points(points) else: raise ValueError("Invalid data type (only float32 and float64 are supported, consider explicitly specifying the dtype for points)") - -def decimation( - mesh, - keep_vertices: bool = False -): - """Barnacle decimation - - For details see “Weighted Laplacian Smoothing for Surface Reconstruction of Particle-based Fluids” (Löschner, Böttcher, Jeske, Bender; 2023). 
- - Parameters - ---------- - mesh: TriMesh3dF32 | TriMesh3dF64 | TriMeshWithDataF32 | TriMeshWithDataF64 - Mesh object to simplify - - keep_vertices: bool - Flag to keep vertices - - Returns - ------- - list - vertex connectivity list of the simplified mesh - """ - if type(mesh) is TriMesh3dF32 or type(mesh) is TriMeshWithDataF32: - return decimation_f32(mesh, keep_vertices=keep_vertices) - - elif type(mesh) is TriMesh3dF64 or type(mesh) is TriMeshWithDataF64: - return decimation_f64(mesh, keep_vertices=keep_vertices) - - else: - raise ValueError("Invalid mesh type") def par_laplacian_smoothing_inplace( mesh, diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index ff040a3..e2add7a 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -627,12 +627,17 @@ class MeshType(Enum): 3D mixed triangle and quad mesh """ +def barnacle_decimation(mesh:typing.Union[TriMesh3d, MeshWithData], *, keep_vertices:builtins.bool) -> typing.Union[TriMesh3d, MeshWithData]: + r""" + Decimation to prevent "barnacles" when applying weighted Laplacian smoothing + """ + def check_mesh_consistency(mesh:typing.Union[TriMesh3d, MeshWithData], grid:UniformGrid, *, check_closed:builtins.bool=True, check_manifold:builtins.bool=True, debug:builtins.bool=False) -> typing.Optional[builtins.str]: r""" Checks the consistency of a reconstructed surface mesh (watertightness, manifoldness), optionally returns a string with details if problems are found """ -def convert_tris_to_quads(mesh:typing.Union[TriMesh3d, MeshWithData], *, non_squareness_limit:builtins.float=1.75, normal_angle_limit:builtins.float=10.0, max_interior_angle:builtins.float=135.0) -> typing.Any: +def convert_tris_to_quads(mesh:typing.Union[MixedTriQuadMesh3d, MeshWithData], *, non_squareness_limit:builtins.float=1.75, normal_angle_limit:builtins.float=10.0, max_interior_angle:builtins.float=135.0) -> typing.Union[TriMesh3d, MeshWithData]: r""" Merges triangles sharing an edge to quads if they fulfill the given criteria """ diff --git a/pysplashsurf/src/lib.rs b/pysplashsurf/src/lib.rs index b7ba084..c3d490f 100644 --- a/pysplashsurf/src/lib.rs +++ b/pysplashsurf/src/lib.rs @@ -57,9 +57,7 @@ fn pysplashsurf(m: &Bound<'_, PyModule>) -> PyResult<()> { m.add_function(wrap!(marching_cubes::check_mesh_consistency, m)?)?; m.add_function(wrap!(post_processing::marching_cubes_cleanup, m)?)?; m.add_function(wrap!(post_processing::convert_tris_to_quads, m)?)?; - - m.add_function(wrap!(post_processing::decimation_py_f32, m)?)?; - m.add_function(wrap!(post_processing::decimation_py_f64, m)?)?; + m.add_function(wrap!(post_processing::barnacle_decimation, m)?)?; m.add_function(wrap!( post_processing::par_laplacian_smoothing_inplace_py_f32, diff --git a/pysplashsurf/src/post_processing.rs b/pysplashsurf/src/post_processing.rs index 7df877b..69ec45e 100644 --- a/pysplashsurf/src/post_processing.rs +++ b/pysplashsurf/src/post_processing.rs @@ -12,12 +12,30 @@ use crate::mesh::{ }; use crate::uniform_grid::PyUniformGrid; +fn pyerr_unsupported_scalar() -> PyResult { + Err(PyTypeError::new_err( + "unsupported mesh scalar data type, only f32 and f64 are supported", + )) +} + +fn pyerr_mesh_grid_scalar_mismatch() -> PyResult { + Err(PyTypeError::new_err( + "unsupported mesh and grid scalar data type combination, both have to be either f32 or f64", + )) +} + +fn pyerr_only_triangle_mesh() -> PyResult { + Err(PyTypeError::new_err( + "unsupported mesh type, only triangle meshes are 
supported", + )) +} + /// Merges triangles sharing an edge to quads if they fulfill the given criteria #[gen_stub_pyfunction] #[pyfunction] #[pyo3(name = "convert_tris_to_quads")] #[pyo3(signature = (mesh, *, non_squareness_limit = 1.75, normal_angle_limit = 10.0, max_interior_angle = 135.0))] -#[gen_stub(override_return_type(type_repr="typing.Union[TriMesh3d, MixedTriQuadMesh3d]", imports=()))] +#[gen_stub(override_return_type(type_repr="typing.Union[TriMesh3d, MeshWithData]", imports=()))] pub fn convert_tris_to_quads<'py>( #[gen_stub(override_type(type_repr="typing.Union[MixedTriQuadMesh3d, MeshWithData]", imports=()))] mesh: Bound<'py, PyAny>, @@ -49,12 +67,12 @@ pub fn convert_tris_to_quads<'py>( ); PyMixedTriQuadMesh3d::from(quad_mesh).into_bound_py_any(py) } else { - Err(PyTypeError::new_err( - "unsupported mesh scalar data type, only f32 and f64 are supported", - )) + pyerr_unsupported_scalar() } - } else if let Ok(mesh) = mesh.downcast::() { - let mesh = mesh.borrow(); + } else if let Ok(mesh) = mesh.downcast::() + && let mesh = mesh.borrow() + && mesh.mesh_cell_type() == MeshType::Tri3d + { if let Some(mesh) = mesh.as_tri_f32() { let quad_mesh = splashsurf_lib::postprocessing::convert_tris_to_quads( &mesh.mesh, @@ -76,14 +94,10 @@ pub fn convert_tris_to_quads<'py>( quad_mesh.point_attributes = mesh.point_attributes.clone(); PyMeshWithData::from(quad_mesh).into_bound_py_any(py) } else { - Err(PyTypeError::new_err( - "unsupported mesh scalar data type, only f32 and f64 are supported", - )) + pyerr_unsupported_scalar() } } else { - Err(PyTypeError::new_err( - "unsupported mesh type, only triangle meshes are supported", - )) + pyerr_only_triangle_mesh() } } @@ -203,53 +217,41 @@ pub fn par_laplacian_smoothing_normals_inplace_py_f64<'py>( .copy_from_slice(&bytemuck::cast_slice(normals_vec.as_slice())); // Copy back to numpy array } +/// Decimation to prevent "barnacles" when applying weighted Laplacian smoothing +#[gen_stub_pyfunction] #[pyfunction] -#[pyo3(name = "decimation_f64")] +#[pyo3(name = "barnacle_decimation")] #[pyo3(signature = (mesh, *, keep_vertices))] -pub fn decimation_py_f64<'py>( - py: Python, - mesh: PyObject, +#[gen_stub(override_return_type(type_repr="typing.Union[TriMesh3d, MeshWithData]", imports=()))] +pub fn barnacle_decimation<'py>( + #[gen_stub(override_type(type_repr="typing.Union[TriMesh3d, MeshWithData]", imports=()))] + mesh: Bound<'py, PyAny>, keep_vertices: bool, ) -> PyResult>> { - if mesh.downcast_bound::(py).is_ok() { - let mesh = mesh.downcast_bound::(py).unwrap(); - Ok(splashsurf_lib::postprocessing::decimation( - &mut mesh.borrow_mut().inner, - keep_vertices, - )) - } else if mesh.downcast_bound::(py).is_ok() { - let mesh = mesh.downcast_bound::(py).unwrap(); - Ok(splashsurf_lib::postprocessing::decimation( - &mut mesh.borrow_mut().inner.mesh, - keep_vertices, - )) - } else { - Err(PyErr::new::("Invalid mesh type")) - } -} + use splashsurf_lib::postprocessing::decimation; -#[pyfunction] -#[pyo3(name = "decimation_f32")] -#[pyo3(signature = (mesh, *, keep_vertices))] -pub fn decimation_py_f32<'py>( - py: Python, - mesh: PyObject, - keep_vertices: bool, -) -> PyResult>> { - if mesh.downcast_bound::(py).is_ok() { - let mesh = mesh.downcast_bound::(py).unwrap(); - Ok(splashsurf_lib::postprocessing::decimation( - &mut mesh.borrow_mut().inner, - keep_vertices, - )) - } else if mesh.downcast_bound::(py).is_ok() { - let mesh = mesh.downcast_bound::(py).unwrap(); - Ok(splashsurf_lib::postprocessing::decimation( - &mut mesh.borrow_mut().inner.mesh, - 
keep_vertices, - )) + if let Ok(mesh) = mesh.downcast::() { + let mut mesh = mesh.borrow_mut(); + if let Some(mesh) = mesh.as_f32_mut() { + Ok(decimation(mesh, keep_vertices)) + } else if let Some(mesh) = mesh.as_f64_mut() { + Ok(decimation(mesh, keep_vertices)) + } else { + pyerr_unsupported_scalar() + } + } else if let Ok(mesh) = mesh.downcast::() { + let mut mesh = mesh.borrow_mut(); + if let Some(mesh) = mesh.as_tri_f32_mut() { + let mesh = &mut mesh.mesh; + Ok(decimation(mesh, keep_vertices)) + } else if let Some(mesh) = mesh.as_tri_f64_mut() { + let mesh = &mut mesh.mesh; + Ok(decimation(mesh, keep_vertices)) + } else { + pyerr_unsupported_scalar() + } } else { - Err(PyErr::new::("Invalid mesh type")) + pyerr_only_triangle_mesh() } } @@ -276,9 +278,7 @@ pub fn marching_cubes_cleanup<'py>( } else if let (Some(grid), Some(mesh)) = (grid.as_f64(), mesh.as_f64_mut()) { cleanup(mesh, grid, max_rel_snap_dist, max_iter, keep_vertices); } else { - return Err(PyTypeError::new_err( - "invalid combination of grid and mesh scalar data types", - )); + return pyerr_mesh_grid_scalar_mismatch(); } } else if let Ok(mesh) = mesh.downcast::() && let mut mesh = mesh.borrow_mut() @@ -291,14 +291,10 @@ pub fn marching_cubes_cleanup<'py>( let mesh = &mut mesh.mesh; cleanup(mesh, grid, max_rel_snap_dist, max_iter, keep_vertices); } else { - return Err(PyTypeError::new_err( - "invalid combination of grid and mesh scalar data types", - )); + return pyerr_mesh_grid_scalar_mismatch(); } } else { - return Err(PyTypeError::new_err( - "unsupported mesh type for, only triangle meshes are supported", - )); + return pyerr_only_triangle_mesh(); } Ok(()) From 89d421a7611f89b563ef0080f6e50877afb75eee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Thu, 28 Aug 2025 12:28:36 +0200 Subject: [PATCH 17/63] Py: Reimplement smoothing functions --- pysplashsurf/pysplashsurf/__init__.py | 68 ----- pysplashsurf/pysplashsurf/pysplashsurf.pyi | 50 +++- pysplashsurf/src/lib.rs | 17 +- pysplashsurf/src/mesh.rs | 37 +++ pysplashsurf/src/post_processing.rs | 278 +++++++++++---------- 5 files changed, 230 insertions(+), 220 deletions(-) diff --git a/pysplashsurf/pysplashsurf/__init__.py b/pysplashsurf/pysplashsurf/__init__.py index 8996a1e..7127e9e 100644 --- a/pysplashsurf/pysplashsurf/__init__.py +++ b/pysplashsurf/pysplashsurf/__init__.py @@ -200,74 +200,6 @@ def create_aabb_object_from_points(points): else: raise ValueError("Invalid data type (only float32 and float64 are supported, consider explicitly specifying the dtype for points)") -def par_laplacian_smoothing_inplace( - mesh, - vertex_connectivity: list[list[int]], - iterations: int, - beta: float, - weights: list[float] -): - """Laplacian Smoothing with feature weights - - Move each vertex towards the mean position of its neighbors.\n - Factor beta in [0;1] proportional to amount of smoothing (for beta=1 each vertex is placed at the mean position).\n - Additionally, feature weights can be specified to apply a varying amount of smoothing over the mesh. 
- - Parameters - ---------- - mesh: TriMesh3dF32 | TriMesh3dF64 | TriMeshWithDataF32 | TriMeshWithDataF64 - Mesh object to smooth - - vertex_connectivity: list[list[int]] - Vertex connectivity list - - iterations: int - Number of iterations - - beta: float - Smoothing factor - - weights: list[float] - Feature weights for the vertices - """ - - if type(mesh) is TriMesh3dF32 or type(mesh) is TriMeshWithDataF32: - par_laplacian_smoothing_inplace_f32(mesh, vertex_connectivity, iterations, beta, weights) - - elif type(mesh) is TriMesh3dF64 or type(mesh) is TiMeshWithDataF64: - par_laplacian_smoothing_inplace_f64(mesh, vertex_connectivity, iterations, beta, weights) - - else: - raise ValueError("Invalid mesh type") - -def par_laplacian_smoothing_normals_inplace( - normals: np.ndarray, - vertex_connectivity: list[list[int]], - iterations: int -): - """Laplacian smoothing of a normal field - - Parameters - ---------- - normals: np.ndarray - 2D-Array of vertex normals to smooth - - vertex_connectivity: list[list[int]] - Vertex connectivity list - - iterations: int - Number of iterations - """ - - if normals.dtype == 'float32': - par_laplacian_smoothing_normals_inplace_f32(normals, vertex_connectivity, iterations) - - elif normals.dtype == 'float64': - par_laplacian_smoothing_normals_inplace_f64(normals, vertex_connectivity, iterations) - - else: - raise ValueError("Invalid mesh type") - def neighborhood_search_spatial_hashing_parallel( domain, particle_positions: np.ndarray, diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index e2add7a..b88a7ff 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -429,6 +429,10 @@ class TriMesh3d: r""" Returns a copy of the `Mx3` array of vertex indices per triangle """ + def vertex_vertex_connectivity(self) -> VertexVertexConnectivity: + r""" + Returns the vertex-vertex connectivity of the mesh + """ class TriMesh3dF32: r""" @@ -614,6 +618,19 @@ class UniformGrid: """ ... +class VertexVertexConnectivity: + r""" + Vertex-vertex connectivity of a mesh + """ + def copy_connectivity(self) -> builtins.list[builtins.list[builtins.int]]: + r""" + Returns a copy of the contained connectivity data + """ + def take_connectivity(self) -> builtins.list[builtins.list[builtins.int]]: + r""" + Returns the contained connectivity data by moving it out of this object (zero copy) + """ + class MeshType(Enum): r""" Enum specifying the type of mesh contained in a `MeshWithData` @@ -629,7 +646,11 @@ class MeshType(Enum): def barnacle_decimation(mesh:typing.Union[TriMesh3d, MeshWithData], *, keep_vertices:builtins.bool) -> typing.Union[TriMesh3d, MeshWithData]: r""" - Decimation to prevent "barnacles" when applying weighted Laplacian smoothing + Performs specialized decimation on the given mesh to prevent "barnacles" when applying weighted Laplacian smoothing + + The decimation is performed inplace and modifies the given mesh. + Returns the vertex-vertex connectivity of the decimated mesh which can be used for other + post-processing steps. 
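To make the new decimation entry point concrete, a minimal usage sketch (editorial addition, not part of the patch itself): the function names and keyword arguments come from the stubs in this series, while the particle data, the parameter values and the `recon.mesh` accessor on `SurfaceReconstruction` are placeholders/assumptions.

    import numpy as np
    import pysplashsurf

    # Toy input: random particle positions; the reconstruction parameters are placeholders.
    particles = np.random.rand(10_000, 3).astype(np.float32)
    recon = pysplashsurf.reconstruct_surface(
        particles,
        particle_radius=0.025,
        smoothing_length=2.0,
        cube_size=0.5,
    )
    mesh = recon.mesh  # assumed accessor; the SurfaceReconstruction interface is not shown here

    # Decimate in place; the returned connectivity can be reused by the smoothing functions.
    connectivity = pysplashsurf.barnacle_decimation(mesh, keep_vertices=False)
    neighbors = connectivity.copy_connectivity()  # plain list[list[int]], if needed on the Python side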
""" def check_mesh_consistency(mesh:typing.Union[TriMesh3d, MeshWithData], grid:UniformGrid, *, check_closed:builtins.bool=True, check_manifold:builtins.bool=True, debug:builtins.bool=False) -> typing.Optional[builtins.str]: @@ -637,14 +658,33 @@ def check_mesh_consistency(mesh:typing.Union[TriMesh3d, MeshWithData], grid:Unif Checks the consistency of a reconstructed surface mesh (watertightness, manifoldness), optionally returns a string with details if problems are found """ -def convert_tris_to_quads(mesh:typing.Union[MixedTriQuadMesh3d, MeshWithData], *, non_squareness_limit:builtins.float=1.75, normal_angle_limit:builtins.float=10.0, max_interior_angle:builtins.float=135.0) -> typing.Union[TriMesh3d, MeshWithData]: +def convert_tris_to_quads(mesh:typing.Union[TriMesh3d, MeshWithData], *, non_squareness_limit:builtins.float=1.75, normal_angle_limit:builtins.float=10.0, max_interior_angle:builtins.float=135.0) -> typing.Union[MixedTriQuadMesh3d, MeshWithData]: r""" - Merges triangles sharing an edge to quads if they fulfill the given criteria + Converts triangles to quads by merging triangles sharing an edge if they fulfill the given criteria + + This operation creates a new mesh and does not modify the input mesh. + Angles are specified in degrees. + """ + +def laplacian_smoothing_normals_parallel(normals:numpy.typing.NDArray[typing.Any], vertex_connectivity:VertexVertexConnectivity, *, iterations:builtins.int) -> None: + r""" + Laplacian smoothing of a normal field + + The smoothing is performed inplace and modifies the given normal array. """ -def marching_cubes_cleanup(mesh:typing.Union[TriMesh3d, MeshWithData], grid:UniformGrid, *, max_rel_snap_dist:typing.Optional[builtins.float]=None, max_iter:builtins.int=5, keep_vertices:builtins.bool=False) -> None: +def laplacian_smoothing_parallel(mesh:typing.Union[TriMesh3d, MeshWithData], vertex_connectivity:VertexVertexConnectivity, *, iterations:builtins.int, beta:builtins.float=1.0, weights:numpy.typing.NDArray[typing.Any]) -> None: r""" - Mesh simplification designed for marching cubes surfaces meshes inspired by the "Compact Contouring"/"Mesh displacement" approach by Doug Moore and Joe Warren + Laplacian smoothing of mesh vertices with feature weights + + The smoothing is performed inplace and modifies the vertices of the given mesh. + """ + +def marching_cubes_cleanup(mesh:typing.Union[TriMesh3d, MeshWithData], grid:UniformGrid, *, max_rel_snap_dist:typing.Optional[builtins.float]=None, max_iter:builtins.int=5, keep_vertices:builtins.bool=False) -> typing.Union[TriMesh3d, MeshWithData]: + r""" + Performs simplification on the given mesh designed for marching cubes reconstructions inspired by the "Compact Contouring"/"Mesh displacement" approach by Doug Moore and Joe Warren + + The simplification is performed inplace and modifies the given mesh. 
""" def reconstruct_surface(particles:numpy.typing.NDArray[typing.Any], *, particle_radius:builtins.float, rest_density:builtins.float=1000.0, smoothing_length:builtins.float, cube_size:builtins.float, iso_surface_threshold:builtins.float=0.6, multi_threading:builtins.bool=True, global_neighborhood_list:builtins.bool=False, subdomain_grid:builtins.bool=True, subdomain_grid_auto_disable:builtins.bool=True, subdomain_num_cubes_per_dim:builtins.int=64, aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None) -> SurfaceReconstruction: diff --git a/pysplashsurf/src/lib.rs b/pysplashsurf/src/lib.rs index c3d490f..7fa20cb 100644 --- a/pysplashsurf/src/lib.rs +++ b/pysplashsurf/src/lib.rs @@ -58,22 +58,9 @@ fn pysplashsurf(m: &Bound<'_, PyModule>) -> PyResult<()> { m.add_function(wrap!(post_processing::marching_cubes_cleanup, m)?)?; m.add_function(wrap!(post_processing::convert_tris_to_quads, m)?)?; m.add_function(wrap!(post_processing::barnacle_decimation, m)?)?; - - m.add_function(wrap!( - post_processing::par_laplacian_smoothing_inplace_py_f32, - m - )?)?; - m.add_function(wrap_pyfunction!( - post_processing::par_laplacian_smoothing_inplace_py_f64, - m - )?)?; - - m.add_function(wrap!( - post_processing::par_laplacian_smoothing_normals_inplace_py_f32, - m - )?)?; + m.add_function(wrap!(post_processing::laplacian_smoothing_parallel, m)?)?; m.add_function(wrap!( - post_processing::par_laplacian_smoothing_normals_inplace_py_f64, + post_processing::laplacian_smoothing_normals_parallel, m )?)?; diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index 131886d..5c3e7bd 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -473,6 +473,34 @@ macro_rules! create_tri_quad_mesh_interface { }; } +/// Vertex-vertex connectivity of a mesh +#[gen_stub_pyclass] +#[pyclass] +#[pyo3(name = "VertexVertexConnectivity")] +pub struct PyVertexVertexConnectivity { + pub(crate) connectivity: Vec>, +} + +impl PyVertexVertexConnectivity { + pub fn new(connectivity: Vec>) -> Self { + Self { connectivity } + } +} + +#[gen_stub_pymethods] +impl PyVertexVertexConnectivity { + /// Returns a copy of the contained connectivity data + pub fn copy_connectivity(&self) -> Vec> { + self.connectivity.clone() + } + + /// Returns the contained connectivity data by moving it out of this object (zero copy) + pub fn take_connectivity(&mut self) -> Vec> { + // TODO: Check if this is actually zero-copy with the conversion to Python lists + std::mem::take(&mut self.connectivity) + } +} + enum PyTriMesh3dData { F32(TriMesh3d), F64(TriMesh3d), @@ -560,6 +588,15 @@ impl PyTriMesh3d { PyTriMesh3dData::F64(mesh) => get_triangles(py, mesh.cells()), } } + + /// Returns the vertex-vertex connectivity of the mesh + pub fn vertex_vertex_connectivity(&self) -> PyVertexVertexConnectivity { + let connectivity = match &self.inner { + PyTriMesh3dData::F32(mesh) => mesh.vertex_vertex_connectivity(), + PyTriMesh3dData::F64(mesh) => mesh.vertex_vertex_connectivity(), + }; + PyVertexVertexConnectivity::new(connectivity) + } } enum PyMixedTriQuadMesh3dData { diff --git a/pysplashsurf/src/post_processing.rs b/pysplashsurf/src/post_processing.rs index 69ec45e..0a5ac6c 100644 --- a/pysplashsurf/src/post_processing.rs +++ b/pysplashsurf/src/post_processing.rs @@ -1,43 +1,42 @@ -use ndarray::ArrayViewMut2; -use numpy::{PyArray2, PyArrayMethods}; +use numpy as np; +use numpy::prelude::*; +use numpy::{PyArray1, PyArray2, PyArrayMethods, PyUntypedArray}; use 
pyo3::IntoPyObjectExt; -use pyo3::exceptions::{PyTypeError, PyValueError}; +use pyo3::exceptions::PyTypeError; use pyo3::prelude::*; use pyo3_stub_gen::derive::gen_stub_pyfunction; use splashsurf_lib::nalgebra::Vector3; use crate::mesh::{ - MeshType, PyMeshWithData, PyMixedTriQuadMesh3d, PyTriMesh3d, TriMesh3dF32, TriMesh3dF64, - TriMeshWithDataF32, TriMeshWithDataF64, + MeshType, PyMeshWithData, PyMixedTriQuadMesh3d, PyTriMesh3d, PyVertexVertexConnectivity, }; use crate::uniform_grid::PyUniformGrid; -fn pyerr_unsupported_scalar() -> PyResult { - Err(PyTypeError::new_err( - "unsupported mesh scalar data type, only f32 and f64 are supported", - )) +fn pyerr_unsupported_scalar() -> PyErr { + PyTypeError::new_err("unsupported mesh scalar data type, only f32 and f64 are supported") } -fn pyerr_mesh_grid_scalar_mismatch() -> PyResult { - Err(PyTypeError::new_err( +fn pyerr_mesh_grid_scalar_mismatch() -> PyErr { + PyTypeError::new_err( "unsupported mesh and grid scalar data type combination, both have to be either f32 or f64", - )) + ) } -fn pyerr_only_triangle_mesh() -> PyResult { - Err(PyTypeError::new_err( - "unsupported mesh type, only triangle meshes are supported", - )) +fn pyerr_only_triangle_mesh() -> PyErr { + PyTypeError::new_err("unsupported mesh type, only triangle meshes are supported") } -/// Merges triangles sharing an edge to quads if they fulfill the given criteria +/// Converts triangles to quads by merging triangles sharing an edge if they fulfill the given criteria +/// +/// This operation creates a new mesh and does not modify the input mesh. +/// Angles are specified in degrees. #[gen_stub_pyfunction] #[pyfunction] #[pyo3(name = "convert_tris_to_quads")] #[pyo3(signature = (mesh, *, non_squareness_limit = 1.75, normal_angle_limit = 10.0, max_interior_angle = 135.0))] -#[gen_stub(override_return_type(type_repr="typing.Union[TriMesh3d, MeshWithData]", imports=()))] +#[gen_stub(override_return_type(type_repr="typing.Union[MixedTriQuadMesh3d, MeshWithData]", imports=()))] pub fn convert_tris_to_quads<'py>( - #[gen_stub(override_type(type_repr="typing.Union[MixedTriQuadMesh3d, MeshWithData]", imports=()))] + #[gen_stub(override_type(type_repr="typing.Union[TriMesh3d, MeshWithData]", imports=()))] mesh: Bound<'py, PyAny>, non_squareness_limit: f64, normal_angle_limit: f64, @@ -67,7 +66,7 @@ pub fn convert_tris_to_quads<'py>( ); PyMixedTriQuadMesh3d::from(quad_mesh).into_bound_py_any(py) } else { - pyerr_unsupported_scalar() + Err(pyerr_unsupported_scalar()) } } else if let Ok(mesh) = mesh.downcast::() && let mesh = mesh.borrow() @@ -94,130 +93,130 @@ pub fn convert_tris_to_quads<'py>( quad_mesh.point_attributes = mesh.point_attributes.clone(); PyMeshWithData::from(quad_mesh).into_bound_py_any(py) } else { - pyerr_unsupported_scalar() + Err(pyerr_unsupported_scalar()) } } else { - pyerr_only_triangle_mesh() + Err(pyerr_only_triangle_mesh()) } } +/// Laplacian smoothing of mesh vertices with feature weights +/// +/// The smoothing is performed inplace and modifies the vertices of the given mesh. 
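As a rough illustration of how the rewritten binding is meant to be called from Python, continuing the sketch given after the `barnacle_decimation` stub above (so `mesh` and `connectivity` are assumed to exist; the weight and iteration values are placeholders, and a float32 mesh is assumed):

    import numpy as np
    import pysplashsurf

    # One weight per vertex controlling the local amount of smoothing; the dtype
    # must match the mesh precision (float32 here).
    num_vertices = mesh.vertices.shape[0]  # `vertices` view introduced later in this series
    weights = np.ones(num_vertices, dtype=np.float32)

    pysplashsurf.laplacian_smoothing_parallel(
        mesh,
        connectivity,
        iterations=25,
        beta=1.0,  # beta in [0, 1]; beta=1 places each vertex at the mean of its neighbours
        weights=weights,
    )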
+#[gen_stub_pyfunction] #[pyfunction] -#[pyo3(name = "par_laplacian_smoothing_inplace_f64")] -#[pyo3(signature = (mesh, vertex_connectivity, iterations, beta, weights))] -pub fn par_laplacian_smoothing_inplace_py_f64<'py>( - py: Python, - mesh: PyObject, - vertex_connectivity: Vec>, // ToDo: only take reference to data here +#[pyo3(name = "laplacian_smoothing_parallel")] +#[pyo3(signature = (mesh, vertex_connectivity, *, iterations, beta = 1.0, weights))] +pub fn laplacian_smoothing_parallel<'py>( + #[gen_stub(override_type(type_repr="typing.Union[TriMesh3d, MeshWithData]", imports=()))] + mesh: &Bound<'py, PyAny>, + vertex_connectivity: &Bound<'py, PyVertexVertexConnectivity>, iterations: usize, beta: f64, - weights: Vec, // ToDo: Same here + weights: &Bound<'py, PyUntypedArray>, ) -> PyResult<()> { - if mesh.downcast_bound::(py).is_ok() { - let mesh = mesh.downcast_bound::(py).unwrap(); - splashsurf_lib::postprocessing::par_laplacian_smoothing_inplace( - &mut mesh.borrow_mut().inner, - &vertex_connectivity, - iterations, - beta, - &weights, - ); - Ok(()) - } else if mesh.downcast_bound::(py).is_ok() { - let mesh = mesh.downcast_bound::(py).unwrap(); - splashsurf_lib::postprocessing::par_laplacian_smoothing_inplace( - &mut mesh.borrow_mut().inner.mesh, - &vertex_connectivity, - iterations, - beta, - &weights, - ); - Ok(()) + if let Ok(mesh) = mesh.downcast::() { + let mut mesh = mesh.borrow_mut(); + if let Some(mesh) = mesh.as_f32_mut() { + let weights = weights.downcast::>()?.try_readonly()?; + splashsurf_lib::postprocessing::par_laplacian_smoothing_inplace( + mesh, + &vertex_connectivity.borrow().connectivity, + iterations, + beta as f32, + weights.as_slice()?, + ); + } else if let Some(mesh) = mesh.as_f64_mut() { + let weights = weights.downcast::>()?.try_readonly()?; + splashsurf_lib::postprocessing::par_laplacian_smoothing_inplace( + mesh, + &vertex_connectivity.borrow().connectivity, + iterations, + beta, + weights.as_slice()?, + ); + } else { + return Err(pyerr_unsupported_scalar()); + } + } else if let Ok(mesh) = mesh.downcast::() + && let mut mesh = mesh.borrow_mut() + && mesh.mesh_cell_type() == MeshType::Tri3d + { + if let Some(mesh) = mesh.as_tri_f32_mut() { + let weights = weights.downcast::>()?.try_readonly()?; + splashsurf_lib::postprocessing::par_laplacian_smoothing_inplace( + &mut mesh.mesh, + &vertex_connectivity.borrow().connectivity, + iterations, + beta as f32, + weights.as_slice()?, + ); + } else if let Some(mesh) = mesh.as_tri_f64_mut() { + let weights = weights.downcast::>()?.try_readonly()?; + splashsurf_lib::postprocessing::par_laplacian_smoothing_inplace( + &mut mesh.mesh, + &vertex_connectivity.borrow().connectivity, + iterations, + beta, + weights.as_slice()?, + ); + } else { + return Err(pyerr_unsupported_scalar()); + } } else { - Err(PyErr::new::("Invalid mesh type")) + return Err(pyerr_only_triangle_mesh()); } + + Ok(()) } +/// Laplacian smoothing of a normal field +/// +/// The smoothing is performed inplace and modifies the given normal array. 
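A corresponding sketch for the normal-field variant; `connectivity` is assumed to belong to the mesh whose normals are smoothed, and `normals` is assumed to be an Nx3, C-contiguous float32 or float64 array (matching the mesh precision) computed elsewhere:

    import pysplashsurf

    pysplashsurf.laplacian_smoothing_normals_parallel(
        normals,
        connectivity,
        iterations=10,
    )
    # `normals` is modified in place: each normal is replaced by the re-normalized
    # average of its neighbours' normals, repeated for the given number of iterations.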
+#[gen_stub_pyfunction] #[pyfunction] -#[pyo3(name = "par_laplacian_smoothing_inplace_f32")] -#[pyo3(signature = (mesh, vertex_connectivity, iterations, beta, weights))] -pub fn par_laplacian_smoothing_inplace_py_f32<'py>( - py: Python, - mesh: PyObject, - vertex_connectivity: Vec>, // ToDo: only take reference to data here +#[pyo3(name = "laplacian_smoothing_normals_parallel")] +#[pyo3(signature = (normals, vertex_connectivity, *, iterations))] +pub fn laplacian_smoothing_normals_parallel<'py>( + normals: &Bound<'py, PyUntypedArray>, + vertex_connectivity: &Bound<'py, PyVertexVertexConnectivity>, iterations: usize, - beta: f32, - weights: Vec, // ToDo: Same here ) -> PyResult<()> { - if mesh.downcast_bound::(py).is_ok() { - let mesh = mesh.downcast_bound::(py).unwrap(); - splashsurf_lib::postprocessing::par_laplacian_smoothing_inplace( - &mut mesh.borrow_mut().inner, - &vertex_connectivity, + // TODO: Avoid copy to temporary Vec + let py = normals.py(); + let element_type = normals.dtype(); + if element_type.is_equiv_to(&np::dtype::(py)) { + let mut normals = normals.downcast::>()?.try_readwrite()?; + let normals_vec3: &mut [Vector3] = bytemuck::cast_slice_mut(normals.as_slice_mut()?); + let mut normals_vec3_copy = normals_vec3.to_vec(); + splashsurf_lib::postprocessing::par_laplacian_smoothing_normals_inplace( + &mut normals_vec3_copy, + &vertex_connectivity.borrow().connectivity, iterations, - beta, - &weights, ); - Ok(()) - } else if mesh.downcast_bound::(py).is_ok() { - let mesh = mesh.downcast_bound::(py).unwrap(); - splashsurf_lib::postprocessing::par_laplacian_smoothing_inplace( - &mut mesh.borrow_mut().inner.mesh, - &vertex_connectivity, + normals_vec3.copy_from_slice(&normals_vec3_copy); + } else if element_type.is_equiv_to(&np::dtype::(py)) { + let mut normals = normals.downcast::>()?.try_readwrite()?; + let normals_vec3: &mut [Vector3] = bytemuck::cast_slice_mut(normals.as_slice_mut()?); + let mut normals_vec3_copy = normals_vec3.to_vec(); + splashsurf_lib::postprocessing::par_laplacian_smoothing_normals_inplace( + &mut normals_vec3_copy, + &vertex_connectivity.borrow().connectivity, iterations, - beta, - &weights, ); - Ok(()) + normals_vec3.copy_from_slice(&normals_vec3_copy); } else { - Err(PyErr::new::("Invalid mesh type")) + return Err(pyerr_unsupported_scalar()); } -} - -#[pyfunction] -#[pyo3(name = "par_laplacian_smoothing_normals_inplace_f32")] -#[pyo3(signature = (normals, vertex_connectivity, iterations))] -pub fn par_laplacian_smoothing_normals_inplace_py_f32<'py>( - normals: &Bound<'py, PyArray2>, - vertex_connectivity: Vec>, - iterations: usize, -) { - let mut normals: ArrayViewMut2 = unsafe { normals.as_array_mut() }; - let mut normals_vec: Vec> = - bytemuck::cast_vec(normals.as_slice().unwrap().to_vec()); // Copies data temporarily into a vec - splashsurf_lib::postprocessing::par_laplacian_smoothing_normals_inplace( - &mut normals_vec, - &vertex_connectivity, - iterations, - ); - normals - .as_slice_mut() - .unwrap() - .copy_from_slice(&bytemuck::cast_slice(normals_vec.as_slice())); // Copy back to numpy array -} -#[pyfunction] -#[pyo3(name = "par_laplacian_smoothing_normals_inplace_f64")] -#[pyo3(signature = (normals, vertex_connectivity, iterations))] -pub fn par_laplacian_smoothing_normals_inplace_py_f64<'py>( - normals: &Bound<'py, PyArray2>, - vertex_connectivity: Vec>, - iterations: usize, -) { - let mut normals: ArrayViewMut2 = unsafe { normals.as_array_mut() }; - let mut normals_vec: Vec> = - bytemuck::cast_vec(normals.as_slice().unwrap().to_vec()); // 
Copies data temporarily into a vec - splashsurf_lib::postprocessing::par_laplacian_smoothing_normals_inplace( - &mut normals_vec, - &vertex_connectivity, - iterations, - ); - normals - .as_slice_mut() - .unwrap() - .copy_from_slice(&bytemuck::cast_slice(normals_vec.as_slice())); // Copy back to numpy array + Ok(()) } -/// Decimation to prevent "barnacles" when applying weighted Laplacian smoothing +/// Performs specialized decimation on the given mesh to prevent "barnacles" when applying weighted Laplacian smoothing +/// +/// The decimation is performed inplace and modifies the given mesh. +/// Returns the vertex-vertex connectivity of the decimated mesh which can be used for other +/// post-processing steps. #[gen_stub_pyfunction] #[pyfunction] #[pyo3(name = "barnacle_decimation")] @@ -227,42 +226,57 @@ pub fn barnacle_decimation<'py>( #[gen_stub(override_type(type_repr="typing.Union[TriMesh3d, MeshWithData]", imports=()))] mesh: Bound<'py, PyAny>, keep_vertices: bool, -) -> PyResult>> { +) -> PyResult { use splashsurf_lib::postprocessing::decimation; if let Ok(mesh) = mesh.downcast::() { let mut mesh = mesh.borrow_mut(); if let Some(mesh) = mesh.as_f32_mut() { - Ok(decimation(mesh, keep_vertices)) + Ok(PyVertexVertexConnectivity::new(decimation( + mesh, + keep_vertices, + ))) } else if let Some(mesh) = mesh.as_f64_mut() { - Ok(decimation(mesh, keep_vertices)) + Ok(PyVertexVertexConnectivity::new(decimation( + mesh, + keep_vertices, + ))) } else { - pyerr_unsupported_scalar() + Err(pyerr_unsupported_scalar()) } } else if let Ok(mesh) = mesh.downcast::() { let mut mesh = mesh.borrow_mut(); if let Some(mesh) = mesh.as_tri_f32_mut() { let mesh = &mut mesh.mesh; - Ok(decimation(mesh, keep_vertices)) + Ok(PyVertexVertexConnectivity::new(decimation( + mesh, + keep_vertices, + ))) } else if let Some(mesh) = mesh.as_tri_f64_mut() { let mesh = &mut mesh.mesh; - Ok(decimation(mesh, keep_vertices)) + Ok(PyVertexVertexConnectivity::new(decimation( + mesh, + keep_vertices, + ))) } else { - pyerr_unsupported_scalar() + Err(pyerr_unsupported_scalar()) } } else { - pyerr_only_triangle_mesh() + Err(pyerr_only_triangle_mesh()) } } -/// Mesh simplification designed for marching cubes surfaces meshes inspired by the "Compact Contouring"/"Mesh displacement" approach by Doug Moore and Joe Warren +/// Performs simplification on the given mesh designed for marching cubes reconstructions inspired by the "Compact Contouring"/"Mesh displacement" approach by Doug Moore and Joe Warren +/// +/// The simplification is performed inplace and modifies the given mesh. 
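For completeness, a sketch of the remaining post-processing calls with the defaults from the stubs spelled out; `mesh` is the reconstructed triangle mesh and `grid` the `UniformGrid` of the reconstruction (how both are obtained from a `SurfaceReconstruction` is assumed, not shown in this patch):

    import pysplashsurf

    # Simplify the marching cubes mesh in place.
    pysplashsurf.marching_cubes_cleanup(
        mesh,
        grid,
        max_rel_snap_dist=None,
        max_iter=5,
        keep_vertices=False,
    )

    # Optionally merge suitable triangle pairs into quads (angles in degrees); this
    # returns a new mesh and leaves `mesh` untouched.
    quad_mesh = pysplashsurf.convert_tris_to_quads(mesh, max_interior_angle=135.0)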
#[gen_stub_pyfunction] #[pyfunction] #[pyo3(name = "marching_cubes_cleanup")] #[pyo3(signature = (mesh, grid, *, max_rel_snap_dist = None, max_iter = 5, keep_vertices = false))] +#[gen_stub(override_return_type(type_repr="typing.Union[TriMesh3d, MeshWithData]", imports=()))] pub fn marching_cubes_cleanup<'py>( #[gen_stub(override_type(type_repr="typing.Union[TriMesh3d, MeshWithData]", imports=()))] - mesh: Bound<'py, PyAny>, + mesh: &Bound<'py, PyAny>, grid: &PyUniformGrid, max_rel_snap_dist: Option, max_iter: usize, @@ -278,7 +292,7 @@ pub fn marching_cubes_cleanup<'py>( } else if let (Some(grid), Some(mesh)) = (grid.as_f64(), mesh.as_f64_mut()) { cleanup(mesh, grid, max_rel_snap_dist, max_iter, keep_vertices); } else { - return pyerr_mesh_grid_scalar_mismatch(); + return Err(pyerr_mesh_grid_scalar_mismatch()); } } else if let Ok(mesh) = mesh.downcast::() && let mut mesh = mesh.borrow_mut() @@ -291,10 +305,10 @@ pub fn marching_cubes_cleanup<'py>( let mesh = &mut mesh.mesh; cleanup(mesh, grid, max_rel_snap_dist, max_iter, keep_vertices); } else { - return pyerr_mesh_grid_scalar_mismatch(); + return Err(pyerr_mesh_grid_scalar_mismatch()); } } else { - return pyerr_only_triangle_mesh(); + return Err(pyerr_only_triangle_mesh()); } Ok(()) From 62f0953cd0520a0804ba006d1f2d513e5392cbe3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Thu, 28 Aug 2025 13:02:00 +0200 Subject: [PATCH 18/63] Use slices for normal smoothing --- pysplashsurf/src/post_processing.rs | 9 ++------- splashsurf_lib/src/postprocessing.rs | 28 ++++++++++++++++++++-------- 2 files changed, 22 insertions(+), 15 deletions(-) diff --git a/pysplashsurf/src/post_processing.rs b/pysplashsurf/src/post_processing.rs index 0a5ac6c..78039d1 100644 --- a/pysplashsurf/src/post_processing.rs +++ b/pysplashsurf/src/post_processing.rs @@ -182,29 +182,24 @@ pub fn laplacian_smoothing_normals_parallel<'py>( vertex_connectivity: &Bound<'py, PyVertexVertexConnectivity>, iterations: usize, ) -> PyResult<()> { - // TODO: Avoid copy to temporary Vec let py = normals.py(); let element_type = normals.dtype(); if element_type.is_equiv_to(&np::dtype::(py)) { let mut normals = normals.downcast::>()?.try_readwrite()?; let normals_vec3: &mut [Vector3] = bytemuck::cast_slice_mut(normals.as_slice_mut()?); - let mut normals_vec3_copy = normals_vec3.to_vec(); splashsurf_lib::postprocessing::par_laplacian_smoothing_normals_inplace( - &mut normals_vec3_copy, + normals_vec3, &vertex_connectivity.borrow().connectivity, iterations, ); - normals_vec3.copy_from_slice(&normals_vec3_copy); } else if element_type.is_equiv_to(&np::dtype::(py)) { let mut normals = normals.downcast::>()?.try_readwrite()?; let normals_vec3: &mut [Vector3] = bytemuck::cast_slice_mut(normals.as_slice_mut()?); - let mut normals_vec3_copy = normals_vec3.to_vec(); splashsurf_lib::postprocessing::par_laplacian_smoothing_normals_inplace( - &mut normals_vec3_copy, + normals_vec3, &vertex_connectivity.borrow().connectivity, iterations, ); - normals_vec3.copy_from_slice(&normals_vec3_copy); } else { return Err(pyerr_unsupported_scalar()); } diff --git a/splashsurf_lib/src/postprocessing.rs b/splashsurf_lib/src/postprocessing.rs index e86a162..a103fe7 100644 --- a/splashsurf_lib/src/postprocessing.rs +++ b/splashsurf_lib/src/postprocessing.rs @@ -53,31 +53,43 @@ pub fn par_laplacian_smoothing_inplace( /// Laplacian smoothing of a normal field pub fn par_laplacian_smoothing_normals_inplace( - normals: &mut Vec>, + normals: &mut [Vector3], vertex_connectivity: &[Vec], 
iterations: usize, ) { profile!("par_laplacian_smoothing_normals_inplace"); - let mut normal_buffer = normals.clone(); + let mut normals_buffer_vec = vec![Vector3::zeros(); normals.len()]; + let mut normals_old = normals_buffer_vec.as_mut_slice(); + let mut normals_smoothed = normals; + let mut buffer_contains_output = false; for _ in 0..iterations { profile!("smoothing iteration"); - std::mem::swap(&mut normal_buffer, normals); + std::mem::swap(&mut normals_old, &mut normals_smoothed); + buffer_contains_output = !buffer_contains_output; - normals + // After the first swap, normals_smoothed points to the temporary buffer which will be used + // to store the smoothed normals below. This alternates every iteration. + + normals_smoothed .par_iter_mut() - .enumerate() - .for_each(|(i, normal_i)| { + .zip(vertex_connectivity.par_iter()) + .for_each(|(normal_i, connectivity_i)| { *normal_i = Vector3::zeros(); - for j in vertex_connectivity[i].iter().copied() { - let normal_j = normal_buffer[j]; + for j in connectivity_i.iter().copied() { + let normal_j = normals_old[j]; *normal_i += normal_j; } normal_i.normalize_mut(); }); } + + if buffer_contains_output { + // normals_smoothed points to temporary buffer, copy back to original slice + normals_old.copy_from_slice(normals_smoothed); + } } /// Mesh simplification designed for marching cubes surfaces meshes inspired by the "Compact Contouring"/"Mesh displacement" approach by Doug Moore and Joe Warren From 0a5b130711cc7cd2bae511bca40f12d3f6d05646 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Thu, 28 Aug 2025 13:10:18 +0200 Subject: [PATCH 19/63] Py: Refactor --- pysplashsurf/src/marching_cubes.rs | 14 ++++--------- pysplashsurf/src/post_processing.rs | 16 +-------------- pysplashsurf/src/reconstruction.rs | 9 +++----- pysplashsurf/src/utils.rs | 32 ++++++++++++++--------------- 4 files changed, 24 insertions(+), 47 deletions(-) diff --git a/pysplashsurf/src/marching_cubes.rs b/pysplashsurf/src/marching_cubes.rs index 408edeb..cb06752 100644 --- a/pysplashsurf/src/marching_cubes.rs +++ b/pysplashsurf/src/marching_cubes.rs @@ -1,6 +1,6 @@ use crate::mesh::{MeshType, PyMeshWithData, PyTriMesh3d}; use crate::uniform_grid::PyUniformGrid; -use pyo3::exceptions::PyTypeError; +use crate::utils::*; use pyo3::prelude::*; use pyo3_stub_gen::derive::*; @@ -38,9 +38,7 @@ pub fn check_mesh_consistency<'py>( ) .err()) } else { - Err(PyTypeError::new_err( - "invalid combination of grid and mesh scalar data types", - )) + Err(pyerr_mesh_grid_scalar_mismatch()) } } else if let Ok(mesh) = mesh.downcast::() && let mesh = mesh.borrow() @@ -65,13 +63,9 @@ pub fn check_mesh_consistency<'py>( ) .err()) } else { - Err(PyTypeError::new_err( - "invalid combination of grid and mesh scalar data types", - )) + Err(pyerr_mesh_grid_scalar_mismatch()) } } else { - Err(PyTypeError::new_err( - "unsupported mesh type for consistency check, only triangle meshes are supported", - )) + Err(pyerr_only_triangle_mesh()) } } diff --git a/pysplashsurf/src/post_processing.rs b/pysplashsurf/src/post_processing.rs index 78039d1..f42599e 100644 --- a/pysplashsurf/src/post_processing.rs +++ b/pysplashsurf/src/post_processing.rs @@ -2,30 +2,16 @@ use numpy as np; use numpy::prelude::*; use numpy::{PyArray1, PyArray2, PyArrayMethods, PyUntypedArray}; use pyo3::IntoPyObjectExt; -use pyo3::exceptions::PyTypeError; use pyo3::prelude::*; use pyo3_stub_gen::derive::gen_stub_pyfunction; use splashsurf_lib::nalgebra::Vector3; +use crate::utils::*; use crate::mesh::{ MeshType, 
PyMeshWithData, PyMixedTriQuadMesh3d, PyTriMesh3d, PyVertexVertexConnectivity, }; use crate::uniform_grid::PyUniformGrid; -fn pyerr_unsupported_scalar() -> PyErr { - PyTypeError::new_err("unsupported mesh scalar data type, only f32 and f64 are supported") -} - -fn pyerr_mesh_grid_scalar_mismatch() -> PyErr { - PyTypeError::new_err( - "unsupported mesh and grid scalar data type combination, both have to be either f32 or f64", - ) -} - -fn pyerr_only_triangle_mesh() -> PyErr { - PyTypeError::new_err("unsupported mesh type, only triangle meshes are supported") -} - /// Converts triangles to quads by merging triangles sharing an edge if they fulfill the given criteria /// /// This operation creates a new mesh and does not modify the input mesh. diff --git a/pysplashsurf/src/reconstruction.rs b/pysplashsurf/src/reconstruction.rs index 075d344..b6bf7bd 100644 --- a/pysplashsurf/src/reconstruction.rs +++ b/pysplashsurf/src/reconstruction.rs @@ -161,7 +161,7 @@ pub fn reconstruct_surface<'py>( let element_type = particles.dtype(); if element_type.is_equiv_to(&np::dtype::(py)) { - let particles = particles.downcast::>()?.readonly(); + let particles = particles.downcast::>()?.try_readonly()?; let particle_positions: &[Vector3] = bytemuck::cast_slice(particles.as_slice()?); let reconstruction = splashsurf_lib::reconstruct_surface::( particle_positions, @@ -172,16 +172,13 @@ pub fn reconstruct_surface<'py>( .map_err(|e| anyhow!(e))?; PySurfaceReconstruction::try_from_generic(reconstruction) } else if element_type.is_equiv_to(&np::dtype::(py)) { - let particles = particles.downcast::>()?.readonly(); + let particles = particles.downcast::>()?.try_readonly()?; let particle_positions: &[Vector3] = bytemuck::cast_slice(particles.as_slice()?); let reconstruction = splashsurf_lib::reconstruct_surface::(particle_positions, ¶meters) .map_err(|e| anyhow!(e))?; PySurfaceReconstruction::try_from_generic(reconstruction) } else { - Err(PyTypeError::new_err(format!( - "unsupported scalar type {} for reconstruction, only float32 and float64 are supported", - element_type - ))) + Err(pyerr_unsupported_scalar()) } } diff --git a/pysplashsurf/src/utils.rs b/pysplashsurf/src/utils.rs index 94a93d5..e6cd4aa 100644 --- a/pysplashsurf/src/utils.rs +++ b/pysplashsurf/src/utils.rs @@ -1,3 +1,17 @@ +pub(crate) fn pyerr_unsupported_scalar() -> PyErr { + PyTypeError::new_err("unsupported mesh scalar data type, only f32 and f64 are supported") +} + +pub(crate) fn pyerr_mesh_grid_scalar_mismatch() -> PyErr { + PyTypeError::new_err( + "unsupported mesh and grid scalar data type combination, both have to be either f32 or f64", + ) +} + +pub(crate) fn pyerr_only_triangle_mesh() -> PyErr { + PyTypeError::new_err("unsupported mesh type, only triangle meshes are supported") +} + macro_rules! impl_from_mesh { ($pyclass:ident, $mesh:ty => $target_enum:path) => { impl From<$mesh> for $pyclass { @@ -10,6 +24,8 @@ macro_rules! 
impl_from_mesh { }; } +use pyo3::exceptions::PyTypeError; +use pyo3::PyErr; pub(crate) use impl_from_mesh; /// Transmutes from a generic type to a concrete type if they are identical, takes the value and converts it into the target type @@ -27,19 +43,3 @@ pub fn transmute_take_into< None } } - -pub fn transmute_replace_into< - GenericSrc: 'static, - ConcreteSrc: Into + 'static, - Target, ->( - value: &mut GenericSrc, - replace: ConcreteSrc, -) -> Option { - if std::any::TypeId::of::() == std::any::TypeId::of::() { - let value_ref = unsafe { std::mem::transmute::<&mut GenericSrc, &mut ConcreteSrc>(value) }; - Some(std::mem::replace(value_ref, replace).into()) - } else { - None - } -} From b73fb852e3b000591827db6a99c17edda57f16e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Thu, 28 Aug 2025 15:13:19 +0200 Subject: [PATCH 20/63] WIP: Signed Index type --- pysplashsurf/src/pipeline.rs | 5 +- pysplashsurf/src/post_processing.rs | 2 +- pysplashsurf/src/reconstruction.rs | 20 +++---- pysplashsurf/src/uniform_grid.rs | 14 ++--- pysplashsurf/src/utils.rs | 6 +- .../examples/minimal_sph_levelset.rs | 17 ++++-- splashsurf_lib/src/dense_subdomains.rs | 55 +++++++++++++------ splashsurf_lib/src/density_map.rs | 6 ++ splashsurf_lib/src/traits.rs | 5 +- splashsurf_lib/src/uniform_grid.rs | 3 + 10 files changed, 88 insertions(+), 45 deletions(-) diff --git a/pysplashsurf/src/pipeline.rs b/pysplashsurf/src/pipeline.rs index 6a5739e..a41fdbf 100644 --- a/pysplashsurf/src/pipeline.rs +++ b/pysplashsurf/src/pipeline.rs @@ -1,4 +1,5 @@ use crate::mesh::PyMeshWithData; +use crate::utils::IndexT; use numpy as np; use numpy::{ Element, PyArray1, PyArray2, PyArrayDescr, PyArrayDescrMethods, PyArrayMethods, @@ -155,7 +156,7 @@ pub fn reconstruction_pipeline<'py>( if element_type.is_equiv_to(&np::dtype::(py)) { let particles = particles.downcast::>()?; - let reconstruction = reconstruction_pipeline_generic_impl::( + let reconstruction = reconstruction_pipeline_generic_impl::( particles, attributes_to_interpolate, ¶meters @@ -166,7 +167,7 @@ pub fn reconstruction_pipeline<'py>( reconstruction_to_pymesh(reconstruction) } else if element_type.is_equiv_to(&np::dtype::(py)) { let particles = particles.downcast::>()?; - let reconstruction = reconstruction_pipeline_generic_impl::( + let reconstruction = reconstruction_pipeline_generic_impl::( particles, attributes_to_interpolate, ¶meters, diff --git a/pysplashsurf/src/post_processing.rs b/pysplashsurf/src/post_processing.rs index f42599e..e2b2adc 100644 --- a/pysplashsurf/src/post_processing.rs +++ b/pysplashsurf/src/post_processing.rs @@ -6,11 +6,11 @@ use pyo3::prelude::*; use pyo3_stub_gen::derive::gen_stub_pyfunction; use splashsurf_lib::nalgebra::Vector3; -use crate::utils::*; use crate::mesh::{ MeshType, PyMeshWithData, PyMixedTriQuadMesh3d, PyTriMesh3d, PyVertexVertexConnectivity, }; use crate::uniform_grid::PyUniformGrid; +use crate::utils::*; /// Converts triangles to quads by merging triangles sharing an edge if they fulfill the given criteria /// diff --git a/pysplashsurf/src/reconstruction.rs b/pysplashsurf/src/reconstruction.rs index b6bf7bd..7b262d8 100644 --- a/pysplashsurf/src/reconstruction.rs +++ b/pysplashsurf/src/reconstruction.rs @@ -14,8 +14,8 @@ use splashsurf_lib::{ }; enum PySurfaceReconstructionData { - F32(SurfaceReconstruction), - F64(SurfaceReconstruction), + F32(SurfaceReconstruction), + F64(SurfaceReconstruction), } /// Struct containing results of the surface reconstruction including the mesh, grid parameters and 
optional particle data @@ -26,18 +26,18 @@ pub struct PySurfaceReconstruction { inner: PySurfaceReconstructionData, } -impl_from_mesh!(PySurfaceReconstruction, SurfaceReconstruction => PySurfaceReconstructionData::F32); -impl_from_mesh!(PySurfaceReconstruction, SurfaceReconstruction => PySurfaceReconstructionData::F64); +impl_from_mesh!(PySurfaceReconstruction, SurfaceReconstruction => PySurfaceReconstructionData::F32); +impl_from_mesh!(PySurfaceReconstruction, SurfaceReconstruction => PySurfaceReconstructionData::F64); impl PySurfaceReconstruction { pub fn try_from_generic( - mut reconstruction: SurfaceReconstruction, + mut reconstruction: SurfaceReconstruction, ) -> PyResult { - transmute_take_into::<_, SurfaceReconstruction, _>(&mut reconstruction) + transmute_take_into::<_, SurfaceReconstruction, _>(&mut reconstruction) .or_else(|| { - transmute_take_into::<_, SurfaceReconstruction, _>(&mut reconstruction) + transmute_take_into::<_, SurfaceReconstruction, _>(&mut reconstruction) }) - .ok_or_else(|| PyTypeError::new_err("unsupported type of reconstruction, only u64 for Index and f32 and f64 for Real type are supported")) + .ok_or_else(|| PyTypeError::new_err("unsupported type of reconstruction, only i64 for Index and f32 and f64 for Real type are supported")) } } @@ -163,7 +163,7 @@ pub fn reconstruct_surface<'py>( if element_type.is_equiv_to(&np::dtype::(py)) { let particles = particles.downcast::>()?.try_readonly()?; let particle_positions: &[Vector3] = bytemuck::cast_slice(particles.as_slice()?); - let reconstruction = splashsurf_lib::reconstruct_surface::( + let reconstruction = splashsurf_lib::reconstruct_surface::( particle_positions, ¶meters .try_convert() @@ -175,7 +175,7 @@ pub fn reconstruct_surface<'py>( let particles = particles.downcast::>()?.try_readonly()?; let particle_positions: &[Vector3] = bytemuck::cast_slice(particles.as_slice()?); let reconstruction = - splashsurf_lib::reconstruct_surface::(particle_positions, ¶meters) + splashsurf_lib::reconstruct_surface::(particle_positions, ¶meters) .map_err(|e| anyhow!(e))?; PySurfaceReconstruction::try_from_generic(reconstruction) } else { diff --git a/pysplashsurf/src/uniform_grid.rs b/pysplashsurf/src/uniform_grid.rs index 597fd9f..e87bb11 100644 --- a/pysplashsurf/src/uniform_grid.rs +++ b/pysplashsurf/src/uniform_grid.rs @@ -1,11 +1,11 @@ -use crate::utils::impl_from_mesh; +use crate::utils::*; use pyo3::prelude::*; use pyo3_stub_gen::derive::*; use splashsurf_lib::UniformGrid; enum PyUniformGridData { - F32(UniformGrid), - F64(UniformGrid), + F32(UniformGrid), + F64(UniformGrid), } /// Struct containing the parameters of the uniform grid used for the surface reconstruction @@ -16,18 +16,18 @@ pub struct PyUniformGrid { inner: PyUniformGridData, } -impl_from_mesh!(PyUniformGrid, UniformGrid => PyUniformGridData::F32); -impl_from_mesh!(PyUniformGrid, UniformGrid => PyUniformGridData::F64); +impl_from_mesh!(PyUniformGrid, UniformGrid => PyUniformGridData::F32); +impl_from_mesh!(PyUniformGrid, UniformGrid => PyUniformGridData::F64); impl PyUniformGrid { - pub(crate) fn as_f32(&self) -> Option<&UniformGrid> { + pub(crate) fn as_f32(&self) -> Option<&UniformGrid> { match &self.inner { PyUniformGridData::F32(grid) => Some(grid), _ => None, } } - pub(crate) fn as_f64(&self) -> Option<&UniformGrid> { + pub(crate) fn as_f64(&self) -> Option<&UniformGrid> { match &self.inner { PyUniformGridData::F64(grid) => Some(grid), _ => None, diff --git a/pysplashsurf/src/utils.rs b/pysplashsurf/src/utils.rs index e6cd4aa..f1ef5c6 100644 --- 
a/pysplashsurf/src/utils.rs +++ b/pysplashsurf/src/utils.rs @@ -1,3 +1,5 @@ +pub(crate) type IndexT = i64; + pub(crate) fn pyerr_unsupported_scalar() -> PyErr { PyTypeError::new_err("unsupported mesh scalar data type, only f32 and f64 are supported") } @@ -24,9 +26,9 @@ macro_rules! impl_from_mesh { }; } -use pyo3::exceptions::PyTypeError; -use pyo3::PyErr; pub(crate) use impl_from_mesh; +use pyo3::PyErr; +use pyo3::exceptions::PyTypeError; /// Transmutes from a generic type to a concrete type if they are identical, takes the value and converts it into the target type pub fn transmute_take_into< diff --git a/splashsurf_lib/examples/minimal_sph_levelset.rs b/splashsurf_lib/examples/minimal_sph_levelset.rs index 13a767c..e6f1902 100644 --- a/splashsurf_lib/examples/minimal_sph_levelset.rs +++ b/splashsurf_lib/examples/minimal_sph_levelset.rs @@ -22,7 +22,7 @@ use thread_local::ThreadLocal; // The real type used for the reconstruction, can be changed to f64 if higher precision is needed type R = f32; // The index type used for the grid -type I = usize; +type I = i64; #[derive(Clone, Debug, clap::Parser)] struct CommandlineArgs { @@ -172,7 +172,7 @@ impl MarchingCubesLevelSet for MarchingCubesGrid { ]; if let Some(index) = self.grid.get_point(ijk) { - let index = self.grid.flatten_point_index(&index); + let index: usize = self.grid.flatten_point_index(&index).try_into().unwrap(); if self.values[index] < self.threshold { LevelSetSign::Outside } else { @@ -196,6 +196,7 @@ impl MarchingCubesLevelSet for MarchingCubesGrid { .get_point(ijk) .map(|p| self.grid.flatten_point_index(&p)) { + let index: usize = index.try_into().unwrap(); self.values[index] - self.threshold } else { f32::MIN // or some other value indicating outside @@ -278,8 +279,9 @@ fn reconstruct() -> Result<(), anyhow::Error> { densities }; - let mut function_values = vec![0.0; grid.points_per_dim().iter().product()]; - let mut function_values_vol_frac = vec![0.0; grid.points_per_dim().iter().product()]; + let n_grid_points = grid.points_per_dim().iter().product::() as usize; + let mut function_values = vec![0.0; n_grid_points]; + let mut function_values_vol_frac = vec![0.0; n_grid_points]; { profile!("evaluate_levelset_function"); @@ -326,7 +328,10 @@ fn reconstruct() -> Result<(), anyhow::Error> { let r = dx.norm(); if r <= kernel_evaluation_radius { - let index = grid.flatten_point_index(&point_index); + let index: usize = grid + .flatten_point_index(&point_index) + .try_into() + .unwrap(); //let vol = particle_rest_volume; let vol = @@ -370,7 +375,7 @@ fn reconstruct() -> Result<(), anyhow::Error> { let [ni, nj, nk] = grid.points_per_dim().clone(); - let mut points_flat = Vec::with_capacity(3 * ni * nj * nk); + let mut points_flat = Vec::with_capacity(3 * (ni * nj * nk) as usize); for i in 0..ni { for j in 0..nj { for k in 0..nk { diff --git a/splashsurf_lib/src/dense_subdomains.rs b/splashsurf_lib/src/dense_subdomains.rs index 5ad587f..cc57c87 100644 --- a/splashsurf_lib/src/dense_subdomains.rs +++ b/splashsurf_lib/src/dense_subdomains.rs @@ -19,6 +19,7 @@ use crate::neighborhood_search::{ FlatNeighborhoodList, neighborhood_search_spatial_hashing_flat_filtered, neighborhood_search_spatial_hashing_parallel, }; +use crate::topology::Direction; use crate::uniform_grid::{EdgeIndex, GridConstructionError, UniformCartesianCubeGrid3d}; use crate::{ Aabb3d, MapType, Parameters, RealConvert, SpatialDecomposition, SurfaceReconstruction, new_map, @@ -28,7 +29,7 @@ use crate::{Index, Real}; // TODO: Implement single-threaded processing 
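Since the grid index type becomes signed in this patch, a flattened point index can no longer be used directly as an array offset without a check, which is what the `try_into().unwrap()` conversions in the example above do. A small illustrative sketch; the flattening order shown here is only an example, not necessarily the library's layout:

    import numpy as np

    def checked_point_offset(i: int, j: int, k: int, points_per_dim: tuple[int, int, int]) -> int:
        ni, nj, nk = points_per_dim
        # With signed indices, out-of-grid points can produce negative values,
        # so validate before indexing into a dense value array.
        if not (0 <= i < ni and 0 <= j < nj and 0 <= k < nk):
            raise IndexError(f"point ({i}, {j}, {k}) lies outside the grid")
        return (i * nj + j) * nk + k

    values = np.zeros(64 * 64 * 64, dtype=np.float32)
    values[checked_point_offset(3, 5, 7, (64, 64, 64))] = 1.0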
-type GlobalIndex = u64; +type GlobalIndex = i64; pub(crate) struct ParametersSubdomainGrid { /// SPH particle radius (in simulation units) @@ -842,6 +843,8 @@ pub(crate) fn reconstruction( .copied() .zip(subdomain_particle_densities.iter().copied()) { + // Note: this loop assumes that enclosing_cell can return negative indices for ghost particles + // Get grid cell containing particle let particle_cell = mc_grid.enclosing_cell(&p_i); @@ -849,9 +852,9 @@ pub(crate) fn reconstruction( // We want to loop over the vertices of the enclosing cells plus all points in `cube_radius` distance from the cell let lower = [ - (particle_cell[0] - cube_radius).max(I::zero()), - (particle_cell[1] - cube_radius).max(I::zero()), - (particle_cell[2] - cube_radius).max(I::zero()), + particle_cell[0].saturating_sub(&cube_radius).max(I::zero()), + particle_cell[1].saturating_sub(&cube_radius).max(I::zero()), + particle_cell[2].saturating_sub(&cube_radius).max(I::zero()), ]; let upper = [ @@ -870,6 +873,8 @@ pub(crate) fn reconstruction( let point_ijk = [i, j, k]; let local_point = mc_grid .get_point(point_ijk) + // TODO: Can this fail if the ghost margin is too large such that upper + // falls outside of the subdomain grid? .expect("point has to be part of the subdomain grid"); //let point_coordinates = mc_grid.point_coordinates(&point); @@ -1095,6 +1100,8 @@ pub(crate) fn reconstruction( .copied() .zip(subdomain_particle_densities.iter().copied()) { + // Note: this loop assumes that enclosing_cell can return negative indices for ghost particles + // Get grid cell containing particle let particle_cell = mc_grid.enclosing_cell(&p_i); @@ -1102,9 +1109,9 @@ pub(crate) fn reconstruction( // We want to loop over the vertices of the enclosing cells plus all points in `cube_radius` distance from the cell let lower = [ - (particle_cell[0] - cube_radius).max(I::zero()), - (particle_cell[1] - cube_radius).max(I::zero()), - (particle_cell[2] - cube_radius).max(I::zero()), + particle_cell[0].saturating_sub(&cube_radius).max(I::zero()), + particle_cell[1].saturating_sub(&cube_radius).max(I::zero()), + particle_cell[2].saturating_sub(&cube_radius).max(I::zero()), ]; let upper = [ @@ -1570,7 +1577,27 @@ pub(crate) mod subdomain_classification { && is_in_ghost_margin_single_dim(z_step, 2) }; - // Loop over all 27 subdomains around and including the owning subdomain + let checked_apply_step = |index: I, step: i8| -> Option { + let direction = match step { + -1 => Some(Direction::Negative), + 0 => None, + 1 => Some(Direction::Positive), + _ => unsafe { std::hint::unreachable_unchecked() }, + }; + direction + .map(|d| d.checked_apply_step(index, I::one())) + .unwrap_or(Some(index)) + }; + + let checked_apply_step_ijk = + |ijk: [I; 3], x_step: i8, y_step: i8, z_step: i8| -> Option<[I; 3]> { + Some([ + checked_apply_step(ijk[0], x_step)?, + checked_apply_step(ijk[1], y_step)?, + checked_apply_step(ijk[2], z_step)?, + ]) + }; + for &i in &[-1, 0, 1] { for &j in &[-1, 0, 1] { for &k in &[-1, 0, 1] { @@ -1578,14 +1605,10 @@ pub(crate) mod subdomain_classification { let in_ghost_margin = is_in_ghost_margin(i, j, k); if in_ghost_margin { - let neighbor_subdomain_ijk = [ - subdomain_ijk[0] + I::from(i).unwrap(), - subdomain_ijk[1] + I::from(j).unwrap(), - subdomain_ijk[2] + I::from(k).unwrap(), - ]; - // The potential neighbor subdomain might not even be part of our computation domain - if let Some(cell) = subdomain_grid.get_cell(neighbor_subdomain_ijk) { - // If it is, it can be added as a subdomain of the particle + if let 
Some(neighbor_subdomain_ijk) = + checked_apply_step_ijk(subdomain_ijk, i, j, k) + && let Some(cell) = subdomain_grid.get_cell(neighbor_subdomain_ijk) + { subdomains.push(subdomain_grid.flatten_cell_index(&cell)); } } diff --git a/splashsurf_lib/src/density_map.rs b/splashsurf_lib/src/density_map.rs index 3f3ca08..41b9265 100644 --- a/splashsurf_lib/src/density_map.rs +++ b/splashsurf_lib/src/density_map.rs @@ -518,6 +518,12 @@ pub(crate) fn compute_kernel_evaluation_radius( compact_support_radius: R, cube_size: R, ) -> GridKernelExtents { + assert!( + compact_support_radius >= R::zero(), + "compact support radius must be non-negative" + ); + assert!(cube_size > R::zero(), "cube size must be positive"); + // The number of cells in each direction from a particle that can be affected by its compact support let half_supported_cells_real = (compact_support_radius / cube_size).ceil(); // Convert to index type for cell and point indexing diff --git a/splashsurf_lib/src/traits.rs b/splashsurf_lib/src/traits.rs index 5458091..28a0809 100644 --- a/splashsurf_lib/src/traits.rs +++ b/splashsurf_lib/src/traits.rs @@ -2,7 +2,8 @@ use bytemuck::Pod; use nalgebra::{RealField, SMatrix}; use num_integer::Integer; use num_traits::{ - Bounded, CheckedAdd, CheckedMul, CheckedSub, FromPrimitive, NumCast, SaturatingSub, ToPrimitive, + Bounded, CheckedAdd, CheckedMul, CheckedSub, FromPrimitive, NumCast, SaturatingSub, Signed, + ToPrimitive, }; use simba::scalar::SupersetOf; use std::fmt::{Debug, Display}; @@ -44,6 +45,7 @@ pub trait Index: Copy + Hash + Integer + + Signed + Bounded + CheckedAdd + CheckedSub @@ -153,6 +155,7 @@ impl Index for I where I: Copy + Hash + Integer + + Signed + Bounded + CheckedAdd + CheckedSub diff --git a/splashsurf_lib/src/uniform_grid.rs b/splashsurf_lib/src/uniform_grid.rs index 8781628..eb4d3be 100644 --- a/splashsurf_lib/src/uniform_grid.rs +++ b/splashsurf_lib/src/uniform_grid.rs @@ -435,6 +435,9 @@ impl UniformCartesianCubeGrid3d { } /// Returns the grid cell index triplet of the cell enclosing a point with the given coordinates in space + /// + /// Note that this function does not check if the point is part of the grid and thus might also + /// return negative indices or indices larger than the number of cells per dimension. 
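To illustrate why this note matters, a small sketch of the clamping that callers such as the subdomain reconstruction perform around the possibly out-of-range enclosing cell; the cell computation follows the `(coord - aabb_min) / cell_size` expression above, everything else (grid extent, coordinates, radius) is made up:

    import numpy as np

    def enclosing_cell(coord, aabb_min, cell_size):
        # Not clamped: may return negative indices or indices >= cells_per_dim
        # if `coord` lies outside the grid.
        return np.floor((np.asarray(coord) - np.asarray(aabb_min)) / cell_size).astype(np.int64)

    cells_per_dim = np.array([64, 64, 64])
    cell = enclosing_cell([-0.01, 0.2, 3.2], aabb_min=[0.0, 0.0, 0.0], cell_size=0.05)

    # Clamp the lower corner at zero (the saturating_sub(..).max(0) pattern) and the
    # upper corner at the grid extent before looping over neighbouring cells.
    cube_radius = 2
    lower = np.maximum(cell - cube_radius, 0)
    upper = np.minimum(cell + cube_radius + 1, cells_per_dim)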
#[inline(always)] pub fn enclosing_cell(&self, coord: &Vector3) -> [I; 3] { let normalized_coord = (coord - self.aabb.min()) / self.cell_size; From 03d7ed139e5215ee6f45636a30a301228a7751fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Thu, 28 Aug 2025 16:05:22 +0200 Subject: [PATCH 21/63] Py: Use views for mesh vertices & triangles --- pysplashsurf/pysplashsurf/pysplashsurf.pyi | 24 +++--- pysplashsurf/src/mesh.rs | 95 ++++++++++++---------- 2 files changed, 66 insertions(+), 53 deletions(-) diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index b88a7ff..d22cd7a 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -206,13 +206,14 @@ class MeshWithData: r""" Returns the type of the underlying mesh """ - def copy_mesh(self) -> typing.Union[TriMesh3d, MixedTriQuadMesh3d]: + @property + def vertices(self) -> numpy.typing.NDArray[typing.Any]: r""" - Returns a copy of the contained mesh without associated data and attributes + The `Nx3` array of vertex positions of the mesh """ - def copy_vertices(self) -> numpy.typing.NDArray[typing.Any]: + def copy_mesh(self) -> typing.Union[TriMesh3d, MixedTriQuadMesh3d]: r""" - Returns a copy of the `Nx3` array of vertex positions + Returns a copy of the contained mesh without associated data and attributes """ class MixedTriQuadMesh3d: @@ -221,9 +222,10 @@ class MixedTriQuadMesh3d: r""" Returns the numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) """ - def copy_vertices(self) -> numpy.typing.NDArray[typing.Any]: + @property + def vertices(self) -> numpy.typing.NDArray[typing.Any]: r""" - Returns a copy of the `Nx3` array of vertex positions + The `Nx3` array of vertex positions of the mesh """ class MixedTriQuadMesh3dF32: @@ -421,13 +423,15 @@ class TriMesh3d: r""" Returns the numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) """ - def copy_vertices(self) -> numpy.typing.NDArray[typing.Any]: + @property + def vertices(self) -> numpy.typing.NDArray[typing.Any]: r""" - Returns a copy of the `Nx3` array of vertex positions + The `Nx3` array of vertex positions of the mesh """ - def copy_triangles(self) -> numpy.typing.NDArray[numpy.uint64]: + @property + def triangles(self) -> numpy.typing.NDArray[numpy.uint64]: r""" - Returns a copy of the `Mx3` array of vertex indices per triangle + The `Mx3` array of vertex indices per triangle """ def vertex_vertex_connectivity(self) -> VertexVertexConnectivity: r""" diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index 5c3e7bd..30b8db6 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -69,25 +69,29 @@ fn add_attribute_with_name<'py, R: Real + Element>( } } -fn get_vertices<'py, R: Real + Element>( - py: Python<'py>, +fn get_vertices_generic<'py, R: Real + Element>( vertices: &[Vector3], -) -> PyResult>> { + container: Bound<'py, PyAny>, +) -> PyResult> { let coordinates: &[R] = bytemuck::cast_slice(vertices); - let vertices: ArrayView2 = + let array: ArrayView2 = ArrayView::from_shape((vertices.len(), 3), coordinates).map_err(anyhow::Error::new)?; - // Seems like at least one copy is necessary here (to_pyarray copies the data) - Ok(vertices.to_pyarray(py)) + let pyarray = unsafe { PyArray2::borrow_from_array(&array, container) }; + Ok(pyarray + .into_any() + .downcast_into::() + .expect("downcast should not fail")) } -fn get_triangles<'py>( - py: Python<'py>, +fn get_triangles_generic<'py>( 
triangles: &[TriangleCell], + container: Bound<'py, PyAny>, ) -> PyResult>> { let vertex_indices: &[NumpyUsize] = bytemuck::cast_slice(triangles); - let triangles: ArrayView2 = + let array: ArrayView2 = ArrayView::from_shape((triangles.len(), 3), vertex_indices).map_err(anyhow::Error::new)?; - Ok(triangles.to_pyarray(py)) + let pyarray = unsafe { PyArray2::borrow_from_array(&array, container) }; + Ok(pyarray) } macro_rules! create_mesh_data_interface { @@ -568,24 +572,21 @@ impl PyTriMesh3d { } } - /// Returns a copy of the `Nx3` array of vertex positions - pub fn copy_vertices<'py>(&self, py: Python<'py>) -> PyResult> { - match &self.inner { - PyTriMesh3dData::F32(mesh) => get_vertices(py, mesh.vertices()) - .map(|v| v.into_any().downcast_into::().unwrap()), - PyTriMesh3dData::F64(mesh) => get_vertices(py, mesh.vertices()) - .map(|v| v.into_any().downcast_into::().unwrap()), + /// The `Nx3` array of vertex positions of the mesh + #[getter] + pub fn vertices<'py>(this: Bound<'py, Self>) -> PyResult> { + match &this.borrow().inner { + PyTriMesh3dData::F32(mesh) => get_vertices_generic(mesh.vertices(), this.into_any()), + PyTriMesh3dData::F64(mesh) => get_vertices_generic(mesh.vertices(), this.into_any()), } } - /// Returns a copy of the `Mx3` array of vertex indices per triangle - pub fn copy_triangles<'py>( - &self, - py: Python<'py>, - ) -> PyResult>> { - match &self.inner { - PyTriMesh3dData::F32(mesh) => get_triangles(py, mesh.cells()), - PyTriMesh3dData::F64(mesh) => get_triangles(py, mesh.cells()), + /// The `Mx3` array of vertex indices per triangle + #[getter] + pub fn triangles<'py>(this: Bound<'py, Self>) -> PyResult>> { + match &this.borrow().inner { + PyTriMesh3dData::F32(mesh) => get_triangles_generic(mesh.cells(), this.into_any()), + PyTriMesh3dData::F64(mesh) => get_triangles_generic(mesh.cells(), this.into_any()), } } @@ -638,13 +639,16 @@ impl PyMixedTriQuadMesh3d { } } - /// Returns a copy of the `Nx3` array of vertex positions - pub fn copy_vertices<'py>(&self, py: Python<'py>) -> PyResult> { - match &self.inner { - PyMixedTriQuadMesh3dData::F32(mesh) => get_vertices(py, mesh.vertices()) - .map(|v| v.into_any().downcast_into::().unwrap()), - PyMixedTriQuadMesh3dData::F64(mesh) => get_vertices(py, mesh.vertices()) - .map(|v| v.into_any().downcast_into::().unwrap()), + /// The `Nx3` array of vertex positions of the mesh + #[getter] + pub fn vertices<'py>(this: Bound<'py, Self>) -> PyResult> { + match &this.borrow().inner { + PyMixedTriQuadMesh3dData::F32(mesh) => { + get_vertices_generic(mesh.vertices(), this.into_any()) + } + PyMixedTriQuadMesh3dData::F64(mesh) => { + get_vertices_generic(mesh.vertices(), this.into_any()) + } } } } @@ -767,17 +771,22 @@ impl PyMeshWithData { } } - /// Returns a copy of the `Nx3` array of vertex positions - pub fn copy_vertices<'py>(&self, py: Python<'py>) -> PyResult> { - match &self.inner { - PyMeshWithDataData::Tri3dF32(mesh) => get_vertices(py, mesh.mesh.vertices()) - .map(|v| v.into_any().downcast_into::().unwrap()), - PyMeshWithDataData::Tri3dF64(mesh) => get_vertices(py, mesh.mesh.vertices()) - .map(|v| v.into_any().downcast_into::().unwrap()), - PyMeshWithDataData::MixedTriQuadF32(mesh) => get_vertices(py, mesh.mesh.vertices()) - .map(|v| v.into_any().downcast_into::().unwrap()), - PyMeshWithDataData::MixedTriQuadF64(mesh) => get_vertices(py, mesh.mesh.vertices()) - .map(|v| v.into_any().downcast_into::().unwrap()), + /// The `Nx3` array of vertex positions of the mesh + #[getter] + pub fn vertices<'py>(this: Bound<'py, Self>) -> 
PyResult> { + match &this.borrow().inner { + PyMeshWithDataData::Tri3dF32(mesh) => { + get_vertices_generic(mesh.vertices(), this.into_any()) + } + PyMeshWithDataData::Tri3dF64(mesh) => { + get_vertices_generic(mesh.vertices(), this.into_any()) + } + PyMeshWithDataData::MixedTriQuadF32(mesh) => { + get_vertices_generic(mesh.vertices(), this.into_any()) + } + PyMeshWithDataData::MixedTriQuadF64(mesh) => { + get_vertices_generic(mesh.vertices(), this.into_any()) + } } } } From 032804fdf46de004507849637ee756fec0b91b9a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Thu, 28 Aug 2025 17:21:14 +0200 Subject: [PATCH 22/63] Implement normals --- pysplashsurf/pysplashsurf/pysplashsurf.pyi | 6 ++++- pysplashsurf/src/lib.rs | 3 ++- pysplashsurf/src/mesh.rs | 27 +++++++++++++++++++++- 3 files changed, 33 insertions(+), 3 deletions(-) diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index d22cd7a..7e320af 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -433,9 +433,13 @@ class TriMesh3d: r""" The `Mx3` array of vertex indices per triangle """ + def vertex_normals_parallel(self) -> numpy.typing.NDArray[typing.Any]: + r""" + Computes the vertex normals of the mesh using an area weighted average of the adjacent triangle faces + """ def vertex_vertex_connectivity(self) -> VertexVertexConnectivity: r""" - Returns the vertex-vertex connectivity of the mesh + Computes the vertex-vertex connectivity of the mesh """ class TriMesh3dF32: diff --git a/pysplashsurf/src/lib.rs b/pysplashsurf/src/lib.rs index 7fa20cb..30274a4 100644 --- a/pysplashsurf/src/lib.rs +++ b/pysplashsurf/src/lib.rs @@ -41,6 +41,7 @@ fn pysplashsurf(m: &Bound<'_, PyModule>) -> PyResult<()> { m.add_class::()?; m.add_class::()?; m.add_class::()?; + m.add_class::()?; m.add_class::()?; m.add_class::()?; @@ -85,7 +86,7 @@ fn pysplashsurf(m: &Bound<'_, PyModule>) -> PyResult<()> { fn run_splashsurf_py<'py>(args: Bound<'py, PyList>) -> PyResult<()> { cli::run_splashsurf(args.iter().map(|arg| { arg.downcast::() - .expect("Argument wasn't a string") + .expect("argument wasn't a string") .extract::() .unwrap() }))?; diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index 30b8db6..b7cd412 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -94,6 +94,20 @@ fn get_triangles_generic<'py>( Ok(pyarray) } +fn compute_normals_generic<'py, R: Real + Element>( + py: Python<'py>, + mesh: &TriMesh3d, +) -> PyResult> { + let normals_vec = mesh.par_vertex_normals(); + let normals_vec = bytemuck::allocation::cast_vec::>, R>(normals_vec); + + Ok(PyArray::from_vec(py, normals_vec) + .reshape([mesh.vertices().len(), 3])? + .into_any() + .downcast_into::() + .expect("downcast should not fail")) +} + macro_rules! 
create_mesh_data_interface { ($name: ident, $type: ident, $mesh_class: ident, $pymesh_class: ident, $aabb_class: ident) => { /// MeshWithData wrapper @@ -590,7 +604,18 @@ impl PyTriMesh3d { } } - /// Returns the vertex-vertex connectivity of the mesh + /// Computes the vertex normals of the mesh using an area weighted average of the adjacent triangle faces + pub fn vertex_normals_parallel<'py>( + &self, + py: Python<'py>, + ) -> PyResult> { + match &self.inner { + PyTriMesh3dData::F32(mesh) => compute_normals_generic(py, mesh), + PyTriMesh3dData::F64(mesh) => compute_normals_generic(py, mesh), + } + } + + /// Computes the vertex-vertex connectivity of the mesh pub fn vertex_vertex_connectivity(&self) -> PyVertexVertexConnectivity { let connectivity = match &self.inner { PyTriMesh3dData::F32(mesh) => mesh.vertex_vertex_connectivity(), From 93a45319e79c13829ca1d9fd95c89a49a226794b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Fri, 29 Aug 2025 13:16:20 +0200 Subject: [PATCH 23/63] Py: Refactor MeshWithData wrapper --- pysplashsurf/pysplashsurf/pysplashsurf.pyi | 8 +- pysplashsurf/src/marching_cubes.rs | 76 +++----- pysplashsurf/src/mesh.rs | 215 ++++++++++++--------- pysplashsurf/src/pipeline.rs | 21 +- pysplashsurf/src/post_processing.rs | 206 +++++++------------- pysplashsurf/src/utils.rs | 22 ++- 6 files changed, 248 insertions(+), 300 deletions(-) diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index 7e320af..23a55d0 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -202,15 +202,17 @@ class MeshWithData: Returns the numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) """ @property - def mesh_cell_type(self) -> MeshType: + def mesh_type(self) -> MeshType: r""" Returns the type of the underlying mesh """ @property - def vertices(self) -> numpy.typing.NDArray[typing.Any]: + def mesh(self) -> typing.Union[TriMesh3d, MixedTriQuadMesh3d]: r""" - The `Nx3` array of vertex positions of the mesh + The contained mesh without associated data and attributes """ + def as_tri3d(self) -> typing.Optional[TriMesh3d]: ... + def as_mixed_tri_quad3d(self) -> typing.Optional[MixedTriQuadMesh3d]: ... 
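# Usage sketch of the accessors above, assuming `mesh_with_data` is a MeshWithData
# instance obtained elsewhere (for example from a surface reconstruction call):
#
tri_mesh = mesh_with_data.as_tri3d()
if tri_mesh is not None:
    vertices = tri_mesh.vertices      # Nx3 view backed by the shared Rust mesh, no copy
    triangles = tri_mesh.triangles    # Mx3 view of the per-triangle vertex indices
    print(vertices.shape, triangles.shape, tri_mesh.dtype)
else:
    # Mixed triangle/quad meshes are returned via the second accessor instead
    mixed_mesh = mesh_with_data.as_mixed_tri_quad3d()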
def copy_mesh(self) -> typing.Union[TriMesh3d, MixedTriQuadMesh3d]: r""" Returns a copy of the contained mesh without associated data and attributes diff --git a/pysplashsurf/src/marching_cubes.rs b/pysplashsurf/src/marching_cubes.rs index cb06752..9a2b650 100644 --- a/pysplashsurf/src/marching_cubes.rs +++ b/pysplashsurf/src/marching_cubes.rs @@ -1,4 +1,4 @@ -use crate::mesh::{MeshType, PyMeshWithData, PyTriMesh3d}; +use crate::mesh::get_triangle_mesh_generic; use crate::uniform_grid::PyUniformGrid; use crate::utils::*; use pyo3::prelude::*; @@ -17,55 +17,31 @@ pub fn check_mesh_consistency<'py>( check_manifold: bool, debug: bool, ) -> PyResult> { - if let Ok(mesh) = mesh.downcast::() { - let mesh = mesh.borrow(); - if let (Some(grid), Some(mesh)) = (grid.as_f32(), mesh.as_f32()) { - Ok(splashsurf_lib::marching_cubes::check_mesh_consistency( - grid, - mesh, - check_closed, - check_manifold, - debug, - ) - .err()) - } else if let (Some(grid), Some(mesh)) = (grid.as_f64(), mesh.as_f64()) { - Ok(splashsurf_lib::marching_cubes::check_mesh_consistency( - grid, - mesh, - check_closed, - check_manifold, - debug, - ) - .err()) - } else { - Err(pyerr_mesh_grid_scalar_mismatch()) - } - } else if let Ok(mesh) = mesh.downcast::() - && let mesh = mesh.borrow() - && mesh.mesh_cell_type() == MeshType::Tri3d - { - if let (Some(grid), Some(mesh)) = (grid.as_f32(), mesh.as_tri_f32()) { - Ok(splashsurf_lib::marching_cubes::check_mesh_consistency( - grid, - &mesh.mesh, - check_closed, - check_manifold, - debug, - ) - .err()) - } else if let (Some(grid), Some(mesh)) = (grid.as_f64(), mesh.as_tri_f64()) { - Ok(splashsurf_lib::marching_cubes::check_mesh_consistency( - grid, - &mesh.mesh, - check_closed, - check_manifold, - debug, - ) - .err()) - } else { - Err(pyerr_mesh_grid_scalar_mismatch()) - } + let py = mesh.py(); + + // Try to extract the triangle mesh; + let mesh = get_triangle_mesh_generic(&mesh).ok_or_else(pyerr_only_triangle_mesh)?; + let mesh = mesh.borrow(py); + + if let (Some(grid), Some(mesh)) = (grid.as_f32(), mesh.as_f32()) { + Ok(splashsurf_lib::marching_cubes::check_mesh_consistency( + grid, + mesh, + check_closed, + check_manifold, + debug, + ) + .err()) + } else if let (Some(grid), Some(mesh)) = (grid.as_f64(), mesh.as_f64()) { + Ok(splashsurf_lib::marching_cubes::check_mesh_consistency( + grid, + mesh, + check_closed, + check_manifold, + debug, + ) + .err()) } else { - Err(pyerr_only_triangle_mesh()) + Err(pyerr_mesh_grid_scalar_mismatch()) } } diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index b7cd412..1becd2e 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -108,6 +108,19 @@ fn compute_normals_generic<'py, R: Real + Element>( .expect("downcast should not fail")) } +pub fn get_triangle_mesh_generic<'py>(mesh: &Bound<'py, PyAny>) -> Option> { + let py = mesh.py(); + if let Ok(mesh) = mesh.downcast::() { + Some(mesh.as_unbound().clone_ref(py)) + } else if let Ok(data_mesh) = mesh.downcast::() + && data_mesh.borrow().mesh_type() == MeshType::Tri3d + { + data_mesh.borrow().as_tri3d(py) + } else { + None + } +} + macro_rules! 
create_mesh_data_interface { ($name: ident, $type: ident, $mesh_class: ident, $pymesh_class: ident, $aabb_class: ident) => { /// MeshWithData wrapper @@ -519,6 +532,7 @@ impl PyVertexVertexConnectivity { } } +#[derive(Clone)] enum PyTriMesh3dData { F32(TriMesh3d), F64(TriMesh3d), @@ -527,6 +541,7 @@ enum PyTriMesh3dData { #[gen_stub_pyclass] #[pyclass] #[pyo3(name = "TriMesh3d")] +#[derive(Clone)] pub struct PyTriMesh3d { inner: PyTriMesh3dData, } @@ -534,6 +549,14 @@ pub struct PyTriMesh3d { impl_from_mesh!(PyTriMesh3d, TriMesh3d => PyTriMesh3dData::F32); impl_from_mesh!(PyTriMesh3d, TriMesh3d => PyTriMesh3dData::F64); +impl Default for PyTriMesh3d { + fn default() -> Self { + Self { + inner: PyTriMesh3dData::F32(TriMesh3d::default()), + } + } +} + impl PyTriMesh3d { pub fn try_from_generic(mut mesh: TriMesh3d) -> PyResult { transmute_take_into::<_, TriMesh3d, _>(&mut mesh) @@ -625,6 +648,7 @@ impl PyTriMesh3d { } } +#[derive(Clone)] enum PyMixedTriQuadMesh3dData { F32(MixedTriQuadMesh3d), F64(MixedTriQuadMesh3d), @@ -633,6 +657,7 @@ enum PyMixedTriQuadMesh3dData { #[gen_stub_pyclass] #[pyclass] #[pyo3(name = "MixedTriQuadMesh3d")] +#[derive(Clone)] pub struct PyMixedTriQuadMesh3d { inner: PyMixedTriQuadMesh3dData, } @@ -678,20 +703,6 @@ impl PyMixedTriQuadMesh3d { } } -enum PyMeshWithDataData { - Tri3dF32(MeshWithData>), - Tri3dF64(MeshWithData>), - MixedTriQuadF32(MeshWithData>), - MixedTriQuadF64(MeshWithData>), -} - -#[gen_stub_pyclass] -#[pyclass] -#[pyo3(name = "MeshWithData")] -pub struct PyMeshWithData { - inner: PyMeshWithDataData, -} - /// Enum specifying the type of mesh contained in a `MeshWithData` #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[gen_stub_pyclass_enum] @@ -703,49 +714,74 @@ pub enum MeshType { MixedTriQuad3d, } -impl_from_mesh!(PyMeshWithData, MeshWithData> => PyMeshWithDataData::Tri3dF32); -impl_from_mesh!(PyMeshWithData, MeshWithData> => PyMeshWithDataData::Tri3dF64); -impl_from_mesh!(PyMeshWithData, MeshWithData> => PyMeshWithDataData::MixedTriQuadF32); -impl_from_mesh!(PyMeshWithData, MeshWithData> => PyMeshWithDataData::MixedTriQuadF64); +pub enum PyMesh3dData { + Tri3d(Py), + MixedTriQuad3d(Py), +} -impl PyMeshWithData { - pub fn try_from_generic + 'static>( - mut mesh: MeshWithData, - ) -> PyResult { - transmute_take_into::<_, MeshWithData>, _>(&mut mesh) - .or_else(|| transmute_take_into::<_, MeshWithData>, _>(&mut mesh)) - .or_else(|| transmute_take_into::<_, MeshWithData>, _>(&mut mesh)) - .or_else(|| transmute_take_into::<_, MeshWithData>, _>(&mut mesh)) - .ok_or_else(|| PyTypeError::new_err( - "Unsupported mesh type for MeshWithData. 
Only TriMesh3d and MixedTriQuadMesh3d with f32 or f64 scalar types are supported.", - )) - } +enum_impl_from!(PyMesh3dData, Py => PyMesh3dData::Tri3d); +enum_impl_from!(PyMesh3dData, Py => PyMesh3dData::MixedTriQuad3d); - pub fn as_tri_f32(&self) -> Option<&MeshWithData>> { - match &self.inner { - PyMeshWithDataData::Tri3dF32(mesh) => Some(mesh), - _ => None, - } - } +enum PyMeshAttribute { + F32(OwnedMeshAttribute), + F64(OwnedMeshAttribute), +} - pub fn as_tri_f64(&self) -> Option<&MeshWithData>> { - match &self.inner { - PyMeshWithDataData::Tri3dF64(mesh) => Some(mesh), - _ => None, - } - } +#[gen_stub_pyclass] +#[pyclass] +#[pyo3(name = "MeshWithData")] +pub struct PyMeshWithData { + mesh: PyMesh3dData, + point_attributes: Vec, + cell_attributes: Vec, +} - pub fn as_tri_f32_mut(&mut self) -> Option<&mut MeshWithData>> { - match &mut self.inner { - PyMeshWithDataData::Tri3dF32(mesh) => Some(mesh), - _ => None, - } +impl PyMeshWithData { + /// Constructs a new mesh with data from an existing mesh object (either `TriMesh3d` or `MixedTriQuadMesh3d`) + pub fn try_from_pymesh<'py, T>(py: Python<'py>, mesh: T) -> PyResult + where + T: IntoPyObject<'py>, + T::Output: Into>, + Py: Into, + PyErr: From, + { + let mesh_bound = mesh.into_pyobject(py)?; + let mesh_py: Py = mesh_bound.into(); + let mesh: PyMesh3dData = mesh_py.into(); + + Ok(Self { + mesh, + point_attributes: vec![], + cell_attributes: vec![], + }) } - pub fn as_tri_f64_mut(&mut self) -> Option<&mut MeshWithData>> { - match &mut self.inner { - PyMeshWithDataData::Tri3dF64(mesh) => Some(mesh), - _ => None, + pub fn try_from_mesh_with_data<'py, R: Real + Element, M: Mesh3d + 'static>( + py: Python<'_>, + mesh_with_data: MeshWithData, + ) -> PyResult { + // Deconstruct the input mesh + let MeshWithData { + mut mesh, + point_attributes: _, + cell_attributes: _, + } = mesh_with_data; + + // TODO: Convert attributes + + use std::any::TypeId; + if TypeId::of::() == TypeId::of::>() { + let mesh_ref = unsafe { std::mem::transmute::<&mut M, &mut TriMesh3d>(&mut mesh) }; + let mesh = std::mem::take(mesh_ref); + let tri_mesh = PyTriMesh3d::try_from_generic(mesh)?; + Self::try_from_pymesh(py, tri_mesh) + } else if TypeId::of::() == TypeId::of::>() { + let mesh_ref = unsafe { std::mem::transmute::<&mut M, &mut TriMesh3d>(&mut mesh) }; + let mesh = std::mem::take(mesh_ref); + let tri_mesh = PyTriMesh3d::try_from_generic(mesh)?; + Self::try_from_pymesh(py, tri_mesh) + } else { + Err(pyerr_only_tri_and_tri_quad_mesh()) } } } @@ -756,62 +792,51 @@ impl PyMeshWithData { /// Returns the numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) #[getter] pub fn dtype<'py>(&self, py: Python<'py>) -> Bound<'py, PyArrayDescr> { - match &self.inner { - PyMeshWithDataData::Tri3dF32(_) | PyMeshWithDataData::MixedTriQuadF32(_) => { - np::dtype::(py) - } - PyMeshWithDataData::Tri3dF64(_) | PyMeshWithDataData::MixedTriQuadF64(_) => { - np::dtype::(py) - } + match &self.mesh { + PyMesh3dData::Tri3d(mesh) => mesh.borrow(py).dtype(py), + PyMesh3dData::MixedTriQuad3d(mesh) => mesh.borrow(py).dtype(py), } } /// Returns the type of the underlying mesh #[getter] - pub fn mesh_cell_type(&self) -> MeshType { - match &self.inner { - PyMeshWithDataData::Tri3dF32(_) | PyMeshWithDataData::Tri3dF64(_) => MeshType::Tri3d, - PyMeshWithDataData::MixedTriQuadF32(_) | PyMeshWithDataData::MixedTriQuadF64(_) => { - MeshType::MixedTriQuad3d - } + pub fn mesh_type(&self) -> MeshType { + match &self.mesh { + PyMesh3dData::Tri3d(_) => MeshType::Tri3d, + 
PyMesh3dData::MixedTriQuad3d(_) => MeshType::MixedTriQuad3d, } } - /// Returns a copy of the contained mesh without associated data and attributes - #[gen_stub(override_return_type(type_repr="typing.Union[TriMesh3d, MixedTriQuadMesh3d]", imports=()))] - pub fn copy_mesh<'py>(&self, py: Python<'py>) -> PyResult> { - match &self.inner { - PyMeshWithDataData::Tri3dF32(mesh) => { - PyTriMesh3d::from(mesh.mesh.clone()).into_bound_py_any(py) - } - PyMeshWithDataData::Tri3dF64(mesh) => { - PyTriMesh3d::from(mesh.mesh.clone()).into_bound_py_any(py) - } - PyMeshWithDataData::MixedTriQuadF32(mesh) => { - PyMixedTriQuadMesh3d::from(mesh.mesh.clone()).into_bound_py_any(py) - } - PyMeshWithDataData::MixedTriQuadF64(mesh) => { - PyMixedTriQuadMesh3d::from(mesh.mesh.clone()).into_bound_py_any(py) - } + pub fn as_tri3d<'py, 'a>(&'a self, py: Python<'py>) -> Option> { + match &self.mesh { + PyMesh3dData::Tri3d(mesh) => Some(mesh.clone_ref(py)), + _ => None, } } - /// The `Nx3` array of vertex positions of the mesh + pub fn as_mixed_tri_quad3d<'py>(&self, py: Python<'py>) -> Option> { + match &self.mesh { + PyMesh3dData::MixedTriQuad3d(mesh) => Some(mesh.clone_ref(py)), + _ => None, + } + } + + /// The contained mesh without associated data and attributes #[getter] - pub fn vertices<'py>(this: Bound<'py, Self>) -> PyResult> { - match &this.borrow().inner { - PyMeshWithDataData::Tri3dF32(mesh) => { - get_vertices_generic(mesh.vertices(), this.into_any()) - } - PyMeshWithDataData::Tri3dF64(mesh) => { - get_vertices_generic(mesh.vertices(), this.into_any()) - } - PyMeshWithDataData::MixedTriQuadF32(mesh) => { - get_vertices_generic(mesh.vertices(), this.into_any()) - } - PyMeshWithDataData::MixedTriQuadF64(mesh) => { - get_vertices_generic(mesh.vertices(), this.into_any()) - } + #[gen_stub(override_return_type(type_repr="typing.Union[TriMesh3d, MixedTriQuadMesh3d]", imports=()))] + pub fn mesh<'py>(&self, py: Python<'py>) -> Py { + match &self.mesh { + PyMesh3dData::Tri3d(mesh) => mesh.clone_ref(py).into_any(), + PyMesh3dData::MixedTriQuad3d(mesh) => mesh.clone_ref(py).into_any(), + } + } + + /// Returns a copy of the contained mesh without associated data and attributes + #[gen_stub(override_return_type(type_repr="typing.Union[TriMesh3d, MixedTriQuadMesh3d]", imports=()))] + pub fn copy_mesh<'py>(&self, py: Python<'py>) -> PyResult> { + match &self.mesh { + PyMesh3dData::Tri3d(mesh) => mesh.borrow(py).clone().into_bound_py_any(py), + PyMesh3dData::MixedTriQuad3d(mesh) => mesh.borrow(py).clone().into_bound_py_any(py), } } } diff --git a/pysplashsurf/src/pipeline.rs b/pysplashsurf/src/pipeline.rs index a41fdbf..58a4a17 100644 --- a/pysplashsurf/src/pipeline.rs +++ b/pysplashsurf/src/pipeline.rs @@ -1,11 +1,11 @@ use crate::mesh::PyMeshWithData; -use crate::utils::IndexT; +use crate::utils::{IndexT, pyerr_unsupported_scalar}; use numpy as np; use numpy::{ Element, PyArray1, PyArray2, PyArrayDescr, PyArrayDescrMethods, PyArrayMethods, PyReadonlyArray1, PyReadonlyArray2, PyUntypedArray, PyUntypedArrayMethods, }; -use pyo3::exceptions::{PyRuntimeError, PyTypeError}; +use pyo3::exceptions::PyRuntimeError; use pyo3::{ prelude::*, types::{PyDict, PyString}, @@ -140,13 +140,15 @@ pub fn reconstruction_pipeline<'py>( mesh_aabb_clamp_vertices, }; - fn reconstruction_to_pymesh( + // TODO: Support transfer of attributes + fn reconstruction_to_pymesh<'py, I: Index, R: Real + Element>( + py: Python<'py>, reconstruction: splashsurf::reconstruct::ReconstructionResult, ) -> PyResult { if let Some(tri_mesh) = 
reconstruction.tri_mesh { - PyMeshWithData::try_from_generic(tri_mesh) + PyMeshWithData::try_from_mesh_with_data(py, tri_mesh) } else if let Some(tri_quad_mesh) = reconstruction.tri_quad_mesh { - PyMeshWithData::try_from_generic(tri_quad_mesh) + PyMeshWithData::try_from_mesh_with_data(py, tri_quad_mesh) } else { Err(PyRuntimeError::new_err( "Reconstruction resulted in no mesh", @@ -164,7 +166,7 @@ pub fn reconstruction_pipeline<'py>( .expect("failed to convert reconstruction parameters to f32"), &postprocessing_args, )?; - reconstruction_to_pymesh(reconstruction) + reconstruction_to_pymesh(py, reconstruction) } else if element_type.is_equiv_to(&np::dtype::(py)) { let particles = particles.downcast::>()?; let reconstruction = reconstruction_pipeline_generic_impl::( @@ -173,12 +175,9 @@ pub fn reconstruction_pipeline<'py>( ¶meters, &postprocessing_args, )?; - reconstruction_to_pymesh(reconstruction) + reconstruction_to_pymesh(py, reconstruction) } else { - Err(PyTypeError::new_err(format!( - "Unsupported scalar type {} for reconstruction, only float32 and float64 are supported", - element_type - ))) + Err(pyerr_unsupported_scalar()) } } diff --git a/pysplashsurf/src/post_processing.rs b/pysplashsurf/src/post_processing.rs index e2b2adc..7fb5f81 100644 --- a/pysplashsurf/src/post_processing.rs +++ b/pysplashsurf/src/post_processing.rs @@ -7,7 +7,7 @@ use pyo3_stub_gen::derive::gen_stub_pyfunction; use splashsurf_lib::nalgebra::Vector3; use crate::mesh::{ - MeshType, PyMeshWithData, PyMixedTriQuadMesh3d, PyTriMesh3d, PyVertexVertexConnectivity, + PyMeshWithData, PyMixedTriQuadMesh3d, PyVertexVertexConnectivity, get_triangle_mesh_generic, }; use crate::uniform_grid::PyUniformGrid; use crate::utils::*; @@ -33,8 +33,11 @@ pub fn convert_tris_to_quads<'py>( let normal_angle_limit = normal_angle_limit.to_radians(); let max_interior_angle = max_interior_angle.to_radians(); - if let Ok(mesh) = mesh.downcast::() { - let mesh = mesh.borrow(); + let quad_mesh = { + // Try to extract the triangle mesh; + let mesh = get_triangle_mesh_generic(&mesh).ok_or_else(pyerr_only_triangle_mesh)?; + let mesh = mesh.borrow(py); + if let Some(mesh) = mesh.as_f32() { let quad_mesh = splashsurf_lib::postprocessing::convert_tris_to_quads( mesh, @@ -42,7 +45,7 @@ pub fn convert_tris_to_quads<'py>( normal_angle_limit as f32, max_interior_angle as f32, ); - PyMixedTriQuadMesh3d::from(quad_mesh).into_bound_py_any(py) + Ok(PyMixedTriQuadMesh3d::from(quad_mesh)) } else if let Some(mesh) = mesh.as_f64() { let quad_mesh = splashsurf_lib::postprocessing::convert_tris_to_quads( mesh, @@ -50,39 +53,18 @@ pub fn convert_tris_to_quads<'py>( normal_angle_limit, max_interior_angle, ); - PyMixedTriQuadMesh3d::from(quad_mesh).into_bound_py_any(py) - } else { - Err(pyerr_unsupported_scalar()) - } - } else if let Ok(mesh) = mesh.downcast::() - && let mesh = mesh.borrow() - && mesh.mesh_cell_type() == MeshType::Tri3d - { - if let Some(mesh) = mesh.as_tri_f32() { - let quad_mesh = splashsurf_lib::postprocessing::convert_tris_to_quads( - &mesh.mesh, - non_squareness_limit as f32, - normal_angle_limit as f32, - max_interior_angle as f32, - ); - let mut quad_mesh = splashsurf_lib::mesh::MeshWithData::new(quad_mesh); - quad_mesh.point_attributes = mesh.point_attributes.clone(); - PyMeshWithData::from(quad_mesh).into_bound_py_any(py) - } else if let Some(mesh) = mesh.as_tri_f64() { - let quad_mesh = splashsurf_lib::postprocessing::convert_tris_to_quads( - &mesh.mesh, - non_squareness_limit, - normal_angle_limit, - max_interior_angle, - ); - let 
mut quad_mesh = splashsurf_lib::mesh::MeshWithData::new(quad_mesh); - quad_mesh.point_attributes = mesh.point_attributes.clone(); - PyMeshWithData::from(quad_mesh).into_bound_py_any(py) + Ok(PyMixedTriQuadMesh3d::from(quad_mesh)) } else { Err(pyerr_unsupported_scalar()) } + }?; + + if let Ok(mesh) = mesh.downcast::() { + let mut data_mesh = PyMeshWithData::try_from_pymesh(py, quad_mesh)?; + // TODO: transfer of point attributes not implemented yet + unimplemented!("transfer of point attributes not implemented yet"); } else { - Err(pyerr_only_triangle_mesh()) + quad_mesh.into_bound_py_any(py) } } @@ -101,56 +83,32 @@ pub fn laplacian_smoothing_parallel<'py>( beta: f64, weights: &Bound<'py, PyUntypedArray>, ) -> PyResult<()> { - if let Ok(mesh) = mesh.downcast::() { - let mut mesh = mesh.borrow_mut(); - if let Some(mesh) = mesh.as_f32_mut() { - let weights = weights.downcast::>()?.try_readonly()?; - splashsurf_lib::postprocessing::par_laplacian_smoothing_inplace( - mesh, - &vertex_connectivity.borrow().connectivity, - iterations, - beta as f32, - weights.as_slice()?, - ); - } else if let Some(mesh) = mesh.as_f64_mut() { - let weights = weights.downcast::>()?.try_readonly()?; - splashsurf_lib::postprocessing::par_laplacian_smoothing_inplace( - mesh, - &vertex_connectivity.borrow().connectivity, - iterations, - beta, - weights.as_slice()?, - ); - } else { - return Err(pyerr_unsupported_scalar()); - } - } else if let Ok(mesh) = mesh.downcast::() - && let mut mesh = mesh.borrow_mut() - && mesh.mesh_cell_type() == MeshType::Tri3d - { - if let Some(mesh) = mesh.as_tri_f32_mut() { - let weights = weights.downcast::>()?.try_readonly()?; - splashsurf_lib::postprocessing::par_laplacian_smoothing_inplace( - &mut mesh.mesh, - &vertex_connectivity.borrow().connectivity, - iterations, - beta as f32, - weights.as_slice()?, - ); - } else if let Some(mesh) = mesh.as_tri_f64_mut() { - let weights = weights.downcast::>()?.try_readonly()?; - splashsurf_lib::postprocessing::par_laplacian_smoothing_inplace( - &mut mesh.mesh, - &vertex_connectivity.borrow().connectivity, - iterations, - beta, - weights.as_slice()?, - ); - } else { - return Err(pyerr_unsupported_scalar()); - } + let py = mesh.py(); + + // Try to extract the triangle mesh; + let mesh = get_triangle_mesh_generic(&mesh).ok_or_else(pyerr_only_triangle_mesh)?; + let mut mesh = mesh.borrow_mut(py); + + if let Some(mesh) = mesh.as_f32_mut() { + let weights = weights.downcast::>()?.try_readonly()?; + splashsurf_lib::postprocessing::par_laplacian_smoothing_inplace( + mesh, + &vertex_connectivity.borrow().connectivity, + iterations, + beta as f32, + weights.as_slice()?, + ); + } else if let Some(mesh) = mesh.as_f64_mut() { + let weights = weights.downcast::>()?.try_readonly()?; + splashsurf_lib::postprocessing::par_laplacian_smoothing_inplace( + mesh, + &vertex_connectivity.borrow().connectivity, + iterations, + beta, + weights.as_slice()?, + ); } else { - return Err(pyerr_only_triangle_mesh()); + return Err(pyerr_unsupported_scalar()); } Ok(()) @@ -209,41 +167,24 @@ pub fn barnacle_decimation<'py>( keep_vertices: bool, ) -> PyResult { use splashsurf_lib::postprocessing::decimation; + let py = mesh.py(); - if let Ok(mesh) = mesh.downcast::() { - let mut mesh = mesh.borrow_mut(); - if let Some(mesh) = mesh.as_f32_mut() { - Ok(PyVertexVertexConnectivity::new(decimation( - mesh, - keep_vertices, - ))) - } else if let Some(mesh) = mesh.as_f64_mut() { - Ok(PyVertexVertexConnectivity::new(decimation( - mesh, - keep_vertices, - ))) - } else { - 
Err(pyerr_unsupported_scalar()) - } - } else if let Ok(mesh) = mesh.downcast::() { - let mut mesh = mesh.borrow_mut(); - if let Some(mesh) = mesh.as_tri_f32_mut() { - let mesh = &mut mesh.mesh; - Ok(PyVertexVertexConnectivity::new(decimation( - mesh, - keep_vertices, - ))) - } else if let Some(mesh) = mesh.as_tri_f64_mut() { - let mesh = &mut mesh.mesh; - Ok(PyVertexVertexConnectivity::new(decimation( - mesh, - keep_vertices, - ))) - } else { - Err(pyerr_unsupported_scalar()) - } + // Try to extract the triangle mesh; + let mesh = get_triangle_mesh_generic(&mesh).ok_or_else(pyerr_only_triangle_mesh)?; + let mut mesh = mesh.borrow_mut(py); + + if let Some(mesh) = mesh.as_f32_mut() { + Ok(PyVertexVertexConnectivity::new(decimation( + mesh, + keep_vertices, + ))) + } else if let Some(mesh) = mesh.as_f64_mut() { + Ok(PyVertexVertexConnectivity::new(decimation( + mesh, + keep_vertices, + ))) } else { - Err(pyerr_only_triangle_mesh()) + Err(pyerr_unsupported_scalar()) } } @@ -263,33 +204,20 @@ pub fn marching_cubes_cleanup<'py>( max_iter: usize, keep_vertices: bool, ) -> PyResult<()> { - let max_rel_snap_dist_f32 = max_rel_snap_dist.map(|d| d as f32); use splashsurf_lib::postprocessing::marching_cubes_cleanup as cleanup; + let py = mesh.py(); + let max_rel_snap_dist_f32 = max_rel_snap_dist.map(|d| d as f32); - if let Ok(mesh) = mesh.downcast::() { - let mut mesh = mesh.borrow_mut(); - if let (Some(grid), Some(mesh)) = (grid.as_f32(), mesh.as_f32_mut()) { - cleanup(mesh, grid, max_rel_snap_dist_f32, max_iter, keep_vertices); - } else if let (Some(grid), Some(mesh)) = (grid.as_f64(), mesh.as_f64_mut()) { - cleanup(mesh, grid, max_rel_snap_dist, max_iter, keep_vertices); - } else { - return Err(pyerr_mesh_grid_scalar_mismatch()); - } - } else if let Ok(mesh) = mesh.downcast::() - && let mut mesh = mesh.borrow_mut() - && mesh.mesh_cell_type() == MeshType::Tri3d - { - if let (Some(grid), Some(mesh)) = (grid.as_f32(), mesh.as_tri_f32_mut()) { - let mesh = &mut mesh.mesh; - cleanup(mesh, grid, max_rel_snap_dist_f32, max_iter, keep_vertices); - } else if let (Some(grid), Some(mesh)) = (grid.as_f64(), mesh.as_tri_f64_mut()) { - let mesh = &mut mesh.mesh; - cleanup(mesh, grid, max_rel_snap_dist, max_iter, keep_vertices); - } else { - return Err(pyerr_mesh_grid_scalar_mismatch()); - } + // Try to extract the triangle mesh; + let mesh = get_triangle_mesh_generic(&mesh).ok_or_else(pyerr_only_triangle_mesh)?; + let mut mesh = mesh.borrow_mut(py); + + if let (Some(grid), Some(mesh)) = (grid.as_f32(), mesh.as_f32_mut()) { + cleanup(mesh, grid, max_rel_snap_dist_f32, max_iter, keep_vertices); + } else if let (Some(grid), Some(mesh)) = (grid.as_f64(), mesh.as_f64_mut()) { + cleanup(mesh, grid, max_rel_snap_dist, max_iter, keep_vertices); } else { - return Err(pyerr_only_triangle_mesh()); + return Err(pyerr_mesh_grid_scalar_mismatch()); } Ok(()) diff --git a/pysplashsurf/src/utils.rs b/pysplashsurf/src/utils.rs index f1ef5c6..07086cc 100644 --- a/pysplashsurf/src/utils.rs +++ b/pysplashsurf/src/utils.rs @@ -1,3 +1,6 @@ +use pyo3::PyErr; +use pyo3::exceptions::PyTypeError; + pub(crate) type IndexT = i64; pub(crate) fn pyerr_unsupported_scalar() -> PyErr { @@ -14,6 +17,12 @@ pub(crate) fn pyerr_only_triangle_mesh() -> PyErr { PyTypeError::new_err("unsupported mesh type, only triangle meshes are supported") } +pub(crate) fn pyerr_only_tri_and_tri_quad_mesh() -> PyErr { + PyTypeError::new_err( + "unsupported mesh type, only triangle and mixed triangle-quad meshes are supported", + ) +} + macro_rules! 
impl_from_mesh { ($pyclass:ident, $mesh:ty => $target_enum:path) => { impl From<$mesh> for $pyclass { @@ -26,9 +35,18 @@ macro_rules! impl_from_mesh { }; } +macro_rules! enum_impl_from { + ($enum_t:ident, $from_t:ty => $to_variant:path) => { + impl From<$from_t> for $enum_t { + fn from(value: $from_t) -> Self { + $to_variant(value) + } + } + }; +} + +pub(crate) use enum_impl_from; pub(crate) use impl_from_mesh; -use pyo3::PyErr; -use pyo3::exceptions::PyTypeError; /// Transmutes from a generic type to a concrete type if they are identical, takes the value and converts it into the target type pub fn transmute_take_into< From 0dec8be3d09eac78266c56fafc2bd0a4d56a3ccd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Fri, 29 Aug 2025 13:47:51 +0200 Subject: [PATCH 24/63] Py: Get triangle and quad cells from mixed mesh --- pysplashsurf/pysplashsurf/pysplashsurf.pyi | 8 ++++ pysplashsurf/src/mesh.rs | 48 ++++++++++++++++++++++ splashsurf_lib/src/mesh.rs | 2 +- 3 files changed, 57 insertions(+), 1 deletion(-) diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index 23a55d0..5813236 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -229,6 +229,14 @@ class MixedTriQuadMesh3d: r""" The `Nx3` array of vertex positions of the mesh """ + def get_triangles(self) -> numpy.typing.NDArray[numpy.uint64]: + r""" + Extracts the triangles of the mesh as an `Nx3` array of vertex indices + """ + def get_quads(self) -> numpy.typing.NDArray[numpy.uint64]: + r""" + Extracts the quads of the mesh as an `Nx3` array of vertex indices + """ class MixedTriQuadMesh3dF32: r""" diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index 1becd2e..9026ad8 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -1,6 +1,7 @@ use crate::NumpyUsize; use crate::aabb::{Aabb3dF32, Aabb3dF64}; use crate::utils::*; +use bytemuck::{NoUninit, Pod}; use ndarray::{Array2, ArrayView, ArrayView2}; use numpy as np; use numpy::{ @@ -701,6 +702,53 @@ impl PyMixedTriQuadMesh3d { } } } + + /// Returns a copy of all triangle cells of the mesh as an `Nx3` array of vertex indices + pub fn get_triangles<'py>( + &self, + py: Python<'py>, + ) -> PyResult>> { + let cells = match &self.inner { + PyMixedTriQuadMesh3dData::F32(mesh) => mesh.cells.as_slice(), + PyMixedTriQuadMesh3dData::F64(mesh) => mesh.cells.as_slice(), + }; + + filter_cells(py, cells, |cell| match cell { + TriangleOrQuadCell::Tri(tri) => Some(tri.map(|v| v as NumpyUsize)), + _ => None, + }) + } + + /// Returns a copy of all quad cells of the mesh as an `Nx3` array of vertex indices + pub fn get_quads<'py>(&self, py: Python<'py>) -> PyResult>> { + let cells = match &self.inner { + PyMixedTriQuadMesh3dData::F32(mesh) => mesh.cells.as_slice(), + PyMixedTriQuadMesh3dData::F64(mesh) => mesh.cells.as_slice(), + }; + + filter_cells(py, cells, |cell| match cell { + TriangleOrQuadCell::Quad(quad) => Some(quad.map(|v| v as NumpyUsize)), + _ => None, + }) + } +} + +pub fn filter_cells<'py, C, const N: usize, F>( + py: Python<'py>, + cells: &[C], + filter: F, +) -> PyResult>> +where + [NumpyUsize; N]: Pod + NoUninit, + F: Fn(&C) -> Option<[NumpyUsize; N]>, +{ + let filtered_cells: Vec<[NumpyUsize; N]> = cells.iter().filter_map(filter).collect(); + let n_triangles = filtered_cells.len(); + let vertex_indices: Vec = bytemuck::cast_vec(filtered_cells); + let array: Array2 = + Array2::from_shape_vec((n_triangles, N), 
vertex_indices).map_err(anyhow::Error::new)?; + let pyarray = array.into_pyarray(py); + Ok(pyarray) } /// Enum specifying the type of mesh contained in a `MeshWithData` diff --git a/splashsurf_lib/src/mesh.rs b/splashsurf_lib/src/mesh.rs index dfefa09..a12df31 100644 --- a/splashsurf_lib/src/mesh.rs +++ b/splashsurf_lib/src/mesh.rs @@ -232,7 +232,7 @@ impl TriangleOrQuadCell { pub struct MixedTriQuadMesh3d { /// Coordinates of all vertices of the mesh pub vertices: Vec>, - /// All triangle cells of the mesh + /// All triangle and quad cells of the mesh pub cells: Vec, } From 3abedba01e1b9aaa957231b21e4e60d47398cb38 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Fri, 29 Aug 2025 14:40:55 +0200 Subject: [PATCH 25/63] Py: Refactor, fix MeshWithData constructor --- pysplashsurf/src/mesh.rs | 40 +++++++++++++++---------- pysplashsurf/src/neighborhood_search.rs | 26 ++++++++++++++++ pysplashsurf/src/reconstruction.rs | 4 +-- pysplashsurf/src/uniform_grid.rs | 4 +-- pysplashsurf/src/utils.rs | 24 +++++++++------ 5 files changed, 69 insertions(+), 29 deletions(-) diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index 9026ad8..6345b32 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -547,8 +547,8 @@ pub struct PyTriMesh3d { inner: PyTriMesh3dData, } -impl_from_mesh!(PyTriMesh3d, TriMesh3d => PyTriMesh3dData::F32); -impl_from_mesh!(PyTriMesh3d, TriMesh3d => PyTriMesh3dData::F64); +enum_wrapper_impl_from!(PyTriMesh3d, TriMesh3d => PyTriMesh3dData::F32); +enum_wrapper_impl_from!(PyTriMesh3d, TriMesh3d => PyTriMesh3dData::F64); impl Default for PyTriMesh3d { fn default() -> Self { @@ -663,8 +663,8 @@ pub struct PyMixedTriQuadMesh3d { inner: PyMixedTriQuadMesh3dData, } -impl_from_mesh!(PyMixedTriQuadMesh3d, MixedTriQuadMesh3d => PyMixedTriQuadMesh3dData::F32); -impl_from_mesh!(PyMixedTriQuadMesh3d, MixedTriQuadMesh3d => PyMixedTriQuadMesh3dData::F64); +enum_wrapper_impl_from!(PyMixedTriQuadMesh3d, MixedTriQuadMesh3d => PyMixedTriQuadMesh3dData::F32); +enum_wrapper_impl_from!(PyMixedTriQuadMesh3d, MixedTriQuadMesh3d => PyMixedTriQuadMesh3dData::F64); impl PyMixedTriQuadMesh3d { pub fn try_from_generic(mut mesh: MixedTriQuadMesh3d) -> PyResult { @@ -770,11 +770,22 @@ pub enum PyMesh3dData { enum_impl_from!(PyMesh3dData, Py => PyMesh3dData::Tri3d); enum_impl_from!(PyMesh3dData, Py => PyMesh3dData::MixedTriQuad3d); -enum PyMeshAttribute { +enum PyMeshAttributeData { F32(OwnedMeshAttribute), F64(OwnedMeshAttribute), } +#[gen_stub_pyclass] +#[pyclass] +pub struct PyMeshAttribute { + inner: PyMeshAttributeData, +} + +enum_wrapper_impl_from!(PyMeshAttribute, OwnedMeshAttribute => PyMeshAttributeData::F32); +enum_wrapper_impl_from!(PyMeshAttribute, OwnedMeshAttribute => PyMeshAttributeData::F64); + +impl PyMeshAttribute {} + #[gen_stub_pyclass] #[pyclass] #[pyo3(name = "MeshWithData")] @@ -811,23 +822,20 @@ impl PyMeshWithData { // Deconstruct the input mesh let MeshWithData { mut mesh, - point_attributes: _, - cell_attributes: _, + mut point_attributes, + mut cell_attributes, } = mesh_with_data; // TODO: Convert attributes - use std::any::TypeId; - if TypeId::of::() == TypeId::of::>() { - let mesh_ref = unsafe { std::mem::transmute::<&mut M, &mut TriMesh3d>(&mut mesh) }; - let mesh = std::mem::take(mesh_ref); - let tri_mesh = PyTriMesh3d::try_from_generic(mesh)?; - Self::try_from_pymesh(py, tri_mesh) - } else if TypeId::of::() == TypeId::of::>() { - let mesh_ref = unsafe { std::mem::transmute::<&mut M, &mut TriMesh3d>(&mut mesh) }; - let mesh = 
std::mem::take(mesh_ref); + if let Some(mesh) = transmute_if_same::>(&mut mesh).map(std::mem::take) { let tri_mesh = PyTriMesh3d::try_from_generic(mesh)?; Self::try_from_pymesh(py, tri_mesh) + } else if let Some(mesh) = + transmute_if_same::>(&mut mesh).map(std::mem::take) + { + let quad_mesh = PyMixedTriQuadMesh3d::try_from_generic(mesh)?; + Self::try_from_pymesh(py, quad_mesh) } else { Err(pyerr_only_tri_and_tri_quad_mesh()) } diff --git a/pysplashsurf/src/neighborhood_search.rs b/pysplashsurf/src/neighborhood_search.rs index d0e2028..18ecb00 100644 --- a/pysplashsurf/src/neighborhood_search.rs +++ b/pysplashsurf/src/neighborhood_search.rs @@ -4,6 +4,32 @@ use splashsurf_lib::{nalgebra::Vector3, neighborhood_search::*}; use crate::aabb::{Aabb3dF32, Aabb3dF64}; +/* +#[gen_stub_pyfunction] +#[pyfunction] +#[pyo3(name = "neighborhood_search_spatial_hashing_parallel")] +#[pyo3(signature = (domain, particle_positions, search_radius))] +pub fn neighborhood_search_spatial_hashing_parallel<'py>( + domain: &Aabb3dF64, + particle_positions: &Bound<'py, PyArray2>, + search_radius: f64, +) -> PyResult>> { + let mut nl: Vec> = Vec::new(); + + let particle_positions: PyReadonlyArray2 = particle_positions.extract()?; + let particle_positions = particle_positions.as_slice()?; + let particle_positions: &[Vector3] = bytemuck::cast_slice(particle_positions); + + neighborhood_search_spatial_hashing_parallel::( + &domain.inner, + particle_positions, + search_radius, + &mut nl, + ); + + Ok(nl) +}*/ + #[pyfunction] #[pyo3(name = "neighborhood_search_spatial_hashing_parallel_f64")] #[pyo3(signature = (domain, particle_positions, search_radius))] diff --git a/pysplashsurf/src/reconstruction.rs b/pysplashsurf/src/reconstruction.rs index 7b262d8..53d46e7 100644 --- a/pysplashsurf/src/reconstruction.rs +++ b/pysplashsurf/src/reconstruction.rs @@ -26,8 +26,8 @@ pub struct PySurfaceReconstruction { inner: PySurfaceReconstructionData, } -impl_from_mesh!(PySurfaceReconstruction, SurfaceReconstruction => PySurfaceReconstructionData::F32); -impl_from_mesh!(PySurfaceReconstruction, SurfaceReconstruction => PySurfaceReconstructionData::F64); +enum_wrapper_impl_from!(PySurfaceReconstruction, SurfaceReconstruction => PySurfaceReconstructionData::F32); +enum_wrapper_impl_from!(PySurfaceReconstruction, SurfaceReconstruction => PySurfaceReconstructionData::F64); impl PySurfaceReconstruction { pub fn try_from_generic( diff --git a/pysplashsurf/src/uniform_grid.rs b/pysplashsurf/src/uniform_grid.rs index e87bb11..574c156 100644 --- a/pysplashsurf/src/uniform_grid.rs +++ b/pysplashsurf/src/uniform_grid.rs @@ -16,8 +16,8 @@ pub struct PyUniformGrid { inner: PyUniformGridData, } -impl_from_mesh!(PyUniformGrid, UniformGrid => PyUniformGridData::F32); -impl_from_mesh!(PyUniformGrid, UniformGrid => PyUniformGridData::F64); +enum_wrapper_impl_from!(PyUniformGrid, UniformGrid => PyUniformGridData::F32); +enum_wrapper_impl_from!(PyUniformGrid, UniformGrid => PyUniformGridData::F64); impl PyUniformGrid { pub(crate) fn as_f32(&self) -> Option<&UniformGrid> { diff --git a/pysplashsurf/src/utils.rs b/pysplashsurf/src/utils.rs index 07086cc..b130980 100644 --- a/pysplashsurf/src/utils.rs +++ b/pysplashsurf/src/utils.rs @@ -23,7 +23,7 @@ pub(crate) fn pyerr_only_tri_and_tri_quad_mesh() -> PyErr { ) } -macro_rules! impl_from_mesh { +macro_rules! enum_wrapper_impl_from { ($pyclass:ident, $mesh:ty => $target_enum:path) => { impl From<$mesh> for $pyclass { fn from(mesh: $mesh) -> Self { @@ -46,20 +46,26 @@ macro_rules! 
enum_impl_from { } pub(crate) use enum_impl_from; -pub(crate) use impl_from_mesh; +pub(crate) use enum_wrapper_impl_from; + +pub(crate) fn transmute_if_same( + value: &mut GenericSrc, +) -> Option<&mut ConcreteSrc> { + if std::any::TypeId::of::() == std::any::TypeId::of::() { + Some(unsafe { std::mem::transmute::<&mut GenericSrc, &mut ConcreteSrc>(value) }) + } else { + None + } +} /// Transmutes from a generic type to a concrete type if they are identical, takes the value and converts it into the target type -pub fn transmute_take_into< +pub(crate) fn transmute_take_into< GenericSrc: 'static, ConcreteSrc: Default + Into + 'static, Target, >( value: &mut GenericSrc, ) -> Option { - if std::any::TypeId::of::() == std::any::TypeId::of::() { - let value_ref = unsafe { std::mem::transmute::<&mut GenericSrc, &mut ConcreteSrc>(value) }; - Some(std::mem::take(value_ref).into()) - } else { - None - } + transmute_if_same::(value) + .map(|value_ref| std::mem::take(value_ref).into()) } From eca2f3f804301ddd3a52cb23515d54f36aa6a26d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Fri, 29 Aug 2025 15:38:13 +0200 Subject: [PATCH 26/63] Py: WIP: Mesh attributes --- pysplashsurf/pysplashsurf/pysplashsurf.pyi | 31 ++++- pysplashsurf/src/lib.rs | 1 + pysplashsurf/src/mesh.rs | 135 ++++++++++++++++++--- pysplashsurf/src/post_processing.rs | 2 +- pysplashsurf/src/utils.rs | 13 +- 5 files changed, 158 insertions(+), 24 deletions(-) diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index 5813236..3605496 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -195,11 +195,28 @@ class Aabb3dF64: Returns the smallest cubical AABB with the same center that encloses this AABB """ +class MeshAttribute: + @property + def dtype(self) -> numpy.dtype: + r""" + Numpy dtype of the data stored in the attribute + """ + @property + def name(self) -> builtins.str: + r""" + Name of the attribute + """ + @property + def data(self) -> None: + r""" + View of the attribute data as a numpy array + """ + class MeshWithData: @property def dtype(self) -> numpy.dtype: r""" - Returns the numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) + Numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) """ @property def mesh_type(self) -> MeshType: @@ -207,6 +224,10 @@ class MeshWithData: Returns the type of the underlying mesh """ @property + def point_attributes(self) -> typing.List[MeshAttribute]: ... + @property + def cell_attributes(self) -> typing.List[MeshAttribute]: ... 
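# Sketch of reading the attribute metadata exposed above, assuming `mesh_with_data`
# is a MeshWithData instance that carries point and/or cell attributes:
#
for attr in mesh_with_data.point_attributes:
    print(attr.name, attr.dtype)   # attribute name and the numpy dtype of its values
for attr in mesh_with_data.cell_attributes:
    print(attr.name, attr.dtype)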
+ @property def mesh(self) -> typing.Union[TriMesh3d, MixedTriQuadMesh3d]: r""" The contained mesh without associated data and attributes @@ -222,7 +243,7 @@ class MixedTriQuadMesh3d: @property def dtype(self) -> numpy.dtype: r""" - Returns the numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) + Numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) """ @property def vertices(self) -> numpy.typing.NDArray[typing.Any]: @@ -231,11 +252,11 @@ class MixedTriQuadMesh3d: """ def get_triangles(self) -> numpy.typing.NDArray[numpy.uint64]: r""" - Extracts the triangles of the mesh as an `Nx3` array of vertex indices + Returns a copy of all triangle cells of the mesh as an `Nx3` array of vertex indices """ def get_quads(self) -> numpy.typing.NDArray[numpy.uint64]: r""" - Extracts the quads of the mesh as an `Nx3` array of vertex indices + Returns a copy of all quad cells of the mesh as an `Nx3` array of vertex indices """ class MixedTriQuadMesh3dF32: @@ -431,7 +452,7 @@ class TriMesh3d: @property def dtype(self) -> numpy.dtype: r""" - Returns the numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) + Numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) """ @property def vertices(self) -> numpy.typing.NDArray[typing.Any]: diff --git a/pysplashsurf/src/lib.rs b/pysplashsurf/src/lib.rs index 30274a4..5cda7ca 100644 --- a/pysplashsurf/src/lib.rs +++ b/pysplashsurf/src/lib.rs @@ -42,6 +42,7 @@ fn pysplashsurf(m: &Bound<'_, PyModule>) -> PyResult<()> { m.add_class::()?; m.add_class::()?; m.add_class::()?; + m.add_class::()?; m.add_class::()?; m.add_class::()?; diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index 6345b32..457b0e8 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -601,7 +601,7 @@ impl PyTriMesh3d { #[gen_stub_pymethods] #[pymethods] impl PyTriMesh3d { - /// Returns the numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) + /// Numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) #[getter] pub fn dtype<'py>(&self, py: Python<'py>) -> Bound<'py, PyArrayDescr> { match &self.inner { @@ -681,7 +681,7 @@ impl PyMixedTriQuadMesh3d { #[gen_stub_pymethods] #[pymethods] impl PyMixedTriQuadMesh3d { - /// Returns the numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) + /// Numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) #[getter] pub fn dtype<'py>(&self, py: Python<'py>) -> Bound<'py, PyArrayDescr> { match &self.inner { @@ -777,6 +777,7 @@ enum PyMeshAttributeData { #[gen_stub_pyclass] #[pyclass] +#[pyo3(name = "MeshAttribute")] pub struct PyMeshAttribute { inner: PyMeshAttributeData, } @@ -784,15 +785,49 @@ pub struct PyMeshAttribute { enum_wrapper_impl_from!(PyMeshAttribute, OwnedMeshAttribute => PyMeshAttributeData::F32); enum_wrapper_impl_from!(PyMeshAttribute, OwnedMeshAttribute => PyMeshAttributeData::F64); -impl PyMeshAttribute {} +#[gen_stub_pymethods] +#[pymethods] +impl PyMeshAttribute { + /// Numpy dtype of the data stored in the attribute + #[getter] + pub fn dtype<'py>(&self, py: Python<'py>) -> Bound<'py, PyArrayDescr> { + match &self.inner { + PyMeshAttributeData::F32(attr) => match attr.data { + OwnedAttributeData::ScalarU64(_) => np::dtype::(py), + OwnedAttributeData::ScalarReal(_) => np::dtype::(py), + OwnedAttributeData::Vector3Real(_) => np::dtype::(py), + }, + PyMeshAttributeData::F64(attr) => match attr.data { + 
OwnedAttributeData::ScalarU64(_) => np::dtype::(py), + OwnedAttributeData::ScalarReal(_) => np::dtype::(py), + OwnedAttributeData::Vector3Real(_) => np::dtype::(py), + }, + } + } + + /// Name of the attribute + #[getter] + pub fn name(&self) -> String { + match &self.inner { + PyMeshAttributeData::F32(attr) => attr.name.clone(), + PyMeshAttributeData::F64(attr) => attr.name.clone(), + } + } + + /// View of the attribute data as a numpy array + #[getter] + pub fn data<'py>(&self, _py: Python<'py>) -> PyResult<()> { + unimplemented!() + } +} #[gen_stub_pyclass] #[pyclass] #[pyo3(name = "MeshWithData")] pub struct PyMeshWithData { mesh: PyMesh3dData, - point_attributes: Vec, - cell_attributes: Vec, + point_attributes: Vec>, + cell_attributes: Vec>, } impl PyMeshWithData { @@ -826,26 +861,76 @@ impl PyMeshWithData { mut cell_attributes, } = mesh_with_data; - // TODO: Convert attributes - - if let Some(mesh) = transmute_if_same::>(&mut mesh).map(std::mem::take) { - let tri_mesh = PyTriMesh3d::try_from_generic(mesh)?; - Self::try_from_pymesh(py, tri_mesh) - } else if let Some(mesh) = - transmute_if_same::>(&mut mesh).map(std::mem::take) + // Convert the inner mesh + let mut mesh_with_data = + if let Some(mesh) = transmute_same_take::>(&mut mesh) { + PyTriMesh3d::try_from_generic(mesh) + .and_then(|tri_mesh| Self::try_from_pymesh(py, tri_mesh)) + } else if let Some(mesh) = transmute_same_take::>(&mut mesh) { + PyMixedTriQuadMesh3d::try_from_generic(mesh) + .and_then(|quad_mesh| Self::try_from_pymesh(py, quad_mesh)) + } else { + Err(pyerr_only_tri_and_tri_quad_mesh()) + }?; + + fn try_convert_attribute_vec<'a, In: Real + Element, Out: Real + Element>( + py: Python<'_>, + attributes: &mut Vec>, + dest: &mut Vec>, + ) -> Option<()> + where + PyMeshAttribute: From>, { - let quad_mesh = PyMixedTriQuadMesh3d::try_from_generic(mesh)?; - Self::try_from_pymesh(py, quad_mesh) + transmute_same_take::>, Vec>>( + attributes, + ) + .map(|a| { + a.into_iter() + .map(|a| { + PyMeshAttribute::from(a) + .into_pyobject(py) + .expect("allocation should not fail") + .into() + }) + .collect::>>() + }) + .and_then(|a| Some(*dest = a)) + } + + if std::any::TypeId::of::() == std::any::TypeId::of::() { + try_convert_attribute_vec::( + py, + &mut point_attributes, + &mut mesh_with_data.point_attributes, + ); + try_convert_attribute_vec::( + py, + &mut cell_attributes, + &mut mesh_with_data.cell_attributes, + ); + } else if std::any::TypeId::of::() == std::any::TypeId::of::() { + try_convert_attribute_vec::( + py, + &mut point_attributes, + &mut mesh_with_data.point_attributes, + ); + try_convert_attribute_vec::( + py, + &mut cell_attributes, + &mut mesh_with_data.cell_attributes, + ); } else { - Err(pyerr_only_tri_and_tri_quad_mesh()) + return Err(pyerr_unsupported_scalar()); } + + Ok(mesh_with_data) } } #[gen_stub_pymethods] #[pymethods] impl PyMeshWithData { - /// Returns the numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) + /// Numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) #[getter] pub fn dtype<'py>(&self, py: Python<'py>) -> Bound<'py, PyArrayDescr> { match &self.mesh { @@ -863,6 +948,24 @@ impl PyMeshWithData { } } + #[getter] + #[gen_stub(override_return_type(type_repr="typing.List[MeshAttribute]", imports=()))] + pub fn point_attributes<'py>(&self, py: Python<'py>) -> PyResult> { + PyList::new( + py, + self.point_attributes.iter().map(|attr| attr.clone_ref(py)), + ) + } + + #[getter] + 
#[gen_stub(override_return_type(type_repr="typing.List[MeshAttribute]", imports=()))] + pub fn cell_attributes<'py>(&self, py: Python<'py>) -> PyResult> { + PyList::new( + py, + self.cell_attributes.iter().map(|attr| attr.clone_ref(py)), + ) + } + pub fn as_tri3d<'py, 'a>(&'a self, py: Python<'py>) -> Option> { match &self.mesh { PyMesh3dData::Tri3d(mesh) => Some(mesh.clone_ref(py)), diff --git a/pysplashsurf/src/post_processing.rs b/pysplashsurf/src/post_processing.rs index 7fb5f81..b6b741b 100644 --- a/pysplashsurf/src/post_processing.rs +++ b/pysplashsurf/src/post_processing.rs @@ -61,7 +61,7 @@ pub fn convert_tris_to_quads<'py>( if let Ok(mesh) = mesh.downcast::() { let mut data_mesh = PyMeshWithData::try_from_pymesh(py, quad_mesh)?; - // TODO: transfer of point attributes not implemented yet + // TODO: transfer of point attributes not implemented yet, has to clone the data unimplemented!("transfer of point attributes not implemented yet"); } else { quad_mesh.into_bound_py_any(py) diff --git a/pysplashsurf/src/utils.rs b/pysplashsurf/src/utils.rs index b130980..f839ae4 100644 --- a/pysplashsurf/src/utils.rs +++ b/pysplashsurf/src/utils.rs @@ -48,7 +48,8 @@ macro_rules! enum_impl_from { pub(crate) use enum_impl_from; pub(crate) use enum_wrapper_impl_from; -pub(crate) fn transmute_if_same( +/// Transmutes a mutable reference from a generic type to a concrete type if they are identical, otherwise returns None +pub(crate) fn transmute_same_mut( value: &mut GenericSrc, ) -> Option<&mut ConcreteSrc> { if std::any::TypeId::of::() == std::any::TypeId::of::() { @@ -58,6 +59,14 @@ pub(crate) fn transmute_if_same( } } +/// Transmutes between types if they are identical and takes the value out of the source +pub(crate) fn transmute_same_take( + value: &mut GenericSrc, +) -> Option { + transmute_same_mut::(value) + .map(|value_ref| std::mem::take(value_ref)) +} + /// Transmutes from a generic type to a concrete type if they are identical, takes the value and converts it into the target type pub(crate) fn transmute_take_into< GenericSrc: 'static, @@ -66,6 +75,6 @@ pub(crate) fn transmute_take_into< >( value: &mut GenericSrc, ) -> Option { - transmute_if_same::(value) + transmute_same_mut::(value) .map(|value_ref| std::mem::take(value_ref).into()) } From b0fa70bebbebcf1194896b935be65c1918811e8f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Fri, 29 Aug 2025 16:22:16 +0200 Subject: [PATCH 27/63] Py: Getter for data of mesh attributes --- pysplashsurf/pysplashsurf/pysplashsurf.pyi | 2 +- pysplashsurf/src/mesh.rs | 91 ++++++++++++++++------ pysplashsurf/src/pipeline.rs | 3 +- pysplashsurf/src/post_processing.rs | 15 +++- pysplashsurf/src/utils.rs | 3 +- 5 files changed, 81 insertions(+), 33 deletions(-) diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index 3605496..0ceab3e 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -207,7 +207,7 @@ class MeshAttribute: Name of the attribute """ @property - def data(self) -> None: + def data(self) -> numpy.typing.NDArray[typing.Any]: r""" View of the attribute data as a numpy array """ diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index 457b0e8..46788e5 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -2,11 +2,11 @@ use crate::NumpyUsize; use crate::aabb::{Aabb3dF32, Aabb3dF64}; use crate::utils::*; use bytemuck::{NoUninit, Pod}; -use ndarray::{Array2, ArrayView, ArrayView2}; +use 
ndarray::{Array2, ArrayView, ArrayView1, ArrayView2}; use numpy as np; use numpy::{ - Element, IntoPyArray, PyArray, PyArray2, PyArrayDescr, PyArrayMethods, PyReadonlyArray2, - PyUntypedArray, ToPyArray, + Element, IntoPyArray, PyArray, PyArray1, PyArray2, PyArrayDescr, PyArrayMethods, + PyReadonlyArray2, PyUntypedArray, ToPyArray, }; use pyo3::exceptions::PyTypeError; use pyo3::{ @@ -70,18 +70,48 @@ fn add_attribute_with_name<'py, R: Real + Element>( } } -fn get_vertices_generic<'py, R: Real + Element>( - vertices: &[Vector3], +fn get_vec_generic<'py, R: Element>( + values: &[R], + shape: (usize, usize), container: Bound<'py, PyAny>, ) -> PyResult> { - let coordinates: &[R] = bytemuck::cast_slice(vertices); - let array: ArrayView2 = - ArrayView::from_shape((vertices.len(), 3), coordinates).map_err(anyhow::Error::new)?; - let pyarray = unsafe { PyArray2::borrow_from_array(&array, container) }; - Ok(pyarray - .into_any() - .downcast_into::() - .expect("downcast should not fail")) + assert_eq!( + shape.0 * shape.1, + values.len(), + "shape does not match values length" + ); + if shape.1 == 1 { + let array: ArrayView1 = + ArrayView::from_shape((values.len(),), values).map_err(anyhow::Error::new)?; + let pyarray = unsafe { PyArray1::borrow_from_array(&array, container) }; + Ok(pyarray + .into_any() + .downcast_into::() + .expect("downcast should not fail")) + } else { + let array: ArrayView2 = + ArrayView::from_shape(shape, values).map_err(anyhow::Error::new)?; + let pyarray = unsafe { PyArray2::borrow_from_array(&array, container) }; + Ok(pyarray + .into_any() + .downcast_into::() + .expect("downcast should not fail")) + } +} + +fn get_scalar_generic<'py, R: Element>( + values: &[R], + container: Bound<'py, PyAny>, +) -> PyResult> { + get_vec_generic(values, (values.len(), 1), container) +} + +fn get_vec3f_generic<'py, R: Real + Element>( + values: &[Vector3], + container: Bound<'py, PyAny>, +) -> PyResult> { + let coordinates: &[R] = bytemuck::cast_slice(values); + get_vec_generic(coordinates, (values.len(), 3), container) } fn get_triangles_generic<'py>( @@ -89,10 +119,8 @@ fn get_triangles_generic<'py>( container: Bound<'py, PyAny>, ) -> PyResult>> { let vertex_indices: &[NumpyUsize] = bytemuck::cast_slice(triangles); - let array: ArrayView2 = - ArrayView::from_shape((triangles.len(), 3), vertex_indices).map_err(anyhow::Error::new)?; - let pyarray = unsafe { PyArray2::borrow_from_array(&array, container) }; - Ok(pyarray) + let view = get_vec_generic(vertex_indices, (triangles.len(), 3), container)?.into_any(); + Ok(view.downcast_into::>()?) 
} fn compute_normals_generic<'py, R: Real + Element>( @@ -614,8 +642,8 @@ impl PyTriMesh3d { #[getter] pub fn vertices<'py>(this: Bound<'py, Self>) -> PyResult> { match &this.borrow().inner { - PyTriMesh3dData::F32(mesh) => get_vertices_generic(mesh.vertices(), this.into_any()), - PyTriMesh3dData::F64(mesh) => get_vertices_generic(mesh.vertices(), this.into_any()), + PyTriMesh3dData::F32(mesh) => get_vec3f_generic(mesh.vertices(), this.into_any()), + PyTriMesh3dData::F64(mesh) => get_vec3f_generic(mesh.vertices(), this.into_any()), } } @@ -695,10 +723,10 @@ impl PyMixedTriQuadMesh3d { pub fn vertices<'py>(this: Bound<'py, Self>) -> PyResult> { match &this.borrow().inner { PyMixedTriQuadMesh3dData::F32(mesh) => { - get_vertices_generic(mesh.vertices(), this.into_any()) + get_vec3f_generic(mesh.vertices(), this.into_any()) } PyMixedTriQuadMesh3dData::F64(mesh) => { - get_vertices_generic(mesh.vertices(), this.into_any()) + get_vec3f_generic(mesh.vertices(), this.into_any()) } } } @@ -770,6 +798,7 @@ pub enum PyMesh3dData { enum_impl_from!(PyMesh3dData, Py => PyMesh3dData::Tri3d); enum_impl_from!(PyMesh3dData, Py => PyMesh3dData::MixedTriQuad3d); +#[derive(Clone)] enum PyMeshAttributeData { F32(OwnedMeshAttribute), F64(OwnedMeshAttribute), @@ -778,6 +807,7 @@ enum PyMeshAttributeData { #[gen_stub_pyclass] #[pyclass] #[pyo3(name = "MeshAttribute")] +#[derive(Clone)] pub struct PyMeshAttribute { inner: PyMeshAttributeData, } @@ -816,8 +846,19 @@ impl PyMeshAttribute { /// View of the attribute data as a numpy array #[getter] - pub fn data<'py>(&self, _py: Python<'py>) -> PyResult<()> { - unimplemented!() + pub fn data<'py>(this: Bound<'py, Self>) -> PyResult> { + match &this.borrow().inner { + PyMeshAttributeData::F32(attr) => match &attr.data { + OwnedAttributeData::ScalarU64(data) => get_scalar_generic(data, this.into_any()), + OwnedAttributeData::ScalarReal(data) => get_scalar_generic(data, this.into_any()), + OwnedAttributeData::Vector3Real(data) => get_vec3f_generic(data, this.into_any()), + }, + PyMeshAttributeData::F64(attr) => match &attr.data { + OwnedAttributeData::ScalarU64(data) => get_scalar_generic(data, this.into_any()), + OwnedAttributeData::ScalarReal(data) => get_scalar_generic(data, this.into_any()), + OwnedAttributeData::Vector3Real(data) => get_vec3f_generic(data, this.into_any()), + }, + } } } @@ -826,8 +867,8 @@ impl PyMeshAttribute { #[pyo3(name = "MeshWithData")] pub struct PyMeshWithData { mesh: PyMesh3dData, - point_attributes: Vec>, - cell_attributes: Vec>, + pub(crate) point_attributes: Vec>, + pub(crate) cell_attributes: Vec>, } impl PyMeshWithData { diff --git a/pysplashsurf/src/pipeline.rs b/pysplashsurf/src/pipeline.rs index 58a4a17..337060b 100644 --- a/pysplashsurf/src/pipeline.rs +++ b/pysplashsurf/src/pipeline.rs @@ -139,8 +139,7 @@ pub fn reconstruction_pipeline<'py>( mesh_aabb, mesh_aabb_clamp_vertices, }; - - // TODO: Support transfer of attributes + fn reconstruction_to_pymesh<'py, I: Index, R: Real + Element>( py: Python<'py>, reconstruction: splashsurf::reconstruct::ReconstructionResult, diff --git a/pysplashsurf/src/post_processing.rs b/pysplashsurf/src/post_processing.rs index b6b741b..69bdce5 100644 --- a/pysplashsurf/src/post_processing.rs +++ b/pysplashsurf/src/post_processing.rs @@ -7,7 +7,8 @@ use pyo3_stub_gen::derive::gen_stub_pyfunction; use splashsurf_lib::nalgebra::Vector3; use crate::mesh::{ - PyMeshWithData, PyMixedTriQuadMesh3d, PyVertexVertexConnectivity, get_triangle_mesh_generic, + PyMeshAttribute, PyMeshWithData, 
PyMixedTriQuadMesh3d, PyVertexVertexConnectivity, + get_triangle_mesh_generic, }; use crate::uniform_grid::PyUniformGrid; use crate::utils::*; @@ -61,8 +62,16 @@ pub fn convert_tris_to_quads<'py>( if let Ok(mesh) = mesh.downcast::() { let mut data_mesh = PyMeshWithData::try_from_pymesh(py, quad_mesh)?; - // TODO: transfer of point attributes not implemented yet, has to clone the data - unimplemented!("transfer of point attributes not implemented yet"); + data_mesh.point_attributes = mesh + .borrow() + .point_attributes + .iter() + .map(|attr| { + let attr_clone: PyMeshAttribute = attr.borrow(py).clone(); + attr_clone.into_pyobject(py).map(Py::from) + }) + .collect::>()?; + data_mesh.into_bound_py_any(py) } else { quad_mesh.into_bound_py_any(py) } diff --git a/pysplashsurf/src/utils.rs b/pysplashsurf/src/utils.rs index f839ae4..8935f18 100644 --- a/pysplashsurf/src/utils.rs +++ b/pysplashsurf/src/utils.rs @@ -63,8 +63,7 @@ pub(crate) fn transmute_same_mut( pub(crate) fn transmute_same_take( value: &mut GenericSrc, ) -> Option { - transmute_same_mut::(value) - .map(|value_ref| std::mem::take(value_ref)) + transmute_same_mut::(value).map(|value_ref| std::mem::take(value_ref)) } /// Transmutes from a generic type to a concrete type if they are identical, takes the value and converts it into the target type From 55d8286804ed268773c955355606f9206ace3d89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Fri, 29 Aug 2025 23:08:26 +0200 Subject: [PATCH 28/63] Py: Add basic AABB, update neighborhood search --- pysplashsurf/pysplashsurf/__init__.py | 30 ------ pysplashsurf/src/aabb.rs | 82 +++++++++++++++- pysplashsurf/src/lib.rs | 9 +- pysplashsurf/src/marching_cubes.rs | 10 +- pysplashsurf/src/mesh.rs | 1 + pysplashsurf/src/neighborhood_search.rs | 118 +++++++++++------------- pysplashsurf/src/pipeline.rs | 4 +- 7 files changed, 146 insertions(+), 108 deletions(-) diff --git a/pysplashsurf/pysplashsurf/__init__.py b/pysplashsurf/pysplashsurf/__init__.py index 7127e9e..22b4877 100644 --- a/pysplashsurf/pysplashsurf/__init__.py +++ b/pysplashsurf/pysplashsurf/__init__.py @@ -199,33 +199,3 @@ def create_aabb_object_from_points(points): return Aabb3dF64.from_points(points) else: raise ValueError("Invalid data type (only float32 and float64 are supported, consider explicitly specifying the dtype for points)") - -def neighborhood_search_spatial_hashing_parallel( - domain, - particle_positions: np.ndarray, - search_radius: float -): - """Performs a neighborhood search (multi-threaded implementation) - - Returns the indices of all neighboring particles in the given search radius per particle as a `list[list[int]]`. 
- - Parameters - ---------- - domain: Aabb3dF32 | Aabb3dF64 - Axis-aligned bounding box of the domain - - particle_positions: np.ndarray - 2D-Array of particle positions - - search_radius: float - Search radius - """ - - if type(domain) is Aabb3dF32: - return neighborhood_search_spatial_hashing_parallel_f32(domain, particle_positions, search_radius) - - elif type(domain) is Aabb3dF64: - return neighborhood_search_spatial_hashing_parallel_f64(domain, particle_positions, search_radius) - - else: - raise ValueError("Invalid domain type") diff --git a/pysplashsurf/src/aabb.rs b/pysplashsurf/src/aabb.rs index fcba634..b0f7957 100644 --- a/pysplashsurf/src/aabb.rs +++ b/pysplashsurf/src/aabb.rs @@ -1,7 +1,85 @@ -use numpy::{PyArray, PyArray1, PyArray2, PyReadonlyArray2}; +use numpy as np; +use numpy::prelude::*; +use numpy::{Element, PyArray, PyArray1, PyArray2, PyReadonlyArray2, PyUntypedArray}; use pyo3::{PyResult, prelude::*}; use pyo3_stub_gen::derive::*; -use splashsurf_lib::{Aabb3d, nalgebra::Vector3}; +use splashsurf_lib::{Aabb3d, Real, nalgebra::Vector3}; + +use crate::utils::*; + +#[gen_stub_pyclass] +#[pyclass] +#[pyo3(name = "Aabb3d")] +pub struct PyAabb3d { + min: Vector3, + max: Vector3, +} + +impl From> for PyAabb3d { + fn from(aabb: Aabb3d) -> Self { + Self { + min: aabb.min().map(|x| x.to_f64().unwrap()), + max: aabb.max().map(|x| x.to_f64().unwrap()), + } + } +} + +impl PyAabb3d { + pub(crate) fn inner(&self) -> Aabb3d { + Aabb3d::new( + self.min.map(|x| R::from_f64(x).unwrap()), + self.max.map(|x| R::from_f64(x).unwrap()), + ) + } + + fn from_points_generic<'py, R: Real + Element>( + points: &Bound<'py, PyArray2>, + ) -> PyResult { + let points = points.try_readonly()?; + let points_vec: &[Vector3] = bytemuck::cast_slice(points.as_slice()?); + Ok(Self::from(Aabb3d::par_from_points(points_vec))) + } +} + +#[gen_stub_pymethods] +#[pymethods] +impl PyAabb3d { + /// Constructs an AABB with the given min and max coordinates + #[staticmethod] + pub fn from_min_max<'py>(min: [f64; 3], max: [f64; 3]) -> Self { + // TODO: Check with numpy arrays as input + Self { + min: Vector3::from(min), + max: Vector3::from(max), + } + } + + /// Constructs the smallest AABB fitting around all the given points + #[staticmethod] + pub fn from_points<'py>(points: &Bound<'py, PyUntypedArray>) -> PyResult { + let py = points.py(); + let element_type = points.dtype(); + if element_type.is_equiv_to(&np::dtype::(py)) { + Self::from_points_generic(points.downcast::>()?) + } else if element_type.is_equiv_to(&np::dtype::(py)) { + Self::from_points_generic(points.downcast::>()?) + } else { + Err(pyerr_unsupported_scalar()) + } + } + + /// The min coordinate of the AABB + #[getter] + pub fn min<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray1> { + PyArray::from_slice(py, self.min.as_slice()) + } + + /// The max coordinate of the AABB + #[getter] + pub fn max<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray1> { + PyArray::from_slice(py, self.max.as_slice()) + } +} macro_rules! 
create_aabb3d_interface { ($name: ident, $type: ident) => { diff --git a/pysplashsurf/src/lib.rs b/pysplashsurf/src/lib.rs index 5cda7ca..763e9fc 100644 --- a/pysplashsurf/src/lib.rs +++ b/pysplashsurf/src/lib.rs @@ -50,9 +50,12 @@ fn pysplashsurf(m: &Bound<'_, PyModule>) -> PyResult<()> { m.add_class::()?; m.add_class::()?; + m.add_class::()?; m.add_class::()?; m.add_class::()?; + m.add_class::()?; + use wrap_pyfunction as wrap; m.add_function(wrap!(reconstruction::reconstruct_surface, m)?)?; @@ -67,11 +70,7 @@ fn pysplashsurf(m: &Bound<'_, PyModule>) -> PyResult<()> { )?)?; m.add_function(wrap!( - neighborhood_search::neighborhood_search_spatial_hashing_parallel_py_f32, - m - )?)?; - m.add_function(wrap!( - neighborhood_search::neighborhood_search_spatial_hashing_parallel_py_f64, + neighborhood_search::neighborhood_search_spatial_hashing_parallel, m )?)?; diff --git a/pysplashsurf/src/marching_cubes.rs b/pysplashsurf/src/marching_cubes.rs index 9a2b650..ee229ef 100644 --- a/pysplashsurf/src/marching_cubes.rs +++ b/pysplashsurf/src/marching_cubes.rs @@ -1,9 +1,11 @@ -use crate::mesh::get_triangle_mesh_generic; -use crate::uniform_grid::PyUniformGrid; -use crate::utils::*; +use numpy::PyUntypedArray; use pyo3::prelude::*; use pyo3_stub_gen::derive::*; +use crate::mesh::{PyTriMesh3d, get_triangle_mesh_generic}; +use crate::uniform_grid::PyUniformGrid; +use crate::utils::*; + /// Checks the consistency of a reconstructed surface mesh (watertightness, manifoldness), optionally returns a string with details if problems are found #[gen_stub_pyfunction] #[pyfunction] @@ -11,7 +13,7 @@ use pyo3_stub_gen::derive::*; #[pyo3(signature = (mesh, grid, *, check_closed = true, check_manifold = true, debug = false))] pub fn check_mesh_consistency<'py>( #[gen_stub(override_type(type_repr="typing.Union[TriMesh3d, MeshWithData]", imports=()))] - mesh: Bound<'py, PyAny>, + mesh: &Bound<'py, PyAny>, grid: &PyUniformGrid, check_closed: bool, check_manifold: bool, diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index 46788e5..8b2a8e5 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -548,6 +548,7 @@ impl PyVertexVertexConnectivity { } #[gen_stub_pymethods] +#[pymethods] impl PyVertexVertexConnectivity { /// Returns a copy of the contained connectivity data pub fn copy_connectivity(&self) -> Vec> { diff --git a/pysplashsurf/src/neighborhood_search.rs b/pysplashsurf/src/neighborhood_search.rs index 18ecb00..84b67b4 100644 --- a/pysplashsurf/src/neighborhood_search.rs +++ b/pysplashsurf/src/neighborhood_search.rs @@ -1,79 +1,67 @@ -use numpy::{PyArray2, PyReadonlyArray2}; +use numpy as np; +use numpy::prelude::*; +use numpy::{PyArray2, PyUntypedArray}; use pyo3::prelude::*; -use splashsurf_lib::{nalgebra::Vector3, neighborhood_search::*}; +use pyo3_stub_gen::derive::*; +use splashsurf_lib::nalgebra::Vector3; -use crate::aabb::{Aabb3dF32, Aabb3dF64}; +use crate::aabb::PyAabb3d; +use crate::utils::*; -/* +#[gen_stub_pyclass] +#[pyclass] +#[pyo3(name = "NeighborhoodLists")] +pub struct PyNeighborhoodLists { + inner: Vec>, +} + +impl From>> for PyNeighborhoodLists { + fn from(nl: Vec>) -> Self { + Self { inner: nl } + } +} + +/// Performs a neighborhood search using spatial hashing (multi-threaded implementation) #[gen_stub_pyfunction] #[pyfunction] #[pyo3(name = "neighborhood_search_spatial_hashing_parallel")] -#[pyo3(signature = (domain, particle_positions, search_radius))] +#[pyo3(signature = (particle_positions, domain, search_radius))] pub fn 
neighborhood_search_spatial_hashing_parallel<'py>( - domain: &Aabb3dF64, - particle_positions: &Bound<'py, PyArray2>, + particle_positions: &Bound<'py, PyUntypedArray>, + domain: &Bound<'py, PyAabb3d>, search_radius: f64, -) -> PyResult>> { +) -> PyResult { let mut nl: Vec> = Vec::new(); - let particle_positions: PyReadonlyArray2 = particle_positions.extract()?; - let particle_positions = particle_positions.as_slice()?; - let particle_positions: &[Vector3] = bytemuck::cast_slice(particle_positions); - - neighborhood_search_spatial_hashing_parallel::( - &domain.inner, - particle_positions, - search_radius, - &mut nl, - ); - - Ok(nl) -}*/ - -#[pyfunction] -#[pyo3(name = "neighborhood_search_spatial_hashing_parallel_f64")] -#[pyo3(signature = (domain, particle_positions, search_radius))] -pub fn neighborhood_search_spatial_hashing_parallel_py_f64<'py>( - domain: &Aabb3dF64, - particle_positions: &Bound<'py, PyArray2>, - search_radius: f64, -) -> PyResult>> { - let mut nl: Vec> = Vec::new(); - - let particle_positions: PyReadonlyArray2 = particle_positions.extract()?; - let particle_positions = particle_positions.as_slice()?; - let particle_positions: &[Vector3] = bytemuck::cast_slice(particle_positions); - - neighborhood_search_spatial_hashing_parallel::( - &domain.inner, - particle_positions, - search_radius, - &mut nl, - ); - - Ok(nl) -} - -#[pyfunction] -#[pyo3(name = "neighborhood_search_spatial_hashing_parallel_f32")] -#[pyo3(signature = (domain, particle_positions, search_radius))] -pub fn neighborhood_search_spatial_hashing_parallel_py_f32<'py>( - domain: &Aabb3dF32, - particle_positions: &Bound<'py, PyArray2>, - search_radius: f32, -) -> PyResult>> { - let mut nl: Vec> = Vec::new(); + let py = particle_positions.py(); + let element_type = particle_positions.dtype(); + if element_type.is_equiv_to(&np::dtype::(py)) { + let particle_positions = particle_positions + .downcast::>()? + .try_readonly()?; + let particles: &[Vector3] = bytemuck::cast_slice(particle_positions.as_slice()?); - let particle_positions: PyReadonlyArray2 = particle_positions.extract()?; - let particle_positions = particle_positions.as_slice()?; - let particle_positions: &[Vector3] = bytemuck::cast_slice(particle_positions); + splashsurf_lib::neighborhood_search::neighborhood_search_spatial_hashing_parallel::( + &domain.borrow().inner(), + particles, + search_radius as f32, + &mut nl, + ); + } else if element_type.is_equiv_to(&np::dtype::(py)) { + let particle_positions = particle_positions + .downcast::>()? 
+ .try_readonly()?; + let particles: &[Vector3] = bytemuck::cast_slice(particle_positions.as_slice()?); - neighborhood_search_spatial_hashing_parallel::( - &domain.inner, - particle_positions, - search_radius, - &mut nl, - ); + splashsurf_lib::neighborhood_search::neighborhood_search_spatial_hashing_parallel::( + &domain.borrow().inner(), + particles, + search_radius, + &mut nl, + ); + } else { + return Err(pyerr_unsupported_scalar()); + } - Ok(nl) + Ok(nl.into()) } diff --git a/pysplashsurf/src/pipeline.rs b/pysplashsurf/src/pipeline.rs index 337060b..9f5b611 100644 --- a/pysplashsurf/src/pipeline.rs +++ b/pysplashsurf/src/pipeline.rs @@ -139,7 +139,7 @@ pub fn reconstruction_pipeline<'py>( mesh_aabb, mesh_aabb_clamp_vertices, }; - + fn reconstruction_to_pymesh<'py, I: Index, R: Real + Element>( py: Python<'py>, reconstruction: splashsurf::reconstruct::ReconstructionResult, @@ -186,7 +186,7 @@ fn reconstruction_pipeline_generic_impl<'py, I: Index, R: Real + Element>( parameters: &splashsurf_lib::Parameters, postprocessing_args: &splashsurf::reconstruct::ReconstructionPostprocessingParameters, ) -> Result, anyhow::Error> { - let particles: PyReadonlyArray2 = particles.readonly(); + let particles: PyReadonlyArray2 = particles.try_readonly()?; let particle_positions: &[Vector3] = bytemuck::cast_slice(particles.as_slice()?); enum AttributePyView<'a, R: Real + Element> { From c7af481f87b881261545406ea2459d3aa9ea499e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Fri, 29 Aug 2025 23:11:20 +0200 Subject: [PATCH 29/63] Py: Remove old code --- pysplashsurf/pysplashsurf/__init__.py | 20 - pysplashsurf/pysplashsurf/pysplashsurf.pyi | 502 +-------------------- pysplashsurf/src/aabb.rs | 157 +------ pysplashsurf/src/lib.rs | 12 - pysplashsurf/src/mesh.rs | 469 +------------------ 5 files changed, 26 insertions(+), 1134 deletions(-) diff --git a/pysplashsurf/pysplashsurf/__init__.py b/pysplashsurf/pysplashsurf/__init__.py index 22b4877..0101d18 100644 --- a/pysplashsurf/pysplashsurf/__init__.py +++ b/pysplashsurf/pysplashsurf/__init__.py @@ -42,26 +42,6 @@ def push_cell_attribute(self, name: str, data: np.ndarray, real_type): else: raise ValueError("Not a valid data array") -TriMeshWithDataF64.push_point_attribute = lambda self, name, data: push_point_attribute(self, name, data, np.float64) -TriMeshWithDataF64.push_point_attribute.__doc__ = push_point_attribute.__doc__ -TriMeshWithDataF32.push_point_attribute = lambda self, name, data: push_point_attribute(self, name, data, np.float32) -TriMeshWithDataF32.push_point_attribute.__doc__ = push_point_attribute.__doc__ - -TriMeshWithDataF64.push_cell_attribute = lambda self, name, data: push_cell_attribute(self, name, data, np.float64) -TriMeshWithDataF64.push_cell_attribute.__doc__ = push_cell_attribute.__doc__ -TriMeshWithDataF32.push_cell_attribute = lambda self, name, data: push_cell_attribute(self, name, data, np.float32) -TriMeshWithDataF32.push_cell_attribute.__doc__ = push_cell_attribute.__doc__ - -MixedTriQuadMeshWithDataF64.push_point_attribute = lambda self, name, data: push_point_attribute(self, name, data, np.float64) -MixedTriQuadMeshWithDataF64.push_point_attribute.__doc__ = push_point_attribute.__doc__ -MixedTriQuadMeshWithDataF32.push_point_attribute = lambda self, name, data: push_point_attribute(self, name, data, np.float32) -MixedTriQuadMeshWithDataF32.push_point_attribute.__doc__ = push_point_attribute.__doc__ - -MixedTriQuadMeshWithDataF64.push_cell_attribute = lambda self, name, data: push_cell_attribute(self, 
name, data, np.float64) -MixedTriQuadMeshWithDataF64.push_cell_attribute.__doc__ = push_cell_attribute.__doc__ -MixedTriQuadMeshWithDataF32.push_cell_attribute = lambda self, name, data: push_cell_attribute(self, name, data, np.float32) -MixedTriQuadMeshWithDataF32.push_cell_attribute.__doc__ = push_cell_attribute.__doc__ - def write_to_file(mesh_with_data, filename, file_format=None, consume_object=False): """Write the mesh and its attributes to a file using meshio diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index 0ceab3e..ce4abe2 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -7,193 +7,27 @@ import numpy.typing import typing from enum import Enum -class Aabb3dF32: - r""" - Aabb3d wrapper - """ - def __new__(cls, min:typing.Sequence[builtins.float], max:typing.Sequence[builtins.float]) -> Aabb3dF32: ... - @staticmethod - def from_points(points:numpy.typing.NDArray[numpy.float32]) -> Aabb3dF32: +class Aabb3d: + @property + def min(self) -> numpy.typing.NDArray[numpy.float64]: r""" - Constructs the smallest AABB fitting around all the given points + The min coordinate of the AABB """ - @staticmethod - def par_from_points(points:numpy.typing.NDArray[numpy.float32]) -> Aabb3dF32: + @property + def max(self) -> numpy.typing.NDArray[numpy.float64]: r""" - Constructs the smallest AABB fitting around all the given points, parallel version + The max coordinate of the AABB """ @staticmethod - def zeros() -> Aabb3dF32: + def from_min_max(min:typing.Sequence[builtins.float], max:typing.Sequence[builtins.float]) -> Aabb3d: r""" - Constructs a degenerate AABB with min and max set to zero + Constructs an AABB with the given min and max coordinates """ @staticmethod - def from_point(point:typing.Sequence[builtins.float]) -> Aabb3dF32: - r""" - Constructs a degenerate AABB with zero extents centered at the given point - """ - def min(self) -> numpy.typing.NDArray[numpy.float32]: - r""" - Returns the min coordinate of the bounding box - """ - def max(self) -> numpy.typing.NDArray[numpy.float32]: - r""" - Returns the max coordinate of the bounding box - """ - def is_consistent(self) -> builtins.bool: - r""" - Returns whether the AABB is consistent, i.e. `aabb.min()[i] <= aabb.max()[i]` for all `i` - """ - def is_degenerate(self) -> builtins.bool: - r""" - Returns whether the AABB is degenerate in any dimension, i.e. 
`aabb.min()[i] == aabb.max()[i]` for any `i` - """ - def extents(self) -> numpy.typing.NDArray[numpy.float32]: - r""" - Returns the extents of the bounding box (vector connecting min and max point of the box) - """ - def min_extent(self) -> builtins.float: - r""" - Returns the smallest scalar extent of the AABB over all of its dimensions - """ - def max_extent(self) -> builtins.float: - r""" - Returns the largest scalar extent of the AABB over all of its dimensions - """ - def centroid(self) -> numpy.typing.NDArray[numpy.float32]: - r""" - Returns the geometric centroid of the AABB (mean of the corner points) - """ - def contains_aabb(self, other:Aabb3dF32) -> builtins.bool: - r""" - Checks if the given AABB is inside of the AABB, the AABB is considered to be half-open to its max coordinate - """ - def contains_point(self, point:typing.Sequence[builtins.float]) -> builtins.bool: - r""" - Checks if the given point is inside of the AABB, the AABB is considered to be half-open to its max coordinate - """ - def translate(self, vector:typing.Sequence[builtins.float]) -> None: - r""" - Translates the AABB by the given vector - """ - def center_at_origin(self) -> None: - r""" - Translates the AABB to center it at the coordinate origin (moves the centroid to the coordinate origin) - """ - def scale_uniformly(self, scaling:builtins.float) -> None: - r""" - Multiplies a uniform, local scaling to the AABB (i.e. multiplying its extents as if it was centered at the origin) - """ - def join(self, other:Aabb3dF32) -> None: - r""" - Enlarges this AABB to the smallest AABB enclosing both itself and another AABB - """ - def join_with_point(self, point:typing.Sequence[builtins.float]) -> None: - r""" - Enlarges this AABB to the smallest AABB enclosing both itself and another point - """ - def grow_uniformly(self, margin:builtins.float) -> None: - r""" - Grows this AABB uniformly in all directions by the given scalar margin (i.e. adding the margin to min/max extents) - """ - def enclosing_cube(self) -> Aabb3dF32: - r""" - Returns the smallest cubical AABB with the same center that encloses this AABB - """ - -class Aabb3dF64: - r""" - Aabb3d wrapper - """ - def __new__(cls, min:typing.Sequence[builtins.float], max:typing.Sequence[builtins.float]) -> Aabb3dF64: ... - @staticmethod - def from_points(points:numpy.typing.NDArray[numpy.float64]) -> Aabb3dF64: + def from_points(points:numpy.typing.NDArray[typing.Any]) -> Aabb3d: r""" Constructs the smallest AABB fitting around all the given points """ - @staticmethod - def par_from_points(points:numpy.typing.NDArray[numpy.float64]) -> Aabb3dF64: - r""" - Constructs the smallest AABB fitting around all the given points, parallel version - """ - @staticmethod - def zeros() -> Aabb3dF64: - r""" - Constructs a degenerate AABB with min and max set to zero - """ - @staticmethod - def from_point(point:typing.Sequence[builtins.float]) -> Aabb3dF64: - r""" - Constructs a degenerate AABB with zero extents centered at the given point - """ - def min(self) -> numpy.typing.NDArray[numpy.float64]: - r""" - Returns the min coordinate of the bounding box - """ - def max(self) -> numpy.typing.NDArray[numpy.float64]: - r""" - Returns the max coordinate of the bounding box - """ - def is_consistent(self) -> builtins.bool: - r""" - Returns whether the AABB is consistent, i.e. `aabb.min()[i] <= aabb.max()[i]` for all `i` - """ - def is_degenerate(self) -> builtins.bool: - r""" - Returns whether the AABB is degenerate in any dimension, i.e. 
`aabb.min()[i] == aabb.max()[i]` for any `i` - """ - def extents(self) -> numpy.typing.NDArray[numpy.float64]: - r""" - Returns the extents of the bounding box (vector connecting min and max point of the box) - """ - def min_extent(self) -> builtins.float: - r""" - Returns the smallest scalar extent of the AABB over all of its dimensions - """ - def max_extent(self) -> builtins.float: - r""" - Returns the largest scalar extent of the AABB over all of its dimensions - """ - def centroid(self) -> numpy.typing.NDArray[numpy.float64]: - r""" - Returns the geometric centroid of the AABB (mean of the corner points) - """ - def contains_aabb(self, other:Aabb3dF64) -> builtins.bool: - r""" - Checks if the given AABB is inside of the AABB, the AABB is considered to be half-open to its max coordinate - """ - def contains_point(self, point:typing.Sequence[builtins.float]) -> builtins.bool: - r""" - Checks if the given point is inside of the AABB, the AABB is considered to be half-open to its max coordinate - """ - def translate(self, vector:typing.Sequence[builtins.float]) -> None: - r""" - Translates the AABB by the given vector - """ - def center_at_origin(self) -> None: - r""" - Translates the AABB to center it at the coordinate origin (moves the centroid to the coordinate origin) - """ - def scale_uniformly(self, scaling:builtins.float) -> None: - r""" - Multiplies a uniform, local scaling to the AABB (i.e. multiplying its extents as if it was centered at the origin) - """ - def join(self, other:Aabb3dF64) -> None: - r""" - Enlarges this AABB to the smallest AABB enclosing both itself and another AABB - """ - def join_with_point(self, point:typing.Sequence[builtins.float]) -> None: - r""" - Enlarges this AABB to the smallest AABB enclosing both itself and another point - """ - def grow_uniformly(self, margin:builtins.float) -> None: - r""" - Grows this AABB uniformly in all directions by the given scalar margin (i.e. adding the margin to min/max extents) - """ - def enclosing_cube(self) -> Aabb3dF64: - r""" - Returns the smallest cubical AABB with the same center that encloses this AABB - """ class MeshAttribute: @property @@ -259,135 +93,8 @@ class MixedTriQuadMesh3d: Returns a copy of all quad cells of the mesh as an `Nx3` array of vertex indices """ -class MixedTriQuadMesh3dF32: - r""" - MixedTriQuadMesh3d wrapper - """ - def get_vertices(self) -> numpy.typing.NDArray[numpy.float32]: - r""" - Returns a copy of the `Nx3` array of vertex positions - """ - def get_cells(self) -> builtins.list[builtins.list[builtins.int]]: - r""" - Returns a 2D list specifying the vertex indices either for a triangle or a quad - """ - def take_vertices(self) -> numpy.typing.NDArray[numpy.float32]: - r""" - Returns the `Nx3` array of vertex positions by moving it out of the mesh (zero copy) - """ - -class MixedTriQuadMesh3dF64: - r""" - MixedTriQuadMesh3d wrapper - """ - def get_vertices(self) -> numpy.typing.NDArray[numpy.float64]: - r""" - Returns a copy of the `Nx3` array of vertex positions - """ - def get_cells(self) -> builtins.list[builtins.list[builtins.int]]: - r""" - Returns a 2D list specifying the vertex indices either for a triangle or a quad - """ - def take_vertices(self) -> numpy.typing.NDArray[numpy.float64]: - r""" - Returns the `Nx3` array of vertex positions by moving it out of the mesh (zero copy) - """ - -class MixedTriQuadMeshWithDataF32: - r""" - MeshWithData wrapper - """ - def __new__(cls, mesh:MixedTriQuadMesh3dF32) -> MixedTriQuadMeshWithDataF32: ... 
- def get_mesh(self) -> MixedTriQuadMesh3dF32: - r""" - Returns a copy of the contained mesh - """ - def take_mesh(self) -> MixedTriQuadMesh3dF32: - r""" - Returns the contained mesh by moving it out of this object (zero copy) - """ - def par_clamp_with_aabb(self, aabb:Aabb3dF32, clamp_vertices:builtins.bool, keep_vertices:builtins.bool) -> MixedTriQuadMeshWithDataF32: - r""" - Removes all cells from the mesh that are completely outside of the given AABB and clamps the remaining cells to the boundary - """ - def push_point_attribute_scalar_u64(self, name:builtins.str, data:typing.Sequence[builtins.int]) -> None: ... - def push_point_attribute_scalar_real(self, name:builtins.str, data:typing.Sequence[builtins.float]) -> None: ... - def push_point_attribute_vector_real(self, name:builtins.str, data:numpy.typing.NDArray[numpy.float32]) -> None: ... - def push_cell_attribute_scalar_u64(self, name:builtins.str, data:typing.Sequence[builtins.int]) -> None: ... - def push_cell_attribute_scalar_real(self, name:builtins.str, data:typing.Sequence[builtins.float]) -> None: ... - def push_cell_attribute_vector_real(self, name:builtins.str, data:numpy.typing.NDArray[numpy.float32]) -> None: ... - def get_point_attribute(self, name:builtins.str) -> typing.Any: - r""" - Get mesh vertex attribute by name - """ - def get_cell_attribute(self, name:builtins.str) -> typing.Any: - r""" - Get mesh cell attribute by name - """ - def get_point_attributes(self) -> dict: - r""" - Get all point attributes in a python dictionary - """ - def get_cell_attributes(self) -> dict: - r""" - Get all cell attributes in a python dictionary - """ - def get_point_attribute_keys(self) -> list: - r""" - Get all registered point attribute names - """ - def get_cell_attribute_keys(self) -> list: - r""" - Get all registered cell attribute names - """ - -class MixedTriQuadMeshWithDataF64: - r""" - MeshWithData wrapper - """ - def __new__(cls, mesh:MixedTriQuadMesh3dF64) -> MixedTriQuadMeshWithDataF64: ... - def get_mesh(self) -> MixedTriQuadMesh3dF64: - r""" - Returns a copy of the contained mesh - """ - def take_mesh(self) -> MixedTriQuadMesh3dF64: - r""" - Returns the contained mesh by moving it out of this object (zero copy) - """ - def par_clamp_with_aabb(self, aabb:Aabb3dF64, clamp_vertices:builtins.bool, keep_vertices:builtins.bool) -> MixedTriQuadMeshWithDataF64: - r""" - Removes all cells from the mesh that are completely outside of the given AABB and clamps the remaining cells to the boundary - """ - def push_point_attribute_scalar_u64(self, name:builtins.str, data:typing.Sequence[builtins.int]) -> None: ... - def push_point_attribute_scalar_real(self, name:builtins.str, data:typing.Sequence[builtins.float]) -> None: ... - def push_point_attribute_vector_real(self, name:builtins.str, data:numpy.typing.NDArray[numpy.float64]) -> None: ... - def push_cell_attribute_scalar_u64(self, name:builtins.str, data:typing.Sequence[builtins.int]) -> None: ... - def push_cell_attribute_scalar_real(self, name:builtins.str, data:typing.Sequence[builtins.float]) -> None: ... - def push_cell_attribute_vector_real(self, name:builtins.str, data:numpy.typing.NDArray[numpy.float64]) -> None: ... 
- def get_point_attribute(self, name:builtins.str) -> typing.Any: - r""" - Get mesh vertex attribute by name - """ - def get_cell_attribute(self, name:builtins.str) -> typing.Any: - r""" - Get mesh cell attribute by name - """ - def get_point_attributes(self) -> dict: - r""" - Get all point attributes in a python dictionary - """ - def get_cell_attributes(self) -> dict: - r""" - Get all cell attributes in a python dictionary - """ - def get_point_attribute_keys(self) -> list: - r""" - Get all registered point attribute names - """ - def get_cell_attribute_keys(self) -> list: - r""" - Get all registered cell attribute names - """ +class NeighborhoodLists: + ... class SphInterpolatorF32: r""" @@ -473,184 +180,6 @@ class TriMesh3d: Computes the vertex-vertex connectivity of the mesh """ -class TriMesh3dF32: - r""" - TriMesh3d wrapper - """ - def get_vertices(self) -> numpy.typing.NDArray[numpy.float32]: - r""" - Returns a copy of the `Nx3` array of vertex positions - """ - def get_triangles(self) -> numpy.typing.NDArray[numpy.uint64]: - r""" - Returns a copy of the `Mx3` array of the vertex indices that make up a triangle - """ - def get_cells(self) -> numpy.typing.NDArray[numpy.uint64]: - r""" - Alias for `get_triangles` - """ - def take_vertices(self) -> numpy.typing.NDArray[numpy.float32]: - r""" - Returns the `Nx3` array of vertex positions by moving it out of the mesh (zero copy) - """ - def take_triangles(self) -> numpy.typing.NDArray[numpy.uint64]: - r""" - Returns the `Mx3` array of the vertex indices that make up the triangles by moving it out of the mesh (zero copy) - """ - def take_cells(self) -> numpy.typing.NDArray[numpy.uint64]: - r""" - Alias for `take_triangles` - """ - def take_vertices_and_triangles(self) -> tuple: - r""" - Returns a tuple containing the vertices and triangles of the mesh by moving them out of the mesh (zero copy) - """ - def par_vertex_normals(self) -> numpy.typing.NDArray[numpy.float32]: - r""" - Computes the mesh's vertex normals using an area weighted average of the adjacent triangle faces (parallelized version) - """ - def vertex_vertex_connectivity(self) -> builtins.list[builtins.list[builtins.int]]: - r""" - Returns a mapping of all mesh vertices to the set of their connected neighbor vertices - """ - -class TriMesh3dF64: - r""" - TriMesh3d wrapper - """ - def get_vertices(self) -> numpy.typing.NDArray[numpy.float64]: - r""" - Returns a copy of the `Nx3` array of vertex positions - """ - def get_triangles(self) -> numpy.typing.NDArray[numpy.uint64]: - r""" - Returns a copy of the `Mx3` array of the vertex indices that make up a triangle - """ - def get_cells(self) -> numpy.typing.NDArray[numpy.uint64]: - r""" - Alias for `get_triangles` - """ - def take_vertices(self) -> numpy.typing.NDArray[numpy.float64]: - r""" - Returns the `Nx3` array of vertex positions by moving it out of the mesh (zero copy) - """ - def take_triangles(self) -> numpy.typing.NDArray[numpy.uint64]: - r""" - Returns the `Mx3` array of the vertex indices that make up the triangles by moving it out of the mesh (zero copy) - """ - def take_cells(self) -> numpy.typing.NDArray[numpy.uint64]: - r""" - Alias for `take_triangles` - """ - def take_vertices_and_triangles(self) -> tuple: - r""" - Returns a tuple containing the vertices and triangles of the mesh by moving them out of the mesh (zero copy) - """ - def par_vertex_normals(self) -> numpy.typing.NDArray[numpy.float64]: - r""" - Computes the mesh's vertex normals using an area weighted average of the adjacent triangle faces 
(parallelized version) - """ - def vertex_vertex_connectivity(self) -> builtins.list[builtins.list[builtins.int]]: - r""" - Returns a mapping of all mesh vertices to the set of their connected neighbor vertices - """ - -class TriMeshWithDataF32: - r""" - MeshWithData wrapper - """ - def __new__(cls, mesh:TriMesh3dF32) -> TriMeshWithDataF32: ... - def get_mesh(self) -> TriMesh3dF32: - r""" - Returns a copy of the contained mesh - """ - def take_mesh(self) -> TriMesh3dF32: - r""" - Returns the contained mesh by moving it out of this object (zero copy) - """ - def par_clamp_with_aabb(self, aabb:Aabb3dF32, clamp_vertices:builtins.bool, keep_vertices:builtins.bool) -> TriMeshWithDataF32: - r""" - Removes all cells from the mesh that are completely outside of the given AABB and clamps the remaining cells to the boundary - """ - def push_point_attribute_scalar_u64(self, name:builtins.str, data:typing.Sequence[builtins.int]) -> None: ... - def push_point_attribute_scalar_real(self, name:builtins.str, data:typing.Sequence[builtins.float]) -> None: ... - def push_point_attribute_vector_real(self, name:builtins.str, data:numpy.typing.NDArray[numpy.float32]) -> None: ... - def push_cell_attribute_scalar_u64(self, name:builtins.str, data:typing.Sequence[builtins.int]) -> None: ... - def push_cell_attribute_scalar_real(self, name:builtins.str, data:typing.Sequence[builtins.float]) -> None: ... - def push_cell_attribute_vector_real(self, name:builtins.str, data:numpy.typing.NDArray[numpy.float32]) -> None: ... - def get_point_attribute(self, name:builtins.str) -> typing.Any: - r""" - Get mesh vertex attribute by name - """ - def get_cell_attribute(self, name:builtins.str) -> typing.Any: - r""" - Get mesh cell attribute by name - """ - def get_point_attributes(self) -> dict: - r""" - Get all point attributes in a python dictionary - """ - def get_cell_attributes(self) -> dict: - r""" - Get all cell attributes in a python dictionary - """ - def get_point_attribute_keys(self) -> list: - r""" - Get all registered point attribute names - """ - def get_cell_attribute_keys(self) -> list: - r""" - Get all registered cell attribute names - """ - -class TriMeshWithDataF64: - r""" - MeshWithData wrapper - """ - def __new__(cls, mesh:TriMesh3dF64) -> TriMeshWithDataF64: ... - def get_mesh(self) -> TriMesh3dF64: - r""" - Returns a copy of the contained mesh - """ - def take_mesh(self) -> TriMesh3dF64: - r""" - Returns the contained mesh by moving it out of this object (zero copy) - """ - def par_clamp_with_aabb(self, aabb:Aabb3dF64, clamp_vertices:builtins.bool, keep_vertices:builtins.bool) -> TriMeshWithDataF64: - r""" - Removes all cells from the mesh that are completely outside of the given AABB and clamps the remaining cells to the boundary - """ - def push_point_attribute_scalar_u64(self, name:builtins.str, data:typing.Sequence[builtins.int]) -> None: ... - def push_point_attribute_scalar_real(self, name:builtins.str, data:typing.Sequence[builtins.float]) -> None: ... - def push_point_attribute_vector_real(self, name:builtins.str, data:numpy.typing.NDArray[numpy.float64]) -> None: ... - def push_cell_attribute_scalar_u64(self, name:builtins.str, data:typing.Sequence[builtins.int]) -> None: ... - def push_cell_attribute_scalar_real(self, name:builtins.str, data:typing.Sequence[builtins.float]) -> None: ... - def push_cell_attribute_vector_real(self, name:builtins.str, data:numpy.typing.NDArray[numpy.float64]) -> None: ... 
- def get_point_attribute(self, name:builtins.str) -> typing.Any: - r""" - Get mesh vertex attribute by name - """ - def get_cell_attribute(self, name:builtins.str) -> typing.Any: - r""" - Get mesh cell attribute by name - """ - def get_point_attributes(self) -> dict: - r""" - Get all point attributes in a python dictionary - """ - def get_cell_attributes(self) -> dict: - r""" - Get all cell attributes in a python dictionary - """ - def get_point_attribute_keys(self) -> list: - r""" - Get all registered point attribute names - """ - def get_cell_attribute_keys(self) -> list: - r""" - Get all registered cell attribute names - """ - class UniformGrid: r""" Struct containing the parameters of the uniform grid used for the surface reconstruction @@ -726,6 +255,11 @@ def marching_cubes_cleanup(mesh:typing.Union[TriMesh3d, MeshWithData], grid:Unif The simplification is performed inplace and modifies the given mesh. """ +def neighborhood_search_spatial_hashing_parallel(particle_positions:numpy.typing.NDArray[typing.Any], domain:Aabb3d, search_radius:builtins.float) -> NeighborhoodLists: + r""" + Performs a neighborhood search using spatial hashing (multi-threaded implementation) + """ + def reconstruct_surface(particles:numpy.typing.NDArray[typing.Any], *, particle_radius:builtins.float, rest_density:builtins.float=1000.0, smoothing_length:builtins.float, cube_size:builtins.float, iso_surface_threshold:builtins.float=0.6, multi_threading:builtins.bool=True, global_neighborhood_list:builtins.bool=False, subdomain_grid:builtins.bool=True, subdomain_grid_auto_disable:builtins.bool=True, subdomain_num_cubes_per_dim:builtins.int=64, aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None) -> SurfaceReconstruction: r""" Performs a surface reconstruction from the given particles without additional post-processing @@ -740,3 +274,5 @@ def reconstruction_pipeline(particles:numpy.typing.NDArray[typing.Any], *, attri Note that smoothing length and cube size are given in multiples of the particle radius. """ +def triangulate_density_map(values:numpy.typing.NDArray[typing.Any], grid:UniformGrid, *, iso_surface_threshold:builtins.float) -> TriMesh3d: ... + diff --git a/pysplashsurf/src/aabb.rs b/pysplashsurf/src/aabb.rs index b0f7957..0ad6070 100644 --- a/pysplashsurf/src/aabb.rs +++ b/pysplashsurf/src/aabb.rs @@ -1,6 +1,6 @@ use numpy as np; use numpy::prelude::*; -use numpy::{Element, PyArray, PyArray1, PyArray2, PyReadonlyArray2, PyUntypedArray}; +use numpy::{Element, PyArray, PyArray1, PyArray2, PyUntypedArray}; use pyo3::{PyResult, prelude::*}; use pyo3_stub_gen::derive::*; use splashsurf_lib::{Aabb3d, Real, nalgebra::Vector3}; @@ -80,158 +80,3 @@ impl PyAabb3d { PyArray::from_slice(py, self.max.as_slice()) } } - -macro_rules! 
create_aabb3d_interface { - ($name: ident, $type: ident) => { - /// Aabb3d wrapper - #[gen_stub_pyclass] - #[pyclass] - pub struct $name { - pub inner: Aabb3d<$type>, - } - - impl $name { - pub fn new(data: Aabb3d<$type>) -> Self { - Self { inner: data } - } - } - - #[gen_stub_pymethods] - #[pymethods] - impl $name { - #[new] - fn py_new<'py>(min: [$type; 3], max: [$type; 3]) -> PyResult { - Ok($name::new(Aabb3d::<$type>::new( - Vector3::from_column_slice(&min), - Vector3::from_column_slice(&max), - ))) - } - - /// Constructs the smallest AABB fitting around all the given points - #[staticmethod] - fn from_points<'py>(points: &Bound<'py, PyArray2<$type>>) -> PyResult<$name> { - let points: PyReadonlyArray2<$type> = points.extract()?; - let points = points.as_slice()?; - let points: &[Vector3<$type>] = bytemuck::cast_slice(points); - - Ok($name::new(Aabb3d::from_points(points))) - } - - /// Constructs the smallest AABB fitting around all the given points, parallel version - #[staticmethod] - fn par_from_points<'py>(points: &Bound<'py, PyArray2<$type>>) -> PyResult<$name> { - let points: PyReadonlyArray2<$type> = points.extract()?; - let points = points.as_slice()?; - let points: &[Vector3<$type>] = bytemuck::cast_slice(points); - - Ok($name::new(Aabb3d::par_from_points(points))) - } - - /// Constructs a degenerate AABB with min and max set to zero - #[staticmethod] - fn zeros() -> $name { - $name::new(Aabb3d::zeros()) - } - - /// Constructs a degenerate AABB with zero extents centered at the given point - #[staticmethod] - fn from_point(point: [$type; 3]) -> Self { - $name::new(Aabb3d::from_point(Vector3::from_column_slice(&point))) - } - - /// Returns the min coordinate of the bounding box - fn min<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray1<$type>> { - let min: &[$type] = self.inner.min().as_slice(); - PyArray::from_slice(py, min) - } - - /// Returns the max coordinate of the bounding box - fn max<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray1<$type>> { - let max: &[$type] = self.inner.max().as_slice(); - PyArray::from_slice(py, max) - } - - /// Returns whether the AABB is consistent, i.e. `aabb.min()[i] <= aabb.max()[i]` for all `i` - fn is_consistent(&self) -> bool { - self.inner.is_consistent() - } - - /// Returns whether the AABB is degenerate in any dimension, i.e. 
`aabb.min()[i] == aabb.max()[i]` for any `i` - fn is_degenerate(&self) -> bool { - self.inner.is_degenerate() - } - - /// Returns the extents of the bounding box (vector connecting min and max point of the box) - fn extents<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray1<$type>> { - let extents = self.inner.extents(); - PyArray::from_slice(py, extents.as_slice()) - } - - /// Returns the smallest scalar extent of the AABB over all of its dimensions - fn min_extent(&self) -> $type { - self.inner.min_extent() - } - - /// Returns the largest scalar extent of the AABB over all of its dimensions - fn max_extent(&self) -> $type { - self.inner.max_extent() - } - - /// Returns the geometric centroid of the AABB (mean of the corner points) - fn centroid<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray1<$type>> { - let centroid = self.inner.centroid(); - PyArray::from_slice(py, centroid.as_slice()) - } - - /// Checks if the given AABB is inside of the AABB, the AABB is considered to be half-open to its max coordinate - fn contains_aabb(&self, other: &$name) -> bool { - self.inner.contains_aabb(&other.inner) - } - - /// Checks if the given point is inside of the AABB, the AABB is considered to be half-open to its max coordinate - fn contains_point(&self, point: [$type; 3]) -> bool { - self.inner - .contains_point(&Vector3::from_column_slice(&point)) - } - - /// Translates the AABB by the given vector - fn translate(&mut self, vector: [$type; 3]) { - self.inner.translate(&Vector3::from_column_slice(&vector)); - } - - /// Translates the AABB to center it at the coordinate origin (moves the centroid to the coordinate origin) - fn center_at_origin(&mut self) { - self.inner.center_at_origin(); - } - - /// Multiplies a uniform, local scaling to the AABB (i.e. multiplying its extents as if it was centered at the origin) - fn scale_uniformly(&mut self, scaling: $type) { - self.inner.scale_uniformly(scaling); - } - - /// Enlarges this AABB to the smallest AABB enclosing both itself and another AABB - fn join(&mut self, other: &$name) { - self.inner.join(&other.inner); - } - - /// Enlarges this AABB to the smallest AABB enclosing both itself and another point - fn join_with_point(&mut self, point: [$type; 3]) { - self.inner - .join_with_point(&Vector3::from_column_slice(&point)); - } - - /// Grows this AABB uniformly in all directions by the given scalar margin (i.e. adding the margin to min/max extents) - fn grow_uniformly(&mut self, margin: $type) { - self.inner.grow_uniformly(margin); - } - - /// Returns the smallest cubical AABB with the same center that encloses this AABB - fn enclosing_cube(&self) -> $name { - $name::new(self.inner.enclosing_cube()) - } - } - }; -} - -create_aabb3d_interface!(Aabb3dF64, f64); -create_aabb3d_interface!(Aabb3dF32, f32); diff --git a/pysplashsurf/src/lib.rs b/pysplashsurf/src/lib.rs index 763e9fc..5729999 100644 --- a/pysplashsurf/src/lib.rs +++ b/pysplashsurf/src/lib.rs @@ -28,16 +28,6 @@ pub(crate) mod utils; /// Support reconstructing Level-Set surfaces from particle clouds or from regular grids. 
#[pymodule] fn pysplashsurf(m: &Bound<'_, PyModule>) -> PyResult<()> { - m.add_class::()?; - m.add_class::()?; - m.add_class::()?; - m.add_class::()?; - - m.add_class::()?; - m.add_class::()?; - m.add_class::()?; - m.add_class::()?; - m.add_class::()?; m.add_class::()?; m.add_class::()?; @@ -51,8 +41,6 @@ fn pysplashsurf(m: &Bound<'_, PyModule>) -> PyResult<()> { m.add_class::()?; m.add_class::()?; - m.add_class::()?; - m.add_class::()?; m.add_class::()?; diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index 8b2a8e5..6d8bfc4 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -1,20 +1,11 @@ -use crate::NumpyUsize; -use crate::aabb::{Aabb3dF32, Aabb3dF64}; -use crate::utils::*; use bytemuck::{NoUninit, Pod}; use ndarray::{Array2, ArrayView, ArrayView1, ArrayView2}; use numpy as np; -use numpy::{ - Element, IntoPyArray, PyArray, PyArray1, PyArray2, PyArrayDescr, PyArrayMethods, - PyReadonlyArray2, PyUntypedArray, ToPyArray, -}; +use numpy::prelude::*; +use numpy::{Element, PyArray, PyArray1, PyArray2, PyArrayDescr, PyUntypedArray}; use pyo3::exceptions::PyTypeError; -use pyo3::{ - IntoPyObjectExt, - exceptions::PyValueError, - prelude::*, - types::{PyDict, PyList, PyTuple}, -}; +use pyo3::prelude::*; +use pyo3::{IntoPyObjectExt, exceptions::PyValueError, types::PyList}; use pyo3_stub_gen::derive::*; use splashsurf_lib::mesh::TriangleCell; use splashsurf_lib::{ @@ -26,49 +17,8 @@ use splashsurf_lib::{ nalgebra::{Unit, Vector3}, }; -fn get_attribute_with_name<'py, R: Real + Element>( - py: Python<'py>, - attrs: &[OwnedMeshAttribute], - name: &str, -) -> PyResult -where - R: pyo3::IntoPyObject<'py>, -{ - let elem = attrs.iter().filter(|x| x.name == name).next(); - match elem { - Some(attr) => match attr.data.clone() { - OwnedAttributeData::ScalarU64(res) => Ok(res.into_owned().into_pyobject(py)?.into()), - OwnedAttributeData::ScalarReal(res) => Ok(res.into_owned().into_pyobject(py)?.into()), - OwnedAttributeData::Vector3Real(res) => { - let flattened: Vec = bytemuck::cast_vec(res.into_owned()); - let res: Array2 = Array2::from_shape_vec((flattened.len() / 3, 3), flattened) - .map_err(anyhow::Error::new)?; - Ok(res.into_pyarray(py).into_bound_py_any(py)?.into()) - } - }, - None => Err(PyErr::new::(format!( - "Attribute with name {} doesn't exist", - name - ))), - } -} - -fn add_attribute_with_name<'py, R: Real + Element>( - attrs: &mut Vec>, - attribute: OwnedMeshAttribute, -) -> PyResult<()> { - let elem = attrs.iter().filter(|x| x.name == attribute.name).next(); - match elem { - None => { - attrs.push(attribute); - Ok(()) - } - _ => Err(PyErr::new::(format!( - "Attribute with name {} already exists", - attribute.name - ))), - } -} +use crate::NumpyUsize; +use crate::utils::*; fn get_vec_generic<'py, R: Element>( values: &[R], @@ -150,389 +100,6 @@ pub fn get_triangle_mesh_generic<'py>(mesh: &Bound<'py, PyAny>) -> Option { - /// MeshWithData wrapper - #[gen_stub_pyclass] - #[pyclass] - pub struct $name { - pub inner: MeshWithData<$type, $mesh_class<$type>>, - } - - impl $name { - pub fn new(data: MeshWithData<$type, $mesh_class<$type>>) -> Self { - Self { inner: data } - } - } - - #[gen_stub_pymethods] - #[pymethods] - impl $name { - #[new] - fn py_new(mesh: &$pymesh_class) -> PyResult { - let meshdata = MeshWithData::new(mesh.inner.clone()); - Ok($name::new(meshdata)) - } - - /// Returns a copy of the contained mesh - fn get_mesh(&self) -> $pymesh_class { - $pymesh_class::new(self.inner.mesh.clone()) - } - - /// Returns the contained mesh by moving it out 
of this object (zero copy) - fn take_mesh(&mut self) -> $pymesh_class { - let mesh = std::mem::take(&mut self.inner.mesh); - $pymesh_class::new(mesh) - } - - /// Removes all cells from the mesh that are completely outside of the given AABB and clamps the remaining cells to the boundary - fn par_clamp_with_aabb( - &self, - aabb: &$aabb_class, - clamp_vertices: bool, - keep_vertices: bool, - ) -> $name { - $name::new(self.inner.par_clamp_with_aabb( - &aabb.inner, - clamp_vertices, - keep_vertices, - )) - } - - fn push_point_attribute_scalar_u64<'py>( - &mut self, - name: &str, - data: Vec, - ) -> PyResult<()> { - add_attribute_with_name::<$type>( - &mut self.inner.point_attributes, - OwnedMeshAttribute::new(name, OwnedAttributeData::ScalarU64(data.into())), - ) - } - - fn push_point_attribute_scalar_real<'py>( - &mut self, - name: &str, - data: Vec<$type>, - ) -> PyResult<()> { - add_attribute_with_name::<$type>( - &mut self.inner.point_attributes, - OwnedMeshAttribute::new(name, OwnedAttributeData::ScalarReal(data.into())), - ) - } - - fn push_point_attribute_vector_real<'py>( - &mut self, - name: &str, - data: &Bound<'py, PyArray2<$type>>, - ) -> PyResult<()> { - let data: PyReadonlyArray2<$type> = data.extract()?; - let data = data.as_slice()?; - let data: &[Vector3<$type>] = bytemuck::cast_slice(data); - - add_attribute_with_name::<$type>( - &mut self.inner.point_attributes, - OwnedMeshAttribute::new( - name, - OwnedAttributeData::Vector3Real(data.to_vec().into()), - ), - ) - } - - fn push_cell_attribute_scalar_u64<'py>( - &mut self, - name: &str, - data: Vec, - ) -> PyResult<()> { - add_attribute_with_name::<$type>( - &mut self.inner.cell_attributes, - OwnedMeshAttribute::new(name, OwnedAttributeData::ScalarU64(data.into())), - ) - } - - fn push_cell_attribute_scalar_real<'py>( - &mut self, - name: &str, - data: Vec<$type>, - ) -> PyResult<()> { - add_attribute_with_name::<$type>( - &mut self.inner.cell_attributes, - OwnedMeshAttribute::new(name, OwnedAttributeData::ScalarReal(data.into())), - ) - } - - fn push_cell_attribute_vector_real<'py>( - &mut self, - name: &str, - data: &Bound<'py, PyArray2<$type>>, - ) -> PyResult<()> { - let data: PyReadonlyArray2<$type> = data.extract()?; - let data = data.as_slice()?; - let data: &[Vector3<$type>] = bytemuck::cast_slice(data); - - add_attribute_with_name::<$type>( - &mut self.inner.cell_attributes, - OwnedMeshAttribute::new( - name, - OwnedAttributeData::Vector3Real(data.to_vec().into()), - ), - ) - } - - /// Get mesh vertex attribute by name - fn get_point_attribute<'py>(&self, py: Python<'py>, name: &str) -> PyResult { - get_attribute_with_name::<$type>(py, self.inner.point_attributes.as_slice(), name) - } - - /// Get mesh cell attribute by name - fn get_cell_attribute<'py>(&self, py: Python<'py>, name: &str) -> PyResult { - get_attribute_with_name::<$type>(py, self.inner.cell_attributes.as_slice(), name) - } - - /// Get all point attributes in a python dictionary - fn get_point_attributes<'py>(&self, py: Python<'py>) -> PyResult> { - let res = PyDict::new(py); - - for attr in self.inner.point_attributes.iter() { - let data = get_attribute_with_name::<$type>( - py, - self.inner.point_attributes.as_slice(), - &attr.name, - ); - match data { - Ok(data) => res.set_item(&attr.name, data)?, - Err(_) => println!("Couldn't embed attribute {} in PyDict", &attr.name), - } - } - - Ok(res) - } - - /// Get all cell attributes in a python dictionary - fn get_cell_attributes<'py>(&self, py: Python<'py>) -> PyResult> { - let res = PyDict::new(py); - - 
for attr in self.inner.cell_attributes.iter() { - let data = get_attribute_with_name::<$type>( - py, - self.inner.cell_attributes.as_slice(), - &attr.name, - ); - match data { - Ok(data) => res.set_item(&attr.name, data)?, - Err(_) => println!("Couldn't embed attribute {} in PyDict", &attr.name), - } - } - - Ok(res) - } - - /// Get all registered point attribute names - fn get_point_attribute_keys<'py>( - &self, - py: Python<'py>, - ) -> PyResult> { - let mut res: Vec<&str> = vec![]; - - for attr in self.inner.point_attributes.iter() { - res.push(&attr.name); - } - - PyList::new(py, res) - } - - /// Get all registered cell attribute names - fn get_cell_attribute_keys<'py>( - &self, - py: Python<'py>, - ) -> PyResult> { - let mut res: Vec<&str> = vec![]; - - for attr in self.inner.cell_attributes.iter() { - res.push(&attr.name); - } - - PyList::new(py, res) - } - } - }; -} - -macro_rules! create_tri_mesh_interface { - ($name: ident, $type: ident) => { - /// TriMesh3d wrapper - #[gen_stub_pyclass] - #[pyclass] - pub struct $name { - pub inner: TriMesh3d<$type>, - } - - impl $name { - pub fn new(data: TriMesh3d<$type>) -> Self { - Self { inner: data } - } - } - - #[gen_stub_pymethods] - #[pymethods] - impl $name { - /// Returns a copy of the `Nx3` array of vertex positions - fn get_vertices<'py>(&self, py: Python<'py>) -> PyResult>> { - let points: &[$type] = bytemuck::cast_slice(&self.inner.vertices); - let vertices: ArrayView2<$type> = - ArrayView::from_shape((self.inner.vertices.len(), 3), points) - .map_err(anyhow::Error::new)?; - Ok(vertices.to_pyarray(py)) // seems like at least one copy is necessary here (to_pyarray copies the data) - } - - /// Returns a copy of the `Mx3` array of the vertex indices that make up a triangle - fn get_triangles<'py>( - &self, - py: Python<'py>, - ) -> PyResult>> { - let tris: &[NumpyUsize] = bytemuck::cast_slice(&self.inner.triangles); - let triangles: ArrayView2 = - ArrayView::from_shape((self.inner.triangles.len(), 3), tris) - .map_err(anyhow::Error::new)?; - Ok(triangles.to_pyarray(py)) - } - - /// Alias for `get_triangles` - fn get_cells<'py>( - &self, - py: Python<'py>, - ) -> PyResult>> { - self.get_triangles(py) - } - - /// Returns the `Nx3` array of vertex positions by moving it out of the mesh (zero copy) - fn take_vertices<'py>( - &mut self, - py: Python<'py>, - ) -> PyResult>> { - let vertices = std::mem::take(&mut self.inner.vertices); - let n = vertices.len(); - let vertices_scalar: Vec<$type> = bytemuck::cast_vec(vertices); - let vertices_array = PyArray::from_vec(py, vertices_scalar) - .reshape([n, 3]) - .map_err(anyhow::Error::new)?; - Ok(vertices_array) - } - - /// Returns the `Mx3` array of the vertex indices that make up the triangles by moving it out of the mesh (zero copy) - fn take_triangles<'py>( - &mut self, - py: Python<'py>, - ) -> PyResult>> { - let triangles = std::mem::take(&mut self.inner.triangles); - let m = triangles.len(); - let triangles_scalar: Vec = bytemuck::cast_vec(triangles); - let triangles_array = PyArray::from_vec(py, triangles_scalar) - .reshape([m, 3]) - .map_err(anyhow::Error::new)?; - Ok(triangles_array) - } - - /// Alias for `take_triangles` - fn take_cells<'py>( - &mut self, - py: Python<'py>, - ) -> PyResult>> { - self.take_triangles(py) - } - - /// Returns a tuple containing the vertices and triangles of the mesh by moving them out of the mesh (zero copy) - fn take_vertices_and_triangles<'py>( - &mut self, - py: Python<'py>, - ) -> PyResult> { - let tup = (self.take_vertices(py)?, 
self.take_triangles(py)?); - tup.into_pyobject(py) - } - - /// Computes the mesh's vertex normals using an area weighted average of the adjacent triangle faces (parallelized version) - fn par_vertex_normals<'py>( - &self, - py: Python<'py>, - ) -> PyResult>> { - let normals_vec = self.inner.par_vertex_normals(); - let normals_vec = - bytemuck::allocation::cast_vec::>, $type>(normals_vec); - - let normals: &[$type] = normals_vec.as_slice(); - let normals: ArrayView2<$type> = - ArrayView::from_shape((normals.len() / 3, 3), normals) - .map_err(anyhow::Error::new)?; - - Ok(normals.to_pyarray(py)) - } - - /// Returns a mapping of all mesh vertices to the set of their connected neighbor vertices - fn vertex_vertex_connectivity(&self) -> Vec> { - self.inner.vertex_vertex_connectivity() - } - } - }; -} - -macro_rules! create_tri_quad_mesh_interface { - ($name: ident, $type: ident) => { - /// MixedTriQuadMesh3d wrapper - #[gen_stub_pyclass] - #[pyclass] - pub struct $name { - pub inner: MixedTriQuadMesh3d<$type>, - } - - impl $name { - pub fn new(data: MixedTriQuadMesh3d<$type>) -> Self { - Self { inner: data } - } - } - - #[gen_stub_pymethods] - #[pymethods] - impl $name { - /// Returns a copy of the `Nx3` array of vertex positions - fn get_vertices<'py>(&self, py: Python<'py>) -> PyResult>> { - let points: &[$type] = bytemuck::cast_slice(&self.inner.vertices); - let vertices: ArrayView2<$type> = - ArrayView::from_shape((self.inner.vertices.len(), 3), points) - .map_err(anyhow::Error::new)?; - Ok(vertices.to_pyarray(py)) - } - - /// Returns a 2D list specifying the vertex indices either for a triangle or a quad - fn get_cells(&self) -> PyResult>> { - let cells: Vec> = self - .inner - .cells - .iter() - .map(|c| match c { - TriangleOrQuadCell::Tri(v) => v.to_vec(), - TriangleOrQuadCell::Quad(v) => v.to_vec(), - }) - .collect(); - Ok(cells) - } - - /// Returns the `Nx3` array of vertex positions by moving it out of the mesh (zero copy) - fn take_vertices<'py>( - &mut self, - py: Python<'py>, - ) -> PyResult>> { - let vertices = std::mem::take(&mut self.inner.vertices); - let n = vertices.len(); - let vertices_scalar: Vec<$type> = bytemuck::cast_vec(vertices); - let vertices_array = PyArray::from_vec(py, vertices_scalar) - .reshape([n, 3]) - .map_err(anyhow::Error::new)?; - Ok(vertices_array) - } - } - }; -} - /// Vertex-vertex connectivity of a mesh #[gen_stub_pyclass] #[pyclass] @@ -1041,27 +608,3 @@ impl PyMeshWithData { } } } - -create_tri_mesh_interface!(TriMesh3dF64, f64); -create_tri_mesh_interface!(TriMesh3dF32, f32); - -create_tri_quad_mesh_interface!(MixedTriQuadMesh3dF64, f64); -create_tri_quad_mesh_interface!(MixedTriQuadMesh3dF32, f32); - -create_mesh_data_interface!(TriMeshWithDataF64, f64, TriMesh3d, TriMesh3dF64, Aabb3dF64); -create_mesh_data_interface!(TriMeshWithDataF32, f32, TriMesh3d, TriMesh3dF32, Aabb3dF32); - -create_mesh_data_interface!( - MixedTriQuadMeshWithDataF64, - f64, - MixedTriQuadMesh3d, - MixedTriQuadMesh3dF64, - Aabb3dF64 -); -create_mesh_data_interface!( - MixedTriQuadMeshWithDataF32, - f32, - MixedTriQuadMesh3d, - MixedTriQuadMesh3dF32, - Aabb3dF32 -); From 31b6dd5fe804ddb1945e332e24e14c36db8f3905 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Fri, 29 Aug 2025 23:33:02 +0200 Subject: [PATCH 30/63] Py: Access methods for neighborhood lists --- pysplashsurf/src/mesh.rs | 2 +- pysplashsurf/src/neighborhood_search.rs | 30 ++++++++++++++++++++++++- 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/pysplashsurf/src/mesh.rs 
b/pysplashsurf/src/mesh.rs index 6d8bfc4..646f158 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -5,7 +5,7 @@ use numpy::prelude::*; use numpy::{Element, PyArray, PyArray1, PyArray2, PyArrayDescr, PyUntypedArray}; use pyo3::exceptions::PyTypeError; use pyo3::prelude::*; -use pyo3::{IntoPyObjectExt, exceptions::PyValueError, types::PyList}; +use pyo3::{IntoPyObjectExt, types::PyList}; use pyo3_stub_gen::derive::*; use splashsurf_lib::mesh::TriangleCell; use splashsurf_lib::{ diff --git a/pysplashsurf/src/neighborhood_search.rs b/pysplashsurf/src/neighborhood_search.rs index 84b67b4..ff9bf54 100644 --- a/pysplashsurf/src/neighborhood_search.rs +++ b/pysplashsurf/src/neighborhood_search.rs @@ -1,6 +1,7 @@ use numpy as np; use numpy::prelude::*; use numpy::{PyArray2, PyUntypedArray}; +use pyo3::exceptions::PyIndexError; use pyo3::prelude::*; use pyo3_stub_gen::derive::*; use splashsurf_lib::nalgebra::Vector3; @@ -8,6 +9,8 @@ use splashsurf_lib::nalgebra::Vector3; use crate::aabb::PyAabb3d; use crate::utils::*; +// TODO: Bindings for flat neighborhood search + #[gen_stub_pyclass] #[pyclass] #[pyo3(name = "NeighborhoodLists")] @@ -21,7 +24,32 @@ impl From>> for PyNeighborhoodLists { } } -/// Performs a neighborhood search using spatial hashing (multi-threaded implementation) +#[gen_stub_pymethods] +#[pymethods] +impl PyNeighborhoodLists { + /// Returns the number of particles for which neighborhood lists are stored + pub fn __len__(&self) -> usize { + self.inner.len() + } + + /// Returns the neighborhood list for the particle at the given index + pub fn __getitem__(&self, idx: isize) -> PyResult> { + let len = self.inner.len() as isize; + let idx = if idx < 0 { len + idx } else { idx }; + if idx < 0 || idx >= len { + Err(PyIndexError::new_err("index out of bounds")) + } else { + Ok(self.inner[idx as usize].clone()) + } + } + + /// Returns all stored neighborhood lists as a list of lists + pub fn get_neighborhood_lists(&self) -> Vec> { + self.inner.clone() + } +} + +/// Performs a neighborhood search using spatial hashing (multithreaded implementation) #[gen_stub_pyfunction] #[pyfunction] #[pyo3(name = "neighborhood_search_spatial_hashing_parallel")] From 015c422651fca90cd45df70c184ade42ac4c012b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Sat, 30 Aug 2025 11:00:49 +0200 Subject: [PATCH 31/63] Add method to interpolator --- splashsurf_lib/src/sph_interpolation.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/splashsurf_lib/src/sph_interpolation.rs b/splashsurf_lib/src/sph_interpolation.rs index c99e77a..28f3f82 100644 --- a/splashsurf_lib/src/sph_interpolation.rs +++ b/splashsurf_lib/src/sph_interpolation.rs @@ -72,6 +72,11 @@ impl SphInterpolator { tree, } } + + /// Returns the number of particles stored in the interpolator + pub fn size(&self) -> usize { + self.tree.size() + } /// Interpolates surface normals (i.e. 
normalized SPH gradient of the indicator function) of the fluid to the given points using SPH interpolation, appends to the given vector pub fn interpolate_normals_inplace( From be47ebe66a86b4e159aaa219868fe48fead9999b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Sat, 30 Aug 2025 11:01:07 +0200 Subject: [PATCH 32/63] Py: Update SphInterpolator --- pysplashsurf/pysplashsurf/pysplashsurf.pyi | 39 +-- pysplashsurf/src/lib.rs | 6 +- pysplashsurf/src/marching_cubes.rs | 2 +- pysplashsurf/src/neighborhood_search.rs | 1 + pysplashsurf/src/post_processing.rs | 2 +- pysplashsurf/src/sph_interpolation.rs | 323 +++++++++++++-------- pysplashsurf/src/utils.rs | 4 +- 7 files changed, 229 insertions(+), 148 deletions(-) diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index ce4abe2..314f991 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -94,42 +94,31 @@ class MixedTriQuadMesh3d: """ class NeighborhoodLists: - ... - -class SphInterpolatorF32: - r""" - SphInterpolator wrapper - """ - def __new__(cls, particle_positions:numpy.typing.NDArray[numpy.float32], particle_densities:typing.Sequence[builtins.float], particle_rest_mass:builtins.float, compact_support_radius:builtins.float) -> SphInterpolatorF32: ... - def interpolate_scalar_quantity(self, particle_quantity:typing.Sequence[builtins.float], interpolation_points:numpy.typing.NDArray[numpy.float32], first_order_correction:builtins.bool) -> builtins.list[builtins.float]: + def __len__(self) -> builtins.int: r""" - Interpolates a scalar per particle quantity to the given points, panics if the there are less per-particles values than particles + Returns the number of particles for which neighborhood lists are stored """ - def interpolate_normals(self, interpolation_points:numpy.typing.NDArray[numpy.float32]) -> numpy.typing.NDArray[numpy.float32]: + def __getitem__(self, idx:builtins.int) -> builtins.list[builtins.int]: r""" - Interpolates surface normals (i.e. normalized SPH gradient of the indicator function) of the fluid to the given points using SPH interpolation + Returns the neighborhood list for the particle at the given index """ - def interpolate_vector_quantity(self, particle_quantity:numpy.typing.NDArray[numpy.float32], interpolation_points:numpy.typing.NDArray[numpy.float32], first_order_correction:builtins.bool) -> numpy.typing.NDArray[numpy.float32]: + def get_neighborhood_lists(self) -> builtins.list[builtins.list[builtins.int]]: r""" - Interpolates a vectorial per particle quantity to the given points, panics if the there are less per-particles values than particles + Returns all stored neighborhood lists as a list of lists """ -class SphInterpolatorF64: - r""" - SphInterpolator wrapper - """ - def __new__(cls, particle_positions:numpy.typing.NDArray[numpy.float64], particle_densities:typing.Sequence[builtins.float], particle_rest_mass:builtins.float, compact_support_radius:builtins.float) -> SphInterpolatorF64: ... 
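Roughly, the NeighborhoodLists accessors added in this patch can be used from Python as sketched below (illustrative values only; `domain` is assumed to be an existing Aabb3d enclosing the particles, its construction is not shown here):

    import numpy as np
    import pysplashsurf

    particles = np.random.rand(1000, 3).astype(np.float32)
    # `domain` is assumed to be a pysplashsurf.Aabb3d covering all particles
    nl = pysplashsurf.neighborhood_search_spatial_hashing_parallel(particles, domain, 0.05)

    print(len(nl))        # __len__: number of particles with stored neighbor lists
    print(nl[0], nl[-1])  # __getitem__: neighbor indices of one particle; negative indices count from the end
    lists = nl.get_neighborhood_lists()  # full copy as list[list[int]]
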
- def interpolate_scalar_quantity(self, particle_quantity:typing.Sequence[builtins.float], interpolation_points:numpy.typing.NDArray[numpy.float64], first_order_correction:builtins.bool) -> builtins.list[builtins.float]: +class SphInterpolator: + def __new__(cls, particle_positions:numpy.typing.NDArray[typing.Any], particle_densities:numpy.typing.NDArray[typing.Any], particle_rest_mass:builtins.float, compact_support_radius:builtins.float) -> SphInterpolator: r""" - Interpolates a scalar per particle quantity to the given points, panics if the there are less per-particles values than particles + Constructs an SPH interpolator for the given particles """ - def interpolate_normals(self, interpolation_points:numpy.typing.NDArray[numpy.float64]) -> numpy.typing.NDArray[numpy.float64]: + def interpolate_quantity(self, particle_quantity:numpy.typing.NDArray[typing.Any], interpolation_points:numpy.typing.NDArray[typing.Any], first_order_correction:builtins.bool) -> numpy.typing.NDArray[typing.Any]: r""" - Interpolates surface normals (i.e. normalized SPH gradient of the indicator function) of the fluid to the given points using SPH interpolation + Interpolates a scalar or vectorial per particle quantity to the given points """ - def interpolate_vector_quantity(self, particle_quantity:numpy.typing.NDArray[numpy.float64], interpolation_points:numpy.typing.NDArray[numpy.float64], first_order_correction:builtins.bool) -> numpy.typing.NDArray[numpy.float64]: + def interpolate_normals(self, interpolation_points:numpy.typing.NDArray[typing.Any]) -> numpy.typing.NDArray[typing.Any]: r""" - Interpolates a vectorial per particle quantity to the given points, panics if the there are less per-particles values than particles + Interpolates surface normals (i.e. normalized SPH gradient of the indicator function) of the fluid to the given points using SPH interpolation """ class SurfaceReconstruction: @@ -257,7 +246,7 @@ def marching_cubes_cleanup(mesh:typing.Union[TriMesh3d, MeshWithData], grid:Unif def neighborhood_search_spatial_hashing_parallel(particle_positions:numpy.typing.NDArray[typing.Any], domain:Aabb3d, search_radius:builtins.float) -> NeighborhoodLists: r""" - Performs a neighborhood search using spatial hashing (multi-threaded implementation) + Performs a neighborhood search using spatial hashing (multithreaded implementation) """ def reconstruct_surface(particles:numpy.typing.NDArray[typing.Any], *, particle_radius:builtins.float, rest_density:builtins.float=1000.0, smoothing_length:builtins.float, cube_size:builtins.float, iso_surface_threshold:builtins.float=0.6, multi_threading:builtins.bool=True, global_neighborhood_list:builtins.bool=False, subdomain_grid:builtins.bool=True, subdomain_grid_auto_disable:builtins.bool=True, subdomain_num_cubes_per_dim:builtins.int=64, aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None) -> SurfaceReconstruction: diff --git a/pysplashsurf/src/lib.rs b/pysplashsurf/src/lib.rs index 5729999..8e35ecc 100644 --- a/pysplashsurf/src/lib.rs +++ b/pysplashsurf/src/lib.rs @@ -36,12 +36,8 @@ fn pysplashsurf(m: &Bound<'_, PyModule>) -> PyResult<()> { m.add_class::()?; m.add_class::()?; - - m.add_class::()?; - m.add_class::()?; - + m.add_class::()?; m.add_class::()?; - m.add_class::()?; use wrap_pyfunction as wrap; diff --git a/pysplashsurf/src/marching_cubes.rs b/pysplashsurf/src/marching_cubes.rs index ee229ef..22af5ea 100644 --- a/pysplashsurf/src/marching_cubes.rs +++ 
b/pysplashsurf/src/marching_cubes.rs @@ -44,6 +44,6 @@ pub fn check_mesh_consistency<'py>( ) .err()) } else { - Err(pyerr_mesh_grid_scalar_mismatch()) + Err(pyerr_scalar_type_mismatch()) } } diff --git a/pysplashsurf/src/neighborhood_search.rs b/pysplashsurf/src/neighborhood_search.rs index ff9bf54..34585a8 100644 --- a/pysplashsurf/src/neighborhood_search.rs +++ b/pysplashsurf/src/neighborhood_search.rs @@ -10,6 +10,7 @@ use crate::aabb::PyAabb3d; use crate::utils::*; // TODO: Bindings for flat neighborhood search +// TODO: Bindings for computing particle densities #[gen_stub_pyclass] #[pyclass] diff --git a/pysplashsurf/src/post_processing.rs b/pysplashsurf/src/post_processing.rs index 69bdce5..d88c70a 100644 --- a/pysplashsurf/src/post_processing.rs +++ b/pysplashsurf/src/post_processing.rs @@ -226,7 +226,7 @@ pub fn marching_cubes_cleanup<'py>( } else if let (Some(grid), Some(mesh)) = (grid.as_f64(), mesh.as_f64_mut()) { cleanup(mesh, grid, max_rel_snap_dist, max_iter, keep_vertices); } else { - return Err(pyerr_mesh_grid_scalar_mismatch()); + return Err(pyerr_scalar_type_mismatch()); } Ok(()) diff --git a/pysplashsurf/src/sph_interpolation.rs b/pysplashsurf/src/sph_interpolation.rs index c86ac8f..b56522c 100644 --- a/pysplashsurf/src/sph_interpolation.rs +++ b/pysplashsurf/src/sph_interpolation.rs @@ -1,128 +1,223 @@ -use ndarray::{ArrayView, ArrayView2}; -use numpy::{PyArray2, PyReadonlyArray2, ToPyArray}; -use pyo3::{PyResult, prelude::*}; +use numpy as np; +use numpy::prelude::*; +use numpy::{Element, PyArray1, PyArray2, PyUntypedArray}; +use pyo3::PyResult; +use pyo3::exceptions::PyValueError; +use pyo3::prelude::*; use pyo3_stub_gen::derive::*; +use splashsurf_lib::nalgebra::SVector; use splashsurf_lib::{ + Real, nalgebra::{Unit, Vector3}, sph_interpolation::SphInterpolator, }; -macro_rules! 
create_sph_interpolator_interface { - ($name: ident, $type: ident) => { - /// SphInterpolator wrapper - #[gen_stub_pyclass] - #[pyclass] - pub struct $name { - pub inner: SphInterpolator<$type>, - } +use crate::utils::*; - impl $name { - pub fn new(data: SphInterpolator<$type>) -> Self { - Self { inner: data } - } - } +enum PySphInterpolatorWrapper { + F32(SphInterpolator), + F64(SphInterpolator), +} - #[gen_stub_pymethods] - #[pymethods] - impl $name { - #[new] - fn py_new<'py>( - particle_positions: &Bound<'py, PyArray2<$type>>, - particle_densities: Vec<$type>, - particle_rest_mass: $type, - compact_support_radius: $type, - ) -> PyResult { - let particle_positions: PyReadonlyArray2<$type> = - particle_positions.extract().unwrap(); - let particle_positions = particle_positions.as_slice().unwrap(); - let particle_positions: &[Vector3<$type>] = - bytemuck::cast_slice(particle_positions); - - Ok($name::new(SphInterpolator::new( - particle_positions, - particle_densities.as_slice(), - particle_rest_mass, - compact_support_radius, - ))) - } +#[gen_stub_pyclass] +#[pyclass] +#[pyo3(name = "SphInterpolator")] +pub struct PySphInterpolator { + inner: PySphInterpolatorWrapper, +} - /// Interpolates a scalar per particle quantity to the given points, panics if the there are less per-particles values than particles - fn interpolate_scalar_quantity<'py>( - &self, - particle_quantity: Vec<$type>, - interpolation_points: &Bound<'py, PyArray2<$type>>, - first_order_correction: bool, - ) -> PyResult> { - let interpolation_points: PyReadonlyArray2<$type> = - interpolation_points.extract()?; - let interpolation_points = interpolation_points.as_slice()?; - let interpolation_points: &[Vector3<$type>] = - bytemuck::cast_slice(interpolation_points); - - Ok(self.inner.interpolate_scalar_quantity( - particle_quantity.as_slice(), - interpolation_points, - first_order_correction, - )) - } +enum_wrapper_impl_from!(PySphInterpolator, SphInterpolator => PySphInterpolatorWrapper::F32); +enum_wrapper_impl_from!(PySphInterpolator, SphInterpolator => PySphInterpolatorWrapper::F64); + +impl PySphInterpolator { + fn new_generic<'py, R: Real + Element>( + particle_positions: &Bound<'py, PyUntypedArray>, + particle_densities: &Bound<'py, PyUntypedArray>, + particle_rest_mass: f64, + compact_support_radius: f64, + ) -> PyResult + where + PySphInterpolator: From>, + { + if let (Ok(particles), Ok(densities)) = ( + particle_positions.downcast::>(), + particle_densities.downcast::>(), + ) { + let particles = particles.try_readonly()?; + let particles: &[Vector3] = bytemuck::cast_slice(particles.as_slice()?); + + let densities = densities.try_readonly()?; + let densities = densities.as_slice()?; + + Ok(PySphInterpolator::from(SphInterpolator::new( + particles, + densities, + R::from_float(particle_rest_mass), + R::from_float(compact_support_radius), + ))) + } else { + Err(pyerr_scalar_type_mismatch()) + } + } + + fn interpolate_normals_generic<'py, R: Real + Element>( + interpolator: &SphInterpolator, + interpolation_points: &Bound<'py, PyUntypedArray>, + ) -> PyResult> { + let py = interpolation_points.py(); + if let Ok(points) = interpolation_points.downcast::>() { + let points = points.try_readonly()?; + let points: &[Vector3] = bytemuck::cast_slice(points.as_slice()?); + + let normals_vec = interpolator.interpolate_normals(points); + Ok(bytemuck::cast_vec::>, R>(normals_vec) + .into_pyarray(py) + .reshape((points.len(), 3))? 
+ .into_any() + .downcast_into::() + .expect("downcast should not fail")) + } else { + Err(pyerr_unsupported_scalar()) + } + } + + fn interpolate_vector_generic<'py, R: Real + Element>( + interpolator: &SphInterpolator, + particle_quantity: &Bound<'py, PyUntypedArray>, + interpolation_points: &Bound<'py, PyUntypedArray>, + first_order_correction: bool, + ) -> PyResult> { + let shape = particle_quantity.shape(); + if ![1, 2].contains(&shape.len()) || shape[0] != interpolator.size() { + return Err(PyValueError::new_err( + "unsupported shape of per particle quantity", + )); + } + let n_components = shape.get(1).copied().unwrap_or(1); + + // Get the per-particle quantity as a read-only contiguous slice + let quantity = if let Ok(q) = particle_quantity.downcast::>() { + q.to_dyn().try_readonly() + } else if let Ok(q) = particle_quantity.downcast::>() { + q.to_dyn().try_readonly() + } else { + return Err(pyerr_scalar_type_mismatch()); + }?; + let quantity = quantity.as_slice()?; + + let points = if let Ok(p) = interpolation_points.downcast::>() { + p.try_readonly()? + } else { + return Err(pyerr_scalar_type_mismatch()); + }; + let points: &[Vector3] = bytemuck::cast_slice(points.as_slice()?); + + fn interpolate_ndim<'py, const D: usize, R: Real + Element>( + py: Python<'py>, + interpolator: &SphInterpolator, + points: &[Vector3], + quantity: &[R], + first_order_correction: bool, + shape: &[usize], + ) -> PyResult> { + let quantity: &[SVector] = bytemuck::cast_slice(quantity); + let interpolated = + interpolator.interpolate_vector_quantity(quantity, points, first_order_correction); + Ok(bytemuck::cast_vec::<_, R>(interpolated) + .into_pyarray(py) + .reshape(shape)? + .into_any() + .downcast_into::() + .expect("downcast should not fail")) + } - /// Interpolates surface normals (i.e. 
normalized SPH gradient of the indicator function) of the fluid to the given points using SPH interpolation - fn interpolate_normals<'py>( - &self, - py: Python<'py>, - interpolation_points: &Bound<'py, PyArray2<$type>>, - ) -> PyResult>> { - let interpolation_points: PyReadonlyArray2<$type> = - interpolation_points.extract()?; - let interpolation_points = interpolation_points.as_slice()?; - let interpolation_points: &[Vector3<$type>] = - bytemuck::cast_slice(interpolation_points); - - let normals_vec = self.inner.interpolate_normals(interpolation_points); - let normals_vec = - bytemuck::allocation::cast_vec::>, $type>(normals_vec); - - let normals: &[$type] = normals_vec.as_slice(); - let normals: ArrayView2<$type> = - ArrayView::from_shape((normals.len() / 3, 3), normals).unwrap(); - - Ok(normals.to_pyarray(py)) - } + let py = particle_quantity.py(); + let i = interpolator; + match n_components { + 1 => interpolate_ndim::<1, R>(py, i, points, quantity, first_order_correction, shape), + 2 => interpolate_ndim::<2, R>(py, i, points, quantity, first_order_correction, shape), + 3 => interpolate_ndim::<3, R>(py, i, points, quantity, first_order_correction, shape), + 4 => interpolate_ndim::<4, R>(py, i, points, quantity, first_order_correction, shape), + 5 => interpolate_ndim::<5, R>(py, i, points, quantity, first_order_correction, shape), + 6 => interpolate_ndim::<6, R>(py, i, points, quantity, first_order_correction, shape), + 7 => interpolate_ndim::<7, R>(py, i, points, quantity, first_order_correction, shape), + 8 => interpolate_ndim::<8, R>(py, i, points, quantity, first_order_correction, shape), + 9 => interpolate_ndim::<9, R>(py, i, points, quantity, first_order_correction, shape), + _ => Err(PyValueError::new_err( + "only vector quantities with up to 9 dimensions are supported", + )), + } + } +} - /// Interpolates a vectorial per particle quantity to the given points, panics if the there are less per-particles values than particles - fn interpolate_vector_quantity<'py>( - &self, - py: Python<'py>, - particle_quantity: &Bound<'py, PyArray2<$type>>, - interpolation_points: &Bound<'py, PyArray2<$type>>, - first_order_correction: bool, - ) -> PyResult>> { - let interpolation_points: PyReadonlyArray2<$type> = - interpolation_points.extract()?; - let interpolation_points = interpolation_points.as_slice()?; - let interpolation_points: &[Vector3<$type>] = - bytemuck::cast_slice(interpolation_points); - - let particle_quantity: PyReadonlyArray2<$type> = particle_quantity.extract()?; - let particle_quantity = particle_quantity.as_slice()?; - let particle_quantity: &[Vector3<$type>] = bytemuck::cast_slice(particle_quantity); - - let res_vec = self.inner.interpolate_vector_quantity( - particle_quantity, - interpolation_points, - first_order_correction, - ); - let res_vec = bytemuck::allocation::cast_vec::, $type>(res_vec); - - let res: &[$type] = res_vec.as_slice(); - let res: ArrayView2<$type> = - ArrayView::from_shape((res.len() / 3, 3), res).unwrap(); - - Ok(res.to_pyarray(py)) +#[gen_stub_pymethods] +#[pymethods] +impl PySphInterpolator { + /// Constructs an SPH interpolator for the given particles + #[new] + fn py_new<'py>( + particle_positions: &Bound<'py, PyUntypedArray>, + particle_densities: &Bound<'py, PyUntypedArray>, + particle_rest_mass: f64, + compact_support_radius: f64, + ) -> PyResult { + let py = particle_positions.py(); + let element_type = particle_positions.dtype(); + + if element_type.is_equiv_to(&np::dtype::(py)) { + Self::new_generic::( + particle_positions, + 
particle_densities, + particle_rest_mass, + compact_support_radius, + ) + } else if element_type.is_equiv_to(&np::dtype::(py)) { + Self::new_generic::( + particle_positions, + particle_densities, + particle_rest_mass, + compact_support_radius, + ) + } else { + Err(pyerr_unsupported_scalar()) + } + } + + /// Interpolates a scalar or vectorial per particle quantity to the given points + fn interpolate_quantity<'py>( + &self, + particle_quantity: &Bound<'py, PyUntypedArray>, + interpolation_points: &Bound<'py, PyUntypedArray>, + first_order_correction: bool, + ) -> PyResult> { + match &self.inner { + PySphInterpolatorWrapper::F32(interp) => Self::interpolate_vector_generic::( + interp, + particle_quantity, + interpolation_points, + first_order_correction, + ), + PySphInterpolatorWrapper::F64(interp) => Self::interpolate_vector_generic::( + interp, + particle_quantity, + interpolation_points, + first_order_correction, + ), + } + } + + /// Interpolates surface normals (i.e. normalized SPH gradient of the indicator function) of the fluid to the given points using SPH interpolation + fn interpolate_normals<'py>( + &self, + interpolation_points: &Bound<'py, PyUntypedArray>, + ) -> PyResult> { + match &self.inner { + PySphInterpolatorWrapper::F32(interp) => { + Self::interpolate_normals_generic::(interp, interpolation_points) + } + PySphInterpolatorWrapper::F64(interp) => { + Self::interpolate_normals_generic::(interp, interpolation_points) } } - }; + } } - -create_sph_interpolator_interface!(SphInterpolatorF64, f64); -create_sph_interpolator_interface!(SphInterpolatorF32, f32); diff --git a/pysplashsurf/src/utils.rs b/pysplashsurf/src/utils.rs index 8935f18..1b61cf7 100644 --- a/pysplashsurf/src/utils.rs +++ b/pysplashsurf/src/utils.rs @@ -7,9 +7,9 @@ pub(crate) fn pyerr_unsupported_scalar() -> PyErr { PyTypeError::new_err("unsupported mesh scalar data type, only f32 and f64 are supported") } -pub(crate) fn pyerr_mesh_grid_scalar_mismatch() -> PyErr { +pub(crate) fn pyerr_scalar_type_mismatch() -> PyErr { PyTypeError::new_err( - "unsupported mesh and grid scalar data type combination, both have to be either f32 or f64", + "unsupported combination of scalar data types, all parameters must have the same type (f32 or f64)", ) } From 0c0b496d74fe7c0be5c25d428a1c56497191cdac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Sat, 30 Aug 2025 12:53:46 +0200 Subject: [PATCH 33/63] Make fields of SurfaceReconstruction public, refactor pipeline return --- pysplashsurf/src/pipeline.rs | 32 ++++++++------- splashsurf/src/reconstruct.rs | 65 ++++++++++++++++++------------- splashsurf/tests/test_pipeline.rs | 18 ++------- splashsurf_lib/src/lib.rs | 11 +++--- 4 files changed, 65 insertions(+), 61 deletions(-) diff --git a/pysplashsurf/src/pipeline.rs b/pysplashsurf/src/pipeline.rs index 9f5b611..1e1b107 100644 --- a/pysplashsurf/src/pipeline.rs +++ b/pysplashsurf/src/pipeline.rs @@ -1,9 +1,7 @@ -use crate::mesh::PyMeshWithData; -use crate::utils::{IndexT, pyerr_unsupported_scalar}; use numpy as np; +use numpy::prelude::*; use numpy::{ - Element, PyArray1, PyArray2, PyArrayDescr, PyArrayDescrMethods, PyArrayMethods, - PyReadonlyArray1, PyReadonlyArray2, PyUntypedArray, PyUntypedArrayMethods, + Element, PyArray1, PyArray2, PyArrayDescr, PyReadonlyArray1, PyReadonlyArray2, PyUntypedArray, }; use pyo3::exceptions::PyRuntimeError; use pyo3::{ @@ -18,6 +16,10 @@ use splashsurf_lib::{ }; use std::borrow::Cow; +use crate::mesh::PyMeshWithData; +use crate::reconstruction::PySurfaceReconstruction; 
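A minimal sketch of how the unified SphInterpolator class above could be driven from Python (all numeric values are illustrative; the per-particle densities are assumed to be precomputed, e.g. by a prior reconstruction):

    import numpy as np
    import pysplashsurf

    particles = np.random.rand(1000, 3)   # (N, 3) positions, float64
    densities = np.full(1000, 1000.0)     # (N,) per-particle densities, same dtype as positions

    interp = pysplashsurf.SphInterpolator(
        particles, densities,
        particle_rest_mass=1e-3,      # illustrative value
        compact_support_radius=0.1,   # illustrative value
    )

    points = np.random.rand(50, 3)        # query points, same dtype as the particles
    temperature = np.random.rand(1000)    # scalar per-particle quantity, shape (N,)
    velocity = np.random.rand(1000, 3)    # vector per-particle quantity, shape (N, 3)

    t_interp = interp.interpolate_quantity(temperature, points, first_order_correction=False)
    v_interp = interp.interpolate_quantity(velocity, points, first_order_correction=True)
    normals = interp.interpolate_normals(points)  # one normal per query point
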
+use crate::utils::{IndexT, pyerr_unsupported_scalar}; + /// Runs the surface reconstruction pipeline for the given particle positions with optional post-processing /// /// Note that smoothing length and cube size are given in multiples of the particle radius. @@ -74,7 +76,7 @@ pub fn reconstruction_pipeline<'py>( mesh_aabb_max: Option<[f64; 3]>, mesh_aabb_clamp_vertices: bool, dtype: Option>, -) -> PyResult { +) -> PyResult<(PyMeshWithData, PySurfaceReconstruction)> { let py = particles.py(); let element_type = particles.dtype(); @@ -140,19 +142,19 @@ pub fn reconstruction_pipeline<'py>( mesh_aabb_clamp_vertices, }; - fn reconstruction_to_pymesh<'py, I: Index, R: Real + Element>( + fn reconstruction_to_pymesh<'py, R: Real + Element>( py: Python<'py>, - reconstruction: splashsurf::reconstruct::ReconstructionResult, - ) -> PyResult { - if let Some(tri_mesh) = reconstruction.tri_mesh { - PyMeshWithData::try_from_mesh_with_data(py, tri_mesh) + reconstruction: splashsurf::reconstruct::ReconstructionResult, + ) -> PyResult<(PyMeshWithData, PySurfaceReconstruction)> { + let mesh_with_data = if let Some(tri_mesh) = reconstruction.tri_mesh { + PyMeshWithData::try_from_mesh_with_data(py, tri_mesh)? } else if let Some(tri_quad_mesh) = reconstruction.tri_quad_mesh { - PyMeshWithData::try_from_mesh_with_data(py, tri_quad_mesh) + PyMeshWithData::try_from_mesh_with_data(py, tri_quad_mesh)? } else { - Err(PyRuntimeError::new_err( - "Reconstruction resulted in no mesh", - )) - } + return Err(PyRuntimeError::new_err("reconstruction returned no mesh")); + }; + let rec = PySurfaceReconstruction::try_from_generic(reconstruction.raw_reconstruction)?; + Ok((mesh_with_data, rec)) } if element_type.is_equiv_to(&np::dtype::(py)) { diff --git a/splashsurf/src/reconstruct.rs b/splashsurf/src/reconstruct.rs index 0aa25f9..643b4b5 100644 --- a/splashsurf/src/reconstruct.rs +++ b/splashsurf/src/reconstruct.rs @@ -440,8 +440,8 @@ pub struct ReconstructionResult { pub tri_mesh: Option>>, /// Holds the reconstructed mixed triangle/quad mesh (only if [`generate_quads`](ReconstructionPostprocessingParameters::generate_quads) was enabled) pub tri_quad_mesh: Option>>, - /// Holds the initial [`SurfaceReconstruction`] with no post-processing applied (only if [`output_raw_mesh`](ReconstructionPostprocessingParameters::output_raw_mesh) was enabled) - pub raw_reconstruction: Option>, + /// Holds the initial [`SurfaceReconstruction`] with no post-processing applied (the unprocessed mesh is only contained if [`output_raw_mesh`](ReconstructionPostprocessingParameters::output_raw_mesh) was enabled) + pub raw_reconstruction: SurfaceReconstruction, } /// Parameters for the post-processing steps in the reconstruction pipeline @@ -1040,12 +1040,6 @@ pub fn reconstruction_pipeline<'a, I: Index, R: Real>( } } - let reconstruction_output = if postprocessing.output_raw_mesh { - Some(reconstruction.clone()) - } else { - None - }; - let grid = reconstruction.grid(); let mut mesh_with_data = MeshWithData::new(Cow::Borrowed(reconstruction.mesh())); @@ -1396,7 +1390,7 @@ pub fn reconstruction_pipeline<'a, I: Index, R: Real>( }; // Convert triangles to quads - let (mut tri_mesh, tri_quad_mesh) = if postprocessing.generate_quads { + let (tri_mesh, mut tri_quad_mesh) = if postprocessing.generate_quads { info!("Post-processing: Convert triangles to quads..."); let non_squareness_limit = R::from_float(postprocessing.quad_max_edge_diag_ratio); let normal_angle_limit = R::from_float(postprocessing.quad_max_normal_angle.to_radians()); @@ -1432,7 +1426,7 
@@ pub fn reconstruction_pipeline<'a, I: Index, R: Real>( // TODO: Option to continue processing sequences even if checks fail. Maybe return special error type? if postprocessing.check_mesh_closed || postprocessing.check_mesh_manifold { - if let Err(err) = match (&tri_mesh, &tri_quad_mesh) { + if let Err(err) = match (&tri_mesh, &mut tri_quad_mesh) { (Some(mesh), None) => splashsurf_lib::marching_cubes::check_mesh_consistency( grid, &mesh.mesh, @@ -1440,12 +1434,12 @@ pub fn reconstruction_pipeline<'a, I: Index, R: Real>( postprocessing.check_mesh_manifold, postprocessing.check_mesh_debug, ), - (None, Some(_mesh)) => { + (None, Some(mesh)) => { info!("Checking for mesh consistency not implemented for quad mesh at the moment."); return Ok(ReconstructionResult { tri_mesh: None, - tri_quad_mesh: Some(_mesh.to_owned()), - raw_reconstruction: reconstruction_output, + tri_quad_mesh: Some(std::mem::take(mesh)), + raw_reconstruction: reconstruction, }); } _ => unreachable!(), @@ -1529,23 +1523,45 @@ pub fn reconstruction_pipeline<'a, I: Index, R: Real>( } } - match (&mut tri_mesh, &tri_quad_mesh) { + match (tri_mesh, tri_quad_mesh) { (Some(mesh), None) => { - let mut res: MeshWithData> = - MeshWithData::new(mesh.to_owned().mesh.into_owned()); - res.point_attributes = std::mem::take(&mut mesh.point_attributes); - res.cell_attributes = std::mem::take(&mut mesh.cell_attributes); + let MeshWithData { + mesh, + point_attributes, + cell_attributes, + } = mesh; + + // Avoid copy if original mesh was not modified + let (mesh, take_mesh) = if std::ptr::eq(mesh.as_ref(), reconstruction.mesh()) + && !postprocessing.output_raw_mesh + { + // Ensure that borrow of reconstruction is dropped + (Default::default(), true) + } else { + (mesh.into_owned(), false) + }; + + let mut reconstruction = reconstruction; + let mesh = if take_mesh { + std::mem::take(&mut reconstruction.mesh) + } else { + mesh + }; Ok(ReconstructionResult { - tri_mesh: Some(res), + tri_mesh: Some(MeshWithData { + mesh, + point_attributes, + cell_attributes, + }), tri_quad_mesh: None, - raw_reconstruction: reconstruction_output, + raw_reconstruction: reconstruction, }) } - (None, Some(_mesh)) => Ok(ReconstructionResult { + (None, Some(mesh)) => Ok(ReconstructionResult { tri_mesh: None, - tri_quad_mesh: Some(_mesh.to_owned()), - raw_reconstruction: reconstruction_output, + tri_quad_mesh: Some(mesh), + raw_reconstruction: reconstruction, }), _ => unreachable!(), } @@ -1586,9 +1602,6 @@ pub(crate) fn reconstruction_pipeline_from_path( if postprocessing.output_raw_mesh { profile!("write surface mesh to file"); - let reconstruction = reconstruction.expect( - "reconstruction_pipeline_from_data did not return a SurfaceReconstruction object", - ); let mesh = reconstruction.mesh(); let output_path = paths diff --git a/splashsurf/tests/test_pipeline.rs b/splashsurf/tests/test_pipeline.rs index 9bbd848..6003e54 100644 --- a/splashsurf/tests/test_pipeline.rs +++ b/splashsurf/tests/test_pipeline.rs @@ -27,11 +27,7 @@ fn test_basic_pipeline() -> Result<(), Box> { .as_ref() .expect("reconstruction should produce a triangle mesh") .mesh; - let raw_mesh = reconstruction - .raw_reconstruction - .as_ref() - .expect("raw surface should be present") - .mesh(); + let raw_mesh = reconstruction.raw_reconstruction.mesh(); vtk_format::write_vtk(mesh, "../out/bunny_test_basic_pipeline.vtk", "mesh")?; // Compare raw and final mesh @@ -207,11 +203,7 @@ fn test_basic_pipeline_postprocessing() -> Result<(), Box .as_ref() .expect("reconstruction should produce a triangle 
mesh") .mesh; - let raw_mesh = reconstruction - .raw_reconstruction - .as_ref() - .expect("raw surface should be present") - .mesh(); + let raw_mesh = reconstruction.raw_reconstruction.mesh(); vtk_format::write_vtk( mesh, "../out/bunny_test_basic_pipeline_postprocessing.vtk", @@ -294,11 +286,7 @@ fn test_basic_pipeline_postprocessing_with_aabb() -> Result<(), Box Parameters { #[derive(Clone, Debug)] pub struct SurfaceReconstruction { /// Background grid that was used as a basis for generating the density map for marching cubes - grid: UniformGrid, + pub grid: UniformGrid, /// Per particle densities (contains only data of particles inside the domain) - particle_densities: Option>, + pub particle_densities: Option>, /// If an AABB was specified to restrict the reconstruction, this stores per input particle whether they were inside - particle_inside_aabb: Option>, + pub particle_inside_aabb: Option>, /// Per particles neighbor lists - particle_neighbors: Option>>, + pub particle_neighbors: Option>>, /// Surface mesh that is the result of the surface reconstruction - mesh: TriMesh3d, + pub mesh: TriMesh3d, /// Workspace with allocated memory for subsequent surface reconstructions workspace: ReconstructionWorkspace, } @@ -266,6 +266,7 @@ impl Default for SurfaceReconstruction { } } +// TODO: Remove these functions impl SurfaceReconstruction { /// Returns a reference to the surface mesh that is the result of the reconstruction pub fn mesh(&self) -> &TriMesh3d { From b95ed054cd5a1ce76dc4dbad8d9623fe484a4b4f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Sat, 30 Aug 2025 12:54:25 +0200 Subject: [PATCH 34/63] Py: Refactoring --- pysplashsurf/pysplashsurf/pysplashsurf.pyi | 11 +-- pysplashsurf/src/mesh.rs | 81 +++++----------------- pysplashsurf/src/reconstruction.rs | 42 +++++------ pysplashsurf/src/utils.rs | 42 ++++++++++- splashsurf_lib/src/sph_interpolation.rs | 2 +- 5 files changed, 85 insertions(+), 93 deletions(-) diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index 314f991..77ca877 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -125,6 +125,11 @@ class SurfaceReconstruction: r""" Struct containing results of the surface reconstruction including the mesh, grid parameters and optional particle data """ + @property + def particle_densities(self) -> typing.Optional[numpy.typing.NDArray[typing.Any]]: + r""" + The particle densities computed during the reconstruction if available + """ def copy_mesh(self) -> TriMesh3d: r""" Returns a copy of the surface mesh of the reconstruction @@ -133,10 +138,6 @@ class SurfaceReconstruction: r""" Returns a copy of the uniform grid parameters used for the reconstruction """ - def copy_particle_densities(self) -> typing.Optional[numpy.typing.NDArray[typing.Any]]: - r""" - Returns a copy of the particle densities computed during the reconstruction - """ def copy_particle_neighbors(self) -> typing.Optional[builtins.list[builtins.list[builtins.int]]]: r""" Returns a copy of the per-particle neighborhood lists computed during the reconstruction if available @@ -256,7 +257,7 @@ def reconstruct_surface(particles:numpy.typing.NDArray[typing.Any], *, particle_ Note that all parameters use absolute distance units and are not relative to the particle radius. 
""" -def reconstruction_pipeline(particles:numpy.typing.NDArray[typing.Any], *, attributes_to_interpolate:typing.Optional[dict]=None, particle_radius:builtins.float, rest_density:builtins.float=1000.0, smoothing_length:builtins.float, cube_size:builtins.float, iso_surface_threshold:builtins.float=0.6, aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None, multi_threading:builtins.bool=True, subdomain_grid:builtins.bool=True, subdomain_grid_auto_disable:builtins.bool=True, subdomain_num_cubes_per_dim:builtins.int=64, check_mesh_closed:builtins.bool=False, check_mesh_manifold:builtins.bool=False, check_mesh_orientation:builtins.bool=False, check_mesh_debug:builtins.bool=False, mesh_cleanup:builtins.bool=False, mesh_cleanup_snap_dist:typing.Optional[builtins.float]=None, decimate_barnacles:builtins.bool=False, keep_vertices:builtins.bool=False, compute_normals:builtins.bool=False, sph_normals:builtins.bool=False, normals_smoothing_iters:typing.Optional[builtins.int]=None, mesh_smoothing_iters:typing.Optional[builtins.int]=None, mesh_smoothing_weights:builtins.bool=True, mesh_smoothing_weights_normalization:builtins.float=13.0, generate_quads:builtins.bool=False, quad_max_edge_diag_ratio:builtins.float=1.75, quad_max_normal_angle:builtins.float=10.0, quad_max_interior_angle:builtins.float=135.0, output_mesh_smoothing_weights:builtins.bool=False, output_raw_normals:builtins.bool=False, output_raw_mesh:builtins.bool=False, mesh_aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, mesh_aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None, mesh_aabb_clamp_vertices:builtins.bool=True, dtype:typing.Optional[numpy.dtype]=None) -> MeshWithData: +def reconstruction_pipeline(particles:numpy.typing.NDArray[typing.Any], *, attributes_to_interpolate:typing.Optional[dict]=None, particle_radius:builtins.float, rest_density:builtins.float=1000.0, smoothing_length:builtins.float, cube_size:builtins.float, iso_surface_threshold:builtins.float=0.6, aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None, multi_threading:builtins.bool=True, subdomain_grid:builtins.bool=True, subdomain_grid_auto_disable:builtins.bool=True, subdomain_num_cubes_per_dim:builtins.int=64, check_mesh_closed:builtins.bool=False, check_mesh_manifold:builtins.bool=False, check_mesh_orientation:builtins.bool=False, check_mesh_debug:builtins.bool=False, mesh_cleanup:builtins.bool=False, mesh_cleanup_snap_dist:typing.Optional[builtins.float]=None, decimate_barnacles:builtins.bool=False, keep_vertices:builtins.bool=False, compute_normals:builtins.bool=False, sph_normals:builtins.bool=False, normals_smoothing_iters:typing.Optional[builtins.int]=None, mesh_smoothing_iters:typing.Optional[builtins.int]=None, mesh_smoothing_weights:builtins.bool=True, mesh_smoothing_weights_normalization:builtins.float=13.0, generate_quads:builtins.bool=False, quad_max_edge_diag_ratio:builtins.float=1.75, quad_max_normal_angle:builtins.float=10.0, quad_max_interior_angle:builtins.float=135.0, output_mesh_smoothing_weights:builtins.bool=False, output_raw_normals:builtins.bool=False, output_raw_mesh:builtins.bool=False, mesh_aabb_min:typing.Optional[typing.Sequence[builtins.float]]=None, mesh_aabb_max:typing.Optional[typing.Sequence[builtins.float]]=None, mesh_aabb_clamp_vertices:builtins.bool=True, dtype:typing.Optional[numpy.dtype]=None) -> tuple[MeshWithData, SurfaceReconstruction]: r""" Runs the 
surface reconstruction pipeline for the given particle positions with optional post-processing diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index 646f158..1e7cc9a 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -1,8 +1,10 @@ +use crate::NumpyUsize; +use crate::utils::*; use bytemuck::{NoUninit, Pod}; -use ndarray::{Array2, ArrayView, ArrayView1, ArrayView2}; +use ndarray::Array2; use numpy as np; use numpy::prelude::*; -use numpy::{Element, PyArray, PyArray1, PyArray2, PyArrayDescr, PyUntypedArray}; +use numpy::{Element, PyArray, PyArray2, PyArrayDescr, PyUntypedArray}; use pyo3::exceptions::PyTypeError; use pyo3::prelude::*; use pyo3::{IntoPyObjectExt, types::PyList}; @@ -17,59 +19,12 @@ use splashsurf_lib::{ nalgebra::{Unit, Vector3}, }; -use crate::NumpyUsize; -use crate::utils::*; - -fn get_vec_generic<'py, R: Element>( - values: &[R], - shape: (usize, usize), - container: Bound<'py, PyAny>, -) -> PyResult> { - assert_eq!( - shape.0 * shape.1, - values.len(), - "shape does not match values length" - ); - if shape.1 == 1 { - let array: ArrayView1 = - ArrayView::from_shape((values.len(),), values).map_err(anyhow::Error::new)?; - let pyarray = unsafe { PyArray1::borrow_from_array(&array, container) }; - Ok(pyarray - .into_any() - .downcast_into::() - .expect("downcast should not fail")) - } else { - let array: ArrayView2 = - ArrayView::from_shape(shape, values).map_err(anyhow::Error::new)?; - let pyarray = unsafe { PyArray2::borrow_from_array(&array, container) }; - Ok(pyarray - .into_any() - .downcast_into::() - .expect("downcast should not fail")) - } -} - -fn get_scalar_generic<'py, R: Element>( - values: &[R], - container: Bound<'py, PyAny>, -) -> PyResult> { - get_vec_generic(values, (values.len(), 1), container) -} - -fn get_vec3f_generic<'py, R: Real + Element>( - values: &[Vector3], - container: Bound<'py, PyAny>, -) -> PyResult> { - let coordinates: &[R] = bytemuck::cast_slice(values); - get_vec_generic(coordinates, (values.len(), 3), container) -} - -fn get_triangles_generic<'py>( +fn view_triangles_generic<'py>( triangles: &[TriangleCell], container: Bound<'py, PyAny>, ) -> PyResult>> { let vertex_indices: &[NumpyUsize] = bytemuck::cast_slice(triangles); - let view = get_vec_generic(vertex_indices, (triangles.len(), 3), container)?.into_any(); + let view = view_generic(vertex_indices, &[triangles.len(), 3], container)?.into_any(); Ok(view.downcast_into::>()?) 
} @@ -210,8 +165,8 @@ impl PyTriMesh3d { #[getter] pub fn vertices<'py>(this: Bound<'py, Self>) -> PyResult> { match &this.borrow().inner { - PyTriMesh3dData::F32(mesh) => get_vec3f_generic(mesh.vertices(), this.into_any()), - PyTriMesh3dData::F64(mesh) => get_vec3f_generic(mesh.vertices(), this.into_any()), + PyTriMesh3dData::F32(mesh) => view_vec_generic(mesh.vertices(), this.into_any()), + PyTriMesh3dData::F64(mesh) => view_vec_generic(mesh.vertices(), this.into_any()), } } @@ -219,8 +174,8 @@ impl PyTriMesh3d { #[getter] pub fn triangles<'py>(this: Bound<'py, Self>) -> PyResult>> { match &this.borrow().inner { - PyTriMesh3dData::F32(mesh) => get_triangles_generic(mesh.cells(), this.into_any()), - PyTriMesh3dData::F64(mesh) => get_triangles_generic(mesh.cells(), this.into_any()), + PyTriMesh3dData::F32(mesh) => view_triangles_generic(mesh.cells(), this.into_any()), + PyTriMesh3dData::F64(mesh) => view_triangles_generic(mesh.cells(), this.into_any()), } } @@ -291,10 +246,10 @@ impl PyMixedTriQuadMesh3d { pub fn vertices<'py>(this: Bound<'py, Self>) -> PyResult> { match &this.borrow().inner { PyMixedTriQuadMesh3dData::F32(mesh) => { - get_vec3f_generic(mesh.vertices(), this.into_any()) + view_vec_generic(mesh.vertices(), this.into_any()) } PyMixedTriQuadMesh3dData::F64(mesh) => { - get_vec3f_generic(mesh.vertices(), this.into_any()) + view_vec_generic(mesh.vertices(), this.into_any()) } } } @@ -417,14 +372,14 @@ impl PyMeshAttribute { pub fn data<'py>(this: Bound<'py, Self>) -> PyResult> { match &this.borrow().inner { PyMeshAttributeData::F32(attr) => match &attr.data { - OwnedAttributeData::ScalarU64(data) => get_scalar_generic(data, this.into_any()), - OwnedAttributeData::ScalarReal(data) => get_scalar_generic(data, this.into_any()), - OwnedAttributeData::Vector3Real(data) => get_vec3f_generic(data, this.into_any()), + OwnedAttributeData::ScalarU64(data) => view_scalar_generic(data, this.into_any()), + OwnedAttributeData::ScalarReal(data) => view_scalar_generic(data, this.into_any()), + OwnedAttributeData::Vector3Real(data) => view_vec_generic(data, this.into_any()), }, PyMeshAttributeData::F64(attr) => match &attr.data { - OwnedAttributeData::ScalarU64(data) => get_scalar_generic(data, this.into_any()), - OwnedAttributeData::ScalarReal(data) => get_scalar_generic(data, this.into_any()), - OwnedAttributeData::Vector3Real(data) => get_vec3f_generic(data, this.into_any()), + OwnedAttributeData::ScalarU64(data) => view_scalar_generic(data, this.into_any()), + OwnedAttributeData::ScalarReal(data) => view_scalar_generic(data, this.into_any()), + OwnedAttributeData::Vector3Real(data) => view_vec_generic(data, this.into_any()), }, } } diff --git a/pysplashsurf/src/reconstruction.rs b/pysplashsurf/src/reconstruction.rs index 53d46e7..f08c94a 100644 --- a/pysplashsurf/src/reconstruction.rs +++ b/pysplashsurf/src/reconstruction.rs @@ -1,6 +1,6 @@ use crate::mesh::PyTriMesh3d; use crate::uniform_grid::PyUniformGrid; -use crate::utils::*; +use crate::utils; use anyhow::anyhow; use numpy as np; use numpy::prelude::*; @@ -12,6 +12,7 @@ use splashsurf_lib::{ Aabb3d, GridDecompositionParameters, Real, SpatialDecomposition, SurfaceReconstruction, nalgebra::Vector3, }; +use utils::{IndexT, enum_wrapper_impl_from}; enum PySurfaceReconstructionData { F32(SurfaceReconstruction), @@ -33,9 +34,9 @@ impl PySurfaceReconstruction { pub fn try_from_generic( mut reconstruction: SurfaceReconstruction, ) -> PyResult { - transmute_take_into::<_, SurfaceReconstruction, _>(&mut reconstruction) + 
utils::transmute_take_into::<_, SurfaceReconstruction, _>(&mut reconstruction) .or_else(|| { - transmute_take_into::<_, SurfaceReconstruction, _>(&mut reconstruction) + utils::transmute_take_into::<_, SurfaceReconstruction, _>(&mut reconstruction) }) .ok_or_else(|| PyTypeError::new_err("unsupported type of reconstruction, only i64 for Index and f32 and f64 for Real type are supported")) } @@ -68,25 +69,20 @@ impl PySurfaceReconstruction { } } - /// Returns a copy of the particle densities computed during the reconstruction - fn copy_particle_densities<'py>(&self, py: Python<'py>) -> Option> { - match &self.inner { - PySurfaceReconstructionData::F32(reconstruction) => Some( - reconstruction - .particle_densities()? - .to_pyarray(py) - .into_any() - .downcast_into::() - .expect("downcasting should not fail"), - ), - PySurfaceReconstructionData::F64(reconstruction) => Some( - reconstruction - .particle_densities()? - .to_pyarray(py) - .into_any() - .downcast_into::() - .expect("downcasting should not fail"), - ), + /// The particle densities computed during the reconstruction if available + #[getter] + fn particle_densities<'py>( + this: Bound<'py, Self>, + ) -> PyResult>> { + match &this.borrow().inner { + PySurfaceReconstructionData::F32(reconstruction) => reconstruction + .particle_densities() + .map(|densities| utils::view_scalar_generic(densities.as_slice(), this.into_any())) + .transpose(), + PySurfaceReconstructionData::F64(reconstruction) => reconstruction + .particle_densities() + .map(|densities| utils::view_scalar_generic(densities.as_slice(), this.into_any())) + .transpose(), } } @@ -179,6 +175,6 @@ pub fn reconstruct_surface<'py>( .map_err(|e| anyhow!(e))?; PySurfaceReconstruction::try_from_generic(reconstruction) } else { - Err(pyerr_unsupported_scalar()) + Err(utils::pyerr_unsupported_scalar()) } } diff --git a/pysplashsurf/src/utils.rs b/pysplashsurf/src/utils.rs index 1b61cf7..72428ce 100644 --- a/pysplashsurf/src/utils.rs +++ b/pysplashsurf/src/utils.rs @@ -1,6 +1,12 @@ -use pyo3::PyErr; +use ndarray::{ArrayView, IxDyn}; +use numpy::{Element, PyArray, PyUntypedArray}; use pyo3::exceptions::PyTypeError; +use pyo3::prelude::*; +use pyo3::{Bound, PyAny, PyErr, PyResult}; +use splashsurf_lib::Real; +use splashsurf_lib::nalgebra::SVector; +/// The index type used for all grids and reconstructions in this crate pub(crate) type IndexT = i64; pub(crate) fn pyerr_unsupported_scalar() -> PyErr { @@ -77,3 +83,37 @@ pub(crate) fn transmute_take_into< transmute_same_mut::(value) .map(|value_ref| std::mem::take(value_ref).into()) } + +pub(crate) fn view_generic<'py, R: Element>( + values: &[R], + shape: &[usize], + container: Bound<'py, PyAny>, +) -> PyResult> { + assert_eq!( + shape.iter().product::(), + values.len(), + "shape does not match values length" + ); + let array: ArrayView = + ArrayView::from_shape(shape, values).map_err(anyhow::Error::new)?; + let pyarray = unsafe { PyArray::borrow_from_array(&array, container) }; + Ok(pyarray + .into_any() + .downcast_into::() + .expect("downcast should not fail")) +} + +pub(crate) fn view_scalar_generic<'py, R: Element>( + values: &[R], + container: Bound<'py, PyAny>, +) -> PyResult> { + view_generic(values, &[values.len()], container) +} + +pub(crate) fn view_vec_generic<'py, R: Real + Element, const D: usize>( + values: &[SVector], + container: Bound<'py, PyAny>, +) -> PyResult> { + let coordinates: &[R] = bytemuck::cast_slice(values); + view_generic(coordinates, &[values.len(), D], container) +} diff --git 
a/splashsurf_lib/src/sph_interpolation.rs b/splashsurf_lib/src/sph_interpolation.rs index 28f3f82..6fe432e 100644 --- a/splashsurf_lib/src/sph_interpolation.rs +++ b/splashsurf_lib/src/sph_interpolation.rs @@ -72,7 +72,7 @@ impl SphInterpolator { tree, } } - + /// Returns the number of particles stored in the interpolator pub fn size(&self) -> usize { self.tree.size() From e6c1482a9dbaecf7acde931eaff9452d755b729f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Sat, 30 Aug 2025 22:58:21 +0200 Subject: [PATCH 35/63] Py: Fix re-shaping in interpolator --- pysplashsurf/src/sph_interpolation.rs | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/pysplashsurf/src/sph_interpolation.rs b/pysplashsurf/src/sph_interpolation.rs index b56522c..74aa58c 100644 --- a/pysplashsurf/src/sph_interpolation.rs +++ b/pysplashsurf/src/sph_interpolation.rs @@ -87,13 +87,18 @@ impl PySphInterpolator { interpolation_points: &Bound<'py, PyUntypedArray>, first_order_correction: bool, ) -> PyResult> { - let shape = particle_quantity.shape(); - if ![1, 2].contains(&shape.len()) || shape[0] != interpolator.size() { + let shape_in = particle_quantity.shape(); + if ![1, 2].contains(&shape_in.len()) || shape_in[0] != interpolator.size() { return Err(PyValueError::new_err( "unsupported shape of per particle quantity", )); } - let n_components = shape.get(1).copied().unwrap_or(1); + let n_components = shape_in.get(1).copied().unwrap_or(1); + let shape_out = { + let mut s = shape_in.to_vec(); + s[0] = interpolation_points.shape()[0]; + s + }; // Get the per-particle quantity as a read-only contiguous slice let quantity = if let Ok(q) = particle_quantity.downcast::>() { @@ -105,11 +110,10 @@ impl PySphInterpolator { }?; let quantity = quantity.as_slice()?; - let points = if let Ok(p) = interpolation_points.downcast::>() { - p.try_readonly()? - } else { - return Err(pyerr_scalar_type_mismatch()); - }; + let points = interpolation_points + .downcast::>() + .map_err(|_| pyerr_scalar_type_mismatch())? 
+ .try_readonly()?; let points: &[Vector3] = bytemuck::cast_slice(points.as_slice()?); fn interpolate_ndim<'py, const D: usize, R: Real + Element>( @@ -133,6 +137,7 @@ impl PySphInterpolator { let py = particle_quantity.py(); let i = interpolator; + let shape = &shape_out; match n_components { 1 => interpolate_ndim::<1, R>(py, i, points, quantity, first_order_correction, shape), 2 => interpolate_ndim::<2, R>(py, i, points, quantity, first_order_correction, shape), @@ -153,7 +158,7 @@ impl PySphInterpolator { #[gen_stub_pymethods] #[pymethods] impl PySphInterpolator { - /// Constructs an SPH interpolator for the given particles + /// Constructs an SPH interpolator (with cubic kernels) for the given particles #[new] fn py_new<'py>( particle_positions: &Bound<'py, PyUntypedArray>, @@ -184,6 +189,7 @@ impl PySphInterpolator { } /// Interpolates a scalar or vectorial per particle quantity to the given points + #[pyo3(signature = (particle_quantity, interpolation_points, *, first_order_correction = false))] fn interpolate_quantity<'py>( &self, particle_quantity: &Bound<'py, PyUntypedArray>, From fac6937faaf692aca1182bbe143fbc9349c0a9f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Sun, 31 Aug 2025 22:22:18 +0200 Subject: [PATCH 36/63] Py: Reimplement reconstruction wrapper without copies --- pysplashsurf/pysplashsurf/pysplashsurf.pyi | 31 ++--- pysplashsurf/src/lib.rs | 4 +- pysplashsurf/src/mesh.rs | 64 +++++----- pysplashsurf/src/pipeline.rs | 2 +- pysplashsurf/src/reconstruction.rs | 130 ++++++++++----------- pysplashsurf/src/uniform_grid.rs | 15 ++- pysplashsurf/src/utils.rs | 39 +++++++ 7 files changed, 165 insertions(+), 120 deletions(-) diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index 77ca877..80c2576 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -110,9 +110,9 @@ class NeighborhoodLists: class SphInterpolator: def __new__(cls, particle_positions:numpy.typing.NDArray[typing.Any], particle_densities:numpy.typing.NDArray[typing.Any], particle_rest_mass:builtins.float, compact_support_radius:builtins.float) -> SphInterpolator: r""" - Constructs an SPH interpolator for the given particles + Constructs an SPH interpolator (with cubic kernels) for the given particles """ - def interpolate_quantity(self, particle_quantity:numpy.typing.NDArray[typing.Any], interpolation_points:numpy.typing.NDArray[typing.Any], first_order_correction:builtins.bool) -> numpy.typing.NDArray[typing.Any]: + def interpolate_quantity(self, particle_quantity:numpy.typing.NDArray[typing.Any], interpolation_points:numpy.typing.NDArray[typing.Any], *, first_order_correction:builtins.bool=False) -> numpy.typing.NDArray[typing.Any]: r""" Interpolates a scalar or vectorial per particle quantity to the given points """ @@ -122,27 +122,30 @@ class SphInterpolator: """ class SurfaceReconstruction: - r""" - Struct containing results of the surface reconstruction including the mesh, grid parameters and optional particle data - """ + @property + def grid(self) -> UniformGrid: + r""" + The marching cubes grid parameters used for the surface reconstruction + """ @property def particle_densities(self) -> typing.Optional[numpy.typing.NDArray[typing.Any]]: r""" - The particle densities computed during the reconstruction if available + The global array of particle densities (`None` if they were only computed locally) """ - def copy_mesh(self) -> TriMesh3d: + @property + def 
particle_inside_aabb(self) -> typing.Optional[numpy.typing.NDArray[typing.Any]]: r""" - Returns a copy of the surface mesh of the reconstruction + A boolean array indicating whether each particle was inside the AABB used for the reconstruction (`None` if no AABB was set) """ - def copy_grid(self) -> UniformGrid: + @property + def particle_neighbors(self) -> typing.Optional[NeighborhoodLists]: r""" - Returns a copy of the uniform grid parameters used for the reconstruction + The global neighborhood lists per particle (`None` if they were only computed locally) """ - def copy_particle_neighbors(self) -> typing.Optional[builtins.list[builtins.list[builtins.int]]]: + @property + def mesh(self) -> TriMesh3d: r""" - Returns a copy of the per-particle neighborhood lists computed during the reconstruction if available - - The neighborhood lists are only available if the flag for global neighborhood list was set in the reconstruction parameters. + The reconstructed triangle mesh """ class TriMesh3d: diff --git a/pysplashsurf/src/lib.rs b/pysplashsurf/src/lib.rs index 8e35ecc..44a9b6a 100644 --- a/pysplashsurf/src/lib.rs +++ b/pysplashsurf/src/lib.rs @@ -34,11 +34,11 @@ fn pysplashsurf(m: &Bound<'_, PyModule>) -> PyResult<()> { m.add_class::()?; m.add_class::()?; + m.add_class::()?; + m.add_class::()?; m.add_class::()?; m.add_class::()?; m.add_class::()?; - m.add_class::()?; - m.add_class::()?; use wrap_pyfunction as wrap; diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index 1e7cc9a..97cb555 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -1,11 +1,8 @@ -use crate::NumpyUsize; -use crate::utils::*; use bytemuck::{NoUninit, Pod}; use ndarray::Array2; use numpy as np; use numpy::prelude::*; use numpy::{Element, PyArray, PyArray2, PyArrayDescr, PyUntypedArray}; -use pyo3::exceptions::PyTypeError; use pyo3::prelude::*; use pyo3::{IntoPyObjectExt, types::PyList}; use pyo3_stub_gen::derive::*; @@ -19,12 +16,16 @@ use splashsurf_lib::{ nalgebra::{Unit, Vector3}, }; +use crate::NumpyUsize; +use crate::utils; +use crate::utils::{enum_impl_from, enum_wrapper_impl_from}; + fn view_triangles_generic<'py>( triangles: &[TriangleCell], container: Bound<'py, PyAny>, ) -> PyResult>> { let vertex_indices: &[NumpyUsize] = bytemuck::cast_slice(triangles); - let view = view_generic(vertex_indices, &[triangles.len(), 3], container)?.into_any(); + let view = utils::view_generic(vertex_indices, &[triangles.len(), 3], container)?.into_any(); Ok(view.downcast_into::>()?) } @@ -111,13 +112,9 @@ impl Default for PyTriMesh3d { impl PyTriMesh3d { pub fn try_from_generic(mut mesh: TriMesh3d) -> PyResult { - transmute_take_into::<_, TriMesh3d, _>(&mut mesh) - .or_else(|| transmute_take_into::<_, TriMesh3d, _>(&mut mesh)) - .ok_or_else(|| { - PyTypeError::new_err( - "Unsupported scalar type for TriMesh3d. 
Only f32 and f64 are supported.", - ) - }) + utils::transmute_take_into::<_, TriMesh3d, _>(&mut mesh) + .or_else(|| utils::transmute_take_into::<_, TriMesh3d, _>(&mut mesh)) + .ok_or_else(utils::pyerr_unsupported_scalar) } pub fn as_f32(&self) -> Option<&TriMesh3d> { @@ -165,8 +162,8 @@ impl PyTriMesh3d { #[getter] pub fn vertices<'py>(this: Bound<'py, Self>) -> PyResult> { match &this.borrow().inner { - PyTriMesh3dData::F32(mesh) => view_vec_generic(mesh.vertices(), this.into_any()), - PyTriMesh3dData::F64(mesh) => view_vec_generic(mesh.vertices(), this.into_any()), + PyTriMesh3dData::F32(mesh) => utils::view_vec_generic(mesh.vertices(), this.into_any()), + PyTriMesh3dData::F64(mesh) => utils::view_vec_generic(mesh.vertices(), this.into_any()), } } @@ -219,13 +216,9 @@ enum_wrapper_impl_from!(PyMixedTriQuadMesh3d, MixedTriQuadMesh3d => PyMixed impl PyMixedTriQuadMesh3d { pub fn try_from_generic(mut mesh: MixedTriQuadMesh3d) -> PyResult { - transmute_take_into::<_, MixedTriQuadMesh3d, _>(&mut mesh) - .or_else(|| transmute_take_into::<_, MixedTriQuadMesh3d, _>(&mut mesh)) - .ok_or_else(|| { - PyTypeError::new_err( - "Unsupported scalar type for MixedTriQuadMesh3d. Only f32 and f64 are supported.", - ) - }) + utils::transmute_take_into::<_, MixedTriQuadMesh3d, _>(&mut mesh) + .or_else(|| utils::transmute_take_into::<_, MixedTriQuadMesh3d, _>(&mut mesh)) + .ok_or_else(utils::pyerr_unsupported_scalar) } } @@ -246,10 +239,10 @@ impl PyMixedTriQuadMesh3d { pub fn vertices<'py>(this: Bound<'py, Self>) -> PyResult> { match &this.borrow().inner { PyMixedTriQuadMesh3dData::F32(mesh) => { - view_vec_generic(mesh.vertices(), this.into_any()) + utils::view_vec_generic(mesh.vertices(), this.into_any()) } PyMixedTriQuadMesh3dData::F64(mesh) => { - view_vec_generic(mesh.vertices(), this.into_any()) + utils::view_vec_generic(mesh.vertices(), this.into_any()) } } } @@ -370,6 +363,7 @@ impl PyMeshAttribute { /// View of the attribute data as a numpy array #[getter] pub fn data<'py>(this: Bound<'py, Self>) -> PyResult> { + use utils::{view_scalar_generic, view_vec_generic}; match &this.borrow().inner { PyMeshAttributeData::F32(attr) => match &attr.data { OwnedAttributeData::ScalarU64(data) => view_scalar_generic(data, this.into_any()), @@ -426,16 +420,18 @@ impl PyMeshWithData { } = mesh_with_data; // Convert the inner mesh - let mut mesh_with_data = - if let Some(mesh) = transmute_same_take::>(&mut mesh) { - PyTriMesh3d::try_from_generic(mesh) - .and_then(|tri_mesh| Self::try_from_pymesh(py, tri_mesh)) - } else if let Some(mesh) = transmute_same_take::>(&mut mesh) { - PyMixedTriQuadMesh3d::try_from_generic(mesh) - .and_then(|quad_mesh| Self::try_from_pymesh(py, quad_mesh)) - } else { - Err(pyerr_only_tri_and_tri_quad_mesh()) - }?; + let mut mesh_with_data = if let Some(mesh) = + utils::transmute_same_take::>(&mut mesh) + { + PyTriMesh3d::try_from_generic(mesh) + .and_then(|tri_mesh| Self::try_from_pymesh(py, tri_mesh)) + } else if let Some(mesh) = utils::transmute_same_take::>(&mut mesh) + { + PyMixedTriQuadMesh3d::try_from_generic(mesh) + .and_then(|quad_mesh| Self::try_from_pymesh(py, quad_mesh)) + } else { + Err(utils::pyerr_only_tri_and_tri_quad_mesh()) + }?; fn try_convert_attribute_vec<'a, In: Real + Element, Out: Real + Element>( py: Python<'_>, @@ -445,7 +441,7 @@ impl PyMeshWithData { where PyMeshAttribute: From>, { - transmute_same_take::>, Vec>>( + utils::transmute_same_take::>, Vec>>( attributes, ) .map(|a| { @@ -484,7 +480,7 @@ impl PyMeshWithData { &mut mesh_with_data.cell_attributes, ); } else 
{ - return Err(pyerr_unsupported_scalar()); + return Err(utils::pyerr_unsupported_scalar()); } Ok(mesh_with_data) diff --git a/pysplashsurf/src/pipeline.rs b/pysplashsurf/src/pipeline.rs index 1e1b107..ebaf402 100644 --- a/pysplashsurf/src/pipeline.rs +++ b/pysplashsurf/src/pipeline.rs @@ -153,7 +153,7 @@ pub fn reconstruction_pipeline<'py>( } else { return Err(PyRuntimeError::new_err("reconstruction returned no mesh")); }; - let rec = PySurfaceReconstruction::try_from_generic(reconstruction.raw_reconstruction)?; + let rec = PySurfaceReconstruction::try_from_generic(py, reconstruction.raw_reconstruction)?; Ok((mesh_with_data, rec)) } diff --git a/pysplashsurf/src/reconstruction.rs b/pysplashsurf/src/reconstruction.rs index f08c94a..da964d3 100644 --- a/pysplashsurf/src/reconstruction.rs +++ b/pysplashsurf/src/reconstruction.rs @@ -1,103 +1,99 @@ use crate::mesh::PyTriMesh3d; +use crate::neighborhood_search::PyNeighborhoodLists; use crate::uniform_grid::PyUniformGrid; use crate::utils; use anyhow::anyhow; +use ndarray::ArrayView1; use numpy as np; use numpy::prelude::*; -use numpy::{Element, PyArray2, PyUntypedArray}; -use pyo3::exceptions::PyTypeError; +use numpy::{Element, PyArray1, PyArray2, PyUntypedArray}; use pyo3::{Bound, prelude::*}; use pyo3_stub_gen::derive::*; use splashsurf_lib::{ Aabb3d, GridDecompositionParameters, Real, SpatialDecomposition, SurfaceReconstruction, nalgebra::Vector3, }; -use utils::{IndexT, enum_wrapper_impl_from}; +use utils::{IndexT, PyFloatVecWrapper}; -enum PySurfaceReconstructionData { - F32(SurfaceReconstruction), - F64(SurfaceReconstruction), -} - -/// Struct containing results of the surface reconstruction including the mesh, grid parameters and optional particle data #[gen_stub_pyclass] #[pyclass] #[pyo3(name = "SurfaceReconstruction")] pub struct PySurfaceReconstruction { - inner: PySurfaceReconstructionData, + grid: Py, + particle_densities: Option, + particle_inside_aabb: Option>, + particle_neighbors: Option>, + mesh: Py, } -enum_wrapper_impl_from!(PySurfaceReconstruction, SurfaceReconstruction => PySurfaceReconstructionData::F32); -enum_wrapper_impl_from!(PySurfaceReconstruction, SurfaceReconstruction => PySurfaceReconstructionData::F64); - impl PySurfaceReconstruction { - pub fn try_from_generic( - mut reconstruction: SurfaceReconstruction, + pub fn try_from_generic<'py, R: Real + Element>( + py: Python<'py>, + reconstruction: SurfaceReconstruction, ) -> PyResult { - utils::transmute_take_into::<_, SurfaceReconstruction, _>(&mut reconstruction) - .or_else(|| { - utils::transmute_take_into::<_, SurfaceReconstruction, _>(&mut reconstruction) - }) - .ok_or_else(|| PyTypeError::new_err("unsupported type of reconstruction, only i64 for Index and f32 and f64 for Real type are supported")) + Ok(Self { + grid: Py::new(py, PyUniformGrid::try_from_generic(reconstruction.grid)?)?, + particle_densities: reconstruction + .particle_densities + .map(PyFloatVecWrapper::try_from_generic) + .transpose()?, + particle_inside_aabb: reconstruction.particle_inside_aabb, + particle_neighbors: reconstruction + .particle_neighbors + .map(|n| Py::new(py, PyNeighborhoodLists::from(n))) + .transpose()?, + mesh: Py::new(py, PyTriMesh3d::try_from_generic(reconstruction.mesh)?)?, + }) } } #[gen_stub_pymethods] #[pymethods] impl PySurfaceReconstruction { - /// Returns a copy of the surface mesh of the reconstruction - fn copy_mesh(&self) -> PyResult { - match &self.inner { - PySurfaceReconstructionData::F32(reconstruction) => { - 
Ok(PyTriMesh3d::from(reconstruction.mesh().clone())) - } - PySurfaceReconstructionData::F64(reconstruction) => { - Ok(PyTriMesh3d::from(reconstruction.mesh().clone())) - } - } - } - - /// Returns a copy of the uniform grid parameters used for the reconstruction - fn copy_grid(&self) -> PyUniformGrid { - match &self.inner { - PySurfaceReconstructionData::F32(reconstruction) => { - PyUniformGrid::from(reconstruction.grid().clone()) - } - PySurfaceReconstructionData::F64(reconstruction) => { - PyUniformGrid::from(reconstruction.grid().clone()) - } - } + /// The marching cubes grid parameters used for the surface reconstruction + #[getter] + fn grid<'py>(this: Bound<'py, Self>) -> Py { + this.borrow().grid.clone_ref(this.py()) } - /// The particle densities computed during the reconstruction if available + /// The global array of particle densities (`None` if they were only computed locally) #[getter] fn particle_densities<'py>( this: Bound<'py, Self>, ) -> PyResult>> { - match &this.borrow().inner { - PySurfaceReconstructionData::F32(reconstruction) => reconstruction - .particle_densities() - .map(|densities| utils::view_scalar_generic(densities.as_slice(), this.into_any())) - .transpose(), - PySurfaceReconstructionData::F64(reconstruction) => reconstruction - .particle_densities() - .map(|densities| utils::view_scalar_generic(densities.as_slice(), this.into_any())) - .transpose(), - } + this.borrow() + .particle_densities + .as_ref() + .map(|p| p.view(this.into_any())) + .transpose() } - /// Returns a copy of the per-particle neighborhood lists computed during the reconstruction if available - /// - /// The neighborhood lists are only available if the flag for global neighborhood list was set in the reconstruction parameters. - fn copy_particle_neighbors(&self) -> Option>> { - match &self.inner { - PySurfaceReconstructionData::F32(reconstruction) => reconstruction - .particle_neighbors() - .map(|neighbors| neighbors.clone()), - PySurfaceReconstructionData::F64(reconstruction) => reconstruction - .particle_neighbors() - .map(|neighbors| neighbors.clone()), - } + /// A boolean array indicating whether each particle was inside the AABB used for the reconstruction (`None` if no AABB was set) + #[getter] + fn particle_inside_aabb<'py>(this: Bound<'py, Self>) -> Option> { + this.borrow().particle_inside_aabb.as_ref().map(|p| { + let array: ArrayView1 = ArrayView1::from(p.as_slice()); + let pyarray = unsafe { PyArray1::borrow_from_array(&array, this.into_any()) }; + pyarray + .into_any() + .downcast_into::() + .expect("downcast should not fail") + }) + } + + /// The global neighborhood lists per particle (`None` if they were only computed locally) + #[getter] + fn particle_neighbors<'py>(this: Bound<'py, Self>) -> Option> { + this.borrow() + .particle_neighbors + .as_ref() + .map(|p| p.clone_ref(this.py())) + } + + /// The reconstructed triangle mesh + #[getter] + fn mesh<'py>(this: Bound<'py, Self>) -> Py { + this.borrow().mesh.clone_ref(this.py()) } } @@ -166,14 +162,14 @@ pub fn reconstruct_surface<'py>( .expect("failed to convert reconstruction parameters to f32"), ) .map_err(|e| anyhow!(e))?; - PySurfaceReconstruction::try_from_generic(reconstruction) + PySurfaceReconstruction::try_from_generic(py, reconstruction) } else if element_type.is_equiv_to(&np::dtype::(py)) { let particles = particles.downcast::>()?.try_readonly()?; let particle_positions: &[Vector3] = bytemuck::cast_slice(particles.as_slice()?); let reconstruction = splashsurf_lib::reconstruct_surface::(particle_positions, ¶meters) 
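To show how the reworked, property-based `SurfaceReconstruction` wrapper above is meant to be used from Python (replacing the removed `copy_mesh()`/`copy_grid()`/`copy_particle_neighbors()` methods), a small sketch; the keyword arguments mirror the README example elsewhere in this series and the numeric values are placeholders:

```python
import numpy as np
import pysplashsurf

particles = np.random.rand(10_000, 3)

mesh_with_data, reconstruction = pysplashsurf.reconstruction_pipeline(
    particles,
    particle_radius=0.025,       # placeholder values
    rest_density=1000.0,
    smoothing_length=2.0,
    cube_size=0.5,
    iso_surface_threshold=0.6,
)

# Results are now exposed as (copy-free) properties instead of copy_* methods:
print(reconstruction.mesh.vertices.shape)   # raw reconstructed TriMesh3d
print(reconstruction.grid)                  # marching cubes UniformGrid parameters
if reconstruction.particle_densities is not None:
    print(reconstruction.particle_densities.mean())
if reconstruction.particle_neighbors is not None:
    print(len(reconstruction.particle_neighbors))
```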
.map_err(|e| anyhow!(e))?; - PySurfaceReconstruction::try_from_generic(reconstruction) + PySurfaceReconstruction::try_from_generic(py, reconstruction) } else { Err(utils::pyerr_unsupported_scalar()) } diff --git a/pysplashsurf/src/uniform_grid.rs b/pysplashsurf/src/uniform_grid.rs index 574c156..fb8bd78 100644 --- a/pysplashsurf/src/uniform_grid.rs +++ b/pysplashsurf/src/uniform_grid.rs @@ -1,7 +1,10 @@ -use crate::utils::*; +use pyo3::exceptions::PyTypeError; use pyo3::prelude::*; use pyo3_stub_gen::derive::*; -use splashsurf_lib::UniformGrid; +use splashsurf_lib::{Real, UniformGrid}; + +use crate::utils; +use crate::utils::{IndexT, enum_wrapper_impl_from}; enum PyUniformGridData { F32(UniformGrid), @@ -20,6 +23,14 @@ enum_wrapper_impl_from!(PyUniformGrid, UniformGrid => PyUniformGrid enum_wrapper_impl_from!(PyUniformGrid, UniformGrid => PyUniformGridData::F64); impl PyUniformGrid { + pub(crate) fn try_from_generic(mut grid: UniformGrid) -> PyResult { + utils::transmute_replace_into::<_, UniformGrid, _>(&mut grid, UniformGrid::new_zero()) + .or_else(|| { + utils::transmute_replace_into::<_, UniformGrid, _>(&mut grid, UniformGrid::new_zero()) + }) + .ok_or_else(|| PyTypeError::new_err("unsupported type of grid, only i64 for Index and f32 and f64 for Real type are supported")) + } + pub(crate) fn as_f32(&self) -> Option<&UniformGrid> { match &self.inner { PyUniformGridData::F32(grid) => Some(grid), diff --git a/pysplashsurf/src/utils.rs b/pysplashsurf/src/utils.rs index 72428ce..095c359 100644 --- a/pysplashsurf/src/utils.rs +++ b/pysplashsurf/src/utils.rs @@ -54,6 +54,32 @@ macro_rules! enum_impl_from { pub(crate) use enum_impl_from; pub(crate) use enum_wrapper_impl_from; +pub enum PyFloatVecWrapper { + F32(Vec), + F64(Vec), +} + +enum_impl_from!(PyFloatVecWrapper, Vec => PyFloatVecWrapper::F32); +enum_impl_from!(PyFloatVecWrapper, Vec => PyFloatVecWrapper::F64); + +impl PyFloatVecWrapper { + pub fn try_from_generic(mut vec: Vec) -> PyResult { + transmute_same_take::, Vec>(&mut vec) + .map(PyFloatVecWrapper::F32) + .or_else(|| { + transmute_same_take::, Vec>(&mut vec).map(PyFloatVecWrapper::F64) + }) + .ok_or_else(pyerr_unsupported_scalar) + } + + pub fn view<'py>(&self, container: Bound<'py, PyAny>) -> PyResult> { + match self { + PyFloatVecWrapper::F32(v) => view_scalar_generic(v, container), + PyFloatVecWrapper::F64(v) => view_scalar_generic(v, container), + } + } +} + /// Transmutes a mutable reference from a generic type to a concrete type if they are identical, otherwise returns None pub(crate) fn transmute_same_mut( value: &mut GenericSrc, @@ -84,6 +110,19 @@ pub(crate) fn transmute_take_into< .map(|value_ref| std::mem::take(value_ref).into()) } +/// Transmutes from a generic type to a concrete type if they are identical, replaces the value and converts it into the target type +pub(crate) fn transmute_replace_into< + GenericSrc: 'static, + ConcreteSrc: Into + 'static, + Target, +>( + value: &mut GenericSrc, + replacement: ConcreteSrc, +) -> Option { + transmute_same_mut::(value) + .map(|value_ref| std::mem::replace(value_ref, replacement).into()) +} + pub(crate) fn view_generic<'py, R: Element>( values: &[R], shape: &[usize], From 88c114d13cf9d9a84d5a343af37872108e648466 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Mon, 1 Sep 2025 14:51:19 +0200 Subject: [PATCH 37/63] Py: Update pyo3 crates, bump minimum Python version to 3.10 --- Cargo.lock | 71 +++++++++++++++++++------------------ pysplashsurf/Cargo.toml | 12 +++---- pysplashsurf/README.md | 6 +++- 
pysplashsurf/pyproject.toml | 5 +-- 4 files changed, 49 insertions(+), 45 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 56a91ce..93d1efd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -176,9 +176,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "camino" -version = "1.1.11" +version = "1.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d07aa9a93b00c76f71bc35d598bed923f6d4f3a9ca5c24b7737ae1a292841c0" +checksum = "dd0b03af37dad7a14518b7691d81acb0f8222604ad3d1b02f6b4bed5188c0cd5" dependencies = [ "serde", ] @@ -213,10 +213,11 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.34" +version = "1.2.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42bc4aea80032b7bf409b0bc7ccad88853858911b7713a8062fdc0623867bedc" +checksum = "590f9024a68a8c40351881787f1934dc11afd69090f5edb6831464694d836ea3" dependencies = [ + "find-msvc-tools", "shlex", ] @@ -471,6 +472,12 @@ dependencies = [ "log", ] +[[package]] +name = "find-msvc-tools" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e178e4fba8a2726903f6ba98a6d221e76f9c12c650d5dc0e6afdc50677b49650" + [[package]] name = "flate2" version = "1.1.2" @@ -1019,9 +1026,9 @@ dependencies = [ [[package]] name = "numpy" -version = "0.25.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29f1dee9aa8d3f6f8e8b9af3803006101bb3653866ef056d530d53ae68587191" +checksum = "9b2dba356160b54f5371b550575b78130a54718b4c6e46b3f33a6da74a27e78b" dependencies = [ "libc", "ndarray", @@ -1199,9 +1206,9 @@ dependencies = [ [[package]] name = "pyo3" -version = "0.25.1" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8970a78afe0628a3e3430376fc5fd76b6b45c4d43360ffd6cdd40bdde72b682a" +checksum = "7ba0117f4212101ee6544044dae45abe1083d30ce7b29c4b5cbdfa2354e07383" dependencies = [ "anyhow", "indoc", @@ -1217,19 +1224,18 @@ dependencies = [ [[package]] name = "pyo3-build-config" -version = "0.25.1" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "458eb0c55e7ece017adeba38f2248ff3ac615e53660d7c71a238d7d2a01c7598" +checksum = "4fc6ddaf24947d12a9aa31ac65431fb1b851b8f4365426e182901eabfb87df5f" dependencies = [ - "once_cell", "target-lexicon", ] [[package]] name = "pyo3-ffi" -version = "0.25.1" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7114fe5457c61b276ab77c5055f206295b812608083644a5c5b2640c3102565c" +checksum = "025474d3928738efb38ac36d4744a74a400c901c7596199e20e45d98eb194105" dependencies = [ "libc", "pyo3-build-config", @@ -1237,9 +1243,9 @@ dependencies = [ [[package]] name = "pyo3-macros" -version = "0.25.1" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8725c0a622b374d6cb051d11a0983786448f7785336139c3c94f5aa6bef7e50" +checksum = "2e64eb489f22fe1c95911b77c44cc41e7c19f3082fc81cce90f657cdc42ffded" dependencies = [ "proc-macro2", "pyo3-macros-backend", @@ -1249,9 +1255,9 @@ dependencies = [ [[package]] name = "pyo3-macros-backend" -version = "0.25.1" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4109984c22491085343c05b0dbc54ddc405c3cf7b4374fc533f5c3313a572ccc" +checksum = "100246c0ecf400b475341b8455a9213344569af29a3c841d29270e53102e0fcf" dependencies = [ 
"heck", "proc-macro2", @@ -1262,9 +1268,9 @@ dependencies = [ [[package]] name = "pyo3-stub-gen" -version = "0.12.2" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "650d9624b551894664cc95867ccfe4fd814a5a225c8fe3a75194a3ae51caae1d" +checksum = "b93cd67bcfbf726f81cd5d5f2cc85a69e089b4eaa11bb41a6514ad1783fb9355" dependencies = [ "anyhow", "chrono", @@ -1284,9 +1290,9 @@ dependencies = [ [[package]] name = "pyo3-stub-gen-derive" -version = "0.12.2" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73947c71903f0e3e31a302a350567594063c75ac155031e40519721429898649" +checksum = "3f2933be64abedb32a666273e843a1c949e957c18071cf52d543daf4adb2b4e9" dependencies = [ "heck", "proc-macro2", @@ -1481,9 +1487,9 @@ checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" [[package]] name = "safe_arch" -version = "0.9.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fb5032219cc30e5bb98749b19a18ceb2cf15e24ba8d517a7e64dff4f1f1eca5" +checksum = "96b02de82ddbe1b636e6170c21be622223aea188ef2e139be0a5b219ec215323" dependencies = [ "bytemuck", ] @@ -1879,11 +1885,11 @@ dependencies = [ [[package]] name = "wasi" -version = "0.14.2+wasi-0.2.4" +version = "0.14.3+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +checksum = "6a51ae83037bdd272a9e28ce236db8c07016dd0d50c27038b3f407533c030c95" dependencies = [ - "wit-bindgen-rt", + "wit-bindgen", ] [[package]] @@ -1966,9 +1972,9 @@ dependencies = [ [[package]] name = "wide" -version = "0.7.34" +version = "0.7.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a0ab08c041f0cbb00a12fd091b2877dcec2311f90f87a88391d4b0961ffb4fe" +checksum = "0ce5da8ecb62bcd8ec8b7ea19f69a51275e91299be594ea5cc6ef7819e16cd03" dependencies = [ "bytemuck", "safe_arch", @@ -2190,13 +2196,10 @@ dependencies = [ ] [[package]] -name = "wit-bindgen-rt" -version = "0.39.0" +name = "wit-bindgen" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" -dependencies = [ - "bitflags", -] +checksum = "052283831dbae3d879dc7f51f3d92703a316ca49f91540417d38591826127814" [[package]] name = "xz2" diff --git a/pysplashsurf/Cargo.toml b/pysplashsurf/Cargo.toml index 5dc8b28..59c847b 100644 --- a/pysplashsurf/Cargo.toml +++ b/pysplashsurf/Cargo.toml @@ -8,18 +8,18 @@ license.workspace = true [dependencies] splashsurf = { path = "../splashsurf" } splashsurf_lib = { path = "../splashsurf_lib" } -pyo3 = { version = "0.25.0", features = ["anyhow"] } -numpy = "0.25.0" +pyo3 = { version = "0.26", features = ["anyhow"] } +numpy = "0.26" ndarray = "0.16.1" -bytemuck = { version = "1.23.0", features = ["extern_crate_alloc"] } +bytemuck = { version = "1.23", features = ["extern_crate_alloc"] } anyhow = "1.0.98" -pyo3-stub-gen = "0.12.2" +pyo3-stub-gen = "0.13" [build-dependencies] -pyo3-build-config = "0.25.1" +pyo3-build-config = { version = "0.26" , features = ["resolve-config"] } [features] -extension-module = ["pyo3/extension-module", "pyo3/abi3-py37"] +extension-module = ["pyo3/extension-module", "pyo3/abi3-py310"] default = ["extension-module"] [lib] diff --git a/pysplashsurf/README.md b/pysplashsurf/README.md index e2e6215..6fe29ea 100644 --- a/pysplashsurf/README.md +++ b/pysplashsurf/README.md @@ -2,11 +2,15 @@ 
![splashsurf logo](https://raw.githubusercontent.com/InteractiveComputerGraphics/splashsurf/main/logos/logo_small.svg "splashsurf") +![PyPI - Version](https://img.shields.io/pypi/v/pysplashsurf) +![PyPI - Python Version](https://img.shields.io/pypi/pyversions/pysplashsurf) + + pySplashsurf provides Python bindings for `splashsurf`, an open source surface reconstruction library for particle data from SPH simulations. Detailed information on the surface reconstruction and library itself and its API can be found on the [project website (splashsurf.physics-simulation.org)](https://splashsurf.physics-simulation.org/) or the [main repository](https://github.com/InteractiveComputerGraphics/splashsurf). ## Installation -Requires Python version 3.7+ +Requires Python version 3.10+ ``` pip install pysplashsurf ``` diff --git a/pysplashsurf/pyproject.toml b/pysplashsurf/pyproject.toml index 0a206ba..63ee144 100644 --- a/pysplashsurf/pyproject.toml +++ b/pysplashsurf/pyproject.toml @@ -13,15 +13,12 @@ authors = [ {name = "Interactive Computer Graphics"}, {name = "Fabian Löschner"}, ] -requires-python = ">=3.7" +requires-python = ">=3.10" classifiers = [ "Programming Language :: Rust", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", From 8cf7fee3adca703082f828440594ff416553cd5b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Mon, 1 Sep 2025 15:13:26 +0200 Subject: [PATCH 38/63] Py: Update AABB --- pysplashsurf/pysplashsurf/pysplashsurf.pyi | 7 +++++++ pysplashsurf/src/aabb.rs | 9 ++++++++- splashsurf_lib/src/aabb.rs | 4 ++-- 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index 80c2576..3d07b28 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -8,6 +8,9 @@ import typing from enum import Enum class Aabb3d: + r""" + Three-dimensional axis-aligned bounding box defined by its minimum and maximum corners + """ @property def min(self) -> numpy.typing.NDArray[numpy.float64]: r""" @@ -28,6 +31,10 @@ class Aabb3d: r""" Constructs the smallest AABB fitting around all the given points """ + def contains_point(self, point:typing.Sequence[builtins.float]) -> builtins.bool: + r""" + Checks if the given point is inside the AABB, the AABB is considered to be half-open to its max coordinate + """ class MeshAttribute: @property diff --git a/pysplashsurf/src/aabb.rs b/pysplashsurf/src/aabb.rs index 0ad6070..e9b2fc1 100644 --- a/pysplashsurf/src/aabb.rs +++ b/pysplashsurf/src/aabb.rs @@ -7,6 +7,7 @@ use splashsurf_lib::{Aabb3d, Real, nalgebra::Vector3}; use crate::utils::*; +/// Three-dimensional axis-aligned bounding box defined by its minimum and maximum corners #[gen_stub_pyclass] #[pyclass] #[pyo3(name = "Aabb3d")] @@ -25,6 +26,7 @@ impl From> for PyAabb3d { } impl PyAabb3d { + /// Convert to an [`splashsurf_lib::Aabb3d`] with the given scalar type pub(crate) fn inner(&self) -> Aabb3d { Aabb3d::new( self.min.map(|x| R::from_f64(x).unwrap()), @@ -47,7 +49,6 @@ impl PyAabb3d { /// Constructs an AABB with the given min and max coordinates #[staticmethod] pub fn 
from_min_max<'py>(min: [f64; 3], max: [f64; 3]) -> Self { - // TODO: Check with numpy arrays as input Self { min: Vector3::from(min), max: Vector3::from(max), @@ -79,4 +80,10 @@ impl PyAabb3d { pub fn max<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray1> { PyArray::from_slice(py, self.max.as_slice()) } + + /// Checks if the given point is inside the AABB, the AABB is considered to be half-open to its max coordinate + pub fn contains_point(&self, point: [f64; 3]) -> bool { + let point = &Vector3::from(point); + point >= &self.min && point < &self.max + } } diff --git a/splashsurf_lib/src/aabb.rs b/splashsurf_lib/src/aabb.rs index 8545b95..8313c80 100644 --- a/splashsurf_lib/src/aabb.rs +++ b/splashsurf_lib/src/aabb.rs @@ -212,12 +212,12 @@ where self.min + (self.extents() / (R::one() + R::one())) } - /// Checks if the given AABB is inside of the AABB, the AABB is considered to be half-open to its max coordinate + /// Checks if the given AABB is inside the AABB, the AABB is considered to be half-open to its max coordinate pub fn contains_aabb(&self, other: &Self) -> bool { self.contains_point(&other.min) || self.contains_point(&other.max) } - /// Checks if the given point is inside of the AABB, the AABB is considered to be half-open to its max coordinate + /// Checks if the given point is inside the AABB, the AABB is considered to be half-open to its max coordinate pub fn contains_point(&self, point: &SVector) -> bool { point >= &self.min && point < &self.max } From 00cb59342d0310a2754986c25128edbd056d6a0e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Mon, 1 Sep 2025 16:07:19 +0200 Subject: [PATCH 39/63] Py: Update write_to_file and attribute getters (return dicts) --- pysplashsurf/pysplashsurf/__init__.py | 168 +++++++++++++-------- pysplashsurf/pysplashsurf/pysplashsurf.pyi | 12 +- pysplashsurf/src/mesh.rs | 43 ++++-- 3 files changed, 146 insertions(+), 77 deletions(-) diff --git a/pysplashsurf/pysplashsurf/__init__.py b/pysplashsurf/pysplashsurf/__init__.py index 0101d18..ed1d2c9 100644 --- a/pysplashsurf/pysplashsurf/__init__.py +++ b/pysplashsurf/pysplashsurf/__init__.py @@ -3,59 +3,67 @@ import numpy as np import sys + def run_pysplashsurf(): run_splashsurf(sys.argv) + def push_point_attribute(self, name: str, data: np.ndarray, real_type): """Add a point attribute to the mesh""" if data.ndim == 2: return self.push_point_attribute_vector_real(name, data) - + elif data.ndim == 1: if data.dtype == np.uint64: return self.push_point_attribute_scalar_u64(name, data) - + elif data.dtype == real_type: return self.push_point_attribute_scalar_real(name, data) - + else: - raise ValueError("Not a valid data type, try explicitly specifying uint64 or float64") - + raise ValueError( + "Not a valid data type, try explicitly specifying uint64 or float64" + ) + else: raise ValueError("Not a valid data array") - + + def push_cell_attribute(self, name: str, data: np.ndarray, real_type): """Add a cell attribute to the mesh""" if data.ndim == 2: return self.push_cell_attribute_vector_real(name, data) - + elif data.ndim == 1: if data.dtype == np.uint64: return self.push_cell_attribute_scalar_u64(name, data) - + elif data.dtype == real_type: return self.push_cell_attribute_scalar_real(name, data) - + else: - raise ValueError("Not a valid data type, try explicitly specifying uint64 or float64") - + raise ValueError( + "Not a valid data type, try explicitly specifying uint64 or float64" + ) + else: raise ValueError("Not a valid data array") -def write_to_file(mesh_with_data, 
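A quick sketch of the `Aabb3d.contains_point` binding added above (Python; the corner values are arbitrary):

```python
import pysplashsurf

aabb = pysplashsurf.Aabb3d.from_min_max([0.0, 0.0, 0.0], [1.0, 1.0, 1.0])

assert aabb.contains_point([0.5, 0.5, 0.5])   # strictly inside
assert aabb.contains_point([0.0, 0.0, 0.0])   # min corner is included
# The AABB is half-open towards its max corner, so the max itself is outside:
assert not aabb.contains_point([1.0, 1.0, 1.0])
```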
filename, file_format=None, consume_object=False): + +def write_to_file(mesh, filename, file_format=None, consume_object=False): """Write the mesh and its attributes to a file using meshio - + Parameters ---------- - mesh: TriMeshWithDataF64 | TriMeshWithDataF32 | MixedTriQuadMeshWithDataF64 | MixedTriQuadMeshWithDataF32 - Mesh with data object to write - + mesh: TriMesh3d | MixedTriQuadMesh3d | MeshWithData + Mesh object to write + filename: Any File path for the output file - + file_format: str | None File format for the output file, generally also derived from filename - + consume_object: bool Flag for specifying whether the MeshWithData object should be consumed for a faster execution. Only consumes the mesh field. @@ -63,37 +71,56 @@ def write_to_file(mesh_with_data, filename, file_format=None, consume_object=Fal try: import meshio except ImportError: - raise ImportError("meshio is not installed, please install it with with `pip install meshio` to use this function") - - mesh = mesh_with_data.take_mesh() if consume_object else mesh_with_data.mesh + raise ImportError( + "meshio is not installed, please install it with with `pip install meshio` to use this function" + ) - point_data = mesh_with_data.get_point_attributes() - cell_data = mesh_with_data.get_cell_attributes() + point_data = ( + mesh.point_attributes if type(mesh) is pysplashsurf.MeshWithData else {} + ) + cell_data = mesh.cell_attributes if type(mesh) is pysplashsurf.MeshWithData else {} + mesh = mesh.mesh if type(mesh) is pysplashsurf.MeshWithData else mesh - if type(mesh) is pysplashsurf.TriMesh3dF64 or type(mesh) is pysplashsurf.TriMesh3dF32: - verts, tris = mesh.take_vertices_and_triangles() if consume_object else (mesh.vertices, mesh.triangles) - meshio.write_points_cells(filename, verts, [("triangle", tris)], point_data=point_data, cell_data=cell_data, file_format=file_format) - + if type(mesh) is pysplashsurf.TriMesh3d: + meshio.write_points_cells( + filename, + mesh.vertices, + [("triangle", mesh.triangles)], + point_data=point_data, + cell_data=cell_data, + file_format=file_format, + ) + elif type(mesh) is pysplashsurf.MixedTriQuadMesh3d: + cells = [ + ("triangle", mesh.get_triangles()), + ("quad", mesh.get_quads()), + ] + meshio.write_points_cells( + filename, + mesh.vertices, + cells, + point_data=point_data, + cell_data=cell_data, + file_format=file_format, + ) else: - verts, cells = mesh.take_vertices_and_cells() if consume_object else (mesh.vertices, mesh.cells) - cells = [("triangle", list(filter(lambda x: len(x) == 3, cells))), ("quad", list(filter(lambda x: len(x) == 4, cells)))] - meshio.write_points_cells(filename, verts, cells, point_data=point_data, cell_data=cell_data, file_format=file_format) + raise TypeError("unsupported mesh type") def create_mesh_with_data_object(mesh): """Create the corresponding mesh with data object to a mesh object - + Parameters ---------- mesh: TriMesh3dF64 | TriMesh3dF32 | MixedTriQuadMesh3dF64 | MixedTriQuadMesh3dF32 Mesh object to convert - + Returns ------- TriMeshWithDataF64 | TriMeshWithDataF32 | MixedTriQuadMeshWithDataF64 | MixedTriQuadMeshWithDataF32 Mesh with data object """ - + if type(mesh) is TriMesh3dF64: return TriMeshWithDataF64(mesh) elif type(mesh) is TriMesh3dF32: @@ -105,77 +132,98 @@ def create_mesh_with_data_object(mesh): else: raise ValueError("Invalid mesh type") -def create_sph_interpolator_object(particle_positions, particle_densities, particle_rest_mass, compact_support_radius): + +def create_sph_interpolator_object( + particle_positions, 
particle_densities, particle_rest_mass, compact_support_radius +): """Create the corresponding SPH interpolator object to a set of particle data - + Parameters ---------- particle_positions: np.ndarray 2-dimensional array containing all particle positions [[ax, ay, az], [bx, by, bz], ...] - + particle_densities: np.ndarray 1-dimensional array containing all particle densities - + particle_rest_mass: float Rest mass of the particles - + compact_support_radius: float Compact support radius of the SPH kernel - + Returns ------- SphInterpolatorF32 | SphInterpolatorF64 SphInterpolator object """ - - if particle_positions.dtype == 'float32': - return SphInterpolatorF32(particle_positions, particle_densities, particle_rest_mass, compact_support_radius) - elif particle_positions.dtype == 'float64': - return SphInterpolatorF64(particle_positions, particle_densities, particle_rest_mass, compact_support_radius) + + if particle_positions.dtype == "float32": + return SphInterpolatorF32( + particle_positions, + particle_densities, + particle_rest_mass, + compact_support_radius, + ) + elif particle_positions.dtype == "float64": + return SphInterpolatorF64( + particle_positions, + particle_densities, + particle_rest_mass, + compact_support_radius, + ) else: - raise ValueError("Invalid data type (only float32 and float64 are supported, consider explicitly specifying the dtype for particle_positions)") + raise ValueError( + "Invalid data type (only float32 and float64 are supported, consider explicitly specifying the dtype for particle_positions)" + ) + def create_aabb_object(aabb_min, aabb_max): """Create the corresponding AABB object to a set of min and max values - + Parameters ---------- aabb_min: np.ndarray Smallest corner of the axis-aligned bounding box - + aabb_max: np.ndarray Largest corner of the axis-aligned bounding box - + Returns ------- Aabb3dF32 | Aabb3dF64 Aabb object """ - - if aabb_min.dtype == 'float32': - return Aabb3dF32(aabb_min, aabb_max) - elif aabb_min.dtype == 'float64': + + if aabb_min.dtype == "float32": + return Aabb3dF32(aabb_min, aabb_max) + elif aabb_min.dtype == "float64": return Aabb3dF64(aabb_min, aabb_max) else: - raise ValueError("Invalid data type (only float32 and float64 are supported, consider explicitly specifying the dtype for aabb_min and aabb_max)") + raise ValueError( + "Invalid data type (only float32 and float64 are supported, consider explicitly specifying the dtype for aabb_min and aabb_max)" + ) + def create_aabb_object_from_points(points): """Create the corresponding AABB object to a set of points - + Parameters ---------- points: np.ndarray 2-dimensional array containing all point positions [[ax, ay, az], [bx, by, bz], ...] 
- + Returns ------- Aabb3dF32 | Aabb3dF64 Aabb object """ - - if points.dtype == 'float32': - return Aabb3dF32.from_points(points) - elif points.dtype == 'float64': + + if points.dtype == "float32": + return Aabb3dF32.from_points(points) + elif points.dtype == "float64": return Aabb3dF64.from_points(points) else: - raise ValueError("Invalid data type (only float32 and float64 are supported, consider explicitly specifying the dtype for points)") + raise ValueError( + "Invalid data type (only float32 and float64 are supported, consider explicitly specifying the dtype for points)" + ) diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index 3d07b28..32df11e 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -62,12 +62,18 @@ class MeshWithData: @property def mesh_type(self) -> MeshType: r""" - Returns the type of the underlying mesh + Type of the underlying mesh """ @property - def point_attributes(self) -> typing.List[MeshAttribute]: ... + def point_attributes(self) -> dict[str, numpy.typing.NDArray]: + r""" + The attributes attached points (vertices) of the mesh + """ @property - def cell_attributes(self) -> typing.List[MeshAttribute]: ... + def cell_attributes(self) -> dict[str, numpy.typing.NDArray]: + r""" + The attributes attached to the cells (triangles or quads) of the mesh + """ @property def mesh(self) -> typing.Union[TriMesh3d, MixedTriQuadMesh3d]: r""" diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index 97cb555..64de97e 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -3,8 +3,9 @@ use ndarray::Array2; use numpy as np; use numpy::prelude::*; use numpy::{Element, PyArray, PyArray2, PyArrayDescr, PyUntypedArray}; +use pyo3::IntoPyObjectExt; use pyo3::prelude::*; -use pyo3::{IntoPyObjectExt, types::PyList}; +use pyo3::types::{IntoPyDict, PyDict}; use pyo3_stub_gen::derive::*; use splashsurf_lib::mesh::TriangleCell; use splashsurf_lib::{ @@ -499,7 +500,7 @@ impl PyMeshWithData { } } - /// Returns the type of the underlying mesh + /// Type of the underlying mesh #[getter] pub fn mesh_type(&self) -> MeshType { match &self.mesh { @@ -508,22 +509,36 @@ impl PyMeshWithData { } } + /// The attributes attached points (vertices) of the mesh #[getter] - #[gen_stub(override_return_type(type_repr="typing.List[MeshAttribute]", imports=()))] - pub fn point_attributes<'py>(&self, py: Python<'py>) -> PyResult> { - PyList::new( - py, - self.point_attributes.iter().map(|attr| attr.clone_ref(py)), - ) + #[gen_stub(override_return_type(type_repr="dict[str, numpy.typing.NDArray]", imports=()))] + pub fn point_attributes<'py>(&self, py: Python<'py>) -> PyResult> { + self.point_attributes + .iter() + .map(|attr| -> PyResult<_> { + let attr = attr.clone_ref(py).into_bound(py); + let name = attr.try_borrow()?.name(); + let data = PyMeshAttribute::data(attr)?; + Ok((name, data)) + }) + .collect::, _>>()? 
+ .into_py_dict(py) } + /// The attributes attached to the cells (triangles or quads) of the mesh #[getter] - #[gen_stub(override_return_type(type_repr="typing.List[MeshAttribute]", imports=()))] - pub fn cell_attributes<'py>(&self, py: Python<'py>) -> PyResult> { - PyList::new( - py, - self.cell_attributes.iter().map(|attr| attr.clone_ref(py)), - ) + #[gen_stub(override_return_type(type_repr="dict[str, numpy.typing.NDArray]", imports=()))] + pub fn cell_attributes<'py>(&self, py: Python<'py>) -> PyResult> { + self.cell_attributes + .iter() + .map(|attr| -> PyResult<_> { + let attr = attr.clone_ref(py).into_bound(py); + let name = attr.try_borrow()?.name(); + let data = PyMeshAttribute::data(attr)?; + Ok((name, data)) + }) + .collect::, _>>()? + .into_py_dict(py) } pub fn as_tri3d<'py, 'a>(&'a self, py: Python<'py>) -> Option> { From 1df6d6580b6cd9c7cc04d18189aeaa215abdbff2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Mon, 1 Sep 2025 16:35:37 +0200 Subject: [PATCH 40/63] Py: Add annotations import to stub file --- pysplashsurf/pysplashsurf/pysplashsurf.pyi | 3 +- pysplashsurf/src/bin/stub_gen.rs | 36 +++++++++++++++++++++- pysplashsurf/src/lib.rs | 1 - 3 files changed, 37 insertions(+), 3 deletions(-) diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index 32df11e..437c48f 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -1,6 +1,8 @@ # This file is automatically generated by pyo3_stub_gen # ruff: noqa: E501, F401 +from __future__ import annotations + import builtins import numpy import numpy.typing @@ -281,4 +283,3 @@ def reconstruction_pipeline(particles:numpy.typing.NDArray[typing.Any], *, attri """ def triangulate_density_map(values:numpy.typing.NDArray[typing.Any], grid:UniformGrid, *, iso_surface_threshold:builtins.float) -> TriMesh3d: ... - diff --git a/pysplashsurf/src/bin/stub_gen.rs b/pysplashsurf/src/bin/stub_gen.rs index befaaa8..0d1f945 100644 --- a/pysplashsurf/src/bin/stub_gen.rs +++ b/pysplashsurf/src/bin/stub_gen.rs @@ -1,10 +1,44 @@ // Run `cargo run --bin stub_gen` to generate a stub file for the extension + use pyo3_stub_gen::Result; +use std::fs; + +fn add_future_imports() -> Result<()> { + // Read the original file + let content = fs::read_to_string("pysplashsurf.pyi")?; + let lines: Vec<&str> = content.lines().collect(); + + // Find where to insert the import (after comments) + let mut insert_index = 0; + for (i, line) in lines.iter().enumerate() { + if !line.trim().starts_with('#') && !line.trim().is_empty() { + insert_index = i; + break; + } + } + + // Create new content with the import added + let mut new_lines = Vec::new(); + new_lines.extend_from_slice(&lines[..insert_index]); + new_lines.push("from __future__ import annotations"); + if insert_index < lines.len() && !lines[insert_index].is_empty() { + new_lines.push(""); + } + new_lines.extend_from_slice(&lines[insert_index..]); + + // Write the modified content back to the file + fs::write("pysplashsurf.pyi", new_lines.join("\n"))?; + + Ok(()) +} fn main() -> Result<()> { // `stub_info` is a function defined by `define_stub_info_gatherer!` macro. 
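With the dictionary-returning getters above, mesh attributes can be consumed directly as NumPy arrays from Python; a sketch (same placeholder `reconstruction_pipeline` arguments as in the README example, and the set of attribute names depends on the chosen post-processing flags):

```python
import numpy as np
import pysplashsurf

particles = np.random.rand(5_000, 3)
mesh_with_data, _ = pysplashsurf.reconstruction_pipeline(
    particles,
    particle_radius=0.025,       # placeholder values
    rest_density=1000.0,
    smoothing_length=2.0,
    cube_size=0.5,
    iso_surface_threshold=0.6,
    compute_normals=True,
)

# point_attributes / cell_attributes are now dict[str, np.ndarray]:
for name, values in mesh_with_data.point_attributes.items():
    print(f"point attribute {name!r}: shape={values.shape}, dtype={values.dtype}")
for name, values in mesh_with_data.cell_attributes.items():
    print(f"cell attribute {name!r}: shape={values.shape}")
```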
let stub = pysplashsurf::stub_info()?; stub.generate()?; - std::fs::rename("pysplashsurf.pyi", "pysplashsurf/pysplashsurf.pyi")?; + + add_future_imports()?; + + fs::rename("pysplashsurf.pyi", "pysplashsurf/pysplashsurf.pyi")?; Ok(()) } diff --git a/pysplashsurf/src/lib.rs b/pysplashsurf/src/lib.rs index 44a9b6a..af413cd 100644 --- a/pysplashsurf/src/lib.rs +++ b/pysplashsurf/src/lib.rs @@ -1,7 +1,6 @@ use pyo3::prelude::*; use pyo3::types::{PyList, PyString}; use pyo3_stub_gen::define_stub_info_gatherer; - use splashsurf::cli; #[cfg(target_pointer_width = "32")] From c6264ae7db1f6c406e1be13c0cc1100db38e4120 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Mon, 1 Sep 2025 17:09:11 +0200 Subject: [PATCH 41/63] Py: Update doc strings --- pysplashsurf/pysplashsurf/pysplashsurf.pyi | 4 +-- pysplashsurf/src/mesh.rs | 30 +++++++++++----------- 2 files changed, 16 insertions(+), 18 deletions(-) diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index 437c48f..2a1e418 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -81,8 +81,6 @@ class MeshWithData: r""" The contained mesh without associated data and attributes """ - def as_tri3d(self) -> typing.Optional[TriMesh3d]: ... - def as_mixed_tri_quad3d(self) -> typing.Optional[MixedTriQuadMesh3d]: ... def copy_mesh(self) -> typing.Union[TriMesh3d, MixedTriQuadMesh3d]: r""" Returns a copy of the contained mesh without associated data and attributes @@ -105,7 +103,7 @@ class MixedTriQuadMesh3d: """ def get_quads(self) -> numpy.typing.NDArray[numpy.uint64]: r""" - Returns a copy of all quad cells of the mesh as an `Nx3` array of vertex indices + Returns a copy of all quad cells of the mesh as an `Nx4` array of vertex indices """ class NeighborhoodLists: diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index 64de97e..0015fa6 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -264,7 +264,7 @@ impl PyMixedTriQuadMesh3d { }) } - /// Returns a copy of all quad cells of the mesh as an `Nx3` array of vertex indices + /// Returns a copy of all quad cells of the mesh as an `Nx4` array of vertex indices pub fn get_quads<'py>(&self, py: Python<'py>) -> PyResult>> { let cells = match &self.inner { PyMixedTriQuadMesh3dData::F32(mesh) => mesh.cells.as_slice(), @@ -486,6 +486,20 @@ impl PyMeshWithData { Ok(mesh_with_data) } + + pub fn as_tri3d<'py, 'a>(&'a self, py: Python<'py>) -> Option> { + match &self.mesh { + PyMesh3dData::Tri3d(mesh) => Some(mesh.clone_ref(py)), + _ => None, + } + } + + pub fn as_mixed_tri_quad3d<'py>(&self, py: Python<'py>) -> Option> { + match &self.mesh { + PyMesh3dData::MixedTriQuad3d(mesh) => Some(mesh.clone_ref(py)), + _ => None, + } + } } #[gen_stub_pymethods] @@ -541,20 +555,6 @@ impl PyMeshWithData { .into_py_dict(py) } - pub fn as_tri3d<'py, 'a>(&'a self, py: Python<'py>) -> Option> { - match &self.mesh { - PyMesh3dData::Tri3d(mesh) => Some(mesh.clone_ref(py)), - _ => None, - } - } - - pub fn as_mixed_tri_quad3d<'py>(&self, py: Python<'py>) -> Option> { - match &self.mesh { - PyMesh3dData::MixedTriQuad3d(mesh) => Some(mesh.clone_ref(py)), - _ => None, - } - } - /// The contained mesh without associated data and attributes #[getter] #[gen_stub(override_return_type(type_repr="typing.Union[TriMesh3d, MixedTriQuadMesh3d]", imports=()))] From 8707d349690d587472c7c44555134b3f0b326626 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Mon, 1 Sep 2025 17:09:26 
+0200 Subject: [PATCH 42/63] Py: Update sphinx templates and doc strings --- .../pysplashsurf/docs/requirements.txt | 1 + pysplashsurf/pysplashsurf/docs/source/api.rst | 22 ++++---- .../pysplashsurf/docs/source/classes.rst | 53 +++++++++++-------- pysplashsurf/pysplashsurf/docs/source/conf.py | 27 ++++++++-- .../source/{methods.rst => functions.rst} | 8 +-- .../pysplashsurf/docs/source/index.md | 4 +- pysplashsurf/pysplashsurf/pysplashsurf.pyi | 18 +++++++ pysplashsurf/src/mesh.rs | 3 ++ pysplashsurf/src/neighborhood_search.rs | 1 + pysplashsurf/src/reconstruction.rs | 1 + pysplashsurf/src/sph_interpolation.rs | 1 + 11 files changed, 99 insertions(+), 40 deletions(-) rename pysplashsurf/pysplashsurf/docs/source/{methods.rst => functions.rst} (82%) diff --git a/pysplashsurf/pysplashsurf/docs/requirements.txt b/pysplashsurf/pysplashsurf/docs/requirements.txt index 379f483..4ed2155 100644 --- a/pysplashsurf/pysplashsurf/docs/requirements.txt +++ b/pysplashsurf/pysplashsurf/docs/requirements.txt @@ -5,4 +5,5 @@ meshio==5.3.5 sphinx_rtd_theme==3.0.1 numpydoc==1.8.0 myst-parser==4.0.1 +sphinx-autodoc-typehints==3.2.0 pysplashsurf diff --git a/pysplashsurf/pysplashsurf/docs/source/api.rst b/pysplashsurf/pysplashsurf/docs/source/api.rst index e8b95fa..8a8b73f 100644 --- a/pysplashsurf/pysplashsurf/docs/source/api.rst +++ b/pysplashsurf/pysplashsurf/docs/source/api.rst @@ -3,8 +3,8 @@ API .. currentmodule:: pysplashsurf -Methods -------- +Functions +--------- .. autosummary:: check_mesh_consistency @@ -20,17 +20,19 @@ Methods par_laplacian_smoothing_normals_inplace reconstruct_surface reconstruction_pipeline + reconstruction_pipeline_multi write_to_file Classes ------- .. autosummary:: - Aabb3dF32 - MixedTriQuadMesh3dF32 - MixedTriQuadMeshWithDataF32 - SphInterpolatorF32 - SurfaceReconstructionF32 - TriMesh3dF32 - TriMeshWithDataF32 - UniformGridF32 \ No newline at end of file + Aabb3d + MixedTriQuadMesh3d + NeighborhoodLists + SphInterpolator + SurfaceReconstruction + TriMesh3d + TriMeshWithData + UniformGrid + VertexVertexConnectivity \ No newline at end of file diff --git a/pysplashsurf/pysplashsurf/docs/source/classes.rst b/pysplashsurf/pysplashsurf/docs/source/classes.rst index 8060dcf..f2def96 100644 --- a/pysplashsurf/pysplashsurf/docs/source/classes.rst +++ b/pysplashsurf/pysplashsurf/docs/source/classes.rst @@ -1,42 +1,53 @@ Classes ======= -Additionally to the classes on this page, there exists a F64 version for every class which is otherwise identical to the F32 version. - -For more information on the classes, refer to the `Rust documentation `_ of splashsurf_lib. +For more information on the classes, refer to the `Rust documentation `_ of ``splashsurf_lib``. .. currentmodule:: pysplashsurf -.. autoclass:: Aabb3dF32 +.. autoclass:: SphInterpolator + :members: - See `Aabb3d `_ for more information. + See `SphInterpolator `_ for more information. -.. autoclass:: MixedTriQuadMesh3dF32 +Mesh types +---------- - See `MixedTriQuadMesh3d `_ for more information. +.. autoclass:: TriMesh3d + :members: -.. autoclass:: MixedTriQuadMeshWithDataF32 - :exclude-members: push_point_attribute_scalar_u64, push_point_attribute_scalar_real, push_point_attribute_vector_real, push_cell_attribute_scalar_real, push_cell_attribute_scalar_u64, push_cell_attribute_vector_real + See `TriMesh3d `_ for more information. - See `MeshWithData `_ for more information. +.. autoclass:: MixedTriQuadMesh3d + :members: -.. autoclass:: SphInterpolatorF32 + See `MixedTriQuadMesh3d `_ for more information. 
- See `SphInterpolator `_ for more information. +.. autoclass:: MeshWithData + :members: -.. autoclass:: SurfaceReconstructionF32 + See `MeshWithData `_ for more information. - See `SurfaceReconstruction `_ for more information. +Helper and return types +----------------------- -.. autoclass:: TriMesh3dF32 +.. autoclass:: Aabb3d + :members: + + See `Aabb3d `_ for more information. - See `TriMesh3d `_ for more information. +.. autoclass:: NeighborhoodLists + :members: -.. autoclass:: TriMeshWithDataF32 - :exclude-members: push_point_attribute_scalar_u64, push_point_attribute_scalar_real, push_point_attribute_vector_real, push_cell_attribute_scalar_real, push_cell_attribute_scalar_u64, push_cell_attribute_vector_real +.. autoclass:: SurfaceReconstruction + :members: - See `MeshWithData `_ for more information. + See `SurfaceReconstruction `_ for more information. + +.. autoclass:: UniformGrid + :members: -.. autoclass:: UniformGridF32 + See `UniformGrid `_ for more information. - See `UniformGrid `_ for more information. \ No newline at end of file +.. autoclass:: VertexVertexConnectivity + :members: \ No newline at end of file diff --git a/pysplashsurf/pysplashsurf/docs/source/conf.py b/pysplashsurf/pysplashsurf/docs/source/conf.py index fe315e1..7d3e0b7 100644 --- a/pysplashsurf/pysplashsurf/docs/source/conf.py +++ b/pysplashsurf/pysplashsurf/docs/source/conf.py @@ -10,10 +10,19 @@ # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # -# import os -# import sys -# sys.path.insert(0, os.path.abspath('.')) -import pysplashsurf +#import pysplashsurf + +import os +import sys +# #sys.path.append(os.path.abspath('..')) + +from sphinx.ext.autodoc.importer import import_module +rootpath = '/Users/floeschner/programming/splashsurf/pysplashsurf/pysplashsurf' +sys_path = list(sys.path) +sys.path.insert(0, str(rootpath)) +pysplashsurf = import_module('pysplashsurf') + +#import pysplashsurf # -- Project information ----------------------------------------------------- @@ -33,6 +42,7 @@ 'numpydoc', 'myst_parser', 'sphinx_rtd_theme', + 'sphinx_autodoc_typehints' ] source_suffix = ['.rst', '.md'] @@ -59,3 +69,12 @@ # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] + +autodoc_typehints = "both" + +always_document_param_types = True +always_use_bars_union = True +typehints_document_rtype = False +typehints_use_rtype = False +typehints_use_signature = True +typehints_use_signature_return = True diff --git a/pysplashsurf/pysplashsurf/docs/source/methods.rst b/pysplashsurf/pysplashsurf/docs/source/functions.rst similarity index 82% rename from pysplashsurf/pysplashsurf/docs/source/methods.rst rename to pysplashsurf/pysplashsurf/docs/source/functions.rst index 420f6a0..0c12f2a 100644 --- a/pysplashsurf/pysplashsurf/docs/source/methods.rst +++ b/pysplashsurf/pysplashsurf/docs/source/functions.rst @@ -1,7 +1,7 @@ -Methods -======= +Functions +========= -All methods infer float precision based on the input (32bit or 64bit). +All functions infer float precision based on the input (32bit or 64bit). .. currentmodule:: pysplashsurf @@ -31,4 +31,6 @@ All methods infer float precision based on the input (32bit or 64bit). .. autofunction:: reconstruction_pipeline +.. autofunction:: reconstruction_pipeline_multi + .. 
autofunction:: write_to_file \ No newline at end of file diff --git a/pysplashsurf/pysplashsurf/docs/source/index.md b/pysplashsurf/pysplashsurf/docs/source/index.md index f24350a..a0f6ece 100644 --- a/pysplashsurf/pysplashsurf/docs/source/index.md +++ b/pysplashsurf/pysplashsurf/docs/source/index.md @@ -3,7 +3,7 @@ ```{toctree} :caption: Table of Contents -methods -classes api +functions +classes ``` \ No newline at end of file diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index 2a1e418..e1097ee 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -56,6 +56,9 @@ class MeshAttribute: """ class MeshWithData: + r""" + Mesh with attached point and cell attributes + """ @property def dtype(self) -> numpy.dtype: r""" @@ -87,6 +90,9 @@ class MeshWithData: """ class MixedTriQuadMesh3d: + r""" + Mixed triangle and quad surface mesh in 3D + """ @property def dtype(self) -> numpy.dtype: r""" @@ -107,6 +113,9 @@ class MixedTriQuadMesh3d: """ class NeighborhoodLists: + r""" + Per particle neighborhood lists + """ def __len__(self) -> builtins.int: r""" Returns the number of particles for which neighborhood lists are stored @@ -121,6 +130,9 @@ class NeighborhoodLists: """ class SphInterpolator: + r""" + Interpolator of per-particle quantities to arbitrary points using SPH (cubic kernel) interpolation + """ def __new__(cls, particle_positions:numpy.typing.NDArray[typing.Any], particle_densities:numpy.typing.NDArray[typing.Any], particle_rest_mass:builtins.float, compact_support_radius:builtins.float) -> SphInterpolator: r""" Constructs an SPH interpolator (with cubic kernels) for the given particles @@ -135,6 +147,9 @@ class SphInterpolator: """ class SurfaceReconstruction: + r""" + Result returned by surface reconstruction functions with surface mesh and other data + """ @property def grid(self) -> UniformGrid: r""" @@ -162,6 +177,9 @@ class SurfaceReconstruction: """ class TriMesh3d: + r""" + Triangle surface mesh in 3D + """ @property def dtype(self) -> numpy.dtype: r""" diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index 0015fa6..f37c0d9 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -92,6 +92,7 @@ enum PyTriMesh3dData { F64(TriMesh3d), } +/// Triangle surface mesh in 3D #[gen_stub_pyclass] #[pyclass] #[pyo3(name = "TriMesh3d")] @@ -204,6 +205,7 @@ enum PyMixedTriQuadMesh3dData { F64(MixedTriQuadMesh3d), } +/// Mixed triangle and quad surface mesh in 3D #[gen_stub_pyclass] #[pyclass] #[pyo3(name = "MixedTriQuadMesh3d")] @@ -380,6 +382,7 @@ impl PyMeshAttribute { } } +/// Mesh with attached point and cell attributes #[gen_stub_pyclass] #[pyclass] #[pyo3(name = "MeshWithData")] diff --git a/pysplashsurf/src/neighborhood_search.rs b/pysplashsurf/src/neighborhood_search.rs index 34585a8..1f08cdf 100644 --- a/pysplashsurf/src/neighborhood_search.rs +++ b/pysplashsurf/src/neighborhood_search.rs @@ -12,6 +12,7 @@ use crate::utils::*; // TODO: Bindings for flat neighborhood search // TODO: Bindings for computing particle densities +/// Per particle neighborhood lists #[gen_stub_pyclass] #[pyclass] #[pyo3(name = "NeighborhoodLists")] diff --git a/pysplashsurf/src/reconstruction.rs b/pysplashsurf/src/reconstruction.rs index da964d3..fcb4d7c 100644 --- a/pysplashsurf/src/reconstruction.rs +++ b/pysplashsurf/src/reconstruction.rs @@ -15,6 +15,7 @@ use splashsurf_lib::{ }; use utils::{IndexT, PyFloatVecWrapper}; +/// Result returned by surface reconstruction 
functions with surface mesh and other data #[gen_stub_pyclass] #[pyclass] #[pyo3(name = "SurfaceReconstruction")] diff --git a/pysplashsurf/src/sph_interpolation.rs b/pysplashsurf/src/sph_interpolation.rs index 74aa58c..685c936 100644 --- a/pysplashsurf/src/sph_interpolation.rs +++ b/pysplashsurf/src/sph_interpolation.rs @@ -19,6 +19,7 @@ enum PySphInterpolatorWrapper { F64(SphInterpolator), } +/// Interpolator of per-particle quantities to arbitrary points using SPH (cubic kernel) interpolation #[gen_stub_pyclass] #[pyclass] #[pyo3(name = "SphInterpolator")] From 9e76d760afdf2dc71dcde5af70fa40cd5a4fc298 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Tue, 2 Sep 2025 09:53:26 +0200 Subject: [PATCH 43/63] Py: Update documentation --- pysplashsurf/pysplashsurf/docs/source/api.rst | 13 ++++++++++--- pysplashsurf/pysplashsurf/docs/source/functions.rst | 2 +- pysplashsurf/pysplashsurf/pysplashsurf.pyi | 8 ++++---- pysplashsurf/src/mesh.rs | 6 +++--- pysplashsurf/src/sph_interpolation.rs | 2 +- 5 files changed, 19 insertions(+), 12 deletions(-) diff --git a/pysplashsurf/pysplashsurf/docs/source/api.rst b/pysplashsurf/pysplashsurf/docs/source/api.rst index 8a8b73f..64e0152 100644 --- a/pysplashsurf/pysplashsurf/docs/source/api.rst +++ b/pysplashsurf/pysplashsurf/docs/source/api.rst @@ -1,8 +1,15 @@ -API -=== +API Overview +============ .. currentmodule:: pysplashsurf +The main functionality of ``pysplashsurf`` is provided by the :py:func:`reconstruction_pipeline` function which implements all features of the ``splashsurf`` CLI including the surface reconstruction from particles and optional post-processing, and the :py:func:`reconstruct_surface` function which only performs the surface reconstruction itself. + +**Data types:** The functions of the package accept Python ``float`` for scalar parameters and Numpy arrays of data-type ``np.float32`` or ``np.float64`` for array inputs (e.g. particle positions). +Outputs will be of the same float precision as the input arrays. +Array-like inputs have to be contiguous (C-order) in memory. +All array-like and object type (e.g. :py:class:`Aabb3d`) inputs to a function call have to use the same float data-type. + Functions --------- @@ -20,7 +27,6 @@ Functions par_laplacian_smoothing_normals_inplace reconstruct_surface reconstruction_pipeline - reconstruction_pipeline_multi write_to_file Classes @@ -28,6 +34,7 @@ Classes .. autosummary:: Aabb3d + MeshWithData MixedTriQuadMesh3d NeighborhoodLists SphInterpolator diff --git a/pysplashsurf/pysplashsurf/docs/source/functions.rst b/pysplashsurf/pysplashsurf/docs/source/functions.rst index 0c12f2a..766268c 100644 --- a/pysplashsurf/pysplashsurf/docs/source/functions.rst +++ b/pysplashsurf/pysplashsurf/docs/source/functions.rst @@ -1,7 +1,7 @@ Functions ========= -All functions infer float precision based on the input (32bit or 64bit). +All functions infer float precision based on the input (``np.float32`` or ``np.float64``). .. 
currentmodule:: pysplashsurf diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index e1097ee..7de39a1 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -62,7 +62,7 @@ class MeshWithData: @property def dtype(self) -> numpy.dtype: r""" - Numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) + Numpy dtype of the underlying scalar type (either ``np.float32`` or ``np.float64``) """ @property def mesh_type(self) -> MeshType: @@ -96,7 +96,7 @@ class MixedTriQuadMesh3d: @property def dtype(self) -> numpy.dtype: r""" - Numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) + Numpy dtype of the underlying scalar type (either ``np.float32`` or ``np.float64``) """ @property def vertices(self) -> numpy.typing.NDArray[typing.Any]: @@ -131,7 +131,7 @@ class NeighborhoodLists: class SphInterpolator: r""" - Interpolator of per-particle quantities to arbitrary points using SPH (cubic kernel) interpolation + Interpolator of per-particle quantities to arbitrary points using SPH interpolation (with cubic kernel) """ def __new__(cls, particle_positions:numpy.typing.NDArray[typing.Any], particle_densities:numpy.typing.NDArray[typing.Any], particle_rest_mass:builtins.float, compact_support_radius:builtins.float) -> SphInterpolator: r""" @@ -183,7 +183,7 @@ class TriMesh3d: @property def dtype(self) -> numpy.dtype: r""" - Numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) + Numpy dtype of the underlying scalar type (either ``np.float32`` or ``np.float64``) """ @property def vertices(self) -> numpy.typing.NDArray[typing.Any]: diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index f37c0d9..0076fa5 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -151,7 +151,7 @@ impl PyTriMesh3d { #[gen_stub_pymethods] #[pymethods] impl PyTriMesh3d { - /// Numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) + /// Numpy dtype of the underlying scalar type (either ``np.float32`` or ``np.float64``) #[getter] pub fn dtype<'py>(&self, py: Python<'py>) -> Bound<'py, PyArrayDescr> { match &self.inner { @@ -228,7 +228,7 @@ impl PyMixedTriQuadMesh3d { #[gen_stub_pymethods] #[pymethods] impl PyMixedTriQuadMesh3d { - /// Numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) + /// Numpy dtype of the underlying scalar type (either ``np.float32`` or ``np.float64``) #[getter] pub fn dtype<'py>(&self, py: Python<'py>) -> Bound<'py, PyArrayDescr> { match &self.inner { @@ -508,7 +508,7 @@ impl PyMeshWithData { #[gen_stub_pymethods] #[pymethods] impl PyMeshWithData { - /// Numpy dtype of the underlying scalar type (either `np.float32` or `np.float64`) + /// Numpy dtype of the underlying scalar type (either ``np.float32`` or ``np.float64``) #[getter] pub fn dtype<'py>(&self, py: Python<'py>) -> Bound<'py, PyArrayDescr> { match &self.mesh { diff --git a/pysplashsurf/src/sph_interpolation.rs b/pysplashsurf/src/sph_interpolation.rs index 685c936..841c4bc 100644 --- a/pysplashsurf/src/sph_interpolation.rs +++ b/pysplashsurf/src/sph_interpolation.rs @@ -19,7 +19,7 @@ enum PySphInterpolatorWrapper { F64(SphInterpolator), } -/// Interpolator of per-particle quantities to arbitrary points using SPH (cubic kernel) interpolation +/// Interpolator of per-particle quantities to arbitrary points using SPH interpolation (with cubic kernel) #[gen_stub_pyclass] #[pyclass] #[pyo3(name = 
"SphInterpolator")] From 7d3af942de881b566fd7f49da2a76716bcf361fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Tue, 2 Sep 2025 10:50:26 +0200 Subject: [PATCH 44/63] Py: Move mesh writing to rust code --- pysplashsurf/README.md | 7 +- pysplashsurf/pysplashsurf/__init__.py | 222 --------------------- pysplashsurf/pysplashsurf/pysplashsurf.pyi | 22 +- pysplashsurf/src/mesh.rs | 104 +++++++++- 4 files changed, 115 insertions(+), 240 deletions(-) diff --git a/pysplashsurf/README.md b/pysplashsurf/README.md index 6fe29ea..59ceba6 100644 --- a/pysplashsurf/README.md +++ b/pysplashsurf/README.md @@ -45,9 +45,10 @@ import meshio import numpy as np import pysplashsurf +# Load particles from mesh file mesh = meshio.read("input.vtk") particles = np.array(mesh.points, dtype=np.float64) - +# Reconstruct the points/particles with some post-processing mesh_with_data, reconstruction = pysplashsurf.reconstruction_pipeline( particles, particle_radius=0.025, @@ -65,8 +66,8 @@ mesh_with_data, reconstruction = pysplashsurf.reconstruction_pipeline( subdomain_num_cubes_per_dim=64, output_mesh_smoothing_weights=True ) - -pysplashsurf.write_to_file(mesh_with_data, "output.vtk") +# Write the mesh with attributes to a file using meshio +mesh_with_data.write_to_file("surface.vtk") ``` The `reconstruction_pipeline` method provides (mostly) the same arguments as the splashsurf binary CLI. It may be necessary to specify the `dtype` of a function input (as done for `particles` in the example) so that the bindings know what data type to use internally. diff --git a/pysplashsurf/pysplashsurf/__init__.py b/pysplashsurf/pysplashsurf/__init__.py index ed1d2c9..e4fdf45 100644 --- a/pysplashsurf/pysplashsurf/__init__.py +++ b/pysplashsurf/pysplashsurf/__init__.py @@ -1,229 +1,7 @@ from .pysplashsurf import * from . 
import bgeo -import numpy as np import sys def run_pysplashsurf(): run_splashsurf(sys.argv) - - -def push_point_attribute(self, name: str, data: np.ndarray, real_type): - """Add a point attribute to the mesh""" - if data.ndim == 2: - return self.push_point_attribute_vector_real(name, data) - - elif data.ndim == 1: - if data.dtype == np.uint64: - return self.push_point_attribute_scalar_u64(name, data) - - elif data.dtype == real_type: - return self.push_point_attribute_scalar_real(name, data) - - else: - raise ValueError( - "Not a valid data type, try explicitly specifying uint64 or float64" - ) - - else: - raise ValueError("Not a valid data array") - - -def push_cell_attribute(self, name: str, data: np.ndarray, real_type): - """Add a cell attribute to the mesh""" - if data.ndim == 2: - return self.push_cell_attribute_vector_real(name, data) - - elif data.ndim == 1: - if data.dtype == np.uint64: - return self.push_cell_attribute_scalar_u64(name, data) - - elif data.dtype == real_type: - return self.push_cell_attribute_scalar_real(name, data) - - else: - raise ValueError( - "Not a valid data type, try explicitly specifying uint64 or float64" - ) - - else: - raise ValueError("Not a valid data array") - - -def write_to_file(mesh, filename, file_format=None, consume_object=False): - """Write the mesh and its attributes to a file using meshio - - Parameters - ---------- - mesh: TriMesh3d | MixedTriQuadMesh3d | MeshWithData - Mesh object to write - - filename: Any - File path for the output file - - file_format: str | None - File format for the output file, generally also derived from filename - - consume_object: bool - Flag for specifying whether the MeshWithData object should be consumed for a faster execution. - Only consumes the mesh field. - """ - try: - import meshio - except ImportError: - raise ImportError( - "meshio is not installed, please install it with with `pip install meshio` to use this function" - ) - - point_data = ( - mesh.point_attributes if type(mesh) is pysplashsurf.MeshWithData else {} - ) - cell_data = mesh.cell_attributes if type(mesh) is pysplashsurf.MeshWithData else {} - mesh = mesh.mesh if type(mesh) is pysplashsurf.MeshWithData else mesh - - if type(mesh) is pysplashsurf.TriMesh3d: - meshio.write_points_cells( - filename, - mesh.vertices, - [("triangle", mesh.triangles)], - point_data=point_data, - cell_data=cell_data, - file_format=file_format, - ) - elif type(mesh) is pysplashsurf.MixedTriQuadMesh3d: - cells = [ - ("triangle", mesh.get_triangles()), - ("quad", mesh.get_quads()), - ] - meshio.write_points_cells( - filename, - mesh.vertices, - cells, - point_data=point_data, - cell_data=cell_data, - file_format=file_format, - ) - else: - raise TypeError("unsupported mesh type") - - -def create_mesh_with_data_object(mesh): - """Create the corresponding mesh with data object to a mesh object - - Parameters - ---------- - mesh: TriMesh3dF64 | TriMesh3dF32 | MixedTriQuadMesh3dF64 | MixedTriQuadMesh3dF32 - Mesh object to convert - - Returns - ------- - TriMeshWithDataF64 | TriMeshWithDataF32 | MixedTriQuadMeshWithDataF64 | MixedTriQuadMeshWithDataF32 - Mesh with data object - """ - - if type(mesh) is TriMesh3dF64: - return TriMeshWithDataF64(mesh) - elif type(mesh) is TriMesh3dF32: - return TriMeshWithDataF32(mesh) - elif type(mesh) is MixedTriQuadMesh3dF64: - return MixedTriQuadMeshWithDataF64(mesh) - elif type(mesh) is MixedTriQuadMesh3dF32: - return MixedTriQuadMeshWithDataF32(mesh) - else: - raise ValueError("Invalid mesh type") - - -def 
create_sph_interpolator_object( - particle_positions, particle_densities, particle_rest_mass, compact_support_radius -): - """Create the corresponding SPH interpolator object to a set of particle data - - Parameters - ---------- - particle_positions: np.ndarray - 2-dimensional array containing all particle positions [[ax, ay, az], [bx, by, bz], ...] - - particle_densities: np.ndarray - 1-dimensional array containing all particle densities - - particle_rest_mass: float - Rest mass of the particles - - compact_support_radius: float - Compact support radius of the SPH kernel - - Returns - ------- - SphInterpolatorF32 | SphInterpolatorF64 - SphInterpolator object - """ - - if particle_positions.dtype == "float32": - return SphInterpolatorF32( - particle_positions, - particle_densities, - particle_rest_mass, - compact_support_radius, - ) - elif particle_positions.dtype == "float64": - return SphInterpolatorF64( - particle_positions, - particle_densities, - particle_rest_mass, - compact_support_radius, - ) - else: - raise ValueError( - "Invalid data type (only float32 and float64 are supported, consider explicitly specifying the dtype for particle_positions)" - ) - - -def create_aabb_object(aabb_min, aabb_max): - """Create the corresponding AABB object to a set of min and max values - - Parameters - ---------- - aabb_min: np.ndarray - Smallest corner of the axis-aligned bounding box - - aabb_max: np.ndarray - Largest corner of the axis-aligned bounding box - - Returns - ------- - Aabb3dF32 | Aabb3dF64 - Aabb object - """ - - if aabb_min.dtype == "float32": - return Aabb3dF32(aabb_min, aabb_max) - elif aabb_min.dtype == "float64": - return Aabb3dF64(aabb_min, aabb_max) - else: - raise ValueError( - "Invalid data type (only float32 and float64 are supported, consider explicitly specifying the dtype for aabb_min and aabb_max)" - ) - - -def create_aabb_object_from_points(points): - """Create the corresponding AABB object to a set of points - - Parameters - ---------- - points: np.ndarray - 2-dimensional array containing all point positions [[ax, ay, az], [bx, by, bz], ...] 
- - Returns - ------- - Aabb3dF32 | Aabb3dF64 - Aabb object - """ - - if points.dtype == "float32": - return Aabb3dF32.from_points(points) - elif points.dtype == "float64": - return Aabb3dF64.from_points(points) - else: - raise ValueError( - "Invalid data type (only float32 and float64 are supported, consider explicitly specifying the dtype for points)" - ) diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index 7de39a1..6565445 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -70,6 +70,11 @@ class MeshWithData: Type of the underlying mesh """ @property + def mesh(self) -> typing.Union[TriMesh3d, MixedTriQuadMesh3d]: + r""" + The contained mesh without associated data and attributes + """ + @property def point_attributes(self) -> dict[str, numpy.typing.NDArray]: r""" The attributes attached points (vertices) of the mesh @@ -79,15 +84,14 @@ class MeshWithData: r""" The attributes attached to the cells (triangles or quads) of the mesh """ - @property - def mesh(self) -> typing.Union[TriMesh3d, MixedTriQuadMesh3d]: - r""" - The contained mesh without associated data and attributes - """ def copy_mesh(self) -> typing.Union[TriMesh3d, MixedTriQuadMesh3d]: r""" Returns a copy of the contained mesh without associated data and attributes """ + def write_to_file(self, path:builtins.str, *, file_format:typing.Optional[builtins.str]='vtk42') -> None: + r""" + Writes the mesh and its attributes to a file using `meshio.write_points_cells` + """ class MixedTriQuadMesh3d: r""" @@ -111,6 +115,10 @@ class MixedTriQuadMesh3d: r""" Returns a copy of all quad cells of the mesh as an `Nx4` array of vertex indices """ + def write_to_file(self, path:builtins.str, *, file_format:typing.Optional[builtins.str]='vtk42') -> None: + r""" + Writes the mesh to a file using `meshio.write_points_cells` + """ class NeighborhoodLists: r""" @@ -203,6 +211,10 @@ class TriMesh3d: r""" Computes the vertex-vertex connectivity of the mesh """ + def write_to_file(self, path:builtins.str, *, file_format:typing.Optional[builtins.str]='vtk42') -> None: + r""" + Writes the mesh to a file using `meshio.write_points_cells` + """ class UniformGrid: r""" diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index 0076fa5..3718559 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -197,6 +197,19 @@ impl PyTriMesh3d { }; PyVertexVertexConnectivity::new(connectivity) } + + /// Writes the mesh to a file using `meshio.write_points_cells` + #[pyo3(signature = (path, *, file_format = Some("vtk42")))] + pub fn write_to_file<'py>( + this: Bound<'py, Self>, + path: &str, + file_format: Option<&str>, + ) -> PyResult<()> { + let py = this.py(); + let mesh = + PyMeshWithData::try_from_pymesh(py, this.unbind().clone_ref(py))?.into_pyobject(py)?; + PyMeshWithData::write_to_file(mesh, path, file_format) + } } #[derive(Clone)] @@ -278,6 +291,19 @@ impl PyMixedTriQuadMesh3d { _ => None, }) } + + /// Writes the mesh to a file using `meshio.write_points_cells` + #[pyo3(signature = (path, *, file_format = Some("vtk42")))] + pub fn write_to_file<'py>( + this: Bound<'py, Self>, + path: &str, + file_format: Option<&str>, + ) -> PyResult<()> { + let py = this.py(); + let mesh = + PyMeshWithData::try_from_pymesh(py, this.unbind().clone_ref(py))?.into_pyobject(py)?; + PyMeshWithData::write_to_file(mesh, path, file_format) + } } pub fn filter_cells<'py, C, const N: usize, F>( @@ -526,6 +552,16 @@ impl PyMeshWithData { } } + 
/// The contained mesh without associated data and attributes + #[getter] + #[gen_stub(override_return_type(type_repr="typing.Union[TriMesh3d, MixedTriQuadMesh3d]", imports=()))] + pub fn mesh<'py>(&self, py: Python<'py>) -> Py { + match &self.mesh { + PyMesh3dData::Tri3d(mesh) => mesh.clone_ref(py).into_any(), + PyMesh3dData::MixedTriQuad3d(mesh) => mesh.clone_ref(py).into_any(), + } + } + /// The attributes attached points (vertices) of the mesh #[getter] #[gen_stub(override_return_type(type_repr="dict[str, numpy.typing.NDArray]", imports=()))] @@ -558,16 +594,6 @@ impl PyMeshWithData { .into_py_dict(py) } - /// The contained mesh without associated data and attributes - #[getter] - #[gen_stub(override_return_type(type_repr="typing.Union[TriMesh3d, MixedTriQuadMesh3d]", imports=()))] - pub fn mesh<'py>(&self, py: Python<'py>) -> Py { - match &self.mesh { - PyMesh3dData::Tri3d(mesh) => mesh.clone_ref(py).into_any(), - PyMesh3dData::MixedTriQuad3d(mesh) => mesh.clone_ref(py).into_any(), - } - } - /// Returns a copy of the contained mesh without associated data and attributes #[gen_stub(override_return_type(type_repr="typing.Union[TriMesh3d, MixedTriQuadMesh3d]", imports=()))] pub fn copy_mesh<'py>(&self, py: Python<'py>) -> PyResult> { @@ -576,4 +602,62 @@ impl PyMeshWithData { PyMesh3dData::MixedTriQuad3d(mesh) => mesh.borrow(py).clone().into_bound_py_any(py), } } + + /// Writes the mesh and its attributes to a file using `meshio.write_points_cells` + #[pyo3(signature = (path, *, file_format = Some("vtk42")))] + pub fn write_to_file<'py>( + this: Bound<'py, Self>, + path: &str, + file_format: Option<&str>, + ) -> PyResult<()> { + let py = this.py(); + let meshio = PyModule::import(py, "meshio")?; + let write_points_cells = meshio.getattr("write_points_cells")?; + + let this = this.borrow(); + + let filename = path.into_py_any(py)?; + let points = match &this.mesh { + PyMesh3dData::Tri3d(mesh) => PyTriMesh3d::vertices(mesh.clone_ref(py).into_bound(py))?, + PyMesh3dData::MixedTriQuad3d(mesh) => { + PyMixedTriQuadMesh3d::vertices(mesh.clone_ref(py).into_bound(py))? + } + } + .into_py_any(py)?; + let cells = match &this.mesh { + PyMesh3dData::Tri3d(mesh) => { + let triangles = PyTriMesh3d::triangles(mesh.clone_ref(py).into_bound(py))?; + let dict = [("triangle", triangles)].into_py_dict(py)?; + dict.into_py_any(py)? + } + PyMesh3dData::MixedTriQuad3d(mesh) => { + let triangles = mesh.borrow(py).get_triangles(py)?; + let quads = mesh.borrow(py).get_quads(py)?; + let dict = [("triangle", triangles), ("quad", quads)].into_py_dict(py)?; + dict.into_py_any(py)? 
+ } + }; + let point_data = this.point_attributes(py)?.into_py_any(py)?; + let cell_data = this.cell_attributes(py)?.into_py_any(py)?; + let field_data = py.None(); + let point_sets = py.None(); + let cell_sets = py.None(); + let file_format = file_format.into_py_any(py)?; + + let args_vec: Vec<(&str, Py)> = vec![ + ("filename", filename), + ("points", points), + ("cells", cells), + ("point_data", point_data), + ("cell_data", cell_data), + ("field_data", field_data), + ("point_sets", point_sets), + ("cell_sets", cell_sets), + ("file_format", file_format), + ]; + let args = args_vec.into_py_dict(py)?; + + let _ = write_points_cells.call((), Some(&args))?; + Ok(()) + } } From a51b5aefd1a3f46d9f5458686835ff88ddd6391b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Tue, 2 Sep 2025 13:00:02 +0200 Subject: [PATCH 45/63] Py: Add attributes to MeshWithData --- pysplashsurf/pysplashsurf/pysplashsurf.pyi | 32 +++++ pysplashsurf/src/mesh.rs | 139 ++++++++++++++++++++- 2 files changed, 169 insertions(+), 2 deletions(-) diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index 6565445..cb2e9f3 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -65,6 +65,16 @@ class MeshWithData: Numpy dtype of the underlying scalar type (either ``np.float32`` or ``np.float64``) """ @property + def nvertices(self) -> builtins.int: + r""" + Number of vertices in the mesh + """ + @property + def ncells(self) -> builtins.int: + r""" + Number of cells (triangles or quads) in the mesh + """ + @property def mesh_type(self) -> MeshType: r""" Type of the underlying mesh @@ -88,6 +98,28 @@ class MeshWithData: r""" Returns a copy of the contained mesh without associated data and attributes """ + def add_point_attribute(self, name:builtins.str, attribute:numpy.typing.NDArray[typing.Any]) -> None: + r""" + Attaches a point attribute to the mesh + + There has to be exactly one attribute value per vertex in the mesh. + As attribute data, the following numpy array types are supported: + - 1D array with shape (N,) of `np.uint64` + - 1D array with shape (N,) of the mesh scalar type (`np.float32` or `np.float64`) + - 2D array with shape (N,3) of the mesh scalar type (`np.float32` or `np.float64`) + The data is copied into the mesh object. + """ + def add_cell_attribute(self, name:builtins.str, attribute:numpy.typing.NDArray[typing.Any]) -> None: + r""" + Attaches a cell attribute to the mesh + + There has to be exactly one attribute value per cell in the mesh. + As attribute data, the following numpy array types are supported: + - 1D array with shape (N,) of `np.uint64` + - 1D array with shape (N,) of the mesh scalar type (`np.float32` or `np.float64`) + - 2D array with shape (N,3) of the mesh scalar type (`np.float32` or `np.float64`) + The data is copied into the mesh object. 
+ """ def write_to_file(self, path:builtins.str, *, file_format:typing.Optional[builtins.str]='vtk42') -> None: r""" Writes the mesh and its attributes to a file using `meshio.write_points_cells` diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index 3718559..10ec18f 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -2,10 +2,11 @@ use bytemuck::{NoUninit, Pod}; use ndarray::Array2; use numpy as np; use numpy::prelude::*; -use numpy::{Element, PyArray, PyArray2, PyArrayDescr, PyUntypedArray}; +use numpy::{Element, PyArray, PyArray1, PyArray2, PyArrayDescr, PyUntypedArray}; use pyo3::IntoPyObjectExt; +use pyo3::exceptions::{PyTypeError, PyValueError}; use pyo3::prelude::*; -use pyo3::types::{IntoPyDict, PyDict}; +use pyo3::types::{IntoPyDict, PyDict, PyTuple}; use pyo3_stub_gen::derive::*; use splashsurf_lib::mesh::TriangleCell; use splashsurf_lib::{ @@ -16,6 +17,7 @@ use splashsurf_lib::{ }, nalgebra::{Unit, Vector3}, }; +use std::ops::Deref; use crate::NumpyUsize; use crate::utils; @@ -360,6 +362,37 @@ pub struct PyMeshAttribute { enum_wrapper_impl_from!(PyMeshAttribute, OwnedMeshAttribute => PyMeshAttributeData::F32); enum_wrapper_impl_from!(PyMeshAttribute, OwnedMeshAttribute => PyMeshAttributeData::F64); +impl PyMeshAttribute { + pub fn try_from_generic<'py, R: Real + Element>( + name: String, + data: Bound<'py, PyUntypedArray>, + ) -> PyResult + where + PyMeshAttribute: From>, + { + let data = if let Ok(data) = data.downcast::>() { + OwnedAttributeData::ScalarU64(data.try_readonly()?.as_array().to_vec().into()) + } else if let Ok(data) = data.downcast::>() { + OwnedAttributeData::ScalarReal(data.try_readonly()?.as_array().to_vec().into()) + } else if let Ok(data) = data.downcast::>() { + let data_vec = data.try_readonly()?.as_slice()?.to_vec(); + if data.shape()[1] == 1 { + OwnedAttributeData::ScalarReal(bytemuck::cast_vec(data_vec).into()) + } else if data.shape()[1] == 3 { + OwnedAttributeData::Vector3Real(bytemuck::cast_vec(data_vec).into()) + } else { + return Err(PyValueError::new_err( + "expected Nx1 or Nx3 array for Vector3Real attribute data", + )); + } + } else { + return Err(PyTypeError::new_err("unsupported attribute data type")); + }; + + Ok(Self::from(OwnedMeshAttribute { name, data })) + } +} + #[gen_stub_pymethods] #[pymethods] impl PyMeshAttribute { @@ -543,6 +576,36 @@ impl PyMeshWithData { } } + /// Number of vertices in the mesh + #[getter] + pub fn nvertices<'py>(&self, py: Python<'py>) -> usize { + match &self.mesh { + PyMesh3dData::Tri3d(mesh) => match &mesh.borrow(py).deref().inner { + PyTriMesh3dData::F32(mesh) => mesh.vertices.len(), + PyTriMesh3dData::F64(mesh) => mesh.vertices.len(), + }, + PyMesh3dData::MixedTriQuad3d(mesh) => match &mesh.borrow(py).deref().inner { + PyMixedTriQuadMesh3dData::F32(mesh) => mesh.vertices.len(), + PyMixedTriQuadMesh3dData::F64(mesh) => mesh.vertices.len(), + }, + } + } + + /// Number of cells (triangles or quads) in the mesh + #[getter] + pub fn ncells<'py>(&self, py: Python<'py>) -> usize { + match &self.mesh { + PyMesh3dData::Tri3d(mesh) => match &mesh.borrow(py).deref().inner { + PyTriMesh3dData::F32(mesh) => mesh.triangles.len(), + PyTriMesh3dData::F64(mesh) => mesh.triangles.len(), + }, + PyMesh3dData::MixedTriQuad3d(mesh) => match &mesh.borrow(py).deref().inner { + PyMixedTriQuadMesh3dData::F32(mesh) => mesh.cells.len(), + PyMixedTriQuadMesh3dData::F64(mesh) => mesh.cells.len(), + }, + } + } + /// Type of the underlying mesh #[getter] pub fn mesh_type(&self) -> MeshType { @@ -603,6 
+666,78 @@ impl PyMeshWithData { } } + /// Attaches a point attribute to the mesh + /// + /// There has to be exactly one attribute value per vertex in the mesh. + /// As attribute data, the following numpy array types are supported: + /// - 1D array with shape (N,) of `np.uint64` + /// - 1D array with shape (N,) of the mesh scalar type (`np.float32` or `np.float64`) + /// - 2D array with shape (N,3) of the mesh scalar type (`np.float32` or `np.float64`) + /// The data is copied into the mesh object. + pub fn add_point_attribute<'py>( + &mut self, + py: Python<'py>, + name: String, + attribute: Bound<'py, PyUntypedArray>, + ) -> PyResult<()> { + assert_eq!( + attribute.shape()[0], + self.nvertices(py), + "number of attribute values must match number of vertices in the mesh" + ); + + let dtype = self.dtype(py); + let attribute = if dtype.is_equiv_to(&np::dtype::(py)) { + PyMeshAttribute::try_from_generic::(name, attribute)? + } else if dtype.is_equiv_to(&np::dtype::(py)) { + PyMeshAttribute::try_from_generic::(name, attribute)? + } else { + return Err(PyTypeError::new_err( + "unsupported dtype for mesh vertices (expected float32 or float64)", + )); + }; + + self.point_attributes + .push(attribute.into_pyobject(py)?.unbind()); + Ok(()) + } + + /// Attaches a cell attribute to the mesh + /// + /// There has to be exactly one attribute value per cell in the mesh. + /// As attribute data, the following numpy array types are supported: + /// - 1D array with shape (N,) of `np.uint64` + /// - 1D array with shape (N,) of the mesh scalar type (`np.float32` or `np.float64`) + /// - 2D array with shape (N,3) of the mesh scalar type (`np.float32` or `np.float64`) + /// The data is copied into the mesh object. + pub fn add_cell_attribute<'py>( + &mut self, + py: Python<'py>, + name: String, + attribute: Bound<'py, PyUntypedArray>, + ) -> PyResult<()> { + assert_eq!( + attribute.shape()[0], + self.ncells(py), + "number of attribute values must match number of cells in the mesh" + ); + + let dtype = self.dtype(py); + let attribute = if dtype.is_equiv_to(&np::dtype::(py)) { + PyMeshAttribute::try_from_generic::(name, attribute)? + } else if dtype.is_equiv_to(&np::dtype::(py)) { + PyMeshAttribute::try_from_generic::(name, attribute)? 
+ } else { + return Err(PyTypeError::new_err( + "unsupported dtype for mesh vertices (expected float32 or float64)", + )); + }; + + self.cell_attributes + .push(attribute.into_pyobject(py)?.unbind()); + Ok(()) + } + /// Writes the mesh and its attributes to a file using `meshio.write_points_cells` #[pyo3(signature = (path, *, file_format = Some("vtk42")))] pub fn write_to_file<'py>( From c3ee74fd23b6ab54ec5361705725128999b0777c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Tue, 2 Sep 2025 13:21:20 +0200 Subject: [PATCH 46/63] Py: Mesh copy and Mesh to MeshWithData constructor --- pysplashsurf/pysplashsurf/pysplashsurf.pyi | 16 ++++++ pysplashsurf/src/mesh.rs | 57 ++++++++++++++++++++++ 2 files changed, 73 insertions(+) diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index cb2e9f3..9fc3f0b 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -94,10 +94,18 @@ class MeshWithData: r""" The attributes attached to the cells (triangles or quads) of the mesh """ + def __new__(cls, mesh:typing.Union[TriMesh3d, MeshWithData]) -> MeshWithData: + r""" + Wraps an existing mesh object (either `TriMesh3d` or `MixedTriQuadMesh3d`) such that data (point and cell attributes) can be attached to it + """ def copy_mesh(self) -> typing.Union[TriMesh3d, MixedTriQuadMesh3d]: r""" Returns a copy of the contained mesh without associated data and attributes """ + def copy(self) -> MeshWithData: + r""" + Returns a copy (deep copy) of this mesh with its data and attributes + """ def add_point_attribute(self, name:builtins.str, attribute:numpy.typing.NDArray[typing.Any]) -> None: r""" Attaches a point attribute to the mesh @@ -139,6 +147,10 @@ class MixedTriQuadMesh3d: r""" The `Nx3` array of vertex positions of the mesh """ + def copy(self) -> MixedTriQuadMesh3d: + r""" + Returns a copy (deep copy) of this mesh + """ def get_triangles(self) -> numpy.typing.NDArray[numpy.uint64]: r""" Returns a copy of all triangle cells of the mesh as an `Nx3` array of vertex indices @@ -235,6 +247,10 @@ class TriMesh3d: r""" The `Mx3` array of vertex indices per triangle """ + def copy(self) -> TriMesh3d: + r""" + Returns a copy (deep copy) of this mesh + """ def vertex_normals_parallel(self) -> numpy.typing.NDArray[typing.Any]: r""" Computes the vertex normals of the mesh using an area weighted average of the adjacent triangle faces diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index 10ec18f..0be7b95 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -180,6 +180,11 @@ impl PyTriMesh3d { } } + /// Returns a copy (deep copy) of this mesh + pub fn copy(&self) -> Self { + self.clone() + } + /// Computes the vertex normals of the mesh using an area weighted average of the adjacent triangle faces pub fn vertex_normals_parallel<'py>( &self, @@ -265,6 +270,11 @@ impl PyMixedTriQuadMesh3d { } } + /// Returns a copy (deep copy) of this mesh + pub fn copy(&self) -> Self { + self.clone() + } + /// Returns a copy of all triangle cells of the mesh as an `Nx3` array of vertex indices pub fn get_triangles<'py>( &self, @@ -567,6 +577,25 @@ impl PyMeshWithData { #[gen_stub_pymethods] #[pymethods] impl PyMeshWithData { + /// Wraps an existing mesh object (either `TriMesh3d` or `MixedTriQuadMesh3d`) such that data (point and cell attributes) can be attached to it + #[new] + fn py_new<'py>( + #[gen_stub(override_type(type_repr="typing.Union[TriMesh3d, MeshWithData]", imports=()))] + 
mesh: Bound<'py, PyAny>, + ) -> PyResult { + if mesh.is_instance_of::() { + let mesh = mesh.downcast_into::()?; + PyMeshWithData::try_from_pymesh(mesh.py(), mesh.unbind()) + } else if mesh.is_instance_of::() { + let mesh = mesh.downcast_into::()?; + PyMeshWithData::try_from_pymesh(mesh.py(), mesh.unbind()) + } else { + Err(PyTypeError::new_err( + "unsupported mesh type, expected TriMesh3d or MixedTriQuadMesh3d", + )) + } + } + /// Numpy dtype of the underlying scalar type (either ``np.float32`` or ``np.float64``) #[getter] pub fn dtype<'py>(&self, py: Python<'py>) -> Bound<'py, PyArrayDescr> { @@ -666,6 +695,34 @@ impl PyMeshWithData { } } + /// Returns a copy (deep copy) of this mesh with its data and attributes + pub fn copy<'py>(&self, py: Python<'py>) -> PyResult { + Ok(Self { + mesh: match &self.mesh { + PyMesh3dData::Tri3d(mesh) => { + PyMesh3dData::from(mesh.borrow(py).clone().into_pyobject(py)?.unbind()) + } + PyMesh3dData::MixedTriQuad3d(mesh) => { + PyMesh3dData::from(mesh.borrow(py).clone().into_pyobject(py)?.unbind()) + } + }, + point_attributes: self + .point_attributes + .iter() + .map(|attr| -> PyResult> { + Ok(attr.borrow(py).clone().into_pyobject(py)?.unbind()) + }) + .collect::, _>>()?, + cell_attributes: self + .cell_attributes + .iter() + .map(|attr| -> PyResult> { + Ok(attr.borrow(py).clone().into_pyobject(py)?.unbind()) + }) + .collect::, _>>()?, + }) + } + /// Attaches a point attribute to the mesh /// /// There has to be exactly one attribute value per vertex in the mesh. From ea9f7ded2999ef3e4e8d95e18122bb7720376949 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Tue, 2 Sep 2025 13:58:32 +0200 Subject: [PATCH 47/63] Py: Fix tests, fix bugs --- pysplashsurf/README.md | 4 +- pysplashsurf/pysplashsurf/pysplashsurf.pyi | 20 +- pysplashsurf/src/aabb.rs | 2 +- pysplashsurf/src/mesh.rs | 28 +- pysplashsurf/tests/main.rs | 7 - pysplashsurf/tests/test_calling.py | 324 +++++++++++++-------- 6 files changed, 229 insertions(+), 156 deletions(-) delete mode 100644 pysplashsurf/tests/main.rs diff --git a/pysplashsurf/README.md b/pysplashsurf/README.md index 59ceba6..b5fb153 100644 --- a/pysplashsurf/README.md +++ b/pysplashsurf/README.md @@ -48,6 +48,7 @@ import pysplashsurf # Load particles from mesh file mesh = meshio.read("input.vtk") particles = np.array(mesh.points, dtype=np.float64) + # Reconstruct the points/particles with some post-processing mesh_with_data, reconstruction = pysplashsurf.reconstruction_pipeline( particles, @@ -66,7 +67,8 @@ mesh_with_data, reconstruction = pysplashsurf.reconstruction_pipeline( subdomain_num_cubes_per_dim=64, output_mesh_smoothing_weights=True ) -# Write the mesh with attributes to a file using meshio + +# Write the mesh with attributes to file using meshio mesh_with_data.write_to_file("surface.vtk") ``` The `reconstruction_pipeline` method provides (mostly) the same arguments as the splashsurf binary CLI. diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index 9fc3f0b..631057a 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -6,6 +6,8 @@ from __future__ import annotations import builtins import numpy import numpy.typing +import os +import pathlib import typing from enum import Enum @@ -112,9 +114,9 @@ class MeshWithData: There has to be exactly one attribute value per vertex in the mesh. 
As attribute data, the following numpy array types are supported: - - 1D array with shape (N,) of `np.uint64` - - 1D array with shape (N,) of the mesh scalar type (`np.float32` or `np.float64`) - - 2D array with shape (N,3) of the mesh scalar type (`np.float32` or `np.float64`) + - 1D array with shape (N,) of ``np.uint64`` + - 1D array with shape (N,) of the mesh scalar type (``np.float32`` or ``np.float64``) + - 2D array with shape (N,3) of the mesh scalar type (``np.float32`` or ``np.float64``) The data is copied into the mesh object. """ def add_cell_attribute(self, name:builtins.str, attribute:numpy.typing.NDArray[typing.Any]) -> None: @@ -123,12 +125,12 @@ class MeshWithData: There has to be exactly one attribute value per cell in the mesh. As attribute data, the following numpy array types are supported: - - 1D array with shape (N,) of `np.uint64` - - 1D array with shape (N,) of the mesh scalar type (`np.float32` or `np.float64`) - - 2D array with shape (N,3) of the mesh scalar type (`np.float32` or `np.float64`) + - 1D array with shape (N,) of ``np.uint64`` + - 1D array with shape (N,) of the mesh scalar type (``np.float32`` or ``np.float64``) + - 2D array with shape (N,3) of the mesh scalar type (``np.float32`` or ``np.float64``) The data is copied into the mesh object. """ - def write_to_file(self, path:builtins.str, *, file_format:typing.Optional[builtins.str]='vtk42') -> None: + def write_to_file(self, path:builtins.str | os.PathLike | pathlib.Path, *, file_format:typing.Optional[builtins.str]='vtk42') -> None: r""" Writes the mesh and its attributes to a file using `meshio.write_points_cells` """ @@ -159,7 +161,7 @@ class MixedTriQuadMesh3d: r""" Returns a copy of all quad cells of the mesh as an `Nx4` array of vertex indices """ - def write_to_file(self, path:builtins.str, *, file_format:typing.Optional[builtins.str]='vtk42') -> None: + def write_to_file(self, path:builtins.str | os.PathLike | pathlib.Path, *, file_format:typing.Optional[builtins.str]='vtk42') -> None: r""" Writes the mesh to a file using `meshio.write_points_cells` """ @@ -259,7 +261,7 @@ class TriMesh3d: r""" Computes the vertex-vertex connectivity of the mesh """ - def write_to_file(self, path:builtins.str, *, file_format:typing.Optional[builtins.str]='vtk42') -> None: + def write_to_file(self, path:builtins.str | os.PathLike | pathlib.Path, *, file_format:typing.Optional[builtins.str]='vtk42') -> None: r""" Writes the mesh to a file using `meshio.write_points_cells` """ diff --git a/pysplashsurf/src/aabb.rs b/pysplashsurf/src/aabb.rs index e9b2fc1..b650e81 100644 --- a/pysplashsurf/src/aabb.rs +++ b/pysplashsurf/src/aabb.rs @@ -38,7 +38,7 @@ impl PyAabb3d { points: &Bound<'py, PyArray2>, ) -> PyResult { let points = points.try_readonly()?; - let points_vec: &[Vector3] = bytemuck::cast_slice(points.as_slice()?); + let points_vec: &[Vector3] = bytemuck::cast_slice(points.as_slice()?); Ok(Self::from(Aabb3d::par_from_points(points_vec))) } } diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index 0be7b95..be16a08 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -1,3 +1,6 @@ +use crate::NumpyUsize; +use crate::utils; +use crate::utils::{enum_impl_from, enum_wrapper_impl_from}; use bytemuck::{NoUninit, Pod}; use ndarray::Array2; use numpy as np; @@ -6,7 +9,7 @@ use numpy::{Element, PyArray, PyArray1, PyArray2, PyArrayDescr, PyUntypedArray}; use pyo3::IntoPyObjectExt; use pyo3::exceptions::{PyTypeError, PyValueError}; use pyo3::prelude::*; -use pyo3::types::{IntoPyDict, 
PyDict, PyTuple}; +use pyo3::types::{IntoPyDict, PyDict}; use pyo3_stub_gen::derive::*; use splashsurf_lib::mesh::TriangleCell; use splashsurf_lib::{ @@ -18,10 +21,7 @@ use splashsurf_lib::{ nalgebra::{Unit, Vector3}, }; use std::ops::Deref; - -use crate::NumpyUsize; -use crate::utils; -use crate::utils::{enum_impl_from, enum_wrapper_impl_from}; +use std::path::PathBuf; fn view_triangles_generic<'py>( triangles: &[TriangleCell], @@ -209,7 +209,7 @@ impl PyTriMesh3d { #[pyo3(signature = (path, *, file_format = Some("vtk42")))] pub fn write_to_file<'py>( this: Bound<'py, Self>, - path: &str, + path: PathBuf, file_format: Option<&str>, ) -> PyResult<()> { let py = this.py(); @@ -308,7 +308,7 @@ impl PyMixedTriQuadMesh3d { #[pyo3(signature = (path, *, file_format = Some("vtk42")))] pub fn write_to_file<'py>( this: Bound<'py, Self>, - path: &str, + path: PathBuf, file_format: Option<&str>, ) -> PyResult<()> { let py = this.py(); @@ -727,9 +727,9 @@ impl PyMeshWithData { /// /// There has to be exactly one attribute value per vertex in the mesh. /// As attribute data, the following numpy array types are supported: - /// - 1D array with shape (N,) of `np.uint64` - /// - 1D array with shape (N,) of the mesh scalar type (`np.float32` or `np.float64`) - /// - 2D array with shape (N,3) of the mesh scalar type (`np.float32` or `np.float64`) + /// - 1D array with shape (N,) of ``np.uint64`` + /// - 1D array with shape (N,) of the mesh scalar type (``np.float32`` or ``np.float64``) + /// - 2D array with shape (N,3) of the mesh scalar type (``np.float32`` or ``np.float64``) /// The data is copied into the mesh object. pub fn add_point_attribute<'py>( &mut self, @@ -763,9 +763,9 @@ impl PyMeshWithData { /// /// There has to be exactly one attribute value per cell in the mesh. /// As attribute data, the following numpy array types are supported: - /// - 1D array with shape (N,) of `np.uint64` - /// - 1D array with shape (N,) of the mesh scalar type (`np.float32` or `np.float64`) - /// - 2D array with shape (N,3) of the mesh scalar type (`np.float32` or `np.float64`) + /// - 1D array with shape (N,) of ``np.uint64`` + /// - 1D array with shape (N,) of the mesh scalar type (``np.float32`` or ``np.float64``) + /// - 2D array with shape (N,3) of the mesh scalar type (``np.float32`` or ``np.float64``) /// The data is copied into the mesh object. 
pub fn add_cell_attribute<'py>( &mut self, @@ -799,7 +799,7 @@ impl PyMeshWithData { #[pyo3(signature = (path, *, file_format = Some("vtk42")))] pub fn write_to_file<'py>( this: Bound<'py, Self>, - path: &str, + path: PathBuf, file_format: Option<&str>, ) -> PyResult<()> { let py = this.py(); diff --git a/pysplashsurf/tests/main.rs b/pysplashsurf/tests/main.rs deleted file mode 100644 index 31e1bb2..0000000 --- a/pysplashsurf/tests/main.rs +++ /dev/null @@ -1,7 +0,0 @@ -#[cfg(test)] -mod tests { - #[test] - fn it_works() { - assert_eq!(2 + 2, 4); - } -} diff --git a/pysplashsurf/tests/test_calling.py b/pysplashsurf/tests/test_calling.py index b146c10..916b9ba 100644 --- a/pysplashsurf/tests/test_calling.py +++ b/pysplashsurf/tests/test_calling.py @@ -12,200 +12,276 @@ BGEO_PATH = DIR.joinpath("ParticleData_Fluid_50.bgeo") VTK_PATH = DIR.joinpath("ParticleData_Fluid_5.vtk") + def now_s(): - return time.process_time_ns() / (10 ** 9) + return time.process_time_ns() / (10**9) + def test_bgeo(): particles = np.array(meshio.read(BGEO_PATH).points, dtype=np.float32) - - assert(len(particles) == 4732) + + assert len(particles) == 4732 + def test_aabb_class(): print("\nTesting AABB class") - - aabb = pysplashsurf.Aabb3dF64.par_from_points(np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 0.5, 4.2]])) - - assert(aabb.min() == np.array([0.0, 0.0, 0.0])).all() - assert(aabb.max() == np.array([2.0, 1.0, 4.2])).all() - - aabb.join_with_point([3.0, 2.0, 1.0]) - - assert(aabb.min() == np.array([0.0, 0.0, 0.0])).all() - assert(aabb.max() == np.array([3.0, 2.0, 4.2])).all() - - assert(aabb.contains_point([1.0, 1.0, 4.1])) - assert(aabb.contains_point([0.0, 0.0, 0.0])) - assert(not aabb.contains_point([4.0, 2.0, 1.0])) - assert(not aabb.contains_point([1.0, -1.0, 5.0])) + + aabb = pysplashsurf.Aabb3d.from_min_max(min=[0.0, 0.0, 0.0], max=[1.0, 2.0, 3.0]) + assert (aabb.min == np.array([0.0, 0.0, 0.0])).all() + assert (aabb.max == np.array([1.0, 2.0, 3.0])).all() + + aabb = pysplashsurf.Aabb3d.from_min_max(min=np.array([0.0, 0.0, 0.0]), max=np.array([1.0, 2.0, 3.0])) + assert (aabb.min == np.array([0.0, 0.0, 0.0])).all() + assert (aabb.max == np.array([1.0, 2.0, 3.0])).all() + + aabb = pysplashsurf.Aabb3d.from_points( + np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 0.5, 4.2]]) + ) + + print("AABB min:", aabb.min) + print("AABB max:", aabb.max) + + assert (aabb.min == np.array([0.0, 0.0, 0.0])).all() + assert (aabb.max == np.array([2.0, 1.0, 4.2])).all() + + assert aabb.contains_point([1.0, 0.9, 4.1]) + assert aabb.contains_point([0.0, 0.0, 0.0]) + assert not aabb.contains_point([2.0, 1.0, 4.2]) + assert not aabb.contains_point([1.0, -1.0, 5.0]) + def test_marching_cubes_calls(): print("\nTesting marching cubes calls") - + particles = np.array(meshio.read(VTK_PATH).points, dtype=np.float32) - reconstruction = pysplashsurf.reconstruct_surface(particles, particle_radius=0.025, rest_density=1000.0, - smoothing_length=2.0, cube_size=0.5, - iso_surface_threshold=0.6) + reconstruction = pysplashsurf.reconstruct_surface( + particles, + particle_radius=0.025, + rest_density=1000.0, + smoothing_length=2.0, + cube_size=0.5, + iso_surface_threshold=0.6, + ) mesh = reconstruction.mesh - verts_before = len(mesh.get_vertices()) + verts_before = len(mesh.vertices) print("# of vertices before:", verts_before) - - mesh_with_data = pysplashsurf.create_mesh_with_data_object(mesh) + + mesh_with_data = pysplashsurf.MeshWithData(mesh) pysplashsurf.marching_cubes_cleanup(mesh_with_data, reconstruction.grid) - mesh = 
mesh_with_data.take_mesh() - verts_after = len(mesh.get_vertices()) + mesh = mesh_with_data.mesh + verts_after = len(mesh.vertices) print("# of vertices after:", verts_after) - assert(verts_after < verts_before) - -def test_memory_access(): - print("\nTesting memory copy vs take") - - particles = np.array(meshio.read(VTK_PATH).points, dtype=np.float64) - reconstruction = pysplashsurf.reconstruct_surface(particles, particle_radius=0.025, rest_density=1000.0, - smoothing_length=2.0, cube_size=0.5, - iso_surface_threshold=0.6, - aabb_min=np.array([0.0, 0.0, 0.0]), aabb_max=np.array([2.0, 2.0, 2.0])) - mesh = reconstruction.mesh - - start = now_s() - triangles_copy = mesh.get_triangles() - vertices_copy = mesh.get_vertices() - copy_time = now_s() - start - print("Copy time:", copy_time) - - start = now_s() - vertices, triangles = mesh.take_vertices_and_triangles() - take_time = now_s() - start - print("Take time:", take_time) - - print("Copy time / Take time (Speedup):", copy_time / take_time) - - assert(np.allclose(vertices, vertices_copy)) - assert(np.allclose(triangles, triangles_copy)) - -def reconstruction_pipeline(input_file, output_file, *, attributes_to_interpolate=None, multi_threading=True, particle_radius=0.025, - rest_density=1000.0, smoothing_length=2.0, cube_size=0.5, - iso_surface_threshold=0.6, mesh_smoothing_weights=False, output_mesh_smoothing_weights=False, sph_normals=False, - mesh_smoothing_weights_normalization=13.0, mesh_smoothing_iters=5, normals_smoothing_iters=5, - mesh_aabb_min=None, mesh_aabb_max=None, mesh_cleanup=False, decimate_barnacles=False, keep_vertices=False, - compute_normals=False, output_raw_normals=False, output_raw_mesh=False, mesh_aabb_clamp_vertices=False, - check_mesh_closed=False, check_mesh_manifold=False, check_mesh_orientation=False, check_mesh_debug=False, - generate_quads=False, quad_max_edge_diag_ratio=1.75, quad_max_normal_angle=10.0, quad_max_interior_angle=135.0, - subdomain_grid=False, subdomain_num_cubes_per_dim=64): - + assert verts_after < verts_before + + +def reconstruction_pipeline( + input_file, + output_file, + *, + attributes_to_interpolate=None, + multi_threading=True, + particle_radius=0.025, + rest_density=1000.0, + smoothing_length=2.0, + cube_size=0.5, + iso_surface_threshold=0.6, + mesh_smoothing_weights=False, + output_mesh_smoothing_weights=False, + sph_normals=False, + mesh_smoothing_weights_normalization=13.0, + mesh_smoothing_iters=5, + normals_smoothing_iters=5, + mesh_aabb_min=None, + mesh_aabb_max=None, + mesh_cleanup=False, + decimate_barnacles=False, + keep_vertices=False, + compute_normals=False, + output_raw_normals=False, + output_raw_mesh=False, + mesh_aabb_clamp_vertices=False, + check_mesh_closed=False, + check_mesh_manifold=False, + check_mesh_orientation=False, + check_mesh_debug=False, + generate_quads=False, + quad_max_edge_diag_ratio=1.75, + quad_max_normal_angle=10.0, + quad_max_interior_angle=135.0, + subdomain_grid=False, + subdomain_num_cubes_per_dim=64, +): mesh = meshio.read(input_file) particles = np.array(mesh.points, dtype=np.float64) if attributes_to_interpolate is None: attributes_to_interpolate = [] - # Prepare attributes dictionary + # Prepare attributes dictionary attrs = {} for attr in attributes_to_interpolate: if attr in mesh.point_data: - if mesh.point_data[attr].dtype.kind == 'f': + if mesh.point_data[attr].dtype.kind == "f": attrs[attr] = mesh.point_data[attr].astype(np.float64) else: attrs[attr] = mesh.point_data[attr].astype(np.int64) - - mesh_with_data, reconstruction = 
pysplashsurf.reconstruction_pipeline(particles, attributes_to_interpolate=attrs, multi_threading=multi_threading, particle_radius=particle_radius, - rest_density=rest_density, smoothing_length=smoothing_length, cube_size=cube_size, iso_surface_threshold=iso_surface_threshold, - mesh_smoothing_weights=mesh_smoothing_weights, sph_normals=sph_normals, - mesh_smoothing_weights_normalization=mesh_smoothing_weights_normalization, - mesh_smoothing_iters=mesh_smoothing_iters, normals_smoothing_iters=normals_smoothing_iters, - mesh_aabb_min=mesh_aabb_min, mesh_aabb_max=mesh_aabb_max, mesh_cleanup=mesh_cleanup, decimate_barnacles=decimate_barnacles, - keep_vertices=keep_vertices, compute_normals=compute_normals, output_raw_normals=output_raw_normals, output_raw_mesh=output_raw_mesh, - mesh_aabb_clamp_vertices=mesh_aabb_clamp_vertices, subdomain_grid=subdomain_grid, subdomain_num_cubes_per_dim=subdomain_num_cubes_per_dim, output_mesh_smoothing_weights=output_mesh_smoothing_weights, - check_mesh_closed=check_mesh_closed, check_mesh_manifold=check_mesh_manifold, check_mesh_orientation=check_mesh_orientation, check_mesh_debug=check_mesh_debug, - generate_quads=generate_quads, quad_max_edge_diag_ratio=quad_max_edge_diag_ratio, quad_max_normal_angle=quad_max_normal_angle, quad_max_interior_angle=quad_max_interior_angle) - pysplashsurf.write_to_file(mesh_with_data, output_file, consume_object=True) + mesh_with_data, reconstruction = pysplashsurf.reconstruction_pipeline( + particles, + attributes_to_interpolate=attrs, + multi_threading=multi_threading, + particle_radius=particle_radius, + rest_density=rest_density, + smoothing_length=smoothing_length, + cube_size=cube_size, + iso_surface_threshold=iso_surface_threshold, + mesh_smoothing_weights=mesh_smoothing_weights, + sph_normals=sph_normals, + mesh_smoothing_weights_normalization=mesh_smoothing_weights_normalization, + mesh_smoothing_iters=mesh_smoothing_iters, + normals_smoothing_iters=normals_smoothing_iters, + mesh_aabb_min=mesh_aabb_min, + mesh_aabb_max=mesh_aabb_max, + mesh_cleanup=mesh_cleanup, + decimate_barnacles=decimate_barnacles, + keep_vertices=keep_vertices, + compute_normals=compute_normals, + output_raw_normals=output_raw_normals, + output_raw_mesh=output_raw_mesh, + mesh_aabb_clamp_vertices=mesh_aabb_clamp_vertices, + subdomain_grid=subdomain_grid, + subdomain_num_cubes_per_dim=subdomain_num_cubes_per_dim, + output_mesh_smoothing_weights=output_mesh_smoothing_weights, + check_mesh_closed=check_mesh_closed, + check_mesh_manifold=check_mesh_manifold, + check_mesh_orientation=check_mesh_orientation, + check_mesh_debug=check_mesh_debug, + generate_quads=generate_quads, + quad_max_edge_diag_ratio=quad_max_edge_diag_ratio, + quad_max_normal_angle=quad_max_normal_angle, + quad_max_interior_angle=quad_max_interior_angle, + ) + + mesh_with_data.write_to_file(output_file) def test_no_post_processing(): start = now_s() - subprocess.run([BINARY_PATH] + f"reconstruct {VTK_PATH} -o {DIR.joinpath("test_bin.vtk")} -r=0.025 -l=2.0 -c=0.5 -t=0.6 -d=on --subdomain-grid=on --mesh-cleanup=off --mesh-smoothing-weights=off --mesh-smoothing-iters=0 --normals=off --normals-smoothing-iters=0".split(), check=True) + subprocess.run( + [BINARY_PATH] + + f"reconstruct {VTK_PATH} -o {DIR.joinpath('test_bin.vtk')} -r=0.025 -l=2.0 -c=0.5 -t=0.6 -d=on --subdomain-grid=on --mesh-cleanup=off --mesh-smoothing-weights=off --mesh-smoothing-iters=0 --normals=off --normals-smoothing-iters=0".split(), + check=True, + ) print("Binary done in", now_s() - start) - + start = now_s() 
- reconstruction_pipeline(VTK_PATH, DIR.joinpath("test.vtk"), particle_radius=np.float64(0.025), smoothing_length=np.float64(2.0), - cube_size=np.float64(0.5), iso_surface_threshold=np.float64(0.6), mesh_smoothing_weights=False, - mesh_smoothing_iters=0, normals_smoothing_iters=0, mesh_cleanup=False, compute_normals=False, subdomain_grid=True) + reconstruction_pipeline( + VTK_PATH, + DIR.joinpath("test.vtk"), + particle_radius=np.float64(0.025), + smoothing_length=np.float64(2.0), + cube_size=np.float64(0.5), + iso_surface_threshold=np.float64(0.6), + mesh_smoothing_weights=False, + mesh_smoothing_iters=0, + normals_smoothing_iters=0, + mesh_cleanup=False, + compute_normals=False, + subdomain_grid=True, + ) print("Python done in", now_s() - start) - + binary_mesh = meshio.read(DIR.joinpath("test_bin.vtk")) python_mesh = meshio.read(DIR.joinpath("test.vtk")) - + binary_verts = np.array(binary_mesh.points, dtype=np.float64) python_verts = np.array(python_mesh.points, dtype=np.float64) - + print("# of vertices binary:", len(binary_verts)) print("# of vertices python:", len(python_verts)) - - assert(len(binary_verts) == len(python_verts)) - + + assert len(binary_verts) == len(python_verts) + binary_verts.sort(axis=0) python_verts.sort(axis=0) - - assert(np.allclose(binary_verts, python_verts)) - + + assert np.allclose(binary_verts, python_verts) + + def test_with_post_processing(): start = now_s() - subprocess.run([BINARY_PATH] + f"reconstruct {VTK_PATH} -o {DIR.joinpath("test_bin.vtk")} -r=0.025 -l=2.0 -c=0.5 -t=0.6 -d=on --subdomain-grid=on --interpolate_attribute velocity --decimate-barnacles=on --mesh-cleanup=on --mesh-smoothing-weights=on --mesh-smoothing-iters=25 --normals=on --normals-smoothing-iters=10 --output-smoothing-weights=on --generate-quads=off".split(), check=True) + subprocess.run( + [BINARY_PATH] + + f"reconstruct {VTK_PATH} -o {DIR.joinpath('test_bin.vtk')} -r=0.025 -l=2.0 -c=0.5 -t=0.6 -d=on --subdomain-grid=on --interpolate_attribute velocity --decimate-barnacles=on --mesh-cleanup=on --mesh-smoothing-weights=on --mesh-smoothing-iters=25 --normals=on --normals-smoothing-iters=10 --output-smoothing-weights=on --generate-quads=off".split(), + check=True, + ) print("Binary done in", now_s() - start) - + start = now_s() - reconstruction_pipeline(VTK_PATH, DIR.joinpath("test.vtk"), attributes_to_interpolate=["velocity"], particle_radius=np.float64(0.025), smoothing_length=np.float64(2.0), - cube_size=np.float64(0.5), iso_surface_threshold=np.float64(0.6), mesh_smoothing_weights=True, - mesh_smoothing_weights_normalization=np.float64(13.0), mesh_smoothing_iters=25, normals_smoothing_iters=10, - generate_quads=False, mesh_cleanup=True, compute_normals=True, subdomain_grid=True, decimate_barnacles=True, - output_mesh_smoothing_weights=True, output_raw_normals=True) + reconstruction_pipeline( + VTK_PATH, + DIR.joinpath("test.vtk"), + attributes_to_interpolate=["velocity"], + particle_radius=np.float64(0.025), + smoothing_length=np.float64(2.0), + cube_size=np.float64(0.5), + iso_surface_threshold=np.float64(0.6), + mesh_smoothing_weights=True, + mesh_smoothing_weights_normalization=np.float64(13.0), + mesh_smoothing_iters=25, + normals_smoothing_iters=10, + generate_quads=False, + mesh_cleanup=True, + compute_normals=True, + subdomain_grid=True, + decimate_barnacles=True, + output_mesh_smoothing_weights=True, + output_raw_normals=True, + ) print("Python done in", now_s() - start) - + binary_mesh = meshio.read(DIR.joinpath("test_bin.vtk")) python_mesh = 
meshio.read(DIR.joinpath("test.vtk")) - + # Compare number of vertices binary_verts = np.array(binary_mesh.points, dtype=np.float64) python_verts = np.array(python_mesh.points, dtype=np.float64) - + print("# of vertices binary:", len(binary_verts)) print("# of vertices python:", len(python_verts)) - - assert(len(binary_verts) == len(python_verts)) - + + assert len(binary_verts) == len(python_verts) + # Compare interpolated attribute binary_vels = binary_mesh.point_data["velocity"] python_vels = python_mesh.point_data["velocity"] - + binary_vels.sort(axis=0) python_vels.sort(axis=0) - - assert(np.allclose(binary_vels, python_vels)) - + + assert np.allclose(binary_vels, python_vels) + # Trimesh similarity test # TODO: Replace load_mesh call: the function tries to create temporary files which may fail on some CI runners binary_mesh = trimesh.load_mesh(DIR.joinpath("test_bin.vtk"), "vtk") python_mesh = trimesh.load_mesh(DIR.joinpath("test.vtk"), "vtk") - + (_, distance_bin, _) = trimesh.proximity.closest_point(binary_mesh, python_verts) (_, distance_py, _) = trimesh.proximity.closest_point(python_mesh, binary_verts) - distance = (np.sum(distance_bin) + np.sum(distance_py)) / (len(distance_bin) + len(python_verts)) + distance = (np.sum(distance_bin) + np.sum(distance_py)) / ( + len(distance_bin) + len(python_verts) + ) print("Distance:", distance) - assert(distance < 1e-5) - + assert distance < 1e-5 + # Naïve similarity test - + binary_verts.sort(axis=0) python_verts.sort(axis=0) - + print("Binary verts:", binary_verts) print("Python verts:", python_verts) - - assert(np.allclose(binary_verts, python_verts)) - -# test_bgeo() -# test_aabb_class() -# test_marching_cubes_calls() -# test_memory_access() -# test_with_post_processing() + + assert np.allclose(binary_verts, python_verts) + From 24be122ba6fc1cbdaf53e9668328fb87e3a74c69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Tue, 2 Sep 2025 14:02:34 +0200 Subject: [PATCH 48/63] Py: Fix CI stub gen --- .github/workflows/pysplashsurf_CI.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pysplashsurf_CI.yml b/.github/workflows/pysplashsurf_CI.yml index 06596c0..fa5d15f 100644 --- a/.github/workflows/pysplashsurf_CI.yml +++ b/.github/workflows/pysplashsurf_CI.yml @@ -29,7 +29,7 @@ jobs: - uses: actions/checkout@v3 - uses: moonrepo/setup-rust@v1 - run: | - cargo run --bin stub_gen + cargo run --bin stub_gen --no-default-features working-directory: pysplashsurf - name: Upload stub as artifact uses: actions/upload-artifact@v4 From 2ad2c73243559584058d26480dc2c6cc98f6504e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Tue, 2 Sep 2025 14:06:34 +0200 Subject: [PATCH 49/63] Py: Fix splashsurf path in CI --- .github/workflows/pysplashsurf_CI.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pysplashsurf_CI.yml b/.github/workflows/pysplashsurf_CI.yml index fa5d15f..220ea4d 100644 --- a/.github/workflows/pysplashsurf_CI.yml +++ b/.github/workflows/pysplashsurf_CI.yml @@ -426,7 +426,7 @@ jobs: path: dist/ - run: pip install dist/${{ needs.build_wheel_dev.outputs.filename }} - name: Install splashsurf CLI - run: cargo install splashsurf + run: cargo install splashsurf --path ./splashsurf - name: Run pytest uses: pavelzw/pytest-action@v2 with: From b577429f611f74f3d70f6d000f82e4de221fe69b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Tue, 2 Sep 2025 14:16:57 +0200 Subject: [PATCH 50/63] CI: Only publish on main --- 
.github/workflows/build.yml | 2 +- .github/workflows/pysplashsurf_CI.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 96c5fd4..ada02e0 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -86,7 +86,7 @@ jobs: publish: name: Publish to crates.io runs-on: ubuntu-latest - if: ${{ startsWith(github.ref, 'refs/tags/v') || github.event_name == 'workflow_dispatch' }} + if: ${{ (startsWith(github.ref, 'refs/tags/v') || github.event_name == 'workflow_dispatch') && github.ref == 'refs/heads/main' }} needs: [check_format, build_workspace, build_lib_all_features, build_lib_no_default_features] steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/pysplashsurf_CI.yml b/.github/workflows/pysplashsurf_CI.yml index 220ea4d..e93370a 100644 --- a/.github/workflows/pysplashsurf_CI.yml +++ b/.github/workflows/pysplashsurf_CI.yml @@ -316,7 +316,7 @@ jobs: publish: name: Publish to PyPI runs-on: ubuntu-latest - if: ${{ startsWith(github.ref, 'refs/tags/v') || github.event_name == 'workflow_dispatch' }} + if: ${{ (startsWith(github.ref, 'refs/tags/v') || github.event_name == 'workflow_dispatch') && github.ref == 'refs/heads/main' }} needs: [linux_wheels, macos_wheels, windows_wheels, sdist, tests, docs] steps: - uses: actions/download-artifact@v4 From f2a36b53f972787ed1b7a3b07e74bf93fc08e18d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Tue, 2 Sep 2025 14:23:56 +0200 Subject: [PATCH 51/63] Py: Fix build.rs on CI --- pysplashsurf/build.rs | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/pysplashsurf/build.rs b/pysplashsurf/build.rs index b5c01d6..8ab6ad3 100644 --- a/pysplashsurf/build.rs +++ b/pysplashsurf/build.rs @@ -1,11 +1,9 @@ fn main() { + // Required to run cargo check or stub gen outside of maturing build if std::env::var_os("CARGO_CFG_TARGET_OS=macos").is_some() { - println!( - "cargo:rustc-link-arg=-Wl,-rpath,{}", - pyo3_build_config::get() - .lib_dir - .clone() - .expect("Python lib dir not found") - ); + pyo3_build_config::get() + .lib_dir + .clone() + .map(|lib_dir| println!("cargo:rustc-link-arg=-Wl,-rpath,{}", lib_dir)); } } From 02fc0dea166482ed785c087b78b84b7cdd95e7b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Tue, 2 Sep 2025 14:33:32 +0200 Subject: [PATCH 52/63] Py: Documentation fixes --- pysplashsurf/pysplashsurf/docs/source/api.rst | 22 ++++++++++--------- .../pysplashsurf/docs/source/functions.rst | 20 +++++------------ pysplashsurf/pysplashsurf/pysplashsurf.pyi | 16 +++++++------- pysplashsurf/src/mesh.rs | 16 +++++++------- 4 files changed, 33 insertions(+), 41 deletions(-) diff --git a/pysplashsurf/pysplashsurf/docs/source/api.rst b/pysplashsurf/pysplashsurf/docs/source/api.rst index 64e0152..b3f1248 100644 --- a/pysplashsurf/pysplashsurf/docs/source/api.rst +++ b/pysplashsurf/pysplashsurf/docs/source/api.rst @@ -14,32 +14,34 @@ Functions --------- .. autosummary:: + barnacle_decimation check_mesh_consistency convert_tris_to_quads - create_aabb_object - create_aabb_object_from_points - create_mesh_with_data_object - create_sph_interpolator_object - decimation + laplacian_smoothing_normals_parallel + laplacian_smoothing_parallel marching_cubes_cleanup neighborhood_search_spatial_hashing_parallel - par_laplacian_smoothing_inplace - par_laplacian_smoothing_normals_inplace reconstruct_surface reconstruction_pipeline - write_to_file + triangulate_density_map Classes ------- .. 
autosummary:: Aabb3d + MeshAttribute MeshWithData MixedTriQuadMesh3d NeighborhoodLists SphInterpolator SurfaceReconstruction TriMesh3d - TriMeshWithData UniformGrid - VertexVertexConnectivity \ No newline at end of file + VertexVertexConnectivity + +Enums +----- + +.. autosummary:: + MeshType diff --git a/pysplashsurf/pysplashsurf/docs/source/functions.rst b/pysplashsurf/pysplashsurf/docs/source/functions.rst index 766268c..fb972eb 100644 --- a/pysplashsurf/pysplashsurf/docs/source/functions.rst +++ b/pysplashsurf/pysplashsurf/docs/source/functions.rst @@ -5,32 +5,22 @@ All functions infer float precision based on the input (``np.float32`` or ``np.f .. currentmodule:: pysplashsurf +.. autofunction:: barnacle_decimation + .. autofunction:: check_mesh_consistency .. autofunction:: convert_tris_to_quads -.. autofunction:: create_aabb_object - -.. autofunction:: create_aabb_object_from_points - -.. autofunction:: create_mesh_with_data_object - -.. autofunction:: create_sph_interpolator_object +.. autofunction:: laplacian_smoothing_normals_parallel -.. autofunction:: decimation +.. autofunction:: laplacian_smoothing_parallel .. autofunction:: marching_cubes_cleanup .. autofunction:: neighborhood_search_spatial_hashing_parallel -.. autofunction:: par_laplacian_smoothing_inplace - -.. autofunction:: par_laplacian_smoothing_normals_inplace - .. autofunction:: reconstruct_surface .. autofunction:: reconstruction_pipeline -.. autofunction:: reconstruction_pipeline_multi - -.. autofunction:: write_to_file \ No newline at end of file +.. autofunction:: triangulate_density_map diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index 631057a..207362f 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -84,7 +84,7 @@ class MeshWithData: @property def mesh(self) -> typing.Union[TriMesh3d, MixedTriQuadMesh3d]: r""" - The contained mesh without associated data and attributes + The wrapped mesh without associated data and attributes """ @property def point_attributes(self) -> dict[str, numpy.typing.NDArray]: @@ -102,7 +102,7 @@ class MeshWithData: """ def copy_mesh(self) -> typing.Union[TriMesh3d, MixedTriQuadMesh3d]: r""" - Returns a copy of the contained mesh without associated data and attributes + Returns a copy of the wrapped mesh without associated data and attributes """ def copy(self) -> MeshWithData: r""" @@ -132,7 +132,7 @@ class MeshWithData: """ def write_to_file(self, path:builtins.str | os.PathLike | pathlib.Path, *, file_format:typing.Optional[builtins.str]='vtk42') -> None: r""" - Writes the mesh and its attributes to a file using `meshio.write_points_cells` + Writes the mesh and its attributes to a file using ``meshio.write_points_cells`` """ class MixedTriQuadMesh3d: @@ -163,7 +163,7 @@ class MixedTriQuadMesh3d: """ def write_to_file(self, path:builtins.str | os.PathLike | pathlib.Path, *, file_format:typing.Optional[builtins.str]='vtk42') -> None: r""" - Writes the mesh to a file using `meshio.write_points_cells` + Writes the mesh to a file using ``meshio.write_points_cells`` """ class NeighborhoodLists: @@ -263,7 +263,7 @@ class TriMesh3d: """ def write_to_file(self, path:builtins.str | os.PathLike | pathlib.Path, *, file_format:typing.Optional[builtins.str]='vtk42') -> None: r""" - Writes the mesh to a file using `meshio.write_points_cells` + Writes the mesh to a file using ``meshio.write_points_cells`` """ class UniformGrid: @@ -278,16 +278,16 @@ class VertexVertexConnectivity: """ 
def copy_connectivity(self) -> builtins.list[builtins.list[builtins.int]]: r""" - Returns a copy of the contained connectivity data + Returns a copy of the wrapped connectivity data """ def take_connectivity(self) -> builtins.list[builtins.list[builtins.int]]: r""" - Returns the contained connectivity data by moving it out of this object (zero copy) + Returns the wrapped connectivity data by moving it out of this object (zero copy) """ class MeshType(Enum): r""" - Enum specifying the type of mesh contained in a `MeshWithData` + Enum specifying the type of mesh wrapped by a ``MeshWithData`` """ Tri3d = ... r""" diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index be16a08..111e9fc 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -76,12 +76,12 @@ impl PyVertexVertexConnectivity { #[gen_stub_pymethods] #[pymethods] impl PyVertexVertexConnectivity { - /// Returns a copy of the contained connectivity data + /// Returns a copy of the wrapped connectivity data pub fn copy_connectivity(&self) -> Vec> { self.connectivity.clone() } - /// Returns the contained connectivity data by moving it out of this object (zero copy) + /// Returns the wrapped connectivity data by moving it out of this object (zero copy) pub fn take_connectivity(&mut self) -> Vec> { // TODO: Check if this is actually zero-copy with the conversion to Python lists std::mem::take(&mut self.connectivity) @@ -205,7 +205,7 @@ impl PyTriMesh3d { PyVertexVertexConnectivity::new(connectivity) } - /// Writes the mesh to a file using `meshio.write_points_cells` + /// Writes the mesh to a file using ``meshio.write_points_cells`` #[pyo3(signature = (path, *, file_format = Some("vtk42")))] pub fn write_to_file<'py>( this: Bound<'py, Self>, @@ -304,7 +304,7 @@ impl PyMixedTriQuadMesh3d { }) } - /// Writes the mesh to a file using `meshio.write_points_cells` + /// Writes the mesh to a file using ``meshio.write_points_cells`` #[pyo3(signature = (path, *, file_format = Some("vtk42")))] pub fn write_to_file<'py>( this: Bound<'py, Self>, @@ -336,7 +336,7 @@ where Ok(pyarray) } -/// Enum specifying the type of mesh contained in a `MeshWithData` +/// Enum specifying the type of mesh wrapped by a ``MeshWithData`` #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[gen_stub_pyclass_enum] #[pyclass] @@ -644,7 +644,7 @@ impl PyMeshWithData { } } - /// The contained mesh without associated data and attributes + /// The wrapped mesh without associated data and attributes #[getter] #[gen_stub(override_return_type(type_repr="typing.Union[TriMesh3d, MixedTriQuadMesh3d]", imports=()))] pub fn mesh<'py>(&self, py: Python<'py>) -> Py { @@ -686,7 +686,7 @@ impl PyMeshWithData { .into_py_dict(py) } - /// Returns a copy of the contained mesh without associated data and attributes + /// Returns a copy of the wrapped mesh without associated data and attributes #[gen_stub(override_return_type(type_repr="typing.Union[TriMesh3d, MixedTriQuadMesh3d]", imports=()))] pub fn copy_mesh<'py>(&self, py: Python<'py>) -> PyResult> { match &self.mesh { @@ -795,7 +795,7 @@ impl PyMeshWithData { Ok(()) } - /// Writes the mesh and its attributes to a file using `meshio.write_points_cells` + /// Writes the mesh and its attributes to a file using ``meshio.write_points_cells`` #[pyo3(signature = (path, *, file_format = Some("vtk42")))] pub fn write_to_file<'py>( this: Bound<'py, Self>, From 6ad0b1fc9bb15f832ab93fa838a46b546af35c69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Tue, 2 Sep 2025 14:54:02 +0200 Subject: [PATCH 
53/63] Update changelog --- CHANGELOG.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d699165..042da74 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,14 @@ The following changes are present in the `main` branch of the repository and are not yet part of a release: - - N/A + - Py: Major refactor of the Python bindings, interface is simplified and more "pythonic" + - Merged distinct F64/F32 classes and functions and infer data type automatically + - Nearly all inputs and outputs are now zero-copy (e.g. mesh vertices and faces can be accessed as attributes without copies) + - CLI: Add some tests for the `reconstruction_pipeline` function + - CLI: Fix post-processing when particle AABB filtering is enabled + - Lib: Support subdomain "ghost particle" margins to be up to the size of the subdomain itself (previously limited to half the size) + - CLI/Lib: Option to automatically disable subdomain decomposition for very small grids + - Lib: Support for non-owned data in `MeshAttribute`, avoids copies in CLI and Python package ## Version 0.12.0 From 05e8a22d6c8b111b0443aa01fe07501a20421c4a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Tue, 2 Sep 2025 15:00:33 +0200 Subject: [PATCH 54/63] Py: Documentation fixes --- pysplashsurf/pysplashsurf/docs/source/api.rst | 1 - pysplashsurf/pysplashsurf/docs/source/classes.rst | 3 +++ pysplashsurf/pysplashsurf/docs/source/functions.rst | 2 -- pysplashsurf/src/lib.rs | 1 + 4 files changed, 4 insertions(+), 3 deletions(-) diff --git a/pysplashsurf/pysplashsurf/docs/source/api.rst b/pysplashsurf/pysplashsurf/docs/source/api.rst index b3f1248..3d51afe 100644 --- a/pysplashsurf/pysplashsurf/docs/source/api.rst +++ b/pysplashsurf/pysplashsurf/docs/source/api.rst @@ -23,7 +23,6 @@ Functions neighborhood_search_spatial_hashing_parallel reconstruct_surface reconstruction_pipeline - triangulate_density_map Classes ------- diff --git a/pysplashsurf/pysplashsurf/docs/source/classes.rst b/pysplashsurf/pysplashsurf/docs/source/classes.rst index f2def96..1405e5c 100644 --- a/pysplashsurf/pysplashsurf/docs/source/classes.rst +++ b/pysplashsurf/pysplashsurf/docs/source/classes.rst @@ -36,6 +36,9 @@ Helper and return types See `Aabb3d `_ for more information. +.. autoclass:: MeshType + :members: + .. autoclass:: NeighborhoodLists :members: diff --git a/pysplashsurf/pysplashsurf/docs/source/functions.rst b/pysplashsurf/pysplashsurf/docs/source/functions.rst index fb972eb..1fae54f 100644 --- a/pysplashsurf/pysplashsurf/docs/source/functions.rst +++ b/pysplashsurf/pysplashsurf/docs/source/functions.rst @@ -22,5 +22,3 @@ All functions infer float precision based on the input (``np.float32`` or ``np.f .. autofunction:: reconstruct_surface .. autofunction:: reconstruction_pipeline - -.. 
autofunction:: triangulate_density_map diff --git a/pysplashsurf/src/lib.rs b/pysplashsurf/src/lib.rs index af413cd..da7e3ce 100644 --- a/pysplashsurf/src/lib.rs +++ b/pysplashsurf/src/lib.rs @@ -32,6 +32,7 @@ fn pysplashsurf(m: &Bound<'_, PyModule>) -> PyResult<()> { m.add_class::()?; m.add_class::()?; m.add_class::()?; + m.add_class::()?; m.add_class::()?; m.add_class::()?; From 6105a269d259e2f886ad0d32fa379c6f0d4984de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Tue, 2 Sep 2025 15:03:53 +0200 Subject: [PATCH 55/63] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 042da74..71e950c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,8 @@ The following changes are present in the `main` branch of the repository and are - Py: Major refactor of the Python bindings, interface is simplified and more "pythonic" - Merged distinct F64/F32 classes and functions and infer data type automatically - Nearly all inputs and outputs are now zero-copy (e.g. mesh vertices and faces can be accessed as attributes without copies) + - Lib: Enforce that `Index` types are signed integers implementing the `num_traits::Signed` trait. Currently the reconstruction does not work (correctly) with unsigned integers. + - Lib: Make most fields of `SurfaceReconstruction` public - CLI: Add some tests for the `reconstruction_pipeline` function - CLI: Fix post-processing when particle AABB filtering is enabled - Lib: Support subdomain "ghost particle" margins to be up to the size of the subdomain itself (previously limited to half the size) From 481691a829a196fbf471500ded1e8a98470f790e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Tue, 2 Sep 2025 15:09:53 +0200 Subject: [PATCH 56/63] Py: Refactor --- pysplashsurf/pysplashsurf/pysplashsurf.pyi | 5 ++++- pysplashsurf/src/lib.rs | 12 ++++++------ .../src/{post_processing.rs => postprocessing.rs} | 5 ++++- 3 files changed, 14 insertions(+), 8 deletions(-) rename pysplashsurf/src/{post_processing.rs => postprocessing.rs} (94%) diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index 207362f..e4a72ec 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -336,9 +336,12 @@ def laplacian_smoothing_parallel(mesh:typing.Union[TriMesh3d, MeshWithData], ver def marching_cubes_cleanup(mesh:typing.Union[TriMesh3d, MeshWithData], grid:UniformGrid, *, max_rel_snap_dist:typing.Optional[builtins.float]=None, max_iter:builtins.int=5, keep_vertices:builtins.bool=False) -> typing.Union[TriMesh3d, MeshWithData]: r""" - Performs simplification on the given mesh designed for marching cubes reconstructions inspired by the "Compact Contouring"/"Mesh displacement" approach by Doug Moore and Joe Warren + Performs simplification on the given mesh inspired by the "Compact Contouring"/"Mesh displacement" approach by Doug Moore and Joe Warren The simplification is performed inplace and modifies the given mesh. + The method is designed specifically for meshes generated by Marching Cubes. + See Moore and Warren: `Mesh Displacement: An Improved Contouring Method for Trivariate Data `_ (1991) + or Moore and Warren: "Compact Isocontours from Sampled Data" in "Graphics Gems III" (1992). 
""" def neighborhood_search_spatial_hashing_parallel(particle_positions:numpy.typing.NDArray[typing.Any], domain:Aabb3d, search_radius:builtins.float) -> NeighborhoodLists: diff --git a/pysplashsurf/src/lib.rs b/pysplashsurf/src/lib.rs index da7e3ce..1604706 100644 --- a/pysplashsurf/src/lib.rs +++ b/pysplashsurf/src/lib.rs @@ -18,7 +18,7 @@ mod uniform_grid; mod marching_cubes; mod neighborhood_search; mod pipeline; -mod post_processing; +mod postprocessing; mod reconstruction; pub(crate) mod utils; @@ -44,12 +44,12 @@ fn pysplashsurf(m: &Bound<'_, PyModule>) -> PyResult<()> { m.add_function(wrap!(reconstruction::reconstruct_surface, m)?)?; m.add_function(wrap!(marching_cubes::check_mesh_consistency, m)?)?; - m.add_function(wrap!(post_processing::marching_cubes_cleanup, m)?)?; - m.add_function(wrap!(post_processing::convert_tris_to_quads, m)?)?; - m.add_function(wrap!(post_processing::barnacle_decimation, m)?)?; - m.add_function(wrap!(post_processing::laplacian_smoothing_parallel, m)?)?; + m.add_function(wrap!(postprocessing::marching_cubes_cleanup, m)?)?; + m.add_function(wrap!(postprocessing::convert_tris_to_quads, m)?)?; + m.add_function(wrap!(postprocessing::barnacle_decimation, m)?)?; + m.add_function(wrap!(postprocessing::laplacian_smoothing_parallel, m)?)?; m.add_function(wrap!( - post_processing::laplacian_smoothing_normals_parallel, + postprocessing::laplacian_smoothing_normals_parallel, m )?)?; diff --git a/pysplashsurf/src/post_processing.rs b/pysplashsurf/src/postprocessing.rs similarity index 94% rename from pysplashsurf/src/post_processing.rs rename to pysplashsurf/src/postprocessing.rs index d88c70a..9e9d0d8 100644 --- a/pysplashsurf/src/post_processing.rs +++ b/pysplashsurf/src/postprocessing.rs @@ -197,9 +197,12 @@ pub fn barnacle_decimation<'py>( } } -/// Performs simplification on the given mesh designed for marching cubes reconstructions inspired by the "Compact Contouring"/"Mesh displacement" approach by Doug Moore and Joe Warren +/// Performs simplification on the given mesh inspired by the "Compact Contouring"/"Mesh displacement" approach by Doug Moore and Joe Warren /// /// The simplification is performed inplace and modifies the given mesh. +/// The method is designed specifically for meshes generated by Marching Cubes. +/// See Moore and Warren: `Mesh Displacement: An Improved Contouring Method for Trivariate Data `_ (1991) +/// or Moore and Warren: "Compact Isocontours from Sampled Data" in "Graphics Gems III" (1992). 
#[gen_stub_pyfunction] #[pyfunction] #[pyo3(name = "marching_cubes_cleanup")] From 86d79aa3cfe1fe62356cc7704c222be7a0912ba0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Tue, 2 Sep 2025 15:12:51 +0200 Subject: [PATCH 57/63] Py: Refactor tests --- pysplashsurf/tests/test_basic.py | 32 ++++++++++++++++++++++++++ pysplashsurf/tests/test_bgeo.py | 13 +++++++++++ pysplashsurf/tests/test_calling.py | 36 ------------------------------ 3 files changed, 45 insertions(+), 36 deletions(-) create mode 100644 pysplashsurf/tests/test_basic.py create mode 100644 pysplashsurf/tests/test_bgeo.py diff --git a/pysplashsurf/tests/test_basic.py b/pysplashsurf/tests/test_basic.py new file mode 100644 index 0000000..19cdd70 --- /dev/null +++ b/pysplashsurf/tests/test_basic.py @@ -0,0 +1,32 @@ +import pysplashsurf +import numpy as np +import meshio + + +def test_aabb_class(): + print("\nTesting AABB class") + + aabb = pysplashsurf.Aabb3d.from_min_max(min=[0.0, 0.0, 0.0], max=[1.0, 2.0, 3.0]) + assert (aabb.min == np.array([0.0, 0.0, 0.0])).all() + assert (aabb.max == np.array([1.0, 2.0, 3.0])).all() + + aabb = pysplashsurf.Aabb3d.from_min_max( + min=np.array([0.0, 0.0, 0.0]), max=np.array([1.0, 2.0, 3.0]) + ) + assert (aabb.min == np.array([0.0, 0.0, 0.0])).all() + assert (aabb.max == np.array([1.0, 2.0, 3.0])).all() + + aabb = pysplashsurf.Aabb3d.from_points( + np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 0.5, 4.2]]) + ) + + print("AABB min:", aabb.min) + print("AABB max:", aabb.max) + + assert (aabb.min == np.array([0.0, 0.0, 0.0])).all() + assert (aabb.max == np.array([2.0, 1.0, 4.2])).all() + + assert aabb.contains_point([1.0, 0.9, 4.1]) + assert aabb.contains_point([0.0, 0.0, 0.0]) + assert not aabb.contains_point([2.0, 1.0, 4.2]) + assert not aabb.contains_point([1.0, -1.0, 5.0]) diff --git a/pysplashsurf/tests/test_bgeo.py b/pysplashsurf/tests/test_bgeo.py new file mode 100644 index 0000000..a7e2e02 --- /dev/null +++ b/pysplashsurf/tests/test_bgeo.py @@ -0,0 +1,13 @@ +import pysplashsurf +import numpy as np +import meshio +import pathlib + +DIR = pathlib.Path(__file__).parent.resolve() +BGEO_PATH = DIR.joinpath("ParticleData_Fluid_50.bgeo") + + +def test_bgeo(): + particles = np.array(meshio.read(BGEO_PATH).points, dtype=np.float32) + + assert len(particles) == 4732 diff --git a/pysplashsurf/tests/test_calling.py b/pysplashsurf/tests/test_calling.py index 916b9ba..0cc75fe 100644 --- a/pysplashsurf/tests/test_calling.py +++ b/pysplashsurf/tests/test_calling.py @@ -1,6 +1,5 @@ import pysplashsurf import numpy as np -import math import meshio import subprocess import time @@ -9,7 +8,6 @@ BINARY_PATH = "splashsurf" DIR = pathlib.Path(__file__).parent.resolve() -BGEO_PATH = DIR.joinpath("ParticleData_Fluid_50.bgeo") VTK_PATH = DIR.joinpath("ParticleData_Fluid_5.vtk") @@ -17,39 +15,6 @@ def now_s(): return time.process_time_ns() / (10**9) -def test_bgeo(): - particles = np.array(meshio.read(BGEO_PATH).points, dtype=np.float32) - - assert len(particles) == 4732 - - -def test_aabb_class(): - print("\nTesting AABB class") - - aabb = pysplashsurf.Aabb3d.from_min_max(min=[0.0, 0.0, 0.0], max=[1.0, 2.0, 3.0]) - assert (aabb.min == np.array([0.0, 0.0, 0.0])).all() - assert (aabb.max == np.array([1.0, 2.0, 3.0])).all() - - aabb = pysplashsurf.Aabb3d.from_min_max(min=np.array([0.0, 0.0, 0.0]), max=np.array([1.0, 2.0, 3.0])) - assert (aabb.min == np.array([0.0, 0.0, 0.0])).all() - assert (aabb.max == np.array([1.0, 2.0, 3.0])).all() - - aabb = pysplashsurf.Aabb3d.from_points( - 
np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 0.5, 4.2]]) - ) - - print("AABB min:", aabb.min) - print("AABB max:", aabb.max) - - assert (aabb.min == np.array([0.0, 0.0, 0.0])).all() - assert (aabb.max == np.array([2.0, 1.0, 4.2])).all() - - assert aabb.contains_point([1.0, 0.9, 4.1]) - assert aabb.contains_point([0.0, 0.0, 0.0]) - assert not aabb.contains_point([2.0, 1.0, 4.2]) - assert not aabb.contains_point([1.0, -1.0, 5.0]) - - def test_marching_cubes_calls(): print("\nTesting marching cubes calls") @@ -284,4 +249,3 @@ def test_with_post_processing(): print("Python verts:", python_verts) assert np.allclose(binary_verts, python_verts) - From 93b50e4e085c9876dfe269cbfdee961f11c34e59 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Tue, 2 Sep 2025 16:31:51 +0200 Subject: [PATCH 58/63] Py: Methods for UniformGrid, add tests for many functions --- pysplashsurf/pysplashsurf/pysplashsurf.pyi | 23 +- pysplashsurf/src/mesh.rs | 2 +- pysplashsurf/src/uniform_grid.rs | 46 +++- pysplashsurf/tests/test_basic.py | 275 +++++++++++++++++++++ 4 files changed, 339 insertions(+), 7 deletions(-) diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index e4a72ec..2b32897 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -270,7 +270,26 @@ class UniformGrid: r""" Struct containing the parameters of the uniform grid used for the surface reconstruction """ - ... + @property + def aabb(self) -> Aabb3d: + r""" + The AABB of the grid containing all marching cubes vertices influenced by the particle kernels + """ + @property + def cell_size(self) -> builtins.float: + r""" + Returns the cell size of the uniform grid (the marching cubes voxel size) + """ + @property + def npoints_per_dim(self) -> builtins.list[builtins.int]: + r""" + Returns the number of points (marching cubes vertices) per dimension in the uniform grid + """ + @property + def ncells_per_dim(self) -> builtins.list[builtins.int]: + r""" + Returns the number of cells (marching cubes voxels) per dimension in the uniform grid + """ class VertexVertexConnectivity: r""" @@ -362,5 +381,3 @@ def reconstruction_pipeline(particles:numpy.typing.NDArray[typing.Any], *, attri Note that smoothing length and cube size are given in multiples of the particle radius. """ - -def triangulate_density_map(values:numpy.typing.NDArray[typing.Any], grid:UniformGrid, *, iso_surface_threshold:builtins.float) -> TriMesh3d: ... 
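As a usage sketch (not part of the patch series itself) of the new read-only ``UniformGrid`` properties declared in the stub above — ``aabb``, ``cell_size``, ``npoints_per_dim`` and ``ncells_per_dim`` — the following assumes a synthetic random particle array; the ``reconstruct_surface`` call mirrors the one used in ``test_basic.py``, while the printed values and the final assertion are illustrative assumptions only:

    import numpy as np
    import pysplashsurf

    # Synthetic particle data; any contiguous (N, 3) float32/float64 array works
    particles = np.random.rand(1000, 3).astype(np.float32)

    reconstruction = pysplashsurf.reconstruct_surface(
        particles,
        particle_radius=0.025,
        rest_density=1000.0,
        smoothing_length=2.0 * 0.025,
        cube_size=1.0 * 0.025,
        iso_surface_threshold=0.6,
    )

    grid = reconstruction.grid
    print("voxel size:", grid.cell_size)
    print("MC vertices per dim:", grid.npoints_per_dim)
    print("MC voxels per dim:", grid.ncells_per_dim)
    print("grid AABB:", grid.aabb.min, grid.aabb.max)

    # A uniform grid has one more point than cell per dimension
    assert [n + 1 for n in grid.ncells_per_dim] == list(grid.npoints_per_dim)
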
diff --git a/pysplashsurf/src/mesh.rs b/pysplashsurf/src/mesh.rs index 111e9fc..6c33baa 100644 --- a/pysplashsurf/src/mesh.rs +++ b/pysplashsurf/src/mesh.rs @@ -339,7 +339,7 @@ where /// Enum specifying the type of mesh wrapped by a ``MeshWithData`` #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[gen_stub_pyclass_enum] -#[pyclass] +#[pyclass(eq)] pub enum MeshType { /// 3D triangle mesh Tri3d, diff --git a/pysplashsurf/src/uniform_grid.rs b/pysplashsurf/src/uniform_grid.rs index fb8bd78..31403f6 100644 --- a/pysplashsurf/src/uniform_grid.rs +++ b/pysplashsurf/src/uniform_grid.rs @@ -1,11 +1,11 @@ +use crate::aabb::PyAabb3d; +use crate::utils; +use crate::utils::{IndexT, enum_wrapper_impl_from}; use pyo3::exceptions::PyTypeError; use pyo3::prelude::*; use pyo3_stub_gen::derive::*; use splashsurf_lib::{Real, UniformGrid}; -use crate::utils; -use crate::utils::{IndexT, enum_wrapper_impl_from}; - enum PyUniformGridData { F32(UniformGrid), F64(UniformGrid), @@ -45,3 +45,43 @@ impl PyUniformGrid { } } } + +#[gen_stub_pymethods] +#[pymethods] +impl PyUniformGrid { + /// The AABB of the grid containing all marching cubes vertices influenced by the particle kernels + #[getter] + pub fn aabb(&self) -> PyAabb3d { + match &self.inner { + PyUniformGridData::F32(grid) => PyAabb3d::from(grid.aabb().clone()), + PyUniformGridData::F64(grid) => PyAabb3d::from(grid.aabb().clone()), + } + } + + /// Returns the cell size of the uniform grid (the marching cubes voxel size) + #[getter] + pub fn cell_size(&self) -> f64 { + match &self.inner { + PyUniformGridData::F32(grid) => grid.cell_size() as f64, + PyUniformGridData::F64(grid) => grid.cell_size(), + } + } + + /// Returns the number of points (marching cubes vertices) per dimension in the uniform grid + #[getter] + pub fn npoints_per_dim(&self) -> [IndexT; 3] { + match &self.inner { + PyUniformGridData::F32(grid) => grid.points_per_dim().clone(), + PyUniformGridData::F64(grid) => grid.points_per_dim().clone(), + } + } + + /// Returns the number of cells (marching cubes voxels) per dimension in the uniform grid + #[getter] + pub fn ncells_per_dim(&self) -> [IndexT; 3] { + match &self.inner { + PyUniformGridData::F32(grid) => grid.cells_per_dim().clone(), + PyUniformGridData::F64(grid) => grid.cells_per_dim().clone(), + } + } +} diff --git a/pysplashsurf/tests/test_basic.py b/pysplashsurf/tests/test_basic.py index 19cdd70..3f70103 100644 --- a/pysplashsurf/tests/test_basic.py +++ b/pysplashsurf/tests/test_basic.py @@ -1,6 +1,12 @@ import pysplashsurf import numpy as np import meshio +import os.path +import pathlib +import tempfile + +DIR = pathlib.Path(__file__).parent.resolve() +VTK_PATH = DIR.joinpath("ParticleData_Random_1000.vtk") def test_aabb_class(): @@ -30,3 +36,272 @@ def test_aabb_class(): assert aabb.contains_point([0.0, 0.0, 0.0]) assert not aabb.contains_point([2.0, 1.0, 4.2]) assert not aabb.contains_point([1.0, -1.0, 5.0]) + + +def impl_basic_test(dtype): + particles = np.array(meshio.read(VTK_PATH).points, dtype=dtype) + + mesh_with_data, reconstruction = pysplashsurf.reconstruction_pipeline( + particles, + particle_radius=0.025, + rest_density=1000.0, + smoothing_length=2.0, + cube_size=1.0, + iso_surface_threshold=0.6, + mesh_smoothing_iters=5, + output_mesh_smoothing_weights=True, + ) + + assert type(mesh_with_data) is pysplashsurf.MeshWithData + assert type(reconstruction) is pysplashsurf.SurfaceReconstruction + assert type(mesh_with_data.mesh) is pysplashsurf.TriMesh3d + + mesh = mesh_with_data.mesh + + assert mesh_with_data.dtype == 
mesh.dtype + assert mesh_with_data.dtype == dtype + + assert type(mesh_with_data.mesh_type) is pysplashsurf.MeshType + assert mesh_with_data.mesh_type == pysplashsurf.MeshType.Tri3d + + assert mesh.vertices.dtype == dtype + assert mesh.triangles.dtype in [np.uint32, np.uint64] + + assert mesh_with_data.nvertices == len(mesh.vertices) + assert mesh_with_data.ncells == len(mesh.triangles) + + assert mesh_with_data.nvertices in range(21000, 25000) + assert mesh_with_data.ncells in range(45000, 49000) + + assert mesh.vertices.shape == (mesh_with_data.nvertices, 3) + assert mesh.triangles.shape == (mesh_with_data.ncells, 3) + + assert len(mesh_with_data.point_attributes) == 2 + assert len(mesh_with_data.cell_attributes) == 0 + + assert "sw" in mesh_with_data.point_attributes + assert "wnn" in mesh_with_data.point_attributes + + sw = mesh_with_data.point_attributes["sw"] + wnn = mesh_with_data.point_attributes["wnn"] + + assert len(sw) == mesh_with_data.nvertices + assert len(wnn) == mesh_with_data.nvertices + + assert sw.dtype == dtype + assert wnn.dtype == dtype + + assert sw.shape == (mesh_with_data.nvertices,) + assert wnn.shape == (mesh_with_data.nvertices,) + + assert sw.min() >= 0.0 + assert sw.max() <= 1.0 + + assert wnn.min() >= 0.0 + + +def test_pipeline_f32(): + impl_basic_test(np.float32) + + +def test_pipeline_f64(): + impl_basic_test(np.float64) + + +def test_reconstruct(): + particles = np.array(meshio.read(VTK_PATH).points, dtype=np.float32) + + reconstruction = pysplashsurf.reconstruct_surface( + particles, + particle_radius=0.025, + rest_density=1000.0, + smoothing_length=2.0 * 0.025, + cube_size=1.0 * 0.025, + iso_surface_threshold=0.6, + global_neighborhood_list=True, + ) + + assert type(reconstruction) is pysplashsurf.SurfaceReconstruction + assert type(reconstruction.mesh) is pysplashsurf.TriMesh3d + assert type(reconstruction.grid) is pysplashsurf.UniformGrid + assert type(reconstruction.particle_densities) is np.ndarray + assert type(reconstruction.particle_inside_aabb) is type(None) + assert type(reconstruction.particle_neighbors) is pysplashsurf.NeighborhoodLists + + mesh = reconstruction.mesh + + assert mesh.dtype == np.float32 + + assert reconstruction.particle_densities.dtype == np.float32 + assert len(reconstruction.particle_densities) == len(particles) + + assert len(mesh.vertices) in range(25000, 30000) + assert len(mesh.triangles) in range(49000, 53000) + + +def test_neighborhood_search(): + particles = np.array(meshio.read(VTK_PATH).points, dtype=np.float32) + + reconstruction = pysplashsurf.reconstruct_surface( + particles, + particle_radius=0.025, + rest_density=1000.0, + smoothing_length=2.0 * 0.025, + cube_size=1.0 * 0.025, + iso_surface_threshold=0.6, + global_neighborhood_list=True, + ) + + neighbors_reconstruct = reconstruction.particle_neighbors.get_neighborhood_lists() + + assert type(neighbors_reconstruct) is list + assert len(neighbors_reconstruct) == len(particles) + + aabb = reconstruction.grid.aabb + + neighbor_lists = pysplashsurf.neighborhood_search_spatial_hashing_parallel( + particles, domain=aabb, search_radius=4.0 * 0.025 + ) + + assert type(neighbor_lists) is pysplashsurf.NeighborhoodLists + + neighbors = neighbor_lists.get_neighborhood_lists() + + assert type(neighbors) is list + assert len(neighbors) == len(particles) + assert len(neighbors) == len(neighbors_reconstruct) + + # TODO: Compare with naive neighbor search + + +def test_check_consistency(): + particles = np.array(meshio.read(VTK_PATH).points, dtype=np.float32) + + reconstruction 
= pysplashsurf.reconstruct_surface( + particles, + particle_radius=0.025, + rest_density=1000.0, + smoothing_length=2.0 * 0.025, + cube_size=1.0 * 0.025, + iso_surface_threshold=0.6, + global_neighborhood_list=True, + ) + mesh = reconstruction.mesh + + assert pysplashsurf.check_mesh_consistency(mesh, reconstruction.grid) is None + + mesh_with_data, reconstruction = pysplashsurf.reconstruction_pipeline( + particles, + particle_radius=0.025, + rest_density=1000.0, + smoothing_length=2.0, + cube_size=1.0, + iso_surface_threshold=0.6, + mesh_smoothing_iters=5, + output_mesh_smoothing_weights=True, + ) + + assert ( + pysplashsurf.check_mesh_consistency(mesh_with_data, reconstruction.grid) is None + ) + + +def test_tris_to_quads(): + particles = np.array(meshio.read(VTK_PATH).points, dtype=np.float32) + + mesh_with_data, reconstruction = pysplashsurf.reconstruction_pipeline( + particles, + particle_radius=0.025, + rest_density=1000.0, + smoothing_length=2.0, + cube_size=1.0, + iso_surface_threshold=0.6, + mesh_smoothing_iters=5, + output_mesh_smoothing_weights=True, + ) + + mesh_with_data_quads = pysplashsurf.convert_tris_to_quads(mesh_with_data) + + assert type(mesh_with_data_quads.mesh) is pysplashsurf.MixedTriQuadMesh3d + assert mesh_with_data_quads.mesh_type == pysplashsurf.MeshType.MixedTriQuad3d + + assert mesh_with_data_quads.nvertices == mesh_with_data.nvertices + assert mesh_with_data_quads.ncells < mesh_with_data.ncells + + tris = mesh_with_data_quads.mesh.get_triangles() + quads = mesh_with_data_quads.mesh.get_quads() + + assert tris.dtype in [np.uint32, np.uint64] + assert quads.dtype in [np.uint32, np.uint64] + + assert len(tris) + len(quads) == mesh_with_data_quads.ncells + + assert tris.shape == (len(tris), 3) + assert quads.shape == (len(quads), 4) + + assert len(tris) in range(35000, 39000) + assert len(quads) in range(4600, 5000) + + assert len(mesh_with_data.point_attributes) == 2 + assert len(mesh_with_data.cell_attributes) == 0 + + assert "sw" in mesh_with_data.point_attributes + assert "wnn" in mesh_with_data.point_attributes + + +def test_interpolator(): + particles = np.array(meshio.read(VTK_PATH).points, dtype=np.float32) + + mesh_with_data, reconstruction = pysplashsurf.reconstruction_pipeline( + particles, + particle_radius=0.025, + rest_density=1000.0, + smoothing_length=2.0, + cube_size=1.0, + iso_surface_threshold=0.6, + mesh_smoothing_iters=5, + output_mesh_smoothing_weights=True, + ) + + compact_support = 4.0 * 0.025 + rest_mass = 1000.0 * 0.025**3 + + interpolator = pysplashsurf.SphInterpolator( + particles, reconstruction.particle_densities, rest_mass, compact_support + ) + + assert type(interpolator) is pysplashsurf.SphInterpolator + + mesh = mesh_with_data.mesh + mesh_densities = interpolator.interpolate_quantity( + reconstruction.particle_densities, mesh.vertices + ) + + assert type(mesh_densities) is np.ndarray + assert mesh_densities.dtype == np.float32 + assert mesh_densities.shape == (len(mesh.vertices),) + assert mesh_densities.min() >= 0.0 + + mesh_particles = interpolator.interpolate_quantity(particles, mesh.vertices) + + assert type(mesh_particles) is np.ndarray + assert mesh_particles.dtype == np.float32 + assert mesh_particles.shape == (len(mesh.vertices), 3) + + mesh_sph_normals = interpolator.interpolate_normals(mesh.vertices) + + assert type(mesh_sph_normals) is np.ndarray + assert mesh_sph_normals.dtype == np.float32 + assert mesh_sph_normals.shape == (len(mesh.vertices), 3) + + mesh_with_data.add_point_attribute("density", mesh_densities) + 
mesh_with_data.add_point_attribute("position", mesh_particles) + mesh_with_data.add_point_attribute("normal", mesh_sph_normals) + + assert "density" in mesh_with_data.point_attributes + assert "position" in mesh_with_data.point_attributes + assert "normal" in mesh_with_data.point_attributes + + assert np.array_equal(mesh_with_data.point_attributes["density"], mesh_densities) + assert np.array_equal(mesh_with_data.point_attributes["position"], mesh_particles) + assert np.array_equal(mesh_with_data.point_attributes["normal"], mesh_sph_normals) From 13b4aea04dff9de201ce0b879782631bf473910a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Tue, 2 Sep 2025 16:40:43 +0200 Subject: [PATCH 59/63] Add missing test data --- pysplashsurf/tests/ParticleData_Random_1000.vtk | Bin 0 -> 12129 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 pysplashsurf/tests/ParticleData_Random_1000.vtk diff --git a/pysplashsurf/tests/ParticleData_Random_1000.vtk b/pysplashsurf/tests/ParticleData_Random_1000.vtk new file mode 100644 index 0000000000000000000000000000000000000000..94bdbed55ad6db4ce01ad577c5dfc72dc3b08d58 GIT binary patch literal 12129 zcmW++cU({3|F2ZikhCNz35hgR8t?Obwo@uiw9wQZN<|uyl}*`X&m`G1o1)0aijckc z-rx7{_usweJ|6dR?>VpYd_BkMZIqjnV&op36YZIl8gDcretdRPM!J#BAZz8k@ku#3 z@##h}6OGd1vlEjtjB;%U4IX5x?CS0B5)`KF?h@h>>=9xV>K_~u6zUcd8sy% zOwf4x)D*E}{8nnDjArVmXJTPNxTwo)m26t4#?Zct>18{VqYw4ur0GZKTe%gly%gzn z<+S+daF@MX?xO2O8)jXK_q+B`P1NC zEVGtRX{JxLSM~&z+D2V;6^)Y2vCf=U##w4G;Fuw3wJXDRMslpJb6<>$9?;{*-n8Tof ztHrsyoj79Zd~xP7IPnMZ?volPsmZhZO;ZZzXkj!p8VfFIim&}wV`){b_|$J2);I6Q zweDVG>X_5C^0-Y6)r*+%z?cf3mWUIv52=~u#I&ao*r1{{F%aT!yY{VWoX z->l)N8o|LDnpoz&l>wohaZ#>`(er;%tJ#myhmvUJeS!|#-6=ETB6ChYrAgatw04+5 zeaRBZYNyUvktompvc{;GrpBIjuDCN}5tI)dz_Nq6)OcY}UAIhXD*j?W`vL6xUZCya z1ICwW;iy$t-0xq@xKLgC#~h)JwTftRJIRz-JKEiv#^Sx1&{O)!(V`T6R>IfmI5=E|j_uoE zQNNoCFe*8BtP8SN zm@~t4GYtmbXD34^>dKF#;|niT9dNo@MD+(Ffb{j_#lh?9x681k~Cq;Y_Xn045k=C60*xzA^IigIE69A_3zzm3t|!T$9{ zn10ntvTdL@UHf#$hE^Y(35}-C%1;cbU&>zcpE)L>6XvMxrLXTLKsSupFZ87bW#6E&PU;fEU& zwG`;t?;Ex+k`bG`O~#ceFOk;VfR5X5itrn`bT~0r;^Ziyhx-NSOdKTEw@nlWrd;Ac zm0%RO%*M@tPwc$w5F`Dx>1enZ<%ua!+>j<>u2^72N^eZJ3CD}st{n7g2CcP9v9Ny( zPEBioL-7WB-|Hz7TczXaN%R_3NiTT|l>OHU^>LZhNqvjiy8>{mJq6wumgCiUCt-Ia zmcI3=V)e?Al)oB{)jC6{*?NZ)R;RL$yRPVR%LZ*x`zW^{kDXlKa!|%G%DWo*{=gG4T!yXE>uJqa1zjPG|3FkMXOM9}B&|ifPB~IJ8s^i?@D9o?RCE zFMo=HZ+~bu;W*{fL(yt%#m+JcSoA%GHV?IMEzuAMm!~nrxQ(fe>xE=|f27-jf-^Hyb!OYHDhYW>-w5-@7GCFF}N&N()4<=*w zw3r@zl<`gmNCn(KaKsWIa;xXnY%0L zY;DBBy5Y>bw3I~))$lBIpjc>Pj>;E$=plC>*H4^-V&;B!-;#&Z|56xlFUP!-aa5Jr zj5+&uQg>A{;}@CW3d5-uDudZNUGc)=Cz^I9plGZfWlM6g=A{J3|E5tfE|1QA|6%*d zBBoaEVC0Dx!YgAErrgcIcdqQpQu{DRGdUH4Q_S(DsQX0&kX~=2K^y_jF{m z_Dre_nv4a<!|~7OIa!&GW1pHD>h1BT&Vh~0&F?8Gx67loZ#4t_ zK)KRG*#1Y#H*!kE`F?(Q7*tFJ{pGaU(n}mR_C=CI1FmYS(r|%btomK{Oo@Zib|V&a z^y3)waLUvTXKK%XR9^aysZk9K$?3gUwq^E_=V zOdO9K<5lc%Kp#h=W%1(ZPgFPDVE%YdC~XWA=Un<>SBo-r{mN-t--wIxg2h*MVefz- z+Sll_w|x}@Onf=<#T&*pl+o>QD6LNA^=sG?UC0_o>+oz4YAu?20H-{e2n>is< zRlN5&DDIg2q0UzcaW#=)MlsA-w3`Jdui{eOWt=)P47w4fv>17proHAevesCfw-}7S zZL6tuvH^MFM_{h?kCAGHBI8sb-hXirZyd+sWrvlVbngmoy;o*LYPoptW`gnubJ2Y4 zH7@Nn=Yac-s4aRzyF+R0z3wC~jlPY$>f31$SS#j_AIhLvWIxYUbofxgPT39Oq04`` zJoXwc&o#ovG4+x!;W3mPtrP7xc2a*x2ihohq4AUmPACr(|IPi)sJ}ZT?~Ed;^gaO1 zKNP5F`4WfUj)7b6ACgsDswlrHk0VbwpkAjx;_Q;e#y8(Y&4D7s>^mVYT^ooQd%VSr ziDSg{h+B-Xd5OMqcc|_aMo(D_yr?OIf$9>u)Dj1Js#f>n>Xn*`t*0&J9K1Td?;l1x`odIuopaH-!Wv~MA5KM9v0VUam=l1@zi-3!e++9DNPH_VQIwqB~*Nrh#$549n}qW(Aw-G zN=*w8>O7L^yT>4MVHhKh%G0kr6~^D3sIUHBylyw9dZ(8gYzz 
za)(BH9MLr68?KM4;_$_WsPi0xQ8QMGY^Mf}b@OFPpEmJTb`f?sn_zIoQSm0yoC#4o zMB?{eERgLX&d%(}DAi-KiEZ6ZGax)0 z>L;!sv1PBAE9Xj^ZOLei?0~AiDID_cDXJBm={Ve;0kzLD`jH$b`dXl`OMg^I`>TBq zSJXc3i7$=iRM;8F@M%)qtZPGb%ySw}_$?lfQo+l=df24&4C_^6@lAUu9k2fub9ZfE zw8j)ldYpvKQ5R;Wwo-O=4U0eJ!2543mEIjk#mYFWJi39p0bQ8# zV5rLrLP5G!vV=Um`{GFlA4krDT1QxS%|T9a=sx)9Ead zje}?&W=P%hG7^)AgYl`Vh_+cjg^gh)qfLXTVA5IgYv2RKU)seS6J2U|eJm5IlY8F!^}X3G4LLP z`I%W7CYV>U1&H`SE5)7SzS3*VDCx(ERn6?Z=Qp zv*%f1aeN#Hr+6``NtTK}yRa|vi})HW<{A)cTkwJgIayRo z-Grsnk4gEiI%b+&V2ErACtGTB=$u`YZ<{V=1b)Pac6+qPYGe4eGq}}WfCB?n&~H^M z9{jq9>2nWIVbep=rNcjt`+I=_f1If{;XA{-^kVLq>EdD7RQ4*eX7S`%ocz?E-LuD{ zgYGkG`x-fQ^uK{&yONo6rV!gKO4&PY7Uo}S;rNXc=vew6wqEE&pW1eDciJOdxYrRe zGh}I@R3}<4N8qC8cg)??iLzC@v8PWXZEglj8bVLf*!Me9yFb9J3McyBoy(lHy|BLP zJqB-^Dz5$LD}7!?XSXddJXnL{pJ9gTd}dw!3*{zT+Q*J!>eE`9^_z}cE0$u|nV~q^ z=P+%)%w=D%cBaj5Wz@^%40z|lq>x*fJGl#vSzF=MlpLy0+|2=9!m;Sm3`tdnltU!6 zWA(SO%o?n~*!U5Q6jHy(>@R9PcTjaoECcG6iiXb;`e{O#R+}(T<~#k5mWY5i$~2n0 zk}_-0!2F3Bbl-2L$LHbH2nk@prt2)ok@9oHH1YBEZf07|!=%g!c)8*dU7o}vpj4J! zen@q@XT7*l{7}5zKNJUg!5rb7-b?6;0bU#jbr?q9j%U^WuGAKGvUkUGmVq zO^^Q84b)K?NR6Y7?4Wyy`DgQR*!wQM_qMWgzmsTOP|Td=$m0@21&`5YS}wXKyrM_H zo|KvOm)>rh$)Z#a>*s{!Z$HsbwzqVStnhK<6%M?vCaL+JC|YJ0pmLd~bPfd6^~$i# z?Ue{LP{6iTn%F3Nnd75Bb5x`PPXDNuG(In2$Z;uG?)DTt6f#)!cZO*4vSidRdo0=B z8TX!U=CBRQG|#eRrtSk8c2E-+0_qqOpGEg)V`(Q$M5OLMG~V5Z0Xe=5N*u<)p|=>L zF_w8*dGrizW6^YV8br?*-L&l_NyV1fWhxkSS%pzYWI5s6Wif4ZqOe;qm`RsDW0i$A zJKu6+!IV-obzDlb+&O4+)}i~>M0V^JfNTeMls{jOGNTbx^6Wz!d243N%h77retP|O z!N8y2Q9rFW{<|YXi~Ul6uecvpdTU|-knS9w@r9agAH@o@A7U+wa9l5#?hK%%-+bC4 zlIdpJz1l%}7qQL8+ko};HRtJ*;5cprn#do{YO zQWiT_G*RAY7ovMxU`}Qs2k&T+d>e6tV;0`Uqeb;n9ob4{=Suc*jYI816>(W(JT}NJ zz(DhCb{>5k>#uF*1h>b`bCh~4Cl;}%mLDg(oMo((>PxaIs5)xIH-pEk4ay zx<(zpt8UQLMjo#_pJPTs4P%Nwaq!+|{5SI%SdicHK_s;Bz9*U-J)o#y4!vEigUyv?#X;DzAGCOyeClZ7;ycNz~*Nd3DPTG+U(J3d|rXNU9W zp?*1L8jX zZNWihFrlb`1qYr*Rf12{RX`+-t7EpJMM&}Gc}~IxastW1+%_#tU-lXYAwO9F&{Z( z_b*Dml`yf341G`kLY1{U{#oDR;H^$jI~5DhP1Espm^GH|sb=JrUDVO>KwTLb=4VKM z)iM@WSa8$}Q(POo8FKSAIMN0p=gdLu8Bx!a>`$EV`67G&R-xrES7HA66g%wwO_hK1 zP}6Idm@~N;$q6TMz`Gjv8sAda^9SwUZo&Gso3J%?on&3WXwfe<0cEnb6eV>uYBi@v zmnU?;`HT@mWoe&qi<8#oGC?niI)nA3J*PydMl5G>RVEaVv{B56XV4%u)XU1kd+`#{ z=(?8NxCzfr+0bskEt5x?LZ`bbQ~Voo^~7GR8uo=*sWVu(G7Q7EkE3bd`Qmo>A?&-} zkHL*iv~cpp_3GU?)5{YLUL&dVErQWC@l3lpR7AM6!{L{Mcyj$NJbsj6cWwn`tJT@d zWDK3ssu_DPoP{<|*}wKHM`dR2TRGwJCHl^^7FCD$(7%hTm>IhjS?lf5<7@_e zT(jAc+c@CUJ-ly{u0hRecIEy|Rm&$v?k(L%ha{KsxpvvFt6XLjyoiR<~J(WE?@ z)^d*GxaoPAmCG{hc^`Vj2V+6^y*Sxl`k!4i8JJc|1CJ)eGzSS=mw9Mwy2gQ4!x*CL z%W&^&LeW_fljQxdb9FOZ+@xA-{zbayZ=n3}fh<_p6`Ez*Oc=gcye;!aKRta~JEe;D z1Wk6i6$~sh5F1Zf zQSW6JhHlA0_AX7jsjXpLCu91L?kDb?*d)=)jDve{Fv=&((ct(1oT<;DP1niTdk*N? 
zYY*0Td&rnrO-_gzi;yCFdbf7L?9ZbatKS*@%Nl7mIZRaNe&NJGO%9IS$B|Pc)X)Gv z8xE)8;9cw(u@dit`_MHd6n2vWVBvKbce@v0qeEA*w0RjvS=iHK#%Zb`lauy!5EZ*d z(cL{0Go$z5=)r#&TYipG%>vCmJ2_Hi1p7zz6IrJsIeb+KjrK0*xa!wrwL8@}RdGbW zFPIm(f~xYbuq5Oo^V)!MYh@)RZwj#a#(2tXoFI7~7DIKjryO(TA8nFCXgKLMa}K@W zgusgo?$C?+72omWsFx&sz%}X*{w3W5?F`K5ikjFX(61Z9p$9dH@%yFxI+|m?>|kd; z7njp};@jK=tX;K%9n5X8B7Yqt*QRq+pBAhu+f2P8GqgExXX0Uh@Xjn+uL)s(av25} zq{07+53QEordRHI{OV8k&`L&FLkd1CUJy^FeZamBtotvJEpJ8dV1i>EK$*!j~}4!P5netumgH>+;aUMZWw zV_Hbd5_aexfR|GaQFU`2ZDMCK)x8G|q}gJucMlGpwn)tW^no&VURYE!5{u-rv0u^~ z>rA##%dmkKGNqWcd>Z82i$vywWN8ji4lH{v(Jgz*v?-44RN6@#3!TW34K*y-V~!^^ zzr~_m<7sEZE_p_YfXc`VPEw5n#IiA7{)3?6UX43BNu# z&~}*yxA!q&%1~H^E#z1=ZOOx#_t{_N6Ruf>(q~~h?bl}E%az?Ux#mfk92*X=--%Br zhf?9qT$H#=*!{5{R_ctT=7mQvKhYh%huy>Vve&phQwOs=)e4UO?;pLwaSm(7(qGy1$8Lr~Ff#v}6n#Z|!CGZokBo^Q+Ku>jv$=oIy^W zR5!2LMT?eBSS+;Ze%C;34;sdakYjc3i9FBC}g1?mpxNty<@99hEuk?U+Rat0(8RZZBqM3IpOix{s z+-rJ)_49ABTfQ59m}Jmum>UCcTH&X*F*+3vW7w%v)U;kmkvf|;anGc9a~Mmlr_pBK zZya%Wi^gnEr1p|Qt#NmbKfM^07h6&A_$#%C&P17k4?d1r&CofEk@8j@xAT12!@Dz8 zYDRG4j9d;HSWl7GlX>N7(sgvezR#n?;BC5)tnG~P4{pNg`7*jVl(Cb=I-0#yWl*Qf zNDYw1=KE7~yt^UFYZ_ zVPHpwDU7H3forr|>cfJF9NdxIVf5Ter?-`hgwhQi>h@TIs$5?bN$fDsZ#Fh4?5E?Z zKx*v2CdGC)hDh;$WMc)c`3l_J-Ir;Gr`bPgq}X`z7nLua!G6z5PQFks8FTmzdmMN~ z2S#D>tEHUiyoiy?%b1+#AXz{LsluG{ zJAPg42}QS16vYyA4|WlM6jks>s~?j~1317oR8*wdI_*`vkGYv6aH#GPJzF)HH+Lt_ zW!TW7@fC}9KPDc_()ezmsCu#=nuN#yJ=?IPb|ZUBeIo6K{Y;s~k$L*?ky) z+zK})YA~sf4`rSWX3uvM8H%IqGAmOY_BUohuRIjgzoPAiR> zTj{j?oM;&0j3e2n7+0nus^7|>e@+IC^heV$Lysets4`=64m9GPq3qHo+-`GlOl`~) z^L{pA+?x=54f=C3Be@KKv+9={w1?SC_Nm?4(6pT+TX39}m1>6oWV`JqxD z@oRVS+haA}wA6@?eHxJAunZa=TW|;+a5}+><4apG!>JG-9$Itw{sj!U6hLR4A}k*L z5r0x&12ryie$|ZoeJd!mqNUo^kuiU&8L~f9$mZ;$#?%+o|L|9Oo}y85Jp33teoaAO zovnyy+CZInsYlfiOV6lE$#=s9T#4`!cEb;&upt>9rPB~O>MC}weT#XWjxl%SPwY+K z$GGQ(aEOum0A(*lfyV^Aey8lTcfo6^pD~2l!JUQrrFv#`nMY539~?L;)!fy_Sh`CI zE%QyqIqA5mc|Xa=!c2Vkn8HC%wlPSu4tnJhx~OWmE0PX#q4AG9J^z=bWdNF z%q|^(lhchkX;BgfrR;#oZ5h1!Zv^uu8&T=u21&!lME2Y!JsZ{bL^Nj9quEEEoqvsy z%wG1B>IS1_xA} z$A`kt9DJ!nx?fJheSj7FmpL=+eJHZ*XGlG^02;eSF=wg?rcoINKmVetIT&S{1L;zK zkxo0+I3P10QHu&0Sv7_^58A1u8^pe17-h^OuyD{MESmBjYxB3lE!~0%kMkK({|m1p z{^6)nG&9F#B6i<>s_lIv#yCuXTVpA^uPc>)!#!#pHR0$rgIP4ZnQn8k@z!W6Lnj{> zU+3=NpqJMuzqe{J{{$MILy2VMe$OQDe`42}%3?YWiLF*`K{!tN0 zdwG9mMZKZY@of5qEySh}ZR}Q9jwbbexEec0{Fj?X`A0j%Ip;HsJ)a{A1HRxX!m#jk zDta!_<-SswJbprtAKk^#KRUE1Jk23~qZqmWE#=g$@mgyt^EB>q z*mFm!EB(VZ&y5@vU4|EfzKCNjNAdiH9mhBX;mI^1N$FP09_4;4dV7F@Lu}ZygFlmX zR2ks;82_Yrq#JHWCp%e=+7&C(eWK|ZHV01(2eQ9eH@Y4FL**WqU>tZ5EjvrZ>Wrnd z%HbU%5P*>UyF+QHeu;_Q188 z&6Mk@N$;iO@Fc*CdEp)$*5M6<7kprjjWN#arc~a1X(k(3Uv)~NAUb%v2e~sz4uMa*?+zbEb+=&g&VDNH77nNB^H<_z=^U)O?9UO> zEU-_iA%cU)!{lr^$IS`FqR#;s()^gke>P%`f1LQ2*MQe4#kdz!z%F{gVFMxL2TkEH9^n$s+vrdd-tUmr3hopC_*GNbDga7?a_yv)`f`=>FYPY-eY=^#3P7(~n6wT!x(FRl;jhgD_zv^u6E**iv+POoDav2ik<>*nL$ z*h0)(JlSbexhZfijFWDSm7X`s#msD9<{l)Dx?jVxs2=DsaU~Nj|K{W!<=C=5pEg?Z z*irpR+*F7`L+5W)8(b@Xy|-iFq+tBr9F0slDc7u**0rVzwj7V<;KiSDr%NPrFR9Zb zQkp?$uazu}8$i=w2l!OUh#xB(nLhIwi+^uL`Jj7PF(nbEAKUOUbQ`0#yyxJ`CmglS z0!iPx(ed8^rkE_i0_}6OY_3L=@pn`#Tf%$?psu8|Xloyab6?Ex#7>va3m!4sypn-t zo0%Ro0H2(+Y2T+8);QIRc~fg~#4wE3+ZSWbN)JX()1yzOEVq0t`gT0h6vGE$V zoj-+LoAc?Xp(}KEs*0rs>!_tY4i|&mXe@sYGt*WxQCiooO**3d_)HG1QDVN`TxPlD zQZjUh_|x+cH7B0I!s(xxKc<~ScJ^V92xD4(@IgD8aQtRCGZrkswvXYW|G{@?a(qpn z;Cl9zQ)F(yf1NG6H)cA0AnR8I;{ojK@_-Q<q^*P=|8$={=o0C0)ZDNaCoFNtBPqQ9+oKRE>6_*|Oo^VSvSalaFQhj08<2Xkf)uZ}`D#u#)5$_JoLiUbt z(z@D1IrF{LuPYY%hvd*9f0KwSd(Lq+y`bE2H^a*MW6$BM^!@Y&hoAnzEVYNCzQqFb z51zw^_(lc}b;Yw Date: Tue, 2 Sep 2025 19:35:01 +0200 Subject: [PATCH 60/63] Py: Add plain 
marching cubes functionality --- pysplashsurf/pysplashsurf/pysplashsurf.pyi | 5 ++ pysplashsurf/src/lib.rs | 1 + pysplashsurf/src/marching_cubes.rs | 72 ++++++++++++++++++++-- pysplashsurf/tests/test_basic.py | 2 + pysplashsurf/tests/test_sdf.py | 33 ++++++++++ splashsurf_lib/src/density_map.rs | 20 +++++- 6 files changed, 128 insertions(+), 5 deletions(-) create mode 100644 pysplashsurf/tests/test_sdf.py diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index 2b32897..4aaa4b2 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -353,6 +353,11 @@ def laplacian_smoothing_parallel(mesh:typing.Union[TriMesh3d, MeshWithData], ver The smoothing is performed inplace and modifies the vertices of the given mesh. """ +def marching_cubes(values:numpy.typing.NDArray[typing.Any], *, cube_size:builtins.float, iso_surface_threshold:builtins.float, translation:typing.Optional[typing.Sequence[builtins.float]]=None) -> tuple[TriMesh3d, UniformGrid]: + r""" + Performs a standard marching cubes triangulation of a 3D array of values + """ + def marching_cubes_cleanup(mesh:typing.Union[TriMesh3d, MeshWithData], grid:UniformGrid, *, max_rel_snap_dist:typing.Optional[builtins.float]=None, max_iter:builtins.int=5, keep_vertices:builtins.bool=False) -> typing.Union[TriMesh3d, MeshWithData]: r""" Performs simplification on the given mesh inspired by the "Compact Contouring"/"Mesh displacement" approach by Doug Moore and Joe Warren diff --git a/pysplashsurf/src/lib.rs b/pysplashsurf/src/lib.rs index 1604706..02183d1 100644 --- a/pysplashsurf/src/lib.rs +++ b/pysplashsurf/src/lib.rs @@ -44,6 +44,7 @@ fn pysplashsurf(m: &Bound<'_, PyModule>) -> PyResult<()> { m.add_function(wrap!(reconstruction::reconstruct_surface, m)?)?; m.add_function(wrap!(marching_cubes::check_mesh_consistency, m)?)?; + m.add_function(wrap!(marching_cubes::marching_cubes, m)?)?; m.add_function(wrap!(postprocessing::marching_cubes_cleanup, m)?)?; m.add_function(wrap!(postprocessing::convert_tris_to_quads, m)?)?; m.add_function(wrap!(postprocessing::barnacle_decimation, m)?)?; diff --git a/pysplashsurf/src/marching_cubes.rs b/pysplashsurf/src/marching_cubes.rs index 22af5ea..a687631 100644 --- a/pysplashsurf/src/marching_cubes.rs +++ b/pysplashsurf/src/marching_cubes.rs @@ -1,10 +1,14 @@ -use numpy::PyUntypedArray; +use numpy::prelude::*; +use numpy::{Element, PyArray3, PyUntypedArray}; use pyo3::prelude::*; use pyo3_stub_gen::derive::*; +use splashsurf_lib::nalgebra::Vector3; +use splashsurf_lib::{DensityMap, Real, UniformGrid}; use crate::mesh::{PyTriMesh3d, get_triangle_mesh_generic}; use crate::uniform_grid::PyUniformGrid; -use crate::utils::*; +use crate::utils; +use crate::utils::IndexT; /// Checks the consistency of a reconstructed surface mesh (watertightness, manifoldness), optionally returns a string with details if problems are found #[gen_stub_pyfunction] @@ -22,7 +26,7 @@ pub fn check_mesh_consistency<'py>( let py = mesh.py(); // Try to extract the triangle mesh; - let mesh = get_triangle_mesh_generic(&mesh).ok_or_else(pyerr_only_triangle_mesh)?; + let mesh = get_triangle_mesh_generic(&mesh).ok_or_else(utils::pyerr_only_triangle_mesh)?; let mesh = mesh.borrow(py); if let (Some(grid), Some(mesh)) = (grid.as_f32(), mesh.as_f32()) { @@ -44,6 +48,66 @@ pub fn check_mesh_consistency<'py>( ) .err()) } else { - Err(pyerr_scalar_type_mismatch()) + Err(utils::pyerr_scalar_type_mismatch()) + } +} + +/// Performs a standard marching cubes 
triangulation of a 3D array of values +#[gen_stub_pyfunction] +#[pyfunction] +#[pyo3(name = "marching_cubes")] +#[pyo3(signature = (values, *, cube_size, iso_surface_threshold, translation = None))] +pub fn marching_cubes<'py>( + values: &Bound<'py, PyUntypedArray>, + cube_size: f64, + iso_surface_threshold: f64, + translation: Option<[f64; 3]>, +) -> PyResult<(PyTriMesh3d, PyUniformGrid)> { + assert_eq!(values.shape().len(), 3, "values must be a 3D array"); + + fn triangulate_density_map_generic<'py, R: Real + Element>( + values: &Bound<'py, PyArray3>, + cube_size: R, + iso_surface_threshold: R, + translation: Option<[R; 3]>, + ) -> PyResult<(PyTriMesh3d, PyUniformGrid)> { + let shape = values.shape(); + let translation = Vector3::from(translation.unwrap_or([R::zero(); 3])); + let n_cells_per_dim = [ + shape[0] as IndexT - 1, + shape[1] as IndexT - 1, + shape[2] as IndexT - 1, + ]; + + let grid = UniformGrid::new(&translation, &n_cells_per_dim, cube_size) + .map_err(anyhow::Error::from)?; + + // TODO: Replace with borrow + let values = values.try_readonly()?.as_slice()?.to_vec(); + let density_map = DensityMap::from(values); + + let mesh = splashsurf_lib::marching_cubes::triangulate_density_map( + &grid, + &density_map, + iso_surface_threshold, + ) + .map_err(anyhow::Error::from)?; + Ok(( + PyTriMesh3d::try_from_generic(mesh)?, + PyUniformGrid::try_from_generic(grid)?, + )) + } + + if let Ok(values) = values.downcast::>() { + triangulate_density_map_generic( + &values, + cube_size as f32, + iso_surface_threshold as f32, + translation.map(|t| t.map(|t| t as f32)), + ) + } else if let Ok(values) = values.downcast::>() { + triangulate_density_map_generic(&values, cube_size, iso_surface_threshold, translation) + } else { + Err(utils::pyerr_unsupported_scalar()) } } diff --git a/pysplashsurf/tests/test_basic.py b/pysplashsurf/tests/test_basic.py index 3f70103..9fc1809 100644 --- a/pysplashsurf/tests/test_basic.py +++ b/pysplashsurf/tests/test_basic.py @@ -205,6 +205,8 @@ def test_check_consistency(): pysplashsurf.check_mesh_consistency(mesh_with_data, reconstruction.grid) is None ) + # TODO: Delete some triangles and check for failure + def test_tris_to_quads(): particles = np.array(meshio.read(VTK_PATH).points, dtype=np.float32) diff --git a/pysplashsurf/tests/test_sdf.py b/pysplashsurf/tests/test_sdf.py new file mode 100644 index 0000000..a53dce9 --- /dev/null +++ b/pysplashsurf/tests/test_sdf.py @@ -0,0 +1,33 @@ +import pysplashsurf +import numpy as np + + +def test_sphere_sdf_mc(): + radius = 1.0 + num_verts = 100 + + grid_size = radius * 2.2 + dx = grid_size / (num_verts - 1) + + translation = -0.5 * grid_size + + def make_sdf(): + coords = np.arange(num_verts, dtype=np.float32) * dx + translation + x, y, z = np.meshgrid(coords, coords, coords, indexing="ij") + sdf = np.sqrt(x**2 + y**2 + z**2) - radius + return sdf + + sdf = make_sdf() + + # Note: Currently this reconstruction assumes that inside the surface values get bigger (like a density function) + mesh, grid = pysplashsurf.marching_cubes( + sdf, cube_size=dx, iso_surface_threshold=0.0, translation=[translation] * 3 + ) + + assert len(mesh.vertices) > 0 + + norms = np.linalg.norm(mesh.vertices, axis=1) + assert norms.min() > radius - 1e-4 + assert norms.max() < radius + 1e-4 + + assert pysplashsurf.check_mesh_consistency(mesh, grid) is None diff --git a/splashsurf_lib/src/density_map.rs b/splashsurf_lib/src/density_map.rs index 41b9265..f9f24a8 100644 --- a/splashsurf_lib/src/density_map.rs +++ b/splashsurf_lib/src/density_map.rs @@ 
-217,11 +217,12 @@ pub fn parallel_compute_particle_densities( /// A sparse density map /// /// The density map contains values for all points of the background grid where the density is not -/// trivially zero (which is the case when a point is outside of the compact support of any particles). +/// trivially zero (which is the case when a point is outside the compact support of any particles). #[derive(Clone, Debug)] pub enum DensityMap { Standard(MapType), DashMap(ReadDashMap), + Dense(Vec), } impl Default for DensityMap { @@ -242,12 +243,24 @@ impl From> for DensityMap { } } +impl From> for DensityMap { + fn from(values: Vec) -> Self { + Self::Dense(values) + } +} + impl DensityMap { /// Converts the contained map into a vector of tuples of (flat_point_index, density) pub fn to_vec(&self) -> Vec<(I, R)> { match self { DensityMap::Standard(map) => map.iter().map(|(&i, &r)| (i, r)).collect(), DensityMap::DashMap(map) => map.iter().map(|(&i, &r)| (i, r)).collect(), + DensityMap::Dense(values) => values + .iter() + .copied() + .enumerate() + .map(|(i, r)| (I::from_usize(i).unwrap(), r)) + .collect(), } } @@ -256,6 +269,7 @@ impl DensityMap { match self { DensityMap::Standard(map) => map.len(), DensityMap::DashMap(map) => map.len(), + DensityMap::Dense(values) => values.len(), } } @@ -264,6 +278,7 @@ impl DensityMap { match self { DensityMap::Standard(map) => map.get(&flat_point_index).copied(), DensityMap::DashMap(map) => map.get(&flat_point_index).copied(), + DensityMap::Dense(values) => values.get(flat_point_index.to_usize()?).copied(), } } @@ -273,6 +288,9 @@ impl DensityMap { match self { DensityMap::Standard(map) => map.iter().for_each(|(&i, &r)| f(i, r)), DensityMap::DashMap(map) => map.iter().for_each(|(&i, &r)| f(i, r)), + DensityMap::Dense(values) => values.iter().copied().enumerate().for_each(|(i, r)| { + f(I::from_usize(i).unwrap(), r); + }), } } } From cc75e5c77687d9585432cde329e11f20b8d701a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Tue, 2 Sep 2025 20:52:23 +0200 Subject: [PATCH 61/63] Support borrowed data for DensityMap --- pysplashsurf/pysplashsurf/docs/source/api.rst | 1 + .../pysplashsurf/docs/source/functions.rst | 2 + pysplashsurf/pysplashsurf/pysplashsurf.pyi | 21 ++++++- pysplashsurf/src/marching_cubes.rs | 63 ++++++++++++++----- pysplashsurf/tests/test_sdf.py | 2 +- splashsurf_lib/src/density_map.rs | 32 ++++++---- 6 files changed, 94 insertions(+), 27 deletions(-) diff --git a/pysplashsurf/pysplashsurf/docs/source/api.rst b/pysplashsurf/pysplashsurf/docs/source/api.rst index 3d51afe..f3c4655 100644 --- a/pysplashsurf/pysplashsurf/docs/source/api.rst +++ b/pysplashsurf/pysplashsurf/docs/source/api.rst @@ -19,6 +19,7 @@ Functions convert_tris_to_quads laplacian_smoothing_normals_parallel laplacian_smoothing_parallel + marching_cubes marching_cubes_cleanup neighborhood_search_spatial_hashing_parallel reconstruct_surface diff --git a/pysplashsurf/pysplashsurf/docs/source/functions.rst b/pysplashsurf/pysplashsurf/docs/source/functions.rst index 1fae54f..411811a 100644 --- a/pysplashsurf/pysplashsurf/docs/source/functions.rst +++ b/pysplashsurf/pysplashsurf/docs/source/functions.rst @@ -15,6 +15,8 @@ All functions infer float precision based on the input (``np.float32`` or ``np.f .. autofunction:: laplacian_smoothing_parallel +.. autofunction:: marching_cubes + .. autofunction:: marching_cubes_cleanup .. 
autofunction:: neighborhood_search_spatial_hashing_parallel diff --git a/pysplashsurf/pysplashsurf/pysplashsurf.pyi b/pysplashsurf/pysplashsurf/pysplashsurf.pyi index 4aaa4b2..55a6e50 100644 --- a/pysplashsurf/pysplashsurf/pysplashsurf.pyi +++ b/pysplashsurf/pysplashsurf/pysplashsurf.pyi @@ -353,9 +353,28 @@ def laplacian_smoothing_parallel(mesh:typing.Union[TriMesh3d, MeshWithData], ver The smoothing is performed inplace and modifies the vertices of the given mesh. """ -def marching_cubes(values:numpy.typing.NDArray[typing.Any], *, cube_size:builtins.float, iso_surface_threshold:builtins.float, translation:typing.Optional[typing.Sequence[builtins.float]]=None) -> tuple[TriMesh3d, UniformGrid]: +def marching_cubes(values:numpy.typing.NDArray[typing.Any], *, iso_surface_threshold:builtins.float, cube_size:builtins.float, translation:typing.Optional[typing.Sequence[builtins.float]]=None, return_grid:builtins.bool=False) -> typing.Union[TriMesh3d, tuple[TriMesh3d, UniformGrid]]: r""" Performs a standard marching cubes triangulation of a 3D array of values + + The array of values has to be a contiguous array with shape ``(nx, ny, nz)``. + The iso-surface threshold defines which value is considered to be "on" the surface. + The cube size and translation parameters define the scaling and translation of the resulting + mesh. Without translation, the value ``values[0, 0, 0]`` is located at coordinates ``(0, 0, 0)``. + + The values are interpreted as a "density field", meaning that values higher than the iso-surface + threshold are considered to be "inside" the surface and values lower than the threshold are + considered to be "outside" the surface. This is the opposite convention to an SDF (signed distance field). + However, even if values of an SDF are provided as an input, the marching cubes algorithm + will still work and produce a watertight surface mesh (if the surface is fully contained in the + array). + + If ``return_grid`` is set to ``True``, the function will return a tuple of the mesh and the + uniform grid that was used for the triangulation. This can be used for other functions such as + :py:func:`check_mesh_consistency`. Otherwise, only the mesh is returned. + + The function is currently single-threaded. The SPH surface reconstruction functions :py:func:`reconstruction_pipeline` + and :py:func:`reconstruct_surface` improve performance by processing multiple patches in parallel. """ def marching_cubes_cleanup(mesh:typing.Union[TriMesh3d, MeshWithData], grid:UniformGrid, *, max_rel_snap_dist:typing.Optional[builtins.float]=None, max_iter:builtins.int=5, keep_vertices:builtins.bool=False) -> typing.Union[TriMesh3d, MeshWithData]: diff --git a/pysplashsurf/src/marching_cubes.rs b/pysplashsurf/src/marching_cubes.rs index a687631..9af30f2 100644 --- a/pysplashsurf/src/marching_cubes.rs +++ b/pysplashsurf/src/marching_cubes.rs @@ -1,5 +1,6 @@ use numpy::prelude::*; use numpy::{Element, PyArray3, PyUntypedArray}; +use pyo3::IntoPyObjectExt; use pyo3::prelude::*; use pyo3_stub_gen::derive::*; use splashsurf_lib::nalgebra::Vector3; @@ -53,24 +54,47 @@ pub fn check_mesh_consistency<'py>( } /// Performs a standard marching cubes triangulation of a 3D array of values +/// +/// The array of values has to be a contiguous array with shape ``(nx, ny, nz)``. +/// The iso-surface threshold defines which value is considered to be "on" the surface. +/// The cube size and translation parameters define the scaling and translation of the resulting +/// mesh. 
Without translation, the value ``values[0, 0, 0]`` is located at coordinates ``(0, 0, 0)``. +/// +/// The values are interpreted as a "density field", meaning that values higher than the iso-surface +/// threshold are considered to be "inside" the surface and values lower than the threshold are +/// considered to be "outside" the surface. This is the opposite convention to an SDF (signed distance field). +/// However, even if values of an SDF are provided as an input, the marching cubes algorithm +/// will still work and produce a watertight surface mesh (if the surface is fully contained in the +/// array). +/// +/// If ``return_grid`` is set to ``True``, the function will return a tuple of the mesh and the +/// uniform grid that was used for the triangulation. This can be used for other functions such as +/// :py:func:`check_mesh_consistency`. Otherwise, only the mesh is returned. +/// +/// The function is currently single-threaded. The SPH surface reconstruction functions :py:func:`reconstruction_pipeline` +/// and :py:func:`reconstruct_surface` improve performance by processing multiple patches in parallel. #[gen_stub_pyfunction] #[pyfunction] #[pyo3(name = "marching_cubes")] -#[pyo3(signature = (values, *, cube_size, iso_surface_threshold, translation = None))] +#[pyo3(signature = (values, *, iso_surface_threshold, cube_size, translation = None, return_grid = false))] +#[gen_stub(override_return_type(type_repr="typing.Union[TriMesh3d, tuple[TriMesh3d, UniformGrid]]", imports=()))] pub fn marching_cubes<'py>( values: &Bound<'py, PyUntypedArray>, - cube_size: f64, iso_surface_threshold: f64, + cube_size: f64, translation: Option<[f64; 3]>, -) -> PyResult<(PyTriMesh3d, PyUniformGrid)> { + return_grid: bool, +) -> PyResult> { assert_eq!(values.shape().len(), 3, "values must be a 3D array"); fn triangulate_density_map_generic<'py, R: Real + Element>( values: &Bound<'py, PyArray3>, - cube_size: R, iso_surface_threshold: R, + cube_size: R, translation: Option<[R; 3]>, - ) -> PyResult<(PyTriMesh3d, PyUniformGrid)> { + return_grid: bool, + ) -> PyResult> { + let py = values.py(); let shape = values.shape(); let translation = Vector3::from(translation.unwrap_or([R::zero(); 3])); let n_cells_per_dim = [ @@ -82,9 +106,8 @@ pub fn marching_cubes<'py>( let grid = UniformGrid::new(&translation, &n_cells_per_dim, cube_size) .map_err(anyhow::Error::from)?; - // TODO: Replace with borrow - let values = values.try_readonly()?.as_slice()?.to_vec(); - let density_map = DensityMap::from(values); + let values = values.try_readonly()?; + let density_map = DensityMap::from(values.as_slice()?); let mesh = splashsurf_lib::marching_cubes::triangulate_density_map( &grid, @@ -92,21 +115,33 @@ pub fn marching_cubes<'py>( iso_surface_threshold, ) .map_err(anyhow::Error::from)?; - Ok(( - PyTriMesh3d::try_from_generic(mesh)?, - PyUniformGrid::try_from_generic(grid)?, - )) + + let mesh = PyTriMesh3d::try_from_generic(mesh)?; + let grid = PyUniformGrid::try_from_generic(grid)?; + + if return_grid { + (mesh, grid).into_py_any(py) + } else { + mesh.into_py_any(py) + } } if let Ok(values) = values.downcast::>() { triangulate_density_map_generic( &values, - cube_size as f32, iso_surface_threshold as f32, + cube_size as f32, translation.map(|t| t.map(|t| t as f32)), + return_grid, ) } else if let Ok(values) = values.downcast::>() { - triangulate_density_map_generic(&values, cube_size, iso_surface_threshold, translation) + triangulate_density_map_generic( + &values, + iso_surface_threshold, + cube_size, + translation, + 
return_grid, + ) } else { Err(utils::pyerr_unsupported_scalar()) } diff --git a/pysplashsurf/tests/test_sdf.py b/pysplashsurf/tests/test_sdf.py index a53dce9..5df83ec 100644 --- a/pysplashsurf/tests/test_sdf.py +++ b/pysplashsurf/tests/test_sdf.py @@ -21,7 +21,7 @@ def make_sdf(): # Note: Currently this reconstruction assumes that inside the surface values get bigger (like a density function) mesh, grid = pysplashsurf.marching_cubes( - sdf, cube_size=dx, iso_surface_threshold=0.0, translation=[translation] * 3 + sdf, iso_surface_threshold=0.0, cube_size=dx, translation=[translation] * 3, return_grid=True ) assert len(mesh.vertices) > 0 diff --git a/splashsurf_lib/src/density_map.rs b/splashsurf_lib/src/density_map.rs index f9f24a8..4d2ec8d 100644 --- a/splashsurf_lib/src/density_map.rs +++ b/splashsurf_lib/src/density_map.rs @@ -12,7 +12,7 @@ //! In case of a sparse density map, the values are stored in a hashmap. The keys are so called //! "flat point indices". These are computed from the background grid point coordinates `(i,j,k)` //! analogous to multidimensional array index flattening. That means for a grid with dimensions -//! `[n_x, n_y, n_z]`, the flat point index is given by the expression `i*n_x + j*n_y + k*n_z`. +//! `[n_x, n_y, n_z]`, the flat point index is given by the expression `i*n_y*n_z + j*n_z + k`. //! For these point index operations, the [`UniformGrid`] is used. //! //! Note that all density mapping functions always use the global background grid for flat point @@ -29,6 +29,7 @@ use dashmap::ReadOnlyView as ReadDashMap; use log::{info, trace, warn}; use nalgebra::Vector3; use rayon::prelude::*; +use std::borrow::Cow; use std::cell::RefCell; use thiserror::Error as ThisError; use thread_local::ThreadLocal; @@ -219,37 +220,46 @@ pub fn parallel_compute_particle_densities( /// The density map contains values for all points of the background grid where the density is not /// trivially zero (which is the case when a point is outside the compact support of any particles). 
#[derive(Clone, Debug)] -pub enum DensityMap<I: Index, R: Real> { +pub enum DensityMap<'a, I: Index, R: Real> { Standard(MapType<I, R>), DashMap(ReadDashMap<I, R>), - Dense(Vec<R>), + Dense(Cow<'a, [R]>), } -impl<I: Index, R: Real> Default for DensityMap<I, R> { +/// Owned version of [`DensityMap`] (with static lifetime) +pub type OwnedDensityMap<I, R> = DensityMap<'static, I, R>; + +impl<I: Index, R: Real> Default for OwnedDensityMap<I, R> { fn default() -> Self { DensityMap::Standard(MapType::default()) } } -impl<I: Index, R: Real> From<MapType<I, R>> for DensityMap<I, R> { +impl<I: Index, R: Real> From<MapType<I, R>> for OwnedDensityMap<I, R> { fn from(map: MapType<I, R>) -> Self { Self::Standard(map) } } -impl<I: Index, R: Real> From<ParallelMapType<I, R>> for DensityMap<I, R> { +impl<I: Index, R: Real> From<ParallelMapType<I, R>> for OwnedDensityMap<I, R> { fn from(map: ParallelMapType<I, R>) -> Self { Self::DashMap(map.into_read_only()) } } -impl<I: Index, R: Real> From<Vec<R>> for DensityMap<I, R> { +impl<I: Index, R: Real> From<Vec<R>> for DensityMap<'static, I, R> { fn from(values: Vec<R>) -> Self { - Self::Dense(values) + Self::Dense(values.into()) + } +} + +impl<'a, I: Index, R: Real> From<&'a [R]> for DensityMap<'a, I, R> { + fn from(values: &'a [R]) -> Self { + Self::Dense(values.into()) } } -impl<I: Index, R: Real> DensityMap<I, R> { +impl<'a, I: Index, R: Real> DensityMap<'a, I, R> { /// Converts the contained map into a vector of tuples of (flat_point_index, density) pub fn to_vec(&self) -> Vec<(I, R)> { match self { @@ -357,7 +367,7 @@ pub fn sequential_generate_sparse_density_map<I: Index, R: Real>( particle_rest_mass: R, compact_support_radius: R, cube_size: R, -) -> Result<DensityMap<I, R>, DensityMapError<R>> { +) -> Result<OwnedDensityMap<I, R>, DensityMapError<R>> { profile!("sequential_generate_sparse_density_map"); let mut sparse_densities = new_map(); @@ -404,7 +414,7 @@ pub fn parallel_generate_sparse_density_map<I: Index, R: Real>( particle_rest_mass: R, compact_support_radius: R, cube_size: R, -) -> Result<DensityMap<I, R>, DensityMapError<R>> { +) -> Result<OwnedDensityMap<I, R>, DensityMapError<R>> { profile!("parallel_generate_sparse_density_map"); // Each thread will write to its own local density map From fd747d6357cdff3108d64b761b7bed44550abb85 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Tue, 2 Sep 2025 21:16:05 +0200 Subject: [PATCH 62/63] Update comment --- splashsurf_lib/src/density_map.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/splashsurf_lib/src/density_map.rs b/splashsurf_lib/src/density_map.rs index 4d2ec8d..9b82a70 100644 --- a/splashsurf_lib/src/density_map.rs +++ b/splashsurf_lib/src/density_map.rs @@ -3,7 +3,6 @@ //! This module provides functions for the computation of per-particle densities and the discretization //! of the resulting fluid density field by mapping onto a discrete background grid. //! -//! Currently, only sparse density maps are implemented. //! //! ## Sparse density maps //! The [`DensityMap`] stores fluid density values for each point of an implicit background grid //! In case of a sparse density map, the values are stored in a hashmap. The keys are so called //! "flat point indices". These are computed from the background grid point coordinates `(i,j,k)` //! analogous to multidimensional array index flattening. That means for a grid with dimensions //! `[n_x, n_y, n_z]`, the flat point index is given by the expression `i*n_y*n_z + j*n_z + k`. //! For these point index operations, the [`UniformGrid`] is used. //! +//! ## Dense density maps +//! For some applications, it might be desirable to allocate the storage for all grid points +//! in a contiguous array. This is supported by the [`DensityMap::Dense`] variant. The values +//! can either be borrowed (a slice) or owned (a vector). Background grid coordinates are mapped +//! to indices in this array (and vice versa) using the same flattening scheme as for the sparse maps. +//! //! Note that all density mapping functions always use the global background grid for flat point //! indices, even if the density map is only generated for a smaller subdomain. 
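Editor's note: the flattening convention documented in the comment above, `i*n_y*n_z + j*n_z + k`, is plain C-order (row-major) indexing, which is also numpy's default memory layout; this is why a contiguous ``(nx, ny, nz)`` array can back the dense density map without reshuffling. A minimal sketch in Python (plain numpy only, no splashsurf APIs; the grid dimensions are arbitrary example values):

```python
import numpy as np

# Example grid dimensions (n_x, n_y, n_z) -- arbitrary values for illustration
n = (4, 5, 6)

# Flat point index as documented in density_map.rs: i*n_y*n_z + j*n_z + k
def flat_index(i, j, k, dims):
    _, n_y, n_z = dims
    return i * n_y * n_z + j * n_z + k

# This is exactly C-order (row-major) flattening, i.e. numpy's default layout
for (i, j, k) in [(0, 0, 0), (1, 2, 3), (3, 4, 5)]:
    assert flat_index(i, j, k, n) == np.ravel_multi_index((i, j, k), n)

# Consequently, a contiguous values array viewed as a flat slice stores the
# density of grid point (i, j, k) at position flat_index(i, j, k, n).
```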
From fb7eedbadf5e36dae9489ac21a57a2d5a0f6f39b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20L=C3=B6schner?= Date: Tue, 2 Sep 2025 21:19:56 +0200 Subject: [PATCH 63/63] Update changelog --- CHANGELOG.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 71e950c..398534d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,9 @@ The following changes are present in the `main` branch of the repository and are - Py: Major refactor of the Python bindings, interface is simplified and more "pythonic" - Merged distinct F64/F32 classes and functions and infer data type automatically - Nearly all inputs and outputs are now zero-copy (e.g. mesh vertices and faces can be accessed as attributes without copies) - - Lib: Enforce that `Index` types are signed integers implementing the `num_traits::Signed` trait. Currently the reconstruction does not work (correctly) with unsigned integers. + - Py: Add a function for a plain marching cubes reconstruction without any SPH interpolation + - Lib: Add support for "dense" density maps (borrowed & owned) as input for the marching cubes triangulation, useful for the Python bindings + - Lib: Enforce that `Index` types are signed integers implementing the `num_traits::Signed` trait. Currently, the reconstruction does not work (correctly) with unsigned integers. - Lib: Make most fields of `SurfaceReconstruction` public - CLI: Add some tests for the `reconstruction_pipeline` function - CLI: Fix post-processing when particle AABB filtering is enabled
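Editor's note: to illustrate the reordered keyword arguments and the new ``return_grid`` flag added in this series, here is a hedged usage sketch in the spirit of the updated ``test_sdf.py``. The sphere field, spacing, threshold, and translation are made-up example values, only the ``mesh.vertices`` attribute shown in the patches above is accessed, and the snippet assumes ``pysplashsurf`` is installed with the interface from the stub diff:

```python
import numpy as np
import pysplashsurf

# Sample a sphere SDF on a regular grid (arbitrary example data)
dx = 0.1
coords = np.arange(-1.0, 1.0 + 0.5 * dx, dx)
x, y, z = np.meshgrid(coords, coords, coords, indexing="ij")
sdf = np.sqrt(x**2 + y**2 + z**2) - 0.75

# Negate the SDF so that values increase towards the inside ("density field" convention)
values = np.ascontiguousarray(-sdf)

# Default: only the triangle mesh is returned
mesh = pysplashsurf.marching_cubes(
    values, iso_surface_threshold=0.0, cube_size=dx, translation=[-1.0, -1.0, -1.0]
)
assert len(mesh.vertices) > 0

# With return_grid=True, the background grid is returned as well,
# e.g. to pass on to a mesh consistency check afterwards
mesh, grid = pysplashsurf.marching_cubes(
    values, iso_surface_threshold=0.0, cube_size=dx,
    translation=[-1.0, -1.0, -1.0], return_grid=True,
)
```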