|
227 | 227 | "#| export\n",
|
228 | 228 | "def drr_to_mesh(\n",
|
229 | 229 | " drr: DRR,\n",
|
| 230 | + " method: str, # Either `surface_nets` or `marching_cubes`\n", |
230 | 231 | " threshold: float = 300, # Min value for marching cubes (Hounsfield units)\n",
|
231 | 232 | " verbose: bool = True, # Display progress bars for mesh processing steps\n",
|
232 | 233 | "):\n",
|
233 | 234 | " \"\"\"\n",
|
234 | 235 | " Convert the CT in a DRR object into a mesh.\n",
|
235 | 236 | "\n",
|
236 | | - " Mesh processing steps:\n", |
| 237 | + " If using marching cubes, mesh processing steps are:\n", |
237 | 238 | "\n",
|
238 | 239 | " 1. Keep only largest connected components\n",
|
239 | 240 | " 2. Smooth\n",
|
|
247 | 248 | " spacing=drr.spacing,\n",
|
248 | 249 | " origin=(0, 0, 0),\n",
|
249 | 250 | " )\n",
|
250 | | - " mesh = grid.contour(\n", |
251 | | - " isosurfaces=1,\n", |
252 | | - " scalars=drr.volume.cpu().numpy().flatten(order=\"F\"),\n", |
253 | | - " rng=[threshold, torch.inf],\n", |
254 | | - " method=\"marching_cubes\",\n", |
255 | | - " progress_bar=verbose,\n", |
256 | | - " )\n", |
257 | 251 | "\n",
|
258 | | - " # Process the mesh\n", |
259 | | - " mesh.extract_largest(inplace=True, progress_bar=verbose)\n", |
260 | | - " mesh.point_data.clear()\n", |
261 | | - " mesh.cell_data.clear()\n", |
262 | | - " mesh.smooth_taubin(\n", |
263 | | - " n_iter=100,\n", |
264 | | - " feature_angle=120.0,\n", |
265 | | - " boundary_smoothing=False,\n", |
266 | | - " feature_smoothing=False,\n", |
267 | | - " non_manifold_smoothing=True,\n", |
268 | | - " normalize_coordinates=True,\n", |
269 | | - " inplace=True,\n", |
270 | | - " progress_bar=verbose,\n", |
271 | | - " )\n", |
272 | | - " mesh.decimate_pro(0.25, inplace=True, progress_bar=verbose)\n", |
273 | | - " mesh.fill_holes(100, inplace=True, progress_bar=verbose)\n", |
274 | | - " mesh.clean(inplace=True, progress_bar=verbose)\n", |
| 252 | + " if method == \"marching_cubes\":\n", |
| 253 | + " mesh = grid.contour(\n", |
| 254 | + " isosurfaces=1,\n", |
| 255 | + " scalars=drr.volume.cpu().numpy().flatten(order=\"F\"),\n", |
| 256 | + " rng=[threshold, torch.inf],\n", |
| 257 | + " method=\"marching_cubes\",\n", |
| 258 | + " progress_bar=verbose,\n", |
| 259 | + " )\n", |
| 260 | + " \n", |
| 261 | + " # Process the mesh\n", |
| 262 | + " mesh.extract_largest(inplace=True, progress_bar=verbose)\n", |
| 263 | + " mesh.point_data.clear()\n", |
| 264 | + " mesh.cell_data.clear()\n", |
| 265 | + " mesh.smooth_taubin(\n", |
| 266 | + " n_iter=100,\n", |
| 267 | + " feature_angle=120.0,\n", |
| 268 | + " boundary_smoothing=False,\n", |
| 269 | + " feature_smoothing=False,\n", |
| 270 | + " non_manifold_smoothing=True,\n", |
| 271 | + " normalize_coordinates=True,\n", |
| 272 | + " inplace=True,\n", |
| 273 | + " progress_bar=verbose,\n", |
| 274 | + " )\n", |
| 275 | + " mesh.decimate_pro(0.25, inplace=True, progress_bar=verbose)\n", |
| 276 | + " mesh.fill_holes(100, inplace=True, progress_bar=verbose)\n", |
| 277 | + " mesh.clean(inplace=True, progress_bar=verbose)\n", |
| 278 | + " \n", |
| 279 | + " elif method == \"surface_nets\":\n", |
| 280 | + " grid.point_data[\"values\"] = drr.volume.cpu().numpy().flatten(order=\"F\") > threshold\n", |
| 281 | + " try:\n", |
| 282 | + " mesh = grid.contour_labeled(smoothing=True, progress_bar=verbose)\n", |
| 283 | + " except AttributeError as e:\n", |
| 284 | + " raise AttributeError(f\"{e}, ensure you are using pyvista>=0.43 and vtk>=9.3\")\n", |
| 285 | + " mesh.clear_cell_data()\n", |
| 286 | + "\n", |
| 287 | + " else:\n", |
| 288 | + " raise ValueError(f\"method must be `marching_cubes` or `surface_nets`, not {method}\")\n", |
| 289 | + " \n", |
275 | 290 | " return mesh"
|
276 | 291 | ]
|
277 | 292 | },
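
For context, a minimal sketch of how the updated `drr_to_mesh` signature might be called. Only `drr_to_mesh` itself is defined in this diff; constructing the `DRR` object is assumed and not shown here. The surface-nets path leans on VTK 9.3's `contour_labeled` filter (hence the `pyvista>=0.43` / `vtk>=9.3` error message), which emits an already-smoothed label mesh and so skips the extract/smooth/decimate chain that the marching-cubes path still runs.

```python
import pyvista

# Assumes `drr` is an already-initialized DRR object (see the DiffDRR docs);
# how it is constructed is not part of this diff.

# New surface-nets backend (requires pyvista>=0.43 and vtk>=9.3).
mesh = drr_to_mesh(drr, method="surface_nets", threshold=300)

# The original pipeline is still available behind the same flag:
# mesh = drr_to_mesh(drr, method="marching_cubes")

plotter = pyvista.Plotter()
plotter.add_mesh(mesh, color="tan")
plotter.show()
```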
|
|
283 | 298 | "outputs": [],
|
284 | 299 | "source": [
|
285 | 300 | "#| export\n",
|
286 | | - "def img_to_mesh(drr: DRR, rotations, translations, parameterization, convention=None):\n", |
| 301 | + "def img_to_mesh(drr: DRR, rotations, translations, parameterization, convention=None, **kwargs):\n", |
287 | 302 | " \"\"\"\n",
|
288 | 303 | " For a given pose (not batched), turn the camera and detector into a mesh.\n",
|
289 | 304 | " Additionally, render the DRR for the pose. Convert into a texture that\n",
|
|
296 | 311 | " img = (255.0 * img).astype(np.uint8)\n",
|
297 | 312 | " texture = pyvista.numpy_to_texture(img)\n",
|
298 | 313 | "\n",
|
299 | | - " # Make a mesh for the source and detector plane\n", |
| 314 | + " # Make a mesh for the camera and the principal ray\n", |
300 | 315 | " source, target = drr.detector(rotations, translations, parameterization, convention)\n",
|
301 | | - " camera = pyvista.Sphere(radius=10, center=source.squeeze().cpu().numpy())\n", |
| 316 | + " source = source.squeeze().cpu().numpy()\n", |
302 | 317 | " target = target.reshape(drr.detector.height, drr.detector.width, 3).cpu().numpy()\n",
|
| 318 | + " principal_ray = pyvista.Line(source, target.mean(axis=0).mean(axis=0))\n", |
| 319 | + " camera = _make_camera_frustum_mesh(source, target, size=0.125)\n", |
| 320 | + "\n", |
| 321 | + " # Make a mesh for the detector plane\n", |
303 | 322 | " detector = pyvista.StructuredGrid(\n",
|
304 | 323 | " target[..., 0],\n",
|
305 | 324 | " target[..., 1],\n",
|
|
314 | 333 | " inplace=True,\n",
|
315 | 334 | " )\n",
|
316 | 335 | "\n",
|
317 | | - " return camera, detector, texture" |
| 336 | + " return camera, detector, texture, principal_ray" |
| 337 | + ] |
| 338 | + }, |
| 339 | + { |
| 340 | + "cell_type": "code", |
| 341 | + "execution_count": null, |
| 342 | + "id": "fbb0de96-4efa-46ff-a337-3771c9a343e0", |
| 343 | + "metadata": {}, |
| 344 | + "outputs": [], |
| 345 | + "source": [ |
| 346 | + "#| exporti\n", |
| 347 | + "import numpy as np\n", |
| 348 | + "\n", |
| 349 | + "\n", |
| 350 | + "def _make_camera_frustum_mesh(source, target, size=0.125):\n", |
| 351 | + " vertices = np.stack(\n", |
| 352 | + " [\n", |
| 353 | + " source + size * (target[0, 0] - source),\n", |
| 354 | + " source + size * (target[-1, 0] - source),\n", |
| 355 | + " source + size * (target[-1, -1] - source),\n", |
| 356 | + " source + size * (target[0, -1] - source),\n", |
| 357 | + " source,\n", |
| 358 | + " ]\n", |
| 359 | + " )\n", |
| 360 | + " faces = np.hstack(\n", |
| 361 | + " [\n", |
| 362 | + " [4, 0, 1, 2, 3],\n", |
| 363 | + " [3, 0, 1, 4],\n", |
| 364 | + " [3, 1, 2, 4],\n", |
| 365 | + " [3, 0, 3, 4],\n", |
| 366 | + " [3, 2, 3, 4],\n", |
| 367 | + " ]\n", |
| 368 | + " )\n", |
| 369 | + " return pyvista.PolyData(vertices, faces)" |
318 | 370 | ]
|
319 | 371 | },
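
A note on the two changes above: `_make_camera_frustum_mesh` uses PyVista's flat face-array convention, where each face is encoded as `[n_points, i0, ..., i(n-1)]`, so `[4, 0, 1, 2, 3]` is the quad spanning the four scaled detector corners and each `[3, i, j, 4]` entry is a triangle connecting an edge of that quad to the apex at the source (vertex 4). Since `img_to_mesh` now returns four objects instead of three, downstream plotting code needs updating. A hedged sketch, assuming `drr`, `rotations`, `translations`, and `parameterization` are defined as elsewhere in the notebook:

```python
import pyvista

# Assumes `drr`, `rotations`, `translations`, and `parameterization` are set up
# as in the rest of this notebook; this sketch only shows the new return values.
camera, detector, texture, principal_ray = img_to_mesh(
    drr, rotations, translations, parameterization
)

plotter = pyvista.Plotter()
plotter.add_mesh(camera, show_edges=True)       # camera frustum (was a sphere before this change)
plotter.add_mesh(detector, texture=texture)     # detector plane textured with the rendered DRR
plotter.add_mesh(principal_ray, color="black")  # line from the source to the detector center
plotter.show()
```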
|
320 | 372 | {
|
|
344 | 396 | "display_name": "python3",
|
345 | 397 | "language": "python",
|
346 | 398 | "name": "python3"
|
| 399 | + }, |
| 400 | + "widgets": { |
| 401 | + "application/vnd.jupyter.widget-state+json": { |
| 402 | + "state": {}, |
| 403 | + "version_major": 2, |
| 404 | + "version_minor": 0 |
| 405 | + } |
347 | 406 | }
|
348 | 407 | },
|
349 | 408 | "nbformat": 4,
|
|