|
7 | 7 |
|
8 | 8 | import acquire |
9 | 9 | from acquire import DeviceKind, DeviceState, Runtime, Trigger, PropertyType |
10 | | -import dask.array as da |
11 | | -import numcodecs.blosc as blosc |
12 | 10 | import pytest |
13 | 11 | import tifffile |
14 | | -import zarr |
15 | | -from ome_zarr.io import parse_url |
16 | | -from ome_zarr.reader import Reader |
17 | | -from skimage.transform import downscale_local_mean |
18 | | -import numpy as np |
19 | 12 |
|
20 | 13 |
|
21 | 14 | @pytest.fixture(scope="module") |
@@ -282,308 +275,6 @@ def meta(iframe: int) -> Dict[Any, Any]: |
282 | 275 | assert meta(i)["frame_id"] == i |
283 | 276 |
|
284 | 277 |
|
def test_write_external_metadata_to_zarr(
    runtime: Runtime, request: pytest.FixtureRequest
):
    """Acquire a short sequence to a Zarr store and verify that the pixel
    scale and user-supplied external JSON metadata round-trip through it."""
    dm = runtime.device_manager()
    p = runtime.get_configuration()
    stream = p.video[0]
    stream.camera.identifier = dm.select(
        DeviceKind.Camera, "simulated.*sin.*"
    )
    stream.camera.settings.shape = (33, 47)
    stream.storage.identifier = dm.select(DeviceKind.Storage, "Zarr")
    stream.max_frame_count = 4
    stream.storage.settings.filename = f"{request.node.name}.zarr"
    metadata = {"hello": "world"}
    stream.storage.settings.external_metadata_json = json.dumps(metadata)
    stream.storage.settings.pixel_scale_um = (0.5, 4)
    stream.storage.settings.chunk_dims_px.width = 33
    stream.storage.settings.chunk_dims_px.height = 47
    stream.storage.settings.chunk_dims_px.planes = 4

    p = runtime.set_configuration(p)

    nframes = 0
    runtime.start()
    while nframes < p.video[0].max_frame_count:
        packet = runtime.get_available_data(0)
        if packet:
            nframes += packet.get_frame_count()
        # Drop the reference so the runtime can recycle the buffer.
        packet = None
    runtime.stop()

    assert p.video[0].storage.settings.filename
    store = parse_url(p.video[0].storage.settings.filename)
    assert store
    nodes = list(Reader(store)())

    # ome-ngff supports multiple images in separate directories, but only
    # one was written here.
    image_node = nodes[0]

    # ome-ngff always stores multi-scale images; only a single scale/level
    # exists in this store.
    image_data = image_node.data[0]
    expected_shape = (
        p.video[0].max_frame_count,
        1,
        p.video[0].camera.settings.shape[1],
        p.video[0].camera.settings.shape[0],
    )
    assert image_data.shape == expected_shape

    ngff_meta = image_node.metadata

    axes = ngff_meta["axes"]
    axis_names = tuple(a["name"] for a in axes)
    assert axis_names == ("t", "c", "y", "x")
    assert tuple(a["type"] for a in axes) == (
        "time",
        "channel",
        "space",
        "space",
    )
    assert tuple(a.get("unit") for a in axes) == (
        None,
        None,
        "micrometer",
        "micrometer",
    )

    # Only one multi-scale level and one transform exist.
    transform = ngff_meta["coordinateTransformations"][0][0]
    scale_xy = tuple(
        transform["scale"][axis_names.index(axis)] for axis in ("x", "y")
    )
    assert scale_xy == p.video[0].storage.settings.pixel_scale_um

    # ome-zarr only reads attributes it recognizes, so use a plain zarr
    # reader to fetch the external metadata instead.
    group = zarr.open(p.video[0].storage.settings.filename)
    assert group["0"].attrs.asdict() == metadata
357 | | - |
358 | | - |
@pytest.mark.parametrize(
    ("compressor_name",),
    [
        ("zstd",),
        ("lz4",),
    ],
)
def test_write_compressed_zarr(
    runtime: Runtime, request: pytest.FixtureRequest, compressor_name
):
    """Stream to a Blosc-compressed Zarr store and verify the compressor
    settings, array shape, and external metadata via zarr and dask."""
    # The node name embeds the parametrize id as "[...]"; keep the store
    # path filesystem-friendly.
    filename = f"{request.node.name}.zarr"
    filename = filename.replace("[", "_").replace("]", "_")

    dm = runtime.device_manager()
    p = runtime.get_configuration()
    stream = p.video[0]
    stream.camera.identifier = dm.select(
        DeviceKind.Camera, "simulated.*empty.*"
    )
    stream.camera.settings.shape = (64, 48)
    stream.camera.settings.exposure_time_us = 1e4
    stream.storage.identifier = dm.select(
        DeviceKind.Storage,
        f"ZarrBlosc1{compressor_name.capitalize()}ByteShuffle",
    )
    stream.max_frame_count = 70
    stream.storage.settings.filename = filename
    metadata = {"foo": "bar"}
    stream.storage.settings.external_metadata_json = json.dumps(metadata)
    runtime.set_configuration(p)

    runtime.start()
    runtime.stop()

    expected_shape = (
        p.video[0].max_frame_count,
        1,
        p.video[0].camera.settings.shape[1],
        p.video[0].camera.settings.shape[0],
    )

    # Verify via a plain zarr reader.
    data = zarr.open(p.video[0].storage.settings.filename)["0"]
    assert data.compressor.cname == compressor_name
    assert data.compressor.clevel == 1
    assert data.compressor.shuffle == blosc.SHUFFLE
    assert data.shape == expected_shape
    assert data.attrs.asdict() == metadata

    # Verify the same store loads through Dask.
    lazy = da.from_zarr(p.video[0].storage.settings.filename, component="0")
    assert lazy.shape == expected_shape
417 | | - |
@pytest.mark.parametrize(
    ("number_of_frames", "expected_number_of_chunks", "compression"),
    [
        (64, 4, None),
        (64, 4, {"codec": "zstd", "clevel": 1, "shuffle": 1}),
        (65, 8, None),  # rollover
        (65, 8, {"codec": "blosclz", "clevel": 2, "shuffle": 2}),  # rollover
    ],
)
def test_write_zarr_with_chunking(
    runtime: acquire.Runtime,
    request: pytest.FixtureRequest,
    number_of_frames: int,
    expected_number_of_chunks: int,
    compression: Optional[dict],
):
    """Write frames to Zarr with explicit chunk sizes and check the chunk
    layout and chunk count (including rollover past one chunk of planes).

    NOTE(review): ``compression`` is accepted but never applied — every
    case selects the uncompressed "Zarr" storage device, so the compressed
    parametrizations only vary the test id. Either wire it to a compressed
    storage device (as test_write_compressed_zarr does) or drop the
    parameter — confirm intent with the original author.
    """
    dm = runtime.device_manager()

    p = runtime.get_configuration()
    p.video[0].camera.identifier = dm.select(
        DeviceKind.Camera, "simulated.*empty.*"
    )
    p.video[0].camera.settings.shape = (1920, 1080)
    p.video[0].camera.settings.exposure_time_us = 1e4
    p.video[0].camera.settings.pixel_type = acquire.SampleType.U8
    p.video[0].storage.identifier = dm.select(
        DeviceKind.Storage,
        "Zarr",
    )
    # This test is parametrized, so the node name contains "[...]"; strip
    # those characters to keep the store path filesystem-friendly, matching
    # test_write_compressed_zarr.
    filename = f"{request.node.name}.zarr"
    filename = filename.replace("[", "_").replace("]", "_")
    p.video[0].storage.settings.filename = filename
    p.video[0].max_frame_count = number_of_frames

    # Chunk each plane into 2x2 tiles, 64 planes per chunk.
    p.video[0].storage.settings.chunk_dims_px.width = 1920 // 2
    p.video[0].storage.settings.chunk_dims_px.height = 1080 // 2
    p.video[0].storage.settings.chunk_dims_px.planes = 64

    runtime.set_configuration(p)

    runtime.start()
    runtime.stop()

    group = zarr.open(p.video[0].storage.settings.filename)
    data = group["0"]

    assert data.chunks == (64, 1, 1080 // 2, 1920 // 2)

    assert data.shape == (
        number_of_frames,
        1,
        p.video[0].camera.settings.shape[1],
        p.video[0].camera.settings.shape[0],
    )
    assert data.nchunks == expected_number_of_chunks
472 | | - |
def test_write_zarr_multiscale(
    runtime: acquire.Runtime,
    request: pytest.FixtureRequest,
):
    """Enable multiscale Zarr output and check that each pyramid level
    equals successive 2x2 local-mean downscaling of the first level."""
    filename = f"{request.node.name}.zarr"
    filename = filename.replace("[", "_").replace("]", "_")

    dm = runtime.device_manager()

    p = runtime.get_configuration()
    stream = p.video[0]
    stream.camera.identifier = dm.select(
        DeviceKind.Camera, "simulated.*empty.*"
    )
    stream.camera.settings.shape = (1920, 1080)
    stream.camera.settings.exposure_time_us = 1e4
    stream.camera.settings.pixel_type = acquire.SampleType.U8
    stream.storage.identifier = dm.select(
        DeviceKind.Storage,
        "Zarr",
    )
    stream.storage.settings.filename = filename
    stream.storage.settings.pixel_scale_um = (1, 1)
    stream.max_frame_count = 100

    stream.storage.settings.chunk_dims_px.width = (
        stream.camera.settings.shape[0] // 3
    )
    stream.storage.settings.chunk_dims_px.height = (
        stream.camera.settings.shape[1] // 3
    )
    stream.storage.settings.chunk_dims_px.planes = 64

    stream.storage.settings.enable_multiscale = True

    runtime.set_configuration(p)

    runtime.start()
    runtime.stop()

    zgroup = list(Reader(parse_url(filename))())[0]
    # Load every pyramid level lazily from the store.
    levels = [
        da.from_zarr(filename, component=str(i))
        for i in range(len(zgroup.data))
    ]
    assert len(levels) == 3

    # Walk down the pyramid: at each level compare the stored plane against
    # our own reference, then downscale the reference for the next level
    # (validating against the same method from scikit-image).
    reference = levels[0][0, 0, :, :].compute()
    for level in levels:
        stored = level[0, 0, :, :].compute()
        assert np.linalg.norm(reference - stored) == 0
        reference = downscale_local_mean(reference, (2, 2)).astype(np.uint8)
528 | | - |
529 | | - |
@pytest.mark.parametrize(
    ("number_of_frames", "expected_number_of_chunks", "codec"),
    [
        (64, 4, None),
        (64, 4, "zstd"),
        (65, 8, None),  # rollover
        (65, 8, "lz4"),  # rollover
    ],
)
def test_write_zarr_v3(
    runtime: acquire.Runtime,
    request: pytest.FixtureRequest,
    number_of_frames: int,
    expected_number_of_chunks: int,
    codec: Optional[str],
):
    """Write frames to a Zarr v3 store (optionally Blosc-compressed) and
    verify chunk layout, array shape, and chunk count."""
    dm = runtime.device_manager()

    p = runtime.get_configuration()
    p.video[0].camera.identifier = dm.select(
        DeviceKind.Camera, "simulated.*empty.*"
    )

    p.video[0].camera.settings.shape = (1920, 1080)
    p.video[0].camera.settings.exposure_time_us = 1e4
    p.video[0].camera.settings.pixel_type = acquire.SampleType.U8
    p.video[0].storage.identifier = dm.select(
        DeviceKind.Storage,
        f"ZarrV3Blosc1{codec.capitalize()}ByteShuffle" if codec else "ZarrV3",
    )
    # This test is parametrized, so the node name contains "[...]"; strip
    # those characters to keep the store path filesystem-friendly, matching
    # test_write_compressed_zarr.
    filename = f"{request.node.name}.zarr"
    filename = filename.replace("[", "_").replace("]", "_")
    p.video[0].storage.settings.filename = filename
    p.video[0].max_frame_count = number_of_frames

    # Chunk each plane into 2x2 tiles, 64 planes per chunk.
    p.video[0].storage.settings.chunk_dims_px.width = 1920 // 2
    p.video[0].storage.settings.chunk_dims_px.height = 1080 // 2
    p.video[0].storage.settings.chunk_dims_px.planes = 64

    runtime.set_configuration(p)

    runtime.start()
    runtime.stop()

    # zarr-python needs the v3 store class to open a v3 hierarchy.
    store = zarr.DirectoryStoreV3(p.video[0].storage.settings.filename)
    group = zarr.open(store=store, mode="r")
    data = group["0"]

    assert data.chunks == (64, 1, 1080 // 2, 1920 // 2)

    assert data.shape == (
        number_of_frames,
        1,
        p.video[0].camera.settings.shape[1],
        p.video[0].camera.settings.shape[0],
    )
    assert data.nchunks == expected_number_of_chunks
586 | | - |
587 | 278 | @pytest.mark.skip( |
588 | 279 | reason="Runs into memory limitations on github ci." |
589 | 280 | + " See https://github.com/acquire-project/cpx/issues/147" |
|
0 commit comments