diff --git a/packages/core/src/schemas/index.ts b/packages/core/src/schemas/index.ts index 0a74ce4..54e5bc2 100644 --- a/packages/core/src/schemas/index.ts +++ b/packages/core/src/schemas/index.ts @@ -106,7 +106,12 @@ const transformationSchema: z.ZodType = z.lazy(() => export const coordinateTransformationSchema = z.array(transformationSchema).min(1); -const spaceUnitSchema = z.enum([ +/** + * Spatial units from OME-NGFF specification. + * Valid UDUNITS-2 spatial units. + * @see https://github.com/ome/ngff/blob/26039d997f16509f4ef7f4006ea641bef73733f7/rfc/5/versions/1/index.md?plain=1#L131 + */ +export const spaceUnitSchema = z.enum([ 'angstrom', 'attometer', 'centimeter', @@ -135,7 +140,13 @@ const spaceUnitSchema = z.enum([ 'zeptometer', 'zettameter', ]); -const timeUnitSchema = z.enum([ + +/** + * Time units from OME-NGFF specification. + * Valid UDUNITS-2 time units. + * @see https://github.com/ome/ngff/blob/26039d997f16509f4ef7f4006ea641bef73733f7/rfc/5/versions/1/index.md?plain=1#L132 + */ +export const timeUnitSchema = z.enum([ 'attosecond', 'centisecond', 'day', @@ -160,14 +171,37 @@ const timeUnitSchema = z.enum([ 'zeptosecond', 'zettasecond', ]); + /** - * SHOULD contain the field “unit” to specify the physical unit of this dimension. + * Extract enum values from a zod enum schema as a Set of strings. + * Accesses the internal _def.values array from zod enum schemas. + */ +function getEnumValues(schema: z.ZodType): Set<string> { + // zod enum schemas have an internal _def.values array + const values = (schema as unknown as { _def: { values: readonly string[] } })._def.values; + return new Set(values); +} + +/** + * Set of all valid spatial unit strings from OME-NGFF specification. + * Derived from spaceUnitSchema to ensure consistency. + * @see spaceUnitSchema + */ +export const SPATIAL_UNITS: Set<string> = getEnumValues(spaceUnitSchema); + +/** + * Set of all valid time unit strings from OME-NGFF specification. + * Derived from timeUnitSchema to ensure consistency. 
+ * @see timeUnitSchema + */ +export const TIME_UNITS: Set<string> = getEnumValues(timeUnitSchema); +/** + * SHOULD contain the field "unit" to specify the physical unit of this dimension. * The value SHOULD be one of the following strings, which are valid units according to UDUNITS-2. - * https://github.com/ome/ngff/blob/26039d997f16509f4ef7f4006ea641bef73733f7/rfc/5/versions/1/index.md?plain=1#L130 - * -- we could try to be better about distinguishing time/space units etc, - * formally expressing that relationship. - * For now, this whole type basically resolves to string, because other arbitrary values are also allowed - * but we'll certainly care about units more in future. + * @see https://github.com/ome/ngff/blob/26039d997f16509f4ef7f4006ea641bef73733f7/rfc/5/versions/1/index.md?plain=1#L130 + * + * Note: This schema is relaxed to allow arbitrary string values (e.g., generic "unit" placeholder) + * for backward compatibility, but prefers validated spatial and time units when available. */ const axisUnitSchema = z.union([spaceUnitSchema, timeUnitSchema, z.string()]); const axisSchema = z.union([ diff --git a/packages/core/src/transformations/transformations.ts b/packages/core/src/transformations/transformations.ts index ab1febe..62a48ee 100644 --- a/packages/core/src/transformations/transformations.ts +++ b/packages/core/src/transformations/transformations.ts @@ -1,12 +1,91 @@ import { Matrix4 } from '@math.gl/core'; -import type { CoordinateTransformation } from '../schemas'; +import type { CoordinateTransformation, Axis } from '../schemas'; /** * Coordinate system reference from NGFF transformations. + * @see https://github.com/ome/ngff/blob/main/rfc/5/versions/1/index.md + * + * Note: `axes` is optional for backward compatibility with older NGFF data, but should be provided + * for proper transformation handling when coordinate systems include non-spatial axes (e.g., "cyx"). 
+ * Without axes, transformations fall back to direct mapping which assumes all values are spatial. + * ^^ Do we really need to be supporting this? ^^ */ export interface CoordinateSystemRef { name: string; - axes?: Array<{ name: string; type?: string; unit?: string }>; + axes?: Axis[]; +} + +/** + * Map spatial axis values to XYZ coordinates based on axis names. + * + * Axes should be validated to have proper `type` fields by the time they reach this function. + * We check `type === 'space'` to identify spatial axes and map them by name to x, y, z coordinates. + * + * Note: Future work could handle mixed units (e.g., different spatial units like 'micrometer' vs 'meter') + * by converting to a common unit for order-of-magnitude consistency in transformations. + * + * @param values - Full transformation value array (ordered according to input axes) + * @param axes - Array of axis definitions (should be validated Axis types) + * @param defaultValue - Default value to use when padding (1 for scale, 0 for translation) + * @returns Array of [x, y, z] values mapped based on axis names + */ +function mapSpatialValuesToXYZ( + values: number[], + axes?: Axis[], + defaultValue = 1 +): [number, number, number] { + if (!axes || axes.length === 0) { + // No axes specified - use direct mapping (backward compatibility) + console.warn("legacy data with no input axis specification - not really expecting to get here?") + const [x = defaultValue, y = defaultValue, z = defaultValue] = values; + return [x, y, z]; + } + + // Map values to Matrix4 dimensions based on axis name + // Matrix4 uses standard x, y, z ordering + let xValue = defaultValue; + let yValue = defaultValue; + let zValue = defaultValue; + + // Track spatial axes in order for fallback mapping + const spatialAxesInOrder: Array<{ name: string; value: number }> = []; + + for (let i = 0; i < axes.length && i < values.length; i++) { + const axis = axes[i]; + if (axis.type === 'space') { + const axisName = 
axis.name.toLowerCase(); + const value = values[i] ?? defaultValue; + + // Map by exact axis name match (most common case: "x", "y", "z") + if (axisName === 'x' && xValue === defaultValue) { + xValue = value; + } else if (axisName === 'y' && yValue === defaultValue) { + yValue = value; + } else if (axisName === 'z' && zValue === defaultValue) { + zValue = value; + } else { + // Store for fallback mapping if name doesn't match exactly + spatialAxesInOrder.push({ name: axisName, value }); + } + } + } + + // Fallback: if we have unmapped spatial axes, map them in order + // This handles cases where axis names don't match x/y/z exactly + // but preserves the spatial ordering (first → x, second → y, third → z) + let fallbackIndex = 0; + for (const { value } of spatialAxesInOrder) { + if (fallbackIndex === 0 && xValue === defaultValue) { + xValue = value; + } else if (fallbackIndex === 1 && yValue === defaultValue) { + yValue = value; + } else if (fallbackIndex === 2 && zValue === defaultValue) { + zValue = value; + } + fallbackIndex++; + } + + return [xValue, yValue, zValue]; } /** @@ -55,7 +134,12 @@ export class Translation extends BaseTransformation { get type() { return 'translation' as const; } toArray(): number[] { - const [tx, ty, tz = 0] = this.translation; + // Transformation values are ordered according to the input coordinate system axes. + // For example, if input axes are ["c", "y", "x"], then translation[0] corresponds to "c", + // translation[1] to "y", and translation[2] to "x". We map spatial values to Matrix4 + // dimensions based on axis names (x→x, y→y, z→z) to preserve correct orientation. 
+ const [tx, ty, tz] = mapSpatialValuesToXYZ(this.translation, this.input?.axes, 0); + return [ 1, 0, 0, 0, 0, 1, 0, 0, @@ -76,7 +160,12 @@ export class Scale extends BaseTransformation { get type() { return 'scale' as const; } toArray(): number[] { - const [sx, sy, sz = 1] = this.scale; + // Transformation values are ordered according to the input coordinate system axes. + // For example, if input axes are ["c", "y", "x"], then scale[0] corresponds to "c", + // scale[1] to "y", and scale[2] to "x". We map spatial values to Matrix4 + // dimensions based on axis names (x→x, y→y, z→z) to preserve correct orientation. + const [sx, sy, sz] = mapSpatialValuesToXYZ(this.scale, this.input?.axes); + return [ sx, 0, 0, 0, 0, sy, 0, 0, @@ -99,6 +188,26 @@ export class Affine extends BaseTransformation { toArray(): number[] { const { affine } = this; + // Validate that affine matrix dimensions match expected spatial dimensions + // when axes are specified. We assume affine matrices already represent spatial dimensions only. + if (this.input?.axes) { + const expectedSpatialDims = this.input.axes.filter(axis => axis.type === 'space').length; + const actualDims = affine.length; + + // For 2D affine: 2x3 or 3x3 matrices + // For 3D affine: 3x4 or 4x4 matrices + // Warn if there's a mismatch + if (expectedSpatialDims === 2 && actualDims !== 2 && actualDims !== 3) { + console.warn( + `Affine matrix dimensions (${actualDims}x${affine[0]?.length}) don't match expected 2D spatial dimensions. Input axes indicate ${expectedSpatialDims} spatial dimensions. Assuming affine matrix represents spatial dimensions only.` + ); + } else if (expectedSpatialDims === 3 && actualDims !== 3 && actualDims !== 4) { + console.warn( + `Affine matrix dimensions (${actualDims}x${affine[0]?.length}) don't match expected 3D spatial dimensions. Input axes indicate ${expectedSpatialDims} spatial dimensions. 
Assuming affine matrix represents spatial dimensions only.` + ); + } + } + if (affine.length === 2 && affine[0].length === 3) { // 2x3 affine (2D) - common spatialdata format: [[a, b, tx], [c, d, ty]] const [[a, b, tx], [c, d, ty]] = affine; diff --git a/packages/vis/package.json b/packages/vis/package.json index d879488..36be4e0 100644 --- a/packages/vis/package.json +++ b/packages/vis/package.json @@ -37,6 +37,7 @@ "@uiw/react-json-view": "2.0.0-alpha.39", "@hms-dbmi/viv": "catalog:", "deck.gl": "catalog:", + "fast-deep-equal": "^3.1.3", "geotiff": "2.1.4-beta.0", "zustand": "^5.0.8" }, diff --git a/packages/vis/src/SpatialCanvas/SpatialViewer.tsx b/packages/vis/src/SpatialCanvas/SpatialViewer.tsx index 66925e5..eecf1c9 100644 --- a/packages/vis/src/SpatialCanvas/SpatialViewer.tsx +++ b/packages/vis/src/SpatialCanvas/SpatialViewer.tsx @@ -4,19 +4,19 @@ * This component handles the composition of Viv image layers with additional * deck.gl layers (shapes, points, etc.) following the pattern established in MDV. * - * The key insight from MDV is that Viv's VivViewer needs special handling: - * - Viv views have their own layer rendering via view.getLayers() - * - Extra layers need to be composed in the right order - * - Layer filtering is needed for multi-view support - * - * For now, this provides a simpler deck.gl-only implementation that can be - * extended to use Viv's view system when image layer support is added. 
+ * Uses a unified Viv-compatible pattern: + * - Always uses Viv's DetailView (even without images) + * - If image layers present: uses VivSpatialViewer class component + * - Otherwise: uses simplified functional component with DetailView */ import { useCallback, useMemo, useId } from 'react'; -import { DeckGL, OrthographicView } from 'deck.gl'; +import { DeckGL } from 'deck.gl'; +import { DetailView } from '@hms-dbmi/viv'; import type { Layer, PickingInfo } from 'deck.gl'; import type { ViewState } from './types'; +import type { ImageLayerConfig } from './useLayerData'; +import VivSpatialViewer from './VivSpatialViewer'; export interface SpatialViewerProps { /** Viewport width */ @@ -29,8 +29,8 @@ export interface SpatialViewerProps { onViewStateChange: (vs: ViewState) => void; /** deck.gl layers to render (shapes, points, etc.) */ layers: Layer[]; - /** Optional: Viv layer configuration for images (to be implemented) */ - vivLayerProps?: unknown[]; + /** Optional: Viv layer props for image layers */ + vivLayerProps?: ImageLayerConfig[]; /** Optional: Callback on hover */ onHover?: (info: PickingInfo) => void; /** Optional: Callback on click */ @@ -38,14 +38,11 @@ export interface SpatialViewerProps { } /** - * SpatialViewer renders spatial data using deck.gl with an orthographic view. - * - * This is a simplified version that handles deck.gl layers directly. - * For full Viv image support, this will need to be extended to follow - * MDV's MDVivViewer pattern of composing Viv's view.getLayers() output - * with additional deck.gl layers. + * SpatialViewer renders spatial data using deck.gl with Viv-compatible rendering. 
* - * @see MDV/src/react/components/avivatorish/MDVivViewer.tsx for the full pattern + * Uses unified Viv pattern: + * - If image layers present: uses VivSpatialViewer (class component) + * - Otherwise: uses DetailView with deck.gl layers (functional component) */ export function SpatialViewer({ width, @@ -53,25 +50,69 @@ export function SpatialViewer({ viewState, onViewStateChange, layers, - vivLayerProps: _vivLayerProps, // Reserved for future Viv integration + vivLayerProps, onHover, onClick, }: SpatialViewerProps) { + const hasImageLayers = vivLayerProps && vivLayerProps.length > 0; + + // If we have image layers, use VivSpatialViewer + if (hasImageLayers) { + return ( + + ); + } + + // Otherwise, use simplified DetailView approach (for backward compatibility) + return ( + + ); +} + +/** + * Simplified viewer for non-image layers (backward compatibility) + */ +function SpatialViewerSimple({ + width, + height, + viewState, + onViewStateChange, + layers, + onHover, + onClick, +}: Omit) { const viewId = useId(); - // Create orthographic view for 2D spatial data - const view = useMemo(() => { - return new OrthographicView({ + // Use DetailView for consistency with Viv pattern + const detailView = useMemo(() => { + return new DetailView({ id: `spatial-${viewId}`, - flipY: false, // Spatial data typically has Y increasing upward - controller: true, + snapScaleBar: true, + width, + height, }); - }, [viewId]); + }, [viewId, width, height]); // Convert our ViewState to deck.gl's expected format const deckViewState = useMemo((): { target: [number, number, number]; zoom: number } => { if (!viewState) { - // Default view state centered at origin return { target: [0, 0, 0], zoom: 0, @@ -93,14 +134,8 @@ export function SpatialViewer({ }); }, [onViewStateChange]); - // Compose layers - // TODO: When Viv support is added, this will need to: - // 1. Get Viv layers via view.getLayers({ viewStates, props: vivLayerProps }) - // 2. Separate out scale bar layer - // 3. 
Compose: [vivImageLayers, ...extraLayers, scaleBarLayer] + // Filter out any null/undefined layers const composedLayers = useMemo(() => { - // For now, just use the provided layers directly - // Filter out any null/undefined layers return layers.filter(Boolean); }, [layers]); @@ -109,11 +144,13 @@ export function SpatialViewer({ return null; } + const deckGLView = detailView.getDeckGlView(); + return ( & { tile?: any }; + +const areViewStatesEqual = (viewState: VivViewState, otherViewState?: VivViewState): boolean => { + return ( + otherViewState === viewState || + (viewState?.zoom === otherViewState?.zoom && + // @ts-expect-error - CBA to discriminate between Orbit and Ortho viewStates + viewState?.rotationX === otherViewState?.rotationX && + // @ts-expect-error + viewState?.rotationOrbit === otherViewState?.rotationOrbit && + equal(viewState?.target, otherViewState?.target)) + ); +}; + +export interface VivSpatialViewerProps { + /** Viv layer props (loader + channel config) */ + vivLayerProps: ImageLayerConfig[]; + /** Extra deck.gl layers (shapes, points, etc.) 
*/ + extraLayers?: Layer[]; + /** Viewport width */ + width: number; + /** Viewport height */ + height: number; + /** View state (pan/zoom) */ + viewState: ViewState | null; + /** Callback when view state changes */ + onViewStateChange: (vs: ViewState) => void; + /** Optional: Callback on hover */ + onHover?: (info: PickingInfo) => void; + /** Optional: Callback on click */ + onClick?: (info: PickingInfo) => void; + /** Optional: Additional deck.gl props */ + deckProps?: Partial; +} + +interface VivSpatialViewerState { + viewStates: Record; + // deckRef?: React.MutableRefObject; +} + +/** + * Pure function to compose layers: [vivImageLayers, ...extraLayers, scaleBarLayer] + * Note: extraLayers (shapes/points) render on top of images + * + * This matches MDVivViewer's pattern exactly: + * - When deckProps.layers exists: [otherLayers (images), ...deckProps.layers (shapes), scaleBar] + * - When deckProps.layers is undefined: [vivLayers (all), scaleBar] + */ +function composeLayers( + vivLayers: Layer[], + extraLayers: Layer[] = [], + deckPropsLayers?: Layer[] +): Layer[] { + // Separate scale bar from other Viv layers + const scaleBarLayer = vivLayers.find((layer) => layer instanceof ScaleBarLayer); + const otherVivLayers = vivLayers.filter((layer) => layer !== scaleBarLayer); + + // Follow MDV pattern: [otherLayers (images), ...deckProps.layers (shapes), scaleBar] + // In our case, extraLayers = shapes/points (equivalent to deckProps.layers in MDV) + // Always compose: [image layers, ...extraLayers, ...deckPropsLayers, scaleBar] + const layers: Layer[] = []; + + // Add image layers (without scale bar) first - these render at the bottom + if (otherVivLayers.length > 0) { + layers.push(...otherVivLayers); + } + + // Add extra layers (shapes/points) - these render on top of images + // This is equivalent to deckProps.layers in MDV + if (extraLayers.length > 0) { + layers.push(...extraLayers); + } + + // Add any additional deckProps layers + if (deckPropsLayers && 
deckPropsLayers.length > 0) { + layers.push(...deckPropsLayers); + } + + // Scale bar always on top + if (scaleBarLayer) { + layers.push(scaleBarLayer); + } + + return layers; +} + +/** + * Filter layers by viewport ID (for multi-view support within a canvas) + * MDV pattern: layers must include the Viv ID in their ID to be rendered + */ +function createLayerFilter(viewId: string) { + return ({ layer, viewport }: { layer: Layer; viewport: any }): boolean => { + const vivId = getVivId(viewport.id); + // All layers (Viv and extra) must include the Viv ID to be rendered + return layer.id.includes(vivId); + }; +} + +/** + * Convert SpatialCanvas ViewState to Viv ViewState format + */ +function toVivViewState(viewState: ViewState, viewId: string, width: number, height: number): VivViewState { + const [x, y, z = 0] = viewState.target; + return { + id: viewId, + target: [x, y, z], + zoom: viewState.zoom, + // @ts-expect-error - Viv ViewState may have additional properties + width, + height, + }; +} + +/** + * Convert Viv ViewState to SpatialCanvas ViewState format + */ +function fromVivViewState(vivViewState: VivViewState): ViewState { + const target = vivViewState.target as [number, number, number]; + return { + target: [target[0], target[1]], + //@ts-expect-error need to sort out how we represent types - probably 2d only temporarily while we do + zoom: vivViewState.zoom, + }; +} + +class VivSpatialViewer extends React.PureComponent { + private detailView: DetailView; + private viewId: string; + + constructor(props: VivSpatialViewerProps) { + super(props); + this.viewId = `spatial-detail-${Math.random().toString(36).substr(2, 9)}`; + + // Create DetailView + this.detailView = new DetailView({ + id: this.viewId, + snapScaleBar: true, + width: props.width, + height: props.height, + }); + + // Initialize view state + const initialViewState = props.viewState + ? 
toVivViewState(props.viewState, this.viewId, props.width, props.height) + : this.getDefaultViewState(); + + this.state = { + viewStates: { + [this.viewId]: initialViewState, + }, + // deckRef: React.createRef(), + }; + + this._onViewStateChange = this._onViewStateChange.bind(this); + this.layerFilter = this.layerFilter.bind(this); + } + + private getDefaultViewState(): VivViewState { + // If we have a loader, use Viv's default initial view state + if (this.props.vivLayerProps.length > 0 && this.props.vivLayerProps[0].loader) { + try { + const loader = this.props.vivLayerProps[0].loader as any; + const defaultState = getDefaultInitialViewState(loader, { + width: this.props.width, + height: this.props.height, + }); + return { + ...defaultState, + id: this.viewId, + } as VivViewState; + } catch (e) { + console.warn('Failed to get default view state from loader:', e); + } + } + + // Fallback to centered view + return { + id: this.viewId, + target: [0, 0, 0], + zoom: 0, + width: this.props.width, + height: this.props.height, + } as VivViewState; + } + + componentDidUpdate(prevProps: VivSpatialViewerProps) { + const { width, height, viewState } = this.props; + + // Update view dimensions if changed + if (width !== prevProps.width || height !== prevProps.height) { + this.detailView.width = width; + this.detailView.height = height; + } + + // Update view state if changed externally + if (viewState && !areViewStatesEqual( + toVivViewState(viewState, this.viewId, width, height), + this.state.viewStates[this.viewId] + )) { + this.setState((prevState) => ({ + viewStates: { + ...prevState.viewStates, + [this.viewId]: toVivViewState(viewState, this.viewId, width, height), + }, + })); + } + } + + layerFilter({ layer, viewport }: { layer: Layer; viewport: any }): boolean { + return layer.id.includes(getVivId(viewport.id)); + } + + _onViewStateChange({ viewId, viewState }: { viewId: string; viewState: VivViewState }): VivViewState { + const { onViewStateChange } = this.props; + + 
// Update internal state + this.setState((prevState) => ({ + viewStates: { + ...prevState.viewStates, + [viewId]: viewState, + }, + })); + + // Notify parent + if (onViewStateChange) { + onViewStateChange(fromVivViewState(viewState)); + } + + return viewState; + } + + _renderLayers(): Layer[] { + const { vivLayerProps, extraLayers, deckProps, onHover } = this.props; + const { viewStates } = this.state; + + // Viv typically handles one loader per view + // For now, use the first image layer (can be extended later for multiple images per view) + if (vivLayerProps.length === 0) { + //@ts-expect-error deckProps.layers LayersList type + return composeLayers([], extraLayers, deckProps?.layers); + } + + const firstLayerProps = vivLayerProps[0]; + + // Get Viv layers from view + const layerProps: any = { + loader: firstLayerProps.loader, + colors: firstLayerProps.colors, + contrastLimits: firstLayerProps.contrastLimits, + channelsVisible: firstLayerProps.channelsVisible, + selections: firstLayerProps.selections, + onHover, + }; + + // Apply modelMatrix transformation if provided + if (firstLayerProps.modelMatrix) { + layerProps.modelMatrix = firstLayerProps.modelMatrix; + } + + const vivLayersResult = this.detailView.getLayers({ + viewStates, + props: layerProps, + }); + + // getLayers returns an array of arrays (one per view) + // For a single view, take the first element (like MDVivViewer does at line 385) + const vivLayers = Array.isArray(vivLayersResult) && vivLayersResult.length > 0 + ? (Array.isArray(vivLayersResult[0]) ? 
vivLayersResult[0] : vivLayersResult) as Layer[] + : []; + + // Apply opacity and visibility to image layers if specified + if (firstLayerProps.opacity !== undefined || firstLayerProps.visible !== undefined) { + for (const layer of vivLayers) { + if (firstLayerProps.opacity !== undefined && layer.props.opacity !== firstLayerProps.opacity) { + layer.props = { ...layer.props, opacity: firstLayerProps.opacity }; + } + if (firstLayerProps.visible !== undefined && layer.props.visible !== firstLayerProps.visible) { + layer.props = { ...layer.props, visible: firstLayerProps.visible }; + } + } + } + + // Add Viv ID to extra layers (shapes/points) so they pass the layerFilter + // MDV pattern: layer IDs must include the Viv ID to be rendered + const vivId = getVivId(this.viewId); + const extraLayersWithVivId = (extraLayers || []).map((layer) => { + // If layer ID doesn't already include the Viv ID, add it + if (!layer.id.includes(vivId)) { + // Clone the layer with updated ID + const newLayer = layer.clone({ + id: `${layer.id}${vivId}`, + }); + return newLayer; + } + return layer; + }); + + // Compose with extra layers - following MDV pattern exactly + // MDV does: [otherLayers (images), ...deckProps.layers (shapes), scaleBar] + //@ts-expect-error deckProps.layers LayersList type + return composeLayers(vivLayers, extraLayersWithVivId, deckProps?.layers); + } + + render() { + const { width, height, onHover, onClick, deckProps } = this.props; + const { viewStates } = this.state; + + if (width <= 0 || height <= 0) { + return null; + } + + const layers = this._renderLayers(); + const deckGLView = this.detailView.getDeckGlView(); + + return ( + (isDragging ? 
'grabbing' : 'crosshair')} + onHover={onHover} + onClick={onClick} + style={{ backgroundColor: '#111', ...deckProps?.style }} + /> + ); + } +} + +export { VivSpatialViewer }; +export default VivSpatialViewer; + diff --git a/packages/vis/src/SpatialCanvas/index.tsx b/packages/vis/src/SpatialCanvas/index.tsx index dfc8c72..4849a55 100644 --- a/packages/vis/src/SpatialCanvas/index.tsx +++ b/packages/vis/src/SpatialCanvas/index.tsx @@ -14,7 +14,6 @@ import { SpatialCanvasProvider, useSpatialCanvasStore, useSpatialCanvasActions, - useSpatialCanvasStoreApi, } from './context'; import { getAvailableElements, @@ -42,6 +41,8 @@ export { createSpatialCanvasStore } from './stores'; export type { SpatialCanvasStoreApi } from './stores'; export type * from './types'; export { useSpatialViewState, useViewStateUrl } from './hooks'; +export { VivSpatialViewer } from './VivSpatialViewer'; +export type { ImageLayerConfig } from './useLayerData'; // ============================================ // Styles @@ -348,8 +349,8 @@ function SpatialCanvasViewer({ viewState, onViewStateChange, }: SpatialCanvasViewerProps) { - // Load layer data and get deck.gl layers - const { getLayers, isLoading } = useLayerData( + // Load layer data and get deck.gl layers + Viv layer props + const { getLayers, getVivLayerProps, isLoading } = useLayerData( layers, layerOrder, availableElements, @@ -357,12 +358,15 @@ function SpatialCanvasViewer({ ); const deckLayers = getLayers(); + const vivLayerProps = getVivLayerProps(); // Handle view state change, converting null to default const handleViewStateChange = useCallback((vs: ViewState) => { onViewStateChange(vs); }, [onViewStateChange]); + const hasLayers = deckLayers.length > 0 || vivLayerProps.length > 0; + return (
0 ? vivLayerProps : undefined} /> {isLoading && (
)} - {deckLayers.length === 0 && !isLoading && ( + {!hasLayers && !isLoading && (
{ - // TODO: Use Viv's loadOmeZarr or similar based on element.url - // This requires dynamic import of Viv loader utilities - console.debug(`[ImageRenderer] Would create loader for ${element.url}`); - return null; + try { + // Use loadOmeZarr for OME-NGFF format (SpatialData standard) + const loader = await loadOmeZarr(element.url, { type: 'multiscales' }); + return loader.data; // Return just the data part (PixelSource) + } catch (error) { + console.error(`[ImageRenderer] Failed to create loader for ${element.url}:`, error); + throw error; + } } diff --git a/packages/vis/src/SpatialCanvas/renderers/pointsRenderer.ts b/packages/vis/src/SpatialCanvas/renderers/pointsRenderer.ts index 5661fe6..4516a43 100644 --- a/packages/vis/src/SpatialCanvas/renderers/pointsRenderer.ts +++ b/packages/vis/src/SpatialCanvas/renderers/pointsRenderer.ts @@ -19,6 +19,7 @@ export interface PointDataX { // not that we wouldn't also want to be able to have other data & accessors export interface PointData { shape: number[]; + // this should most definitely be TypedArray... 
data: number[][]; } @@ -39,6 +40,7 @@ export interface PointsLayerRenderConfig { color?: [number, number, number, number]; /** ndarray - if we want other data for properties like color/radius etc they will be handled differently */ pointData?: PointData; + use3d?: boolean; } /** @@ -57,6 +59,7 @@ export function renderPointsLayer(config: PointsLayerRenderConfig): Layer | null pointSize = 1, color = [255, 100, 100, 200], pointData, + use3d, } = config; if (!visible) return null; @@ -71,10 +74,11 @@ export function renderPointsLayer(config: PointsLayerRenderConfig): Layer | null id, data: d[0], //just for index really // todo: more robust ndarray handling, be more efficient with target - // probably more important will be having proper spatial data-structure (quad/oct-tree) + // see https://deck.gl/docs/developer-guide/performance#supply-attributes-directly + // spatial data-structure (quad/oct-tree) vs pushing raw attributes. // with ways of querying within view. // also allow accessors for other props - getPosition: (_d, {index, target}) => [d[0][index], d[1][index], d[2]?.[index]], + getPosition: (_d, {index, target}) => [d[0][index], d[1][index], use3d ? (d[2]?.[index] || 0) : 0], getRadius: pointSize, radiusUnits: 'pixels', getFillColor: color, diff --git a/packages/vis/src/SpatialCanvas/types.ts b/packages/vis/src/SpatialCanvas/types.ts index 32d2266..24055a5 100644 --- a/packages/vis/src/SpatialCanvas/types.ts +++ b/packages/vis/src/SpatialCanvas/types.ts @@ -9,11 +9,17 @@ import type { SpatialElement, AnyElement } from '@spatialdata/core'; // View State Types // ============================================ -export interface ViewState { - target: [number, number] | [number, number, number]; +export type ViewState2D = { + target: [number, number], zoom: number; - // Future: rotation, bearing, pitch for 3D } +export type ViewState3D = { + target: [number, number, number], + zoom: number, + // TODO pitch, bearing for 3d. 
+ // do we really want this type to be different to OrbitViewState from deck? +} +export type ViewState = ViewState2D | ViewState3D; // ============================================ // Layer Configuration Types @@ -28,18 +34,31 @@ export interface BaseLayerConfig { visible: boolean; /** Layer opacity (0-1) */ opacity: number; + elementKey: string; +} + +export interface ChannelConfig { + /** Channel colors as RGB tuples */ + colors?: [number, number, number][]; + /** Contrast limits for each channel [min, max] */ + contrastLimits?: [number, number][]; + /** Visibility for each channel */ + channelsVisible?: boolean[]; + /** Selections for z, c, t dimensions */ + selections?: { z: number; c: number; t: number }[]; + //TODO: how do we pass channel-related extension props? } export interface ImageLayerConfig extends BaseLayerConfig { type: 'image'; - elementKey: string; - // Image-specific settings can go here (channels, contrast, etc.) + /** Optional: Advanced channel configuration (for full Viv controls) */ + channels?: ChannelConfig; } export interface ShapesLayerConfig extends BaseLayerConfig { type: 'shapes'; - elementKey: string; // Shapes-specific settings + // TODO: these should be accessors for getFillColor etc based on EntityID fillColor?: [number, number, number, number]; strokeColor?: [number, number, number, number]; strokeWidth?: number; @@ -47,16 +66,18 @@ export interface ShapesLayerConfig extends BaseLayerConfig { export interface PointsLayerConfig extends BaseLayerConfig { type: 'points'; - elementKey: string; // Points-specific settings + // TODO: these should be accessors for getColor etc based on e.g. transcript type + // should be able to filter etc. Some kind of LOD... pointSize?: number; color?: [number, number, number, number]; } export interface LabelsLayerConfig extends BaseLayerConfig { type: 'labels'; - elementKey: string; // Labels-specific settings (colormap, etc.) 
+ // should also be able to associate with EntityID + // - so we'll need some kind of buffer lookup for color/filter/etc } export type LayerConfig = ImageLayerConfig | ShapesLayerConfig | PointsLayerConfig | LabelsLayerConfig; diff --git a/packages/vis/src/SpatialCanvas/useLayerData.ts b/packages/vis/src/SpatialCanvas/useLayerData.ts index 902679d..144bbc0 100644 --- a/packages/vis/src/SpatialCanvas/useLayerData.ts +++ b/packages/vis/src/SpatialCanvas/useLayerData.ts @@ -20,16 +20,48 @@ import { type PointsLayerRenderConfig, type PointData, } from './renderers/pointsRenderer'; +import { + createImageLoader, + extractChannelConfig, +} from './renderers/imageRenderer'; +import { + buildDefaultSelection, + getMultiSelectionStats, + guessRgb, + isInterleaved, +} from '../ImageView/avivatorish/utils'; +import { COLOR_PALLETE } from '../ImageView/avivatorish/constants'; + +interface ImageLoaderData { + loader: unknown; + colors?: [number, number, number][]; + contrastLimits?: [number, number][]; + channelsVisible?: boolean[]; + selections?: Array<{ z?: number; c?: number; t?: number }>; +} interface LoadedData { shapes: Map>>>; points: Map; - images: Map; // Viv loaders - to be implemented + images: Map; // Viv loaders with computed channel data +} + +export interface ImageLayerConfig { + loader: unknown; // Viv PixelSource + colors: [number, number, number][]; + contrastLimits: [number, number][]; + channelsVisible: boolean[]; + selections: Array<{ z?: number; c?: number; t?: number }>; + modelMatrix?: Matrix4; // Transformation matrix for coordinate system alignment + opacity?: number; // Layer opacity (0-1) + visible?: boolean; // Whether layer is visible } interface UseLayerDataResult { - /** Get deck.gl layers ready for rendering */ + /** Get deck.gl layers ready for rendering (shapes, points, etc.) 
*/ getLayers: () => Layer[]; + /** Get Viv layer props for image layers */ + getVivLayerProps: () => ImageLayerConfig[]; /** Whether any layers are currently loading */ isLoading: boolean; /** Trigger a reload of data for a specific element */ @@ -92,8 +124,9 @@ export function useLayerData( toLoad.push({ layerId, element: elem }); } else if (config.type === 'points' && !loaded.points.has(elem.key)) { toLoad.push({ layerId, element: elem }); + } else if (config.type === 'image' && !loaded.images.has(elem.key)) { + toLoad.push({ layerId, element: elem }); } - // Images handled separately (Viv loader) } if (toLoad.length === 0) return; @@ -116,6 +149,85 @@ export function useLayerData( const e = element.element as PointsElement; const data = await e.loadPoints(); loadedDataRef.current.points.set(element.key, data); + } else if (element.type === 'image') { + const loader = await createImageLoader(element.element as ImageElement); + // Compute channel defaults from loader metadata + const imageElement = element.element as ImageElement; + const loaderToCheck = Array.isArray(loader) ? 
loader[0] : loader; + + const imageData: ImageLoaderData = { loader }; + + try { + if (loaderToCheck && typeof loaderToCheck === 'object' && 'labels' in loaderToCheck && 'shape' in loaderToCheck) { + const loaderObj = loaderToCheck as { labels: string[]; shape: number[] }; + + // Build selections + const selections = buildDefaultSelection({ + labels: loaderObj.labels, + shape: loaderObj.shape, + }); + + // Get metadata from image element + const metadata = imageElement.attrs.omero; + + if (metadata?.channels) { + const Channels = metadata.channels; + const isRgb = guessRgb({ Pixels: { Channels: Channels.map((c: any) => ({ Name: c.label })) } } as any); + + if (isRgb) { + if (isInterleaved(loaderObj.shape)) { + imageData.contrastLimits = [[0, 255]]; + imageData.colors = [[255, 0, 0]]; + } else { + imageData.contrastLimits = [[0, 255], [0, 255], [0, 255]]; + imageData.colors = [[255, 0, 0], [0, 255, 0], [0, 0, 255]]; + } + imageData.channelsVisible = imageData.colors.map(() => true); + } else { + // Compute stats for non-RGB images + const stats = await getMultiSelectionStats({ + loader: loader as any, + selections: selections as any, + use3d: false, + }); + imageData.contrastLimits = stats.contrastLimits; + // Use channel colors from metadata or palette + imageData.colors = stats.contrastLimits.length === 1 + ? 
[[255, 255, 255] as [number, number, number]] + : stats.contrastLimits.map((_, i): [number, number, number] => { + const channelColor = Channels[i]?.color; + if (Array.isArray(channelColor) && channelColor.length >= 3) { + return [channelColor[0], channelColor[1], channelColor[2]] as [number, number, number]; + } + return COLOR_PALLETE[i % COLOR_PALLETE.length] as [number, number, number]; + }); + imageData.channelsVisible = imageData.colors.map(() => true); + } + imageData.selections = selections; + } else { + // Fallback defaults + imageData.contrastLimits = [[0, 65535]]; + imageData.colors = [[255, 255, 255]]; + imageData.channelsVisible = [true]; + imageData.selections = [{}]; + } + } else { + // Fallback defaults + imageData.contrastLimits = [[0, 65535]]; + imageData.colors = [[255, 255, 255]]; + imageData.channelsVisible = [true]; + imageData.selections = [{}]; + } + } catch (error) { + console.warn(`Failed to compute channel defaults for ${element.key}:`, error); + // Fallback defaults + imageData.contrastLimits = [[0, 65535]]; + imageData.colors = [[255, 255, 255]]; + imageData.channelsVisible = [true]; + imageData.selections = [{}]; + } + + loadedDataRef.current.images.set(element.key, imageData); } } catch (error) { console.error(`Failed to load data for ${layerId}:`, error); @@ -187,14 +299,61 @@ export function useLayerData( if (layer) deckLayers.push(layer); } } - // Image layers need Viv integration - skip for now + // Image layers are handled separately via getVivLayerProps() } return deckLayers; }, [layers, layerOrder]); + const getVivLayerProps = useCallback((): ImageLayerConfig[] => { + const vivProps: ImageLayerConfig[] = []; + const loaded = loadedDataRef.current; + + for (const layerId of layerOrder) { + const config = layers[layerId]; + if (!config?.visible || config.type !== 'image') continue; + + const elem = elementMap.current.get(layerId); + if (!elem || elem.type !== 'image') continue; + + const imageData = loaded.images.get(elem.key); + 
if (!imageData) continue; // Skip if loader not ready yet + + // Extract channel config (user-provided overrides) + const channelConfig = extractChannelConfig(config); + + // Use user-provided config if available, otherwise use computed defaults + const colors: [number, number, number][] = channelConfig.colors.length > 0 && channelConfig.colors[0][0] !== 255 + ? channelConfig.colors + : (imageData.colors || [[255, 255, 255] as [number, number, number]]); + const contrastLimits: [number, number][] = channelConfig.contrastLimits.length > 0 && channelConfig.contrastLimits[0][1] !== 65535 + ? channelConfig.contrastLimits + : (imageData.contrastLimits || [[0, 65535] as [number, number]]); + const channelsVisible: boolean[] = channelConfig.channelsVisible.length > 0 + ? channelConfig.channelsVisible + : (imageData.channelsVisible || [true]); + const selections: Array<{ z?: number; c?: number; t?: number }> = channelConfig.selections.length > 0 + ? channelConfig.selections + : (imageData.selections || [{}]); + + vivProps.push({ + loader: imageData.loader, + colors, + contrastLimits, + channelsVisible, + selections, + modelMatrix: elem.transform, // Apply coordinate transformation + opacity: config.opacity, + visible: config.visible, + }); + } + + return vivProps; + }, [layers, layerOrder]); + return { getLayers, + getVivLayerProps, isLoading: loadingKeys.size > 0, reloadElement, }; diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 8558466..a0150ad 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -263,6 +263,9 @@ importers: deck.gl: specifier: 'catalog:' version: 9.1.15(@arcgis/core@4.34.5)(@luma.gl/shadertools@9.1.10(@luma.gl/core@9.1.10))(@luma.gl/webgl@9.1.10(@luma.gl/core@9.1.10))(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + fast-deep-equal: + specifier: ^3.1.3 + version: 3.1.3 geotiff: specifier: 2.1.4-beta.0 version: 2.1.4-beta.0