From 397f022cc525960f3dfc9de78f8ab52d2b8091d8 Mon Sep 17 00:00:00 2001 From: Forrest Li Date: Tue, 5 Dec 2017 16:49:37 -0500 Subject: [PATCH 1/7] fix(XMLReader): Add vti appended data format support This adds initial capabilities to read data arrays out of a VTI appended data file format, as generated from VTK. --- Examples/Applications/VolumeViewer/index.js | 11 +-- Sources/IO/XML/XMLImageDataReader/index.js | 6 +- Sources/IO/XML/XMLReader/index.js | 79 +++++++++++++++++++-- 3 files changed, 84 insertions(+), 12 deletions(-) diff --git a/Examples/Applications/VolumeViewer/index.js b/Examples/Applications/VolumeViewer/index.js index 293a9e764b9..3546959a620 100644 --- a/Examples/Applications/VolumeViewer/index.js +++ b/Examples/Applications/VolumeViewer/index.js @@ -47,7 +47,7 @@ function preventDefaults(e) { // ---------------------------------------------------------------------------- -function createViewer(rootContainer, fileContentAsText, options) { +function createViewer(rootContainer, parsedFileContents, options) { const background = options.background ? 
options.background.split(',').map(s => Number(s)) : [0, 0, 0]; const containerStyle = options.containerStyle; const fullScreenRenderer = vtkFullScreenRenderWindow.newInstance({ background, rootContainer, containerStyle }); @@ -56,7 +56,7 @@ function createViewer(rootContainer, fileContentAsText, options) { renderWindow.getInteractor().setDesiredUpdateRate(15); const vtiReader = vtkXMLImageDataReader.newInstance(); - vtiReader.parse(fileContentAsText); + vtiReader.parse(parsedFileContents.text, parsedFileContents.binaryBuffer); const source = vtiReader.getOutputData(0); const mapper = vtkVolumeMapper.newInstance(); @@ -144,9 +144,12 @@ export function load(container, options) { if (options.ext === 'vti') { const reader = new FileReader(); reader.onload = function onLoad(e) { - createViewer(container, reader.result, options); + const prefixRegex = /^\s*<AppendedData encoding="raw">\s*_/m; + const suffixRegex = /\n\s*<\/AppendedData>/m; + const result = extractBinary(reader.result, prefixRegex, suffixRegex); + createViewer(container, result, options); }; - reader.readAsText(options.file); + reader.readAsArrayBuffer(options.file); } else { console.error('Unkown file...'); } diff --git a/Sources/IO/XML/XMLImageDataReader/index.js b/Sources/IO/XML/XMLImageDataReader/index.js index 7d49f22fd73..4d9a42341fd 100644 --- a/Sources/IO/XML/XMLImageDataReader/index.js +++ b/Sources/IO/XML/XMLImageDataReader/index.js @@ -28,13 +28,15 @@ function vtkXMLImageDataReader(publicAPI, model) { imageData.getNumberOfPoints(), piece.getElementsByTagName('PointData')[0], imageData.getPointData(), - compressor, byteOrder, headerType); + compressor, byteOrder, headerType, + model.binaryBuffer); vtkXMLReader.processFieldData( imageData.getNumberOfCells(), piece.getElementsByTagName('CellData')[0], imageData.getCellData(), - compressor, byteOrder, headerType); + compressor, byteOrder, headerType, + model.binaryBuffer); // Add new output model.output[outputIndex++] = imageData; diff --git a/Sources/IO/XML/XMLReader/index.js
b/Sources/IO/XML/XMLReader/index.js index 403b08e7a66..c2050ed9998 100644 --- a/Sources/IO/XML/XMLReader/index.js +++ b/Sources/IO/XML/XMLReader/index.js @@ -18,6 +18,48 @@ function stringToXML(xmlStr) { return (new DOMParser()).parseFromString(xmlStr, 'application/xml'); } +/** + * Extracts binary data out of a file bytearray given a prefix/suffix. + */ +function extractBinary(arrayBuffer, prefixRegex, suffixRegex = null) { + // convert array buffer to string via fromCharCode so length is preserved + const byteArray = new Uint8Array(arrayBuffer); + const strArr = []; + for (let i = 0; i < byteArray.length; ++i) { + strArr[i] = String.fromCharCode(byteArray[i]); + } + const str = strArr.join(''); + + const prefixMatch = prefixRegex.exec(str); + if (!prefixMatch) { + return { text: str }; + } + + const dataStartIndex = prefixMatch.index + prefixMatch[0].length; + const strFirstHalf = str.substring(0, dataStartIndex); + let retVal = null; + + const suffixMatch = suffixRegex ? suffixRegex.exec(str) : null; + if (suffixMatch) { + const strSecondHalf = str.substr(suffixMatch.index); + retVal = { + text: strFirstHalf + strSecondHalf, + binaryBuffer: arrayBuffer.slice(dataStartIndex, suffixMatch.index), + }; + } else { + // no suffix, so just take all the data starting from dataStartIndex + retVal = { + text: strFirstHalf, + binaryBuffer: arrayBuffer.slice(dataStartIndex), + }; + } + + // TODO Maybe delete the internal ref to strArr from the match objs? 
+ retVal.prefixMatch = prefixMatch; + retVal.suffixMatch = suffixMatch; + return retVal; +} + // ---------------------------------------------------------------------------- const TYPED_ARRAY = { @@ -96,10 +138,10 @@ function uncompressBlock(compressedUint8, output) { // ---------------------------------------------------------------------------- -function processDataArray(size, dataArrayElem, compressor, byteOrder, headerType) { +function processDataArray(size, dataArrayElem, compressor, byteOrder, headerType, binaryBuffer) { const dataType = dataArrayElem.getAttribute('type'); const name = dataArrayElem.getAttribute('Name'); - const format = dataArrayElem.getAttribute('format'); // binary, ascii, [appended: not supported] + const format = dataArrayElem.getAttribute('format'); // binary, ascii, appended const numberOfComponents = Number(dataArrayElem.getAttribute('NumberOfComponents') || '1'); let values = null; @@ -151,6 +193,13 @@ function processDataArray(size, dataArrayElem, compressor, byteOrder, headerType values = integer64to32(values); } } + } else if (format === 'appended') { + const offset = dataArrayElem.getAttribute('offset'); + // read header + const header = binaryBuffer.slice(offset, offset + TYPED_ARRAY_BYTES[headerType]); + const arraySize = (new TYPED_ARRAY[headerType](header))[0] / TYPED_ARRAY_BYTES[dataType]; + // read values + values = new TYPED_ARRAY[dataType](binaryBuffer, offset + header.byteLength, arraySize); } else { console.error('Format not supported', format); } @@ -191,7 +240,7 @@ function processCells(size, containerElem, compressor, byteOrder, headerType) { // ---------------------------------------------------------------------------- -function processFieldData(size, fieldElem, fieldContainer, compressor, byteOrder, headerType) { +function processFieldData(size, fieldElem, fieldContainer, compressor, byteOrder, headerType, binaryBuffer) { if (fieldElem) { const attributes = ['Scalars', 'Vectors', 'Normals', 'Tensors', 'TCoords']; 
const nameBinding = {}; @@ -206,7 +255,7 @@ function processFieldData(size, fieldElem, fieldContainer, compressor, byteOrder const nbArrays = arrays.length; for (let idx = 0; idx < nbArrays; idx++) { const array = arrays[idx]; - const dataArray = vtkDataArray.newInstance(processDataArray(size, array, compressor, byteOrder, headerType)); + const dataArray = vtkDataArray.newInstance(processDataArray(size, array, compressor, byteOrder, headerType, binaryBuffer)); const name = dataArray.getName(); (nameBinding[name] || fieldContainer.addArray)(dataArray); } @@ -255,7 +304,7 @@ function vtkXMLReader(publicAPI, model) { return promise; }; - publicAPI.parse = (content) => { + publicAPI.parse = (content, binaryBuffer) => { if (!content) { return; } @@ -266,6 +315,8 @@ function vtkXMLReader(publicAPI, model) { } model.parseData = content; + // TODO maybe name as "appendDataBuffer" + model.binaryBuffer = binaryBuffer; // Parse data here... const doc = stringToXML(content); @@ -290,11 +341,27 @@ function vtkXMLReader(publicAPI, model) { return; } + // appended format + if (rootElem.querySelector('AppendedData')) { + const appendedDataElem = rootElem.querySelector('AppendedData'); + const encoding = appendedDataElem.getAttribute('encoding'); + + if (encoding === 'base64') { + // substr(1) is to remove the '_' prefix + model.binaryBuffer = toByteArray(appendedDataElem.textContent.trim().substr(1)).buffer; + } + + if (!model.binaryBuffer) { + console.error('Processing appended data format: requires binaryBuffer to parse'); + return; + } + } + publicAPI.parseXML(rootElem, type, compressor, byteOrder, headerType); }; publicAPI.requestData = (inData, outData) => { - publicAPI.parse(model.parseData); + publicAPI.parse(model.parseData, model.binaryBuffer); }; } From ebaa5a7bc4fdf986208d140fa39d45623557a38a Mon Sep 17 00:00:00 2001 From: Forrest Li Date: Wed, 6 Dec 2017 12:45:06 -0500 Subject: [PATCH 2/7] fix(IO): Move extractBinary to IO/Core/BinaryHelper extractBinary is generic 
enough to be put in a general helper file. Also adds a new method named arrayBufferToString that was excised out of extractBinary. --- Examples/Applications/VolumeViewer/index.js | 4 +- Sources/IO/Core/BinaryHelper/index.js | 54 +++++++++++++++++++++ Sources/IO/Core/index.js | 2 + Sources/IO/XML/XMLReader/index.js | 42 ---------------- 4 files changed, 59 insertions(+), 43 deletions(-) create mode 100644 Sources/IO/Core/BinaryHelper/index.js diff --git a/Examples/Applications/VolumeViewer/index.js b/Examples/Applications/VolumeViewer/index.js index 3546959a620..bbcd642b56b 100644 --- a/Examples/Applications/VolumeViewer/index.js +++ b/Examples/Applications/VolumeViewer/index.js @@ -15,6 +15,8 @@ import vtkVolume from 'vtk.js/Sources/Rendering/Core/Volume'; import vtkVolumeMapper from 'vtk.js/Sources/Rendering/Core/VolumeMapper'; import vtkXMLImageDataReader from 'vtk.js/Sources/IO/XML/XMLImageDataReader'; +import BinaryHelper from 'vtk.js/Sources/IO/Core/BinaryHelper'; + import style from './VolumeViewer.mcss'; let autoInit = true; @@ -146,7 +148,7 @@ export function load(container, options) { reader.onload = function onLoad(e) { const prefixRegex = /^\s*<AppendedData encoding="raw">\s*_/m; const suffixRegex = /\n\s*<\/AppendedData>/m; - const result = extractBinary(reader.result, prefixRegex, suffixRegex); + const result = BinaryHelper.extractBinary(reader.result, prefixRegex, suffixRegex); createViewer(container, result, options); }; reader.readAsArrayBuffer(options.file); diff --git a/Sources/IO/Core/BinaryHelper/index.js b/Sources/IO/Core/BinaryHelper/index.js new file mode 100644 index 00000000000..83fac42d675 --- /dev/null +++ b/Sources/IO/Core/BinaryHelper/index.js @@ -0,0 +1,54 @@ +/** + * Converts a binary buffer in an ArrayBuffer to a string. + * + * Note this does not take encoding into consideration, so don't + * expect proper Unicode or any other encoding.
+ */ +function arrayBufferToString(arrayBuffer) { + const byteArray = new Uint8Array(arrayBuffer); + const strArr = []; + for (let i = 0; i < byteArray.length; ++i) { + strArr[i] = String.fromCharCode(byteArray[i]); + } + return strArr.join(''); +} + +/** + * Extracts binary data out of a file ArrayBuffer given a prefix/suffix. + */ +function extractBinary(arrayBuffer, prefixRegex, suffixRegex = null) { + const str = arrayBufferToString(arrayBuffer); + + const prefixMatch = prefixRegex.exec(str); + if (!prefixMatch) { + return { text: str }; + } + + const dataStartIndex = prefixMatch.index + prefixMatch[0].length; + const strFirstHalf = str.substring(0, dataStartIndex); + let retVal = null; + + const suffixMatch = suffixRegex ? suffixRegex.exec(str) : null; + if (suffixMatch) { + const strSecondHalf = str.substr(suffixMatch.index); + retVal = { + text: strFirstHalf + strSecondHalf, + binaryBuffer: arrayBuffer.slice(dataStartIndex, suffixMatch.index), + }; + } else { + // no suffix, so just take all the data starting from dataStartIndex + retVal = { + text: strFirstHalf, + binaryBuffer: arrayBuffer.slice(dataStartIndex), + }; + } + + // TODO Maybe delete the internal ref to strArr from the match objs? 
+ retVal.prefixMatch = prefixMatch; + retVal.suffixMatch = suffixMatch; + return retVal; +} + +export default { + arrayBufferToString, extractBinary, +}; diff --git a/Sources/IO/Core/index.js b/Sources/IO/Core/index.js index d9b3ba0570f..e7cb046370c 100644 --- a/Sources/IO/Core/index.js +++ b/Sources/IO/Core/index.js @@ -1,8 +1,10 @@ +import BinaryHelper from './BinaryHelper'; import DataAccessHelper from './DataAccessHelper'; import vtkHttpDataSetReader from './HttpDataSetReader'; import vtkHttpSceneLoader from './HttpSceneLoader'; export default { + BinaryHelper, DataAccessHelper, vtkHttpDataSetReader, vtkHttpSceneLoader, diff --git a/Sources/IO/XML/XMLReader/index.js b/Sources/IO/XML/XMLReader/index.js index c2050ed9998..19c5a74cfe4 100644 --- a/Sources/IO/XML/XMLReader/index.js +++ b/Sources/IO/XML/XMLReader/index.js @@ -18,48 +18,6 @@ function stringToXML(xmlStr) { return (new DOMParser()).parseFromString(xmlStr, 'application/xml'); } -/** - * Extracts binary data out of a file bytearray given a prefix/suffix. - */ -function extractBinary(arrayBuffer, prefixRegex, suffixRegex = null) { - // convert array buffer to string via fromCharCode so length is preserved - const byteArray = new Uint8Array(arrayBuffer); - const strArr = []; - for (let i = 0; i < byteArray.length; ++i) { - strArr[i] = String.fromCharCode(byteArray[i]); - } - const str = strArr.join(''); - - const prefixMatch = prefixRegex.exec(str); - if (!prefixMatch) { - return { text: str }; - } - - const dataStartIndex = prefixMatch.index + prefixMatch[0].length; - const strFirstHalf = str.substring(0, dataStartIndex); - let retVal = null; - - const suffixMatch = suffixRegex ? 
suffixRegex.exec(str) : null; - if (suffixMatch) { - const strSecondHalf = str.substr(suffixMatch.index); - retVal = { - text: strFirstHalf + strSecondHalf, - binaryBuffer: arrayBuffer.slice(dataStartIndex, suffixMatch.index), - }; - } else { - // no suffix, so just take all the data starting from dataStartIndex - retVal = { - text: strFirstHalf, - binaryBuffer: arrayBuffer.slice(dataStartIndex), - }; - } - - // TODO Maybe delete the internal ref to strArr from the match objs? - retVal.prefixMatch = prefixMatch; - retVal.suffixMatch = suffixMatch; - return retVal; -} - // ---------------------------------------------------------------------------- const TYPED_ARRAY = { From 76c307479b1181487dc7036b14b94e910658a24f Mon Sep 17 00:00:00 2001 From: Forrest Li Date: Wed, 6 Dec 2017 12:54:31 -0500 Subject: [PATCH 3/7] fix(XMLReader): Use TypedArray view rather than slice --- Sources/IO/XML/XMLReader/index.js | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/Sources/IO/XML/XMLReader/index.js b/Sources/IO/XML/XMLReader/index.js index 19c5a74cfe4..b639fd707e8 100644 --- a/Sources/IO/XML/XMLReader/index.js +++ b/Sources/IO/XML/XMLReader/index.js @@ -154,10 +154,11 @@ function processDataArray(size, dataArrayElem, compressor, byteOrder, headerType } else if (format === 'appended') { const offset = dataArrayElem.getAttribute('offset'); // read header - const header = binaryBuffer.slice(offset, offset + TYPED_ARRAY_BYTES[headerType]); - const arraySize = (new TYPED_ARRAY[headerType](header))[0] / TYPED_ARRAY_BYTES[dataType]; + const header = new TYPED_ARRAY[headerType](binaryBuffer, offset, 1); + const arraySize = header[0] / TYPED_ARRAY_BYTES[dataType]; + // read values - values = new TYPED_ARRAY[dataType](binaryBuffer, offset + header.byteLength, arraySize); + values = new TYPED_ARRAY[dataType](binaryBuffer, offset + TYPED_ARRAY_BYTES[headerType], arraySize); } else { console.error('Format not supported', format); } From 
7198e09e2ae588e88ce104a77cf45fc6dbb2aa71 Mon Sep 17 00:00:00 2001 From: Forrest Li Date: Wed, 6 Dec 2017 13:08:59 -0500 Subject: [PATCH 4/7] fix(XMLReader): Basic handling of (U)Int64 arrays --- Sources/IO/XML/XMLReader/index.js | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Sources/IO/XML/XMLReader/index.js b/Sources/IO/XML/XMLReader/index.js index b639fd707e8..1a940398ca3 100644 --- a/Sources/IO/XML/XMLReader/index.js +++ b/Sources/IO/XML/XMLReader/index.js @@ -154,11 +154,17 @@ function processDataArray(size, dataArrayElem, compressor, byteOrder, headerType } else if (format === 'appended') { const offset = dataArrayElem.getAttribute('offset'); // read header + // NOTE: this will incorrectly read the size if headerType is (U)Int64 and + // the value requires (U)Int64. const header = new TYPED_ARRAY[headerType](binaryBuffer, offset, 1); const arraySize = header[0] / TYPED_ARRAY_BYTES[dataType]; // read values values = new TYPED_ARRAY[dataType](binaryBuffer, offset + TYPED_ARRAY_BYTES[headerType], arraySize); + // remove higher order 32 bits assuming they're not used. + if (dataType.indexOf('Int64') !== -1) { + values = integer64to32(values); + } } else { console.error('Format not supported', format); } From ee79adc0577b59a3858d7d2d2c026fde4d95a0b8 Mon Sep 17 00:00:00 2001 From: Forrest Li Date: Wed, 6 Dec 2017 16:29:20 -0500 Subject: [PATCH 5/7] fix(XMLReader): Add appended vtp parsing support This adds core logic for parsing VTP files in appended format and base64 encoded. 
--- Examples/Applications/GeometryViewer/index.js | 15 ++++-- Examples/Applications/VolumeViewer/index.js | 2 +- Sources/IO/XML/XMLPolyDataReader/index.js | 18 ++++--- Sources/IO/XML/XMLReader/index.js | 52 ++++++++++++++++--- 4 files changed, 67 insertions(+), 20 deletions(-) diff --git a/Examples/Applications/GeometryViewer/index.js b/Examples/Applications/GeometryViewer/index.js index 4e35a69a740..9fd557465ac 100644 --- a/Examples/Applications/GeometryViewer/index.js +++ b/Examples/Applications/GeometryViewer/index.js @@ -14,6 +14,8 @@ import vtkURLExtract from 'vtk.js/Sources/Common/Core/URLExtract'; import vtkXMLPolyDataReader from 'vtk.js/Sources/IO/XML/XMLPolyDataReader'; import { ColorMode, ScalarMode } from 'vtk.js/Sources/Rendering/Core/Mapper/Constants'; +import BinaryHelper from 'vtk.js/Sources/IO/Core/BinaryHelper'; + import style from './GeometryViewer.mcss'; import icon from '../../../Documentation/content/icon/favicon-96x96.png'; @@ -111,7 +113,7 @@ function createViewer(container) { // ---------------------------------------------------------------------------- -function createPipeline(fileName, fileContentAsText) { +function createPipeline(fileName, parsedFileContents) { // Create UI const presetSelector = document.createElement('select'); presetSelector.setAttribute('class', selectorClass); @@ -156,7 +158,7 @@ function createPipeline(fileName, fileContentAsText) { // VTK pipeline const vtpReader = vtkXMLPolyDataReader.newInstance(); - vtpReader.parse(fileContentAsText); + vtpReader.parse(parsedFileContents.text, parsedFileContents.binaryBuffer); const lookupTable = vtkColorTransferFunction.newInstance(); const source = vtpReader.getOutputData(0); @@ -302,9 +304,12 @@ function createPipeline(fileName, fileContentAsText) { function loadFile(file) { const reader = new FileReader(); reader.onload = function onLoad(e) { - createPipeline(file.name, reader.result); + const prefixRegex = /^\s*<AppendedData encoding="raw">\s*_/m; + const suffixRegex = /\n\s*<\/AppendedData>/m; +
const result = BinaryHelper.extractBinary(reader.result, prefixRegex, suffixRegex); + createPipeline(file.name, result); }; - reader.readAsText(file); + reader.readAsArrayBuffer(file); } // ---------------------------------------------------------------------------- @@ -333,7 +338,7 @@ export function load(container, options) { HttpDataAccessHelper.fetchText({}, options.fileURL, { progressCallback }).then((txt) => { container.removeChild(progressContainer); createViewer(container); - createPipeline(defaultName, txt); + createPipeline(defaultName, { text: txt }); updateCamera(renderer.getActiveCamera()); }); } diff --git a/Examples/Applications/VolumeViewer/index.js b/Examples/Applications/VolumeViewer/index.js index bbcd642b56b..2f4d402d608 100644 --- a/Examples/Applications/VolumeViewer/index.js +++ b/Examples/Applications/VolumeViewer/index.js @@ -167,7 +167,7 @@ export function load(container, options) { HttpDataAccessHelper.fetchText({}, options.fileURL, { progressCallback }).then((txt) => { container.removeChild(progressContainer); - createViewer(container, txt, options); + createViewer(container, { text: txt }, options); }); } } diff --git a/Sources/IO/XML/XMLPolyDataReader/index.js b/Sources/IO/XML/XMLPolyDataReader/index.js index bf80f0620ea..3a3441c6fba 100644 --- a/Sources/IO/XML/XMLPolyDataReader/index.js +++ b/Sources/IO/XML/XMLPolyDataReader/index.js @@ -6,11 +6,11 @@ import vtkPolyData from 'vtk.js/Sources/Common/DataModel/PolyData'; // Global method // ---------------------------------------------------------------------------- -function handleArray(polydata, cellType, piece, compressor, byteOrder, headerType) { +function handleArray(polydata, cellType, piece, compressor, byteOrder, headerType, binaryBuffer) { const size = Number(piece.getAttribute(`NumberOf${cellType}`)); if (size > 0) { const dataArrayElem = piece.getElementsByTagName(cellType)[0].getElementsByTagName('DataArray')[0]; - const { values, numberOfComponents } = 
vtkXMLReader.processDataArray(size, dataArrayElem, compressor, byteOrder, headerType); + const { values, numberOfComponents } = vtkXMLReader.processDataArray(size, dataArrayElem, compressor, byteOrder, headerType, binaryBuffer); polydata[`get${cellType}`]().setData(values, numberOfComponents); } return size; @@ -18,10 +18,10 @@ function handleArray(polydata, cellType, piece, compressor, byteOrder, headerTyp // ---------------------------------------------------------------------------- -function handleCells(polydata, cellType, piece, compressor, byteOrder, headerType) { +function handleCells(polydata, cellType, piece, compressor, byteOrder, headerType, binaryBuffer) { const size = Number(piece.getAttribute(`NumberOf${cellType}`)); if (size > 0) { - const values = vtkXMLReader.processCells(size, piece.getElementsByTagName(cellType)[0], compressor, byteOrder, headerType); + const values = vtkXMLReader.processCells(size, piece.getElementsByTagName(cellType)[0], compressor, byteOrder, headerType, binaryBuffer); polydata[`get${cellType}`]().setData(values); } return size; @@ -46,17 +46,19 @@ function vtkXMLPolyDataReader(publicAPI, model) { const piece = pieces[outputIndex]; // Points - const nbPoints = handleArray(polydata, 'Points', piece, compressor, byteOrder, headerType); + const nbPoints = handleArray(polydata, 'Points', piece, compressor, byteOrder, headerType, model.binaryBuffer); // Cells let nbCells = 0; ['Verts', 'Lines', 'Strips', 'Polys'].forEach((cellType) => { - nbCells += handleCells(polydata, cellType, piece, compressor, byteOrder, headerType); + nbCells += handleCells(polydata, cellType, piece, compressor, byteOrder, headerType, model.binaryBuffer); }); // Fill data - vtkXMLReader.processFieldData(nbPoints, piece.getElementsByTagName('PointData')[0], polydata.getPointData(), compressor, byteOrder, headerType); - vtkXMLReader.processFieldData(nbCells, piece.getElementsByTagName('CellData')[0], polydata.getCellData(), compressor, byteOrder, headerType); 
+ vtkXMLReader.processFieldData(nbPoints, piece.getElementsByTagName('PointData')[0], polydata.getPointData(), + compressor, byteOrder, headerType, model.binaryBuffer); + vtkXMLReader.processFieldData(nbCells, piece.getElementsByTagName('CellData')[0], polydata.getCellData(), + compressor, byteOrder, headerType, model.binaryBuffer); // Add new output model.output[outputIndex++] = polydata; diff --git a/Sources/IO/XML/XMLReader/index.js b/Sources/IO/XML/XMLReader/index.js index 1a940398ca3..34ce7798497 100644 --- a/Sources/IO/XML/XMLReader/index.js +++ b/Sources/IO/XML/XMLReader/index.js @@ -152,12 +152,18 @@ function processDataArray(size, dataArrayElem, compressor, byteOrder, headerType } } } else if (format === 'appended') { - const offset = dataArrayElem.getAttribute('offset'); + const offset = Number(dataArrayElem.getAttribute('offset')); // read header // NOTE: this will incorrectly read the size if headerType is (U)Int64 and // the value requires (U)Int64. const header = new TYPED_ARRAY[headerType](binaryBuffer, offset, 1); - const arraySize = header[0] / TYPED_ARRAY_BYTES[dataType]; + let arraySize = header[0] / TYPED_ARRAY_BYTES[dataType]; + + // if we are dealing with Uint64, we need to get double the values since + // TYPED_ARRAY[Uint64] is Uint32. 
+ if (dataType.indexOf('Int64') !== -1) { + arraySize *= 2; + } // read values values = new TYPED_ARRAY[dataType](binaryBuffer, offset + TYPED_ARRAY_BYTES[headerType], arraySize); @@ -174,7 +180,7 @@ function processDataArray(size, dataArrayElem, compressor, byteOrder, headerType // ---------------------------------------------------------------------------- -function processCells(size, containerElem, compressor, byteOrder, headerType) { +function processCells(size, containerElem, compressor, byteOrder, headerType, binaryBuffer) { const arrayElems = {}; const dataArrayElems = containerElem.getElementsByTagName('DataArray'); for (let elIdx = 0; elIdx < dataArrayElems.length; elIdx++) { @@ -182,9 +188,9 @@ function processCells(size, containerElem, compressor, byteOrder, headerType) { arrayElems[el.getAttribute('Name')] = el; } - const offsets = processDataArray(size, arrayElems.offsets, compressor, byteOrder, headerType).values; + const offsets = processDataArray(size, arrayElems.offsets, compressor, byteOrder, headerType, binaryBuffer).values; const connectivitySize = offsets[offsets.length - 1]; - const connectivity = processDataArray(connectivitySize, arrayElems.connectivity, compressor, byteOrder, headerType).values; + const connectivity = processDataArray(connectivitySize, arrayElems.connectivity, compressor, byteOrder, headerType, binaryBuffer).values; const values = new Uint32Array(size + connectivitySize); let writeOffset = 0; let previousOffset = 0; @@ -313,7 +319,41 @@ function vtkXMLReader(publicAPI, model) { if (encoding === 'base64') { // substr(1) is to remove the '_' prefix - model.binaryBuffer = toByteArray(appendedDataElem.textContent.trim().substr(1)).buffer; + const appendedData = appendedDataElem.textContent.trim().substr(1); + const arrays = rootElem.querySelectorAll('DataArray'); + + // read binary chunks + const binChunks = []; + let bufferLength = 0; + for (let i = 0; i < arrays.length; ++i) { + const offset = 
arrays[i].getAttribute('offset'); + let nextOffset = 0; + if (i === arrays.length - 1) { + nextOffset = appendedData.length; + } else { + nextOffset = arrays[i + 1].getAttribute('offset'); + } + + const base64 = appendedData.substring(offset, nextOffset); + const data = toByteArray(base64); + binChunks.push(data); + + // Modify the DataArray offset to point to the offset in the binary array + // rather than the offset in the original AppendedData buffer. + arrays[i].setAttribute('offset', bufferLength); + + bufferLength += data.length; + } + + const buffer = new ArrayBuffer(bufferLength); + const view = new Uint8Array(buffer); + + for (let i = 0, offset = 0; i < binChunks.length; ++i) { + view.set(binChunks[i], offset); + offset += binChunks[i].length; + } + + model.binaryBuffer = buffer; } if (!model.binaryBuffer) { From 2a8f3b68f219459e30136070b1d2b2d8f082bde2 Mon Sep 17 00:00:00 2001 From: Forrest Li Date: Wed, 6 Dec 2017 17:50:12 -0500 Subject: [PATCH 6/7] fix(XMLReader): Support reading zlib compressed appended format --- Sources/IO/XML/XMLReader/index.js | 80 ++++++++++++++++++++++++++++++- 1 file changed, 78 insertions(+), 2 deletions(-) diff --git a/Sources/IO/XML/XMLReader/index.js b/Sources/IO/XML/XMLReader/index.js index 34ce7798497..475fba9ab99 100644 --- a/Sources/IO/XML/XMLReader/index.js +++ b/Sources/IO/XML/XMLReader/index.js @@ -326,12 +326,12 @@ function vtkXMLReader(publicAPI, model) { const binChunks = []; let bufferLength = 0; for (let i = 0; i < arrays.length; ++i) { - const offset = arrays[i].getAttribute('offset'); + const offset = Number(arrays[i].getAttribute('offset')); let nextOffset = 0; if (i === arrays.length - 1) { nextOffset = appendedData.length; } else { - nextOffset = arrays[i + 1].getAttribute('offset'); + nextOffset = Number(arrays[i + 1].getAttribute('offset')); } const base64 = appendedData.substring(offset, nextOffset); @@ -356,6 +356,82 @@ function vtkXMLReader(publicAPI, model) { model.binaryBuffer = buffer; } + if 
(compressor === 'vtkZLibDataCompressor') { + const arrays = rootElem.querySelectorAll('DataArray'); + + // read binary chunks + const binChunks = []; + let bufferLength = 0; + for (let i = 0; i < arrays.length; ++i) { + const offset = Number(arrays[i].getAttribute('offset')); + let nextOffset = 0; + if (i === arrays.length - 1) { + nextOffset = model.binaryBuffer.byteLength; + } else { + nextOffset = Number(arrays[i + 1].getAttribute('offset')); + } + + // need to slice here otherwise readerHeader breaks + const uint8 = new Uint8Array(model.binaryBuffer.slice(offset, nextOffset)); + + // Header reading + // Refer to processDataArray() above for info on header fields + const header = readerHeader(uint8, headerType); + const nbBlocks = header[1]; + let compressedOffset = + uint8.length - + (header.reduce((a, b) => a + b, 0) - (header[0] + header[1] + header[2] + header[3])) + + uint8.byteOffset; + + let buffer = null; + if (nbBlocks > 0) { + buffer = new ArrayBuffer((header[2] * (nbBlocks - 1)) + header[3]); + } else { + buffer = new ArrayBuffer(16); + } + + // uncompressed buffer + const uncompressed = new Uint8Array(buffer); + const output = { + offset: 0, + uint8: uncompressed, + }; + + // console.log('header', header, compressedOffset, buffer.byteLength); + + for (let j = 0; j < nbBlocks; j++) { + const blockSize = header[4 + j]; + const compressedBlock = new Uint8Array(uint8.buffer, compressedOffset, blockSize); + uncompressBlock(compressedBlock, output); + compressedOffset += blockSize; + } + + const data = new Uint8Array(uncompressed.length + 8); + // set length header + // NOTE: This does not work for lengths that are greater than the max Uint32 value. + (new TYPED_ARRAY[headerType](data.buffer))[0] = uncompressed.length; + data.set(uncompressed, 8); + + binChunks.push(data); + + // Modify the DataArray offset to point to the offset in the binary array + // rather than the offset in the original AppendedData buffer. 
+ arrays[i].setAttribute('offset', bufferLength); + + bufferLength += data.length; + } + + const buffer = new ArrayBuffer(bufferLength); + const view = new Uint8Array(buffer); + + for (let i = 0, offset = 0; i < binChunks.length; ++i) { + view.set(binChunks[i], offset); + offset += binChunks[i].length; + } + + model.binaryBuffer = buffer; + } + if (!model.binaryBuffer) { console.error('Processing appended data format: requires binaryBuffer to parse'); return; From cb3ce3225cbb696a8a7312c25325f0d5b5723007 Mon Sep 17 00:00:00 2001 From: Forrest Li Date: Thu, 7 Dec 2017 13:09:55 -0500 Subject: [PATCH 7/7] fix(XMLReader): Fix reading appended and zlib formats This (mostly) correctly reads in VTP/VTI files in appended format, with optional base64 encoding and/or zlib compression. --- Sources/IO/XML/XMLReader/index.js | 135 +++++++++++++----------------- 1 file changed, 58 insertions(+), 77 deletions(-) diff --git a/Sources/IO/XML/XMLReader/index.js b/Sources/IO/XML/XMLReader/index.js index 475fba9ab99..eef00d63f0b 100644 --- a/Sources/IO/XML/XMLReader/index.js +++ b/Sources/IO/XML/XMLReader/index.js @@ -152,7 +152,7 @@ function processDataArray(size, dataArrayElem, compressor, byteOrder, headerType } } } else if (format === 'appended') { - const offset = Number(dataArrayElem.getAttribute('offset')); + let offset = Number(dataArrayElem.getAttribute('offset')); // read header // NOTE: this will incorrectly read the size if headerType is (U)Int64 and // the value requires (U)Int64. @@ -165,8 +165,15 @@ function processDataArray(size, dataArrayElem, compressor, byteOrder, headerType arraySize *= 2; } + offset += TYPED_ARRAY_BYTES[headerType]; + // read values - values = new TYPED_ARRAY[dataType](binaryBuffer, offset + TYPED_ARRAY_BYTES[headerType], arraySize); + // if offset is aligned to dataType, use view. Otherwise, slice due to misalignment. 
+ if (offset % TYPED_ARRAY_BYTES[dataType] === 0) { + values = new TYPED_ARRAY[dataType](binaryBuffer, offset, arraySize); + } else { + values = new TYPED_ARRAY[dataType](binaryBuffer.slice(offset, offset + header[0])); + } // remove higher order 32 bits assuming they're not used. if (dataType.indexOf('Int64') !== -1) { values = integer64to32(values); } @@ -295,7 +302,8 @@ function vtkXMLReader(publicAPI, model) { const type = rootElem.getAttribute('type'); const compressor = rootElem.getAttribute('compressor'); const byteOrder = rootElem.getAttribute('byte_order'); - const headerType = rootElem.getAttribute('header_type'); + // default to UInt32. I think version 0.1 vtp/vti files default to UInt32. + const headerType = rootElem.getAttribute('header_type') || 'UInt32'; if (compressor && compressor !== 'vtkZLibDataCompressor') { console.error('Invalid compressor', compressor); @@ -316,78 +324,54 @@ function vtkXMLReader(publicAPI, model) { if (rootElem.querySelector('AppendedData')) { const appendedDataElem = rootElem.querySelector('AppendedData'); const encoding = appendedDataElem.getAttribute('encoding'); + // Only get data arrays that are descendants of <Piece> + // We don't parse DataArrays from FieldData right now.
+ const arrayElems = rootElem.querySelectorAll('Piece DataArray'); + + let appendedBuffer = model.binaryBuffer; if (encoding === 'base64') { // substr(1) is to remove the '_' prefix - const appendedData = appendedDataElem.textContent.trim().substr(1); - const arrays = rootElem.querySelectorAll('DataArray'); - - // read binary chunks - const binChunks = []; - let bufferLength = 0; - for (let i = 0; i < arrays.length; ++i) { - const offset = Number(arrays[i].getAttribute('offset')); - let nextOffset = 0; - if (i === arrays.length - 1) { - nextOffset = appendedData.length; - } else { - nextOffset = Number(arrays[i + 1].getAttribute('offset')); - } - - const base64 = appendedData.substring(offset, nextOffset); - const data = toByteArray(base64); - binChunks.push(data); - - // Modify the DataArray offset to point to the offset in the binary array - // rather than the offset in the original AppendedData buffer. - arrays[i].setAttribute('offset', bufferLength); + appendedBuffer = appendedDataElem.textContent.trim().substr(1); + } - bufferLength += data.length; + // get data array chunks + const dataArrays = []; + for (let i = 0; i < arrayElems.length; ++i) { + const offset = Number(arrayElems[i].getAttribute('offset')); + let nextOffset = 0; + if (i === arrayElems.length - 1) { + nextOffset = appendedBuffer.length; + } else { + nextOffset = Number(arrayElems[i + 1].getAttribute('offset')); } - const buffer = new ArrayBuffer(bufferLength); - const view = new Uint8Array(buffer); - - for (let i = 0, offset = 0; i < binChunks.length; ++i) { - view.set(binChunks[i], offset); - offset += binChunks[i].length; + if (encoding === 'base64') { + dataArrays.push(toByteArray(appendedBuffer.substring(offset, nextOffset))); + } else { // encoding === 'raw' + // Need to slice the ArrayBuffer so readerHeader() works properly + dataArrays.push(new Uint8Array(appendedBuffer.slice(offset, nextOffset))); } - - model.binaryBuffer = buffer; } if (compressor === 'vtkZLibDataCompressor') { - 
const arrays = rootElem.querySelectorAll('DataArray'); - - // read binary chunks - const binChunks = []; - let bufferLength = 0; - for (let i = 0; i < arrays.length; ++i) { - const offset = Number(arrays[i].getAttribute('offset')); - let nextOffset = 0; - if (i === arrays.length - 1) { - nextOffset = model.binaryBuffer.byteLength; - } else { - nextOffset = Number(arrays[i + 1].getAttribute('offset')); - } - - // need to slice here otherwise readerHeader breaks - const uint8 = new Uint8Array(model.binaryBuffer.slice(offset, nextOffset)); + for (let arrayidx = 0; arrayidx < dataArrays.length; ++arrayidx) { + const dataArray = dataArrays[arrayidx]; // Header reading // Refer to processDataArray() above for info on header fields - const header = readerHeader(uint8, headerType); + const header = readerHeader(dataArray, headerType); const nbBlocks = header[1]; let compressedOffset = - uint8.length - - (header.reduce((a, b) => a + b, 0) - (header[0] + header[1] + header[2] + header[3])) + - uint8.byteOffset; + dataArray.length - + (header.reduce((a, b) => a + b, 0) - (header[0] + header[1] + header[2] + header[3])); let buffer = null; if (nbBlocks > 0) { buffer = new ArrayBuffer((header[2] * (nbBlocks - 1)) + header[3]); } else { - buffer = new ArrayBuffer(16); + // if there are no blocks, then default to a zero array of size 0. 
+ buffer = new ArrayBuffer(0); } // uncompressed buffer @@ -397,41 +381,38 @@ function vtkXMLReader(publicAPI, model) { uint8: uncompressed, }; - // console.log('header', header, compressedOffset, buffer.byteLength); - for (let j = 0; j < nbBlocks; j++) { - const blockSize = header[4 + j]; - const compressedBlock = new Uint8Array(uint8.buffer, compressedOffset, blockSize); + for (let i = 0; i < nbBlocks; i++) { + const blockSize = header[4 + i]; + const compressedBlock = new Uint8Array(dataArray.buffer, compressedOffset, blockSize); uncompressBlock(compressedBlock, output); compressedOffset += blockSize; } - const data = new Uint8Array(uncompressed.length + 8); + const data = new Uint8Array(uncompressed.length + TYPED_ARRAY_BYTES[headerType]); // set length header - // NOTE: This does not work for lengths that are greater than the max Uint32 value. + // TODO this does not work for lengths that are greater than the max Uint32 value. (new TYPED_ARRAY[headerType](data.buffer))[0] = uncompressed.length; - data.set(uncompressed, 8); - - binChunks.push(data); + data.set(uncompressed, TYPED_ARRAY_BYTES[headerType]); - // Modify the DataArray offset to point to the offset in the binary array - // rather than the offset in the original AppendedData buffer. 
- arrays[i].setAttribute('offset', bufferLength); - - bufferLength += data.length; + dataArrays[arrayidx] = data; } + } - const buffer = new ArrayBuffer(bufferLength); - const view = new Uint8Array(buffer); + const bufferLength = dataArrays.reduce((acc, arr) => acc + arr.length, 0); + const buffer = new ArrayBuffer(bufferLength); + const view = new Uint8Array(buffer); - for (let i = 0, offset = 0; i < binChunks.length; ++i) { - view.set(binChunks[i], offset); - offset += binChunks[i].length; - } - - model.binaryBuffer = buffer; + for (let i = 0, offset = 0; i < dataArrays.length; ++i) { + // set correct offsets + arrayElems[i].setAttribute('offset', offset); + // set final buffer data + view.set(dataArrays[i], offset); + offset += dataArrays[i].length; } + model.binaryBuffer = buffer; + if (!model.binaryBuffer) { console.error('Processing appended data format: requires binaryBuffer to parse'); return;