文章目录
简述
Cornerstone3D是一个基于CornerstoneJS的开源JavaScript库,用于在Web浏览器中呈现和处理3D医学影像。它利用 WebGL 技术提供了高性能的3D渲染,并提供了丰富的功能,包括交互式3D操作、先进的分割算法与可视化、虚拟现实等。
在旧版本的cornerstone.js中,可以利用cornerstone-nifti-image-loader来实现nifti文件显示,但是cornerstone更新后,cornerstone-nifti-image-loader目前并没有更新。
1.利用setStack显示nifti文件
这个方式是我在 cornerstone-nifti-image-loader 的 issue 区看到的:https://github.com/cornerstonejs/cornerstone-nifti-image-loader/issues/48
import * as cornerstone from '@cornerstonejs/core';
import * as cornerstoneNIFTIImageLoader from '@cornerstonejs/nifti-image-loader';
const { ViewportType } = cornerstone.Enums;
/**
 * Creates a square <div> container for a cornerstone viewport.
 * @param {string} size - CSS size used for both width and height.
 * @returns {HTMLDivElement} The styled container element.
 */
function createElement(size = '400px') {
  const element = document.createElement('div');
  Object.assign(element.style, {
    width: size,
    height: size,
    border: '1px solid red',
    margin: '10px',
  });
  // Disable the default right-click context menu so tools can use that button.
  element.oncontextmenu = (e) => e.preventDefault();
  return element;
}
/**
* Hardcoded metadata provider for NIFTI images, as they don't exist in the old cornerstone module
* @param type The type of metadata to retrieve
* @param imageId The imageId of the image to retrieve metadata for. Must start with 'nifti:'
* @returns {Object} The metadata object
*/
function additionalMetaDataProvider(type, imageId) {
  // Only answer for imageIds using the 'nifti:' scheme.
  if (!imageId.startsWith('nifti:')) {
    return;
  }
  // The NIFTI loader supplies no generalSeriesModule; report an unknown modality.
  if (type !== 'generalSeriesModule') {
    return;
  }
  return {
    modality: 'Unknown',
  };
}
/**
* Uses the NIFTI image loader to fetch metadata of a NIFTI, cache it in cornerstone,
* and return a list of imageIds for the frames.
*
* @returns {string[]} An array of imageIds for instances in the study.
*/
async function createImageIdsAndCacheMetaData(imageId) {
  if (!imageId.startsWith('nifti:')) {
    console.warn('createImageIdsAndCacheMetaData: imageId must have scheme "nifti". imageId: ', imageId);
    return;
  }
  // Load the whole volume once so its pixel data and metadata land in the
  // cornerstone cache.
  const imageIdObject = cornerstoneNIFTIImageLoader.nifti.ImageId.fromURL(imageId);
  const image = await cornerstone.imageLoader.loadAndCacheImage(imageIdObject.url);
  // The loader's multiFrame metadata tells us how many slices the file holds.
  const { numberOfFrames } = cornerstone.metaData.get('multiFrame', image.imageId);
  // One imageId per slice along the requested dimension, at time point 0.
  const imageIds = [];
  for (let i = 0; i < numberOfFrames; i += 1) {
    imageIds.push(`nifti:${imageIdObject.filePath}#${imageIdObject.slice.dimension}-${i},t-0`);
  }
  console.log('imageIds', imageIds);
  return imageIds;
}
/**
 * Demo entry point: creates three STACK viewports (one per nifti slice
 * dimension x/y/z), wires the legacy nifti image loader into the new
 * cornerstone module, and renders the middle slice of the volume in each.
 */
async function run() {
// Create elements to render into
const content = document.getElementById('content');
content.style.display = 'flex';
content.style.flexDirection = 'row';
const element1 = createElement();
content.appendChild(element1);
const element2 = createElement();
content.appendChild(element2);
const element3 = createElement();
content.appendChild(element3);
// Initialize cornerstone and tools
await cornerstone.init();
// Create a rendering engine
const renderingEngineId = 'myRenderingEngine';
const renderingEngine = new cornerstone.RenderingEngine(renderingEngineId);
// Create the viewports (of type STACK), one per slice axis
const viewportInputArray = [
{
viewportId: 'X_VIEWPORT',
type: ViewportType.STACK,
element: element1,
},
{
viewportId: 'Y_VIEWPORT',
type: ViewportType.STACK,
element: element2,
},
{
viewportId: 'Z_VIEWPORT',
type: ViewportType.STACK,
element: element3,
}
];
renderingEngine.setViewports(viewportInputArray);
const viewportX = renderingEngine.getViewport('X_VIEWPORT');
const viewportY = renderingEngine.getViewport('Y_VIEWPORT');
const viewportZ = renderingEngine.getViewport('Z_VIEWPORT');
// Register the nifti image loader
cornerstoneNIFTIImageLoader.external.cornerstone = cornerstone;
// NOTE: This is a hack to get around the fact that the nifti image loader
// uses the old cornerstone module, and we need to provide it with the
// new cornerstone module (events = eventTarget).
cornerstoneNIFTIImageLoader.external.cornerstone.events = cornerstone.eventTarget;
// cornerstoneNIFTIImageLoader.nifti.streamingMode = true;
// Register an additional metadata provider for Nifti images (for the generalSeriesModule, not provided by the package)
cornerstone.metaData.addProvider(
(type, imageId) => additionalMetaDataProvider(type, imageId),
1000 // Lower than the NIFTI loader's own provider (10000), so this one is called after it
);
// Example of a Nifti image, from the web.
// The number after # is the frame index
// const imageUrl = 'https://raw.githubusercontent.com/muschellij2/Neurohacking_data/master/BRAINIX/NIfTI/Output_3D_File.nii.gz#10'
// Will load a local image (replace 'xxx.nii.gz' with a real path/URL)
const imageUrl = `xxx.nii.gz`;
// Load the image and assign it to the viewport, for each orientation;
// each stack starts on its middle slice.
const imageIdsZ = await createImageIdsAndCacheMetaData(`nifti:${imageUrl}#z`);
viewportZ.setStack(imageIdsZ, Math.floor(imageIdsZ.length / 2));
viewportZ.render();
const imageIdsY = await createImageIdsAndCacheMetaData(`nifti:${imageUrl}#y`);
viewportY.setStack(imageIdsY, Math.floor(imageIdsY.length / 2));
viewportY.render();
const imageIdsX = await createImageIdsAndCacheMetaData(`nifti:${imageUrl}#x`);
viewportX.setStack(imageIdsX, Math.floor(imageIdsX.length / 2));
viewportX.render();
}
run();
2.自定义volumeLoad加载nifti
参考 cornerstone3D 的 PET-CT 例子(这段代码大家可以自行去 GitHub 上 clone,由于官方代码较长,这里不展示全部)。
官方效果https://www.cornerstonejs.org/live-examples/petct
我们需要在官方的例子上做一些修改
...
/**
 * Builds the PET-CT layout: 3x3 orthographic viewports (CT / PT / fusion,
 * each in axial/sagittal/coronal) plus a coronal PET MIP viewport, creates
 * and loads the two volumes, and assigns them to the viewports.
 * Assumes renderingEngine, viewportIds, the element* refs, the volume ids and
 * the transfer-function callbacks are defined elsewhere in the example.
 */
async function setUpDisplay() {
// Replace 'xxx.nii.gz' with real CT / PET file paths or URLs.
const ctUrl = 'xxx.nii.gz';
const ptUrl = 'xxx.nii.gz';
// Get Cornerstone imageIds and fetch metadata into RAM
const ctImageIds = await createImageIdsAndCacheMetaData(`nifti:${ctUrl}#z`);
const ptImageIds = await createImageIdsAndCacheMetaData(`nifti:${ptUrl}#z`);
// Define a volume in memory
const ctVolume = await volumeLoader.createAndCacheVolume(ctVolumeId, {
imageIds: ctImageIds,
});
// Define a volume in memory
const ptVolume = await volumeLoader.createAndCacheVolume(ptVolumeId, {
imageIds: ptImageIds,
});
// Create the viewports
const viewportInputArray = [
{
viewportId: viewportIds.CT.AXIAL,
type: ViewportType.ORTHOGRAPHIC,
element: element1_1,
defaultOptions: {
orientation: Enums.OrientationAxis.AXIAL,
},
},
{
viewportId: viewportIds.CT.SAGITTAL,
type: ViewportType.ORTHOGRAPHIC,
element: element1_2,
defaultOptions: {
orientation: Enums.OrientationAxis.SAGITTAL,
},
},
{
viewportId: viewportIds.CT.CORONAL,
type: ViewportType.ORTHOGRAPHIC,
element: element1_3,
defaultOptions: {
orientation: Enums.OrientationAxis.CORONAL,
},
},
{
viewportId: viewportIds.PT.AXIAL,
type: ViewportType.ORTHOGRAPHIC,
element: element2_1,
defaultOptions: {
orientation: Enums.OrientationAxis.AXIAL,
// White background for the PET viewports (inverted grayscale display)
background: <Types.Point3>[1, 1, 1],
},
},
{
viewportId: viewportIds.PT.SAGITTAL,
type: ViewportType.ORTHOGRAPHIC,
element: element2_2,
defaultOptions: {
orientation: Enums.OrientationAxis.SAGITTAL,
background: <Types.Point3>[1, 1, 1],
},
},
{
viewportId: viewportIds.PT.CORONAL,
type: ViewportType.ORTHOGRAPHIC,
element: element2_3,
defaultOptions: {
orientation: Enums.OrientationAxis.CORONAL,
background: <Types.Point3>[1, 1, 1],
},
},
{
viewportId: viewportIds.FUSION.AXIAL,
type: ViewportType.ORTHOGRAPHIC,
element: element3_1,
defaultOptions: {
orientation: Enums.OrientationAxis.AXIAL,
},
},
{
viewportId: viewportIds.FUSION.SAGITTAL,
type: ViewportType.ORTHOGRAPHIC,
element: element3_2,
defaultOptions: {
orientation: Enums.OrientationAxis.SAGITTAL,
},
},
{
viewportId: viewportIds.FUSION.CORONAL,
type: ViewportType.ORTHOGRAPHIC,
element: element3_3,
defaultOptions: {
orientation: Enums.OrientationAxis.CORONAL,
},
},
{
viewportId: viewportIds.PETMIP.CORONAL,
type: ViewportType.ORTHOGRAPHIC,
element: element_mip,
defaultOptions: {
orientation: Enums.OrientationAxis.CORONAL,
background: <Types.Point3>[1, 1, 1],
},
},
];
renderingEngine.setViewports(viewportInputArray);
// Set the volumes to load (fire-and-forget: streaming continues in the background)
ptVolume.load();
ctVolume.load();
// Set volumes on the viewports
await setVolumesForViewports(
renderingEngine,
[
{
volumeId: ctVolumeId,
callback: setCtTransferFunctionForVolumeActor,
},
],
[viewportIds.CT.AXIAL, viewportIds.CT.SAGITTAL, viewportIds.CT.CORONAL]
);
await setVolumesForViewports(
renderingEngine,
[
{
volumeId: ptVolumeId,
callback: setPetTransferFunctionForVolumeActor,
},
],
[viewportIds.PT.AXIAL, viewportIds.PT.SAGITTAL, viewportIds.PT.CORONAL]
);
// Fusion viewports get both volumes; the PET one with a colormap overlay.
await setVolumesForViewports(
renderingEngine,
[
{
volumeId: ctVolumeId,
callback: setCtTransferFunctionForVolumeActor,
},
{
volumeId: ptVolumeId,
callback: setPetColorMapTransferFunctionForVolumeActor,
},
],
[
viewportIds.FUSION.AXIAL,
viewportIds.FUSION.SAGITTAL,
viewportIds.FUSION.CORONAL,
]
);
// Calculate size of fullBody pet mip
const ptVolumeDimensions = ptVolume.dimensions;
// Only make the MIP as large as it needs to be: the slab thickness is the
// diagonal of the volume's bounding box, so the MIP covers any rotation.
const slabThickness = Math.sqrt(
ptVolumeDimensions[0] * ptVolumeDimensions[0] +
ptVolumeDimensions[1] * ptVolumeDimensions[1] +
ptVolumeDimensions[2] * ptVolumeDimensions[2]
);
// NOTE(review): this call is not awaited, unlike the three above — confirm
// whether that is intentional before relying on render order.
setVolumesForViewports(
renderingEngine,
[
{
volumeId: ptVolumeId,
callback: setPetTransferFunctionForVolumeActor,
blendMode: BlendModes.MAXIMUM_INTENSITY_BLEND,
slabThickness,
},
],
[viewportIds.PETMIP.CORONAL]
);
initializeCameraSync(renderingEngine);
// Render the viewports
renderingEngine.render();
}
...
/**
 * Demo entry point. Order matters: the libraries must be initialized before
 * the rendering engine exists, and the viewports must exist before tools and
 * synchronizers can reference them.
 */
async function run() {
// Init Cornerstone and related libraries
await initDemo();
// Instantiate a rendering engine
renderingEngine = new RenderingEngine(renderingEngineId);
// Display needs to be set up first so that we have viewport to reference for tools and synchronizers.
await setUpDisplay();
// Tools and synchronizers can be set up in any order.
setUpToolGroups();
setUpSynchronizers();
}
run();
2.1.修改initDemo.js
import * as cornerstone from "@cornerstonejs/core";
import * as cornerstoneTools from "@cornerstonejs/tools";
import * as myCustomProvider from './myCustomProvider';
import { volumeLoader } from "@cornerstonejs/core";
import {
cornerstoneStreamingDynamicImageVolumeLoader,
cornerstoneStreamingImageVolumeLoader
} from "@cornerstonejs/streaming-image-volume-loader";
import cornerstoneNiftiStreamingImageVolumeLoader from "./niftiImageVolumeLoader";
// Register the nifti image loader
import * as cornerstoneNIFTIImageLoader from '@cornerstonejs/nifti-image-loader';
// NOTE(review): `nifti.register` may not exist in every published build of the
// loader — confirm against the installed version.
cornerstoneNIFTIImageLoader.nifti.register(cornerstone)
// Hack: the nifti loader still targets the legacy cornerstone API, so hand it
// the new module and alias its `events` to the new eventTarget.
cornerstoneNIFTIImageLoader.external.cornerstone = cornerstone;
cornerstoneNIFTIImageLoader.external.cornerstone.events = cornerstone.eventTarget;
// Streaming mode off — presumably slices come from whole-file loads rather
// than ranged requests; see the loader's docs to confirm.
cornerstoneNIFTIImageLoader.nifti.streamingMode = false;
// Polyfill for the global structuredClone (natively available in Node >= 17
// and modern browsers). This JSON round-trip is only a simple stand-in: it
// drops undefined values and functions and mangles Dates/Maps/Sets — replace
// with a real deep clone if those types ever need copying.
// Use globalThis (not `global`) so this works in browsers too, where the
// Node-only `global` binding is undefined and would throw a ReferenceError.
if (!globalThis.structuredClone) {
  globalThis.structuredClone = function (object) {
    return JSON.parse(JSON.stringify(object));
  };
}
/**
 * Initializes the demo: registers the custom metadata provider and the
 * volume loaders (including the custom 'nifti' loader), then boots
 * cornerstone core and tools. Registration happens before init so the
 * loaders are available as soon as cornerstone is up.
 */
export default async function initDemo() {
// Custom metadata provider for nifti imageIds (see myCustomProvider.get)
cornerstone.metaData.addProvider(
myCustomProvider.get.bind(myCustomProvider),
100
);
// Custom volume loader handling volumeIds with the 'nifti' scheme
volumeLoader.registerVolumeLoader(
'nifti',// custom scheme
cornerstoneNiftiStreamingImageVolumeLoader
);
// initVolumeLoader
volumeLoader.registerUnknownVolumeLoader(
cornerstoneStreamingImageVolumeLoader
);
volumeLoader.registerVolumeLoader(
'cornerstoneStreamingImageVolume',
cornerstoneStreamingImageVolumeLoader
);
volumeLoader.registerVolumeLoader(
'cornerstoneStreamingDynamicImageVolume',
cornerstoneStreamingDynamicImageVolumeLoader
);
await cornerstone.init();
await cornerstoneTools.init();
}
2.2.自定义myCustomProvider.js
这个文件参考的是@cornerstonejs/nifti-image-loader/src/nifti/metaData/metaDataProvider.js。感兴趣的可以直接看源码
import * as cornerstoneNIFTIImageLoader from '@cornerstonejs/nifti-image-loader';
import { decimalToFraction } from "@cornerstonejs/nifti-image-loader/src/nifti/metaData/decimalToFraction";
// External helpers used by the metadata getters below.
const dependencies = {
decimalToFraction,
niftiReader: cornerstoneNIFTIImageLoader.external.niftiReader
};
// Per-imageId metadata cache: imageId -> metadata object registered via add().
const scalingPerImageId = {};
// Registers metadata for an imageId so get() can answer queries about it.
function add(imageId, scalingMetaData) {
scalingPerImageId[imageId] = scalingMetaData
}
/**
 * Cornerstone metadata provider for 'nifti:' imageIds.
 * Answers DICOM-style metadata module queries from the data registered via add().
 *
 * @param {string} type - The metadata module name being queried.
 * @param {string} imageId - The imageId; must use the 'nifti:' scheme.
 * @returns {Object|undefined} The metadata module, or undefined when this
 *   provider has nothing to say (wrong scheme, unknown type, or unregistered id).
 */
function get(type, imageId) {
  if (!imageId) return;
  const colonIndex = imageId.indexOf(':');
  const scheme = imageId.substring(0, colonIndex);
  if (scheme !== 'nifti') return;
  // The series module needs no per-image data, so answer it before the cache lookup.
  if (type === 'generalSeriesModule') {
    return {
      modality: 'unknown'
    };
  }
  const metaData = scalingPerImageId[imageId];
  // Guard: if this imageId was never registered via add(), return undefined so
  // other providers can answer, instead of throwing a TypeError on the
  // property accesses below.
  if (!metaData) return;
  const frameOfReferenceUID = '1.2.840';
  switch (type) {
    // Functional (time-series) information
    case 'functional': {
      return {
        frameOfReferenceUID,
        timeSlices: metaData.timeSlices
      };
    }
    case 'imagePlane':
    case 'imagePlaneModule': {
      return {
        frameOfReferenceUID,
        columns: metaData.columns,
        rows: metaData.rows,
        imageOrientationPatient: metaData.imageOrientationPatient,
        columnCosines: metaData.columnCosines,
        rowCosines: metaData.rowCosines,
        imagePositionPatient: metaData.imagePositionPatient,
        sliceThickness: metaData.slicePixelSpacing,
        columnPixelSpacing: metaData.columnPixelSpacing,
        rowPixelSpacing: metaData.rowPixelSpacing,
        pixelSpacing: metaData.pixelSpacing
      };
    }
    case 'imagePixel':
    case 'imagePixelModule': {
      return {
        samplesPerPixel: getSamplesPerPixel(metaData),
        photometricInterpretation: getPhotometricInterpretation(metaData, dependencies.niftiReader),
        rows: metaData.rows,
        columns: metaData.columns,
        bitsAllocated: metaData.header.numBitsPerVoxel,
        bitsStored: metaData.header.numBitsPerVoxel,
        highBit: metaData.header.numBitsPerVoxel - 1,
        pixelRepresentation: getPixelRepresentation(metaData, dependencies.niftiReader),
        planarConfiguration: getPlanarConfiguration(metaData),
        pixelAspectRatio: getPixelAspectRatio(metaData),
        smallestPixelValue: metaData.minPixelValue,
        largestPixelValue: metaData.maxPixelValue
      };
    }
    case 'modalityLut':
    case 'modalityLutModule':
      return {
        rescaleIntercept: metaData.intercept,
        rescaleSlope: metaData.slope,
        rescaleType: 'US',
        modalityLutSequence: undefined
      };
    case 'voiLut':
    case 'voiLutModule':
      return {
        windowCenter: metaData.windowCenter,
        windowWidth: metaData.windowWidth,
        voiLutSequence: undefined
      };
    case 'multiFrame':
    case 'multiFrameModule':
      return {
        numberOfFrames: metaData.numberOfFrames,
        frameIncrementPointer: undefined,
        stereoPairsPresent: 'NO'
      };
    case 'scalingModule':
      // NOTE(review): SUV factors hard-coded to 1 — confirm this is acceptable
      // for the PET display, otherwise compute them from the series data.
      return {
        suvbw: 1,
        suvlbm: 1,
        suvbsa: 1,
      }
    default:
      return;
  }
}
//
// Samples per voxel: taken from the fifth nifti dimension when it is declared
// (dims[0] >= 5) and greater than 1; otherwise a single sample per voxel.
function getSamplesPerPixel (metaData) {
  const { dims } = metaData.header;
  if (dims[0] >= 5 && dims[5] > 1) {
    return dims[5];
  }
  return 1;
}
// Maps the nifti datatype to a DICOM photometric interpretation: RGB24 data
// with 3 (RGB) or 4 (RGBA) samples per voxel is reported as 'RGB'; everything
// else defaults to 'MONOCHROME2', the most common interpretation.
function getPhotometricInterpretation (metaData, niftiReader) {
  const samplesPerPixel = getSamplesPerPixel(metaData);
  const isRgb24 = metaData.header.datatypeCode === niftiReader.NIFTI1.TYPE_RGB24;
  if (isRgb24 && (samplesPerPixel === 3 || samplesPerPixel === 4)) {
    return 'RGB';
  }
  return 'MONOCHROME2';
}
// Maps the nifti datatype to the DICOM pixel representation:
// '0000H' = unsigned integer, '0001H' = signed integer (2-complement).
// Float and rgb(a) voxels are converted to Uint16 by the loader, so they are
// reported as unsigned. Unknown datatypes yield undefined.
function getPixelRepresentation (metaData, niftiReader) {
  const { NIFTI1 } = niftiReader;
  const code = metaData.header.datatypeCode;
  const signedTypes = [
    NIFTI1.TYPE_INT8,
    NIFTI1.TYPE_INT16,
    NIFTI1.TYPE_INT32,
    NIFTI1.TYPE_INT64,
  ];
  if (signedTypes.includes(code)) {
    return '0001H';
  }
  const unsignedTypes = [
    NIFTI1.TYPE_UINT8,
    NIFTI1.TYPE_UINT16,
    NIFTI1.TYPE_UINT32,
    NIFTI1.TYPE_UINT64,
    NIFTI1.TYPE_FLOAT32,
    NIFTI1.TYPE_FLOAT64,
    NIFTI1.TYPE_RGB,
    NIFTI1.TYPE_RGBA,
  ];
  if (unsignedTypes.includes(code)) {
    return '0000H';
  }
  return undefined;
}
// Planar configuration is only meaningful for multi-sample (e.g. RGB) data.
// Value 0 means interleaved samples (RGB RGB RGB ...), which is how nifti
// always organizes multi-sample voxels; single-sample images have no value.
function getPlanarConfiguration (metaData) {
  if (getSamplesPerPixel(metaData) > 1) {
    return 0;
  }
  return undefined;
}
// Pixel aspect ratio as a 'vertical/horizontal' fraction string, derived from
// the in-plane voxel sizes pixDims[1] and pixDims[2].
// TODO what if z is not the slice dim?
function getPixelAspectRatio (metaData) {
  const [, horizontalSize, verticalSize] = metaData.header.pixDims;
  const { numerator, denominator } = dependencies.decimalToFraction(verticalSize / horizontalSize);
  return `${numerator}/${denominator}`;
}
export { add, get };
2.3.自定义niftiImageVolumeLoader.js
这部分代码参照的是 @cornerstonejs/streaming-image-volume-loader的cornerstoneStreamingImageVolumeLoader
import { cache, utilities, Enums, getShouldUseSharedArrayBuffer, getConfiguration, utilities as csUtils, } from '@cornerstonejs/core';
import { vec3 } from 'gl-matrix';
import { StreamingImageVolume } from "@cornerstonejs/streaming-image-volume-loader";
import {
sortImageIdsAndGetSpacing
} from "@cornerstonejs/streaming-image-volume-loader/dist/cjs/helpers";
const { createUint8SharedArray, createFloat32SharedArray, createUint16SharedArray, createInt16SharedArray, } = utilities;
import { makeVolumeMetadata } from "@/components/3DDemo/makeVolumeMetadata";
/**
 * Volume loader for 'nifti:' volumeIds, modeled on the
 * cornerstoneStreamingImageVolumeLoader from
 * @cornerstonejs/streaming-image-volume-loader.
 *
 * @param {string} volumeId - The id to register the volume under.
 * @param {{ imageIds: string[] }} options - Per-slice imageIds making up the volume.
 * @returns {{ promise: Promise, decache: Function, cancel: Function }}
 * @throws {Error} If no imageIds are provided, or the cache cannot hold the volume.
 */
function cornerstoneNiftiStreamingImageVolumeLoader(volumeId, options) {
  if (!options || !options.imageIds || !options.imageIds.length) {
    throw new Error('ImageIds must be provided to create a streaming image volume');
  }
  const { useNorm16Texture, preferSizeOverAccuracy } = getConfiguration().rendering;
  const use16BitDataType = useNorm16Texture || preferSizeOverAccuracy;
  async function getStreamingImageVolume() {
    const { imageIds } = options;
    // Use the middle slice as the metadata reference for the whole volume.
    const imageIdIndex = Math.floor(imageIds.length / 2);
    const imageId = imageIds[imageIdIndex];
    const volumeMetadata = makeVolumeMetadata(imageId);
    const scalingParameters = csUtils.getScalingParameters(imageId);
    // A negative rescale forces a signed (or float) backing array below.
    const hasNegativeRescale = scalingParameters.rescaleIntercept < 0 ||
      scalingParameters.rescaleSlope < 0;
    const { BitsAllocated, PixelRepresentation, PhotometricInterpretation, ImageOrientationPatient, PixelSpacing, Columns, Rows, } = volumeMetadata;
    // Derive the scan-axis normal from the row/column direction cosines.
    const rowCosineVec = vec3.fromValues(ImageOrientationPatient[0], ImageOrientationPatient[1], ImageOrientationPatient[2]);
    const colCosineVec = vec3.fromValues(ImageOrientationPatient[3], ImageOrientationPatient[4], ImageOrientationPatient[5]);
    const scanAxisNormal = vec3.create();
    vec3.cross(scanAxisNormal, rowCosineVec, colCosineVec);
    const { zSpacing, origin, sortedImageIds } = sortImageIdsAndGetSpacing(imageIds, scanAxisNormal);
    const numFrames = imageIds.length;
    // Spacing order: column spacing, row spacing, slice spacing.
    const spacing = [PixelSpacing[1], PixelSpacing[0], zSpacing];
    const dimensions = [Columns, Rows, numFrames];
    const direction = [
      ...rowCosineVec,
      ...colCosineVec,
      ...scanAxisNormal,
    ];
    const signed = PixelRepresentation === 1;
    const numComponents = PhotometricInterpretation === 'RGB' ? 3 : 1;
    const useSharedArrayBuffer = getShouldUseSharedArrayBuffer();
    const length = dimensions[0] * dimensions[1] * dimensions[2];
    // Verify the cache can hold the buffer, evicting old entries if needed.
    const handleCache = (sizeInBytes) => {
      if (!cache.isCacheable(sizeInBytes)) {
        throw new Error(Enums.Events.CACHE_SIZE_EXCEEDED);
      }
      cache.decacheIfNecessaryUntilBytesAvailable(sizeInBytes);
    };
    let scalarData, sizeInBytes;
    switch (BitsAllocated) {
      case 8:
        if (signed) {
          throw new Error('8 Bit signed images are not yet supported by this plugin.');
        }
        sizeInBytes = length;
        handleCache(sizeInBytes);
        scalarData = useSharedArrayBuffer
          ? createUint8SharedArray(length)
          : new Uint8Array(length);
        break;
      case 16:
        // Without 16-bit texture support the data is stored as Float32.
        if (!use16BitDataType) {
          sizeInBytes = length * 4;
          // BUGFIX: this branch previously skipped the cache-capacity check
          // that every other allocation path performs.
          handleCache(sizeInBytes);
          scalarData = useSharedArrayBuffer
            ? createFloat32SharedArray(length)
            : new Float32Array(length);
          break;
        }
        sizeInBytes = length * 2;
        if (signed || hasNegativeRescale) {
          handleCache(sizeInBytes);
          scalarData = useSharedArrayBuffer
            ? createInt16SharedArray(length)
            : new Int16Array(length);
          break;
        }
        if (!signed && !hasNegativeRescale) {
          handleCache(sizeInBytes);
          scalarData = useSharedArrayBuffer
            ? createUint16SharedArray(length)
            : new Uint16Array(length);
          break;
        }
        // Unreachable fallback, kept for parity with the upstream loader.
        sizeInBytes = length * 4;
        handleCache(sizeInBytes);
        scalarData = useSharedArrayBuffer
          ? createFloat32SharedArray(length)
          : new Float32Array(length);
        break;
      case 24:
        // RGB: one byte per component.
        sizeInBytes = length * numComponents;
        handleCache(sizeInBytes);
        scalarData = useSharedArrayBuffer
          ? createUint8SharedArray(length * numComponents)
          : new Uint8Array(length * numComponents);
        break;
      case 32:
        // NOTE(review): sizeInBytes counts 1 byte per component here, but the
        // backing store is Float32 (4 bytes per component) — confirm whether
        // this should be length * numComponents * 4.
        sizeInBytes = length * numComponents;
        handleCache(sizeInBytes);
        scalarData = useSharedArrayBuffer
          ? createFloat32SharedArray(length * numComponents)
          : new Float32Array(length * numComponents);
        break;
    }
    const streamingImageVolume = new StreamingImageVolume({
      volumeId,
      metadata: volumeMetadata,
      dimensions,
      spacing,
      origin,
      direction,
      scalarData,
      sizeInBytes,
    }, {
      imageIds: sortedImageIds,
      loadStatus: {
        loaded: false,
        loading: false,
        cancelled: false,
        cachedFrames: [],
        callbacks: [],
      },
    });
    return streamingImageVolume;
  }
  const streamingImageVolumePromise = getStreamingImageVolume();
  return {
    promise: streamingImageVolumePromise,
    decache: () => {
      streamingImageVolumePromise.then((streamingImageVolume) => {
        streamingImageVolume.destroy();
      });
    },
    cancel: () => {
      streamingImageVolumePromise.then((streamingImageVolume) => {
        streamingImageVolume.cancelLoading();
      });
    },
  };
}
export default cornerstoneNiftiStreamingImageVolumeLoader;
2.4.自定义createImageIdsAndCacheMetaData.js
import * as myCustomProvider from "./myCustomProvider";
import * as cornerstoneNIFTIImageLoader from '@cornerstonejs/nifti-image-loader';
import * as cornerstone from "@cornerstonejs/core";
/**
* Uses the NIFTI image loader to fetch metadata of a NIFTI, cache it in cornerstone,
* and return a list of imageIds for the frames.
* @returns {string[]} An array of imageIds for instances in the study.
*/
const createImageIdsAndCacheMetaData = async (imageId) => {
  if (!imageId.startsWith('nifti:')) {
    console.warn('createImageIdsAndCacheMetaData: imageId must have scheme "nifti". imageId: ', imageId);
    return;
  }
  // Load the whole volume once so pixel data and header land in the
  // cornerstone cache, then register the combined metadata for the volume id.
  const imageIdObject = cornerstoneNIFTIImageLoader.nifti.ImageId.fromURL(imageId);
  const image = await cornerstone.imageLoader.loadAndCacheImage(imageIdObject.url);
  const header = await cornerstoneNIFTIImageLoader.nifti.loadHeader(imageIdObject.url);
  myCustomProvider.add(imageId, {
    ...header,
    ...imageIdObject,
    ...image
  });
  // One imageId per slice along the requested dimension; load each slice and
  // register its metadata with the custom provider, sequentially to keep the
  // loader's cache access serialized.
  const { numberOfFrames } = cornerstone.metaData.get('multiFrame', image.imageId);
  const imageIds = [];
  for (let i = 0; i < numberOfFrames; i += 1) {
    const sliceImageId = `nifti:${imageIdObject.filePath}#${imageIdObject.slice.dimension}-${i},t-0`;
    const sliceImage = await cornerstone.imageLoader.loadAndCacheImage(sliceImageId);
    const sliceHeader = await cornerstoneNIFTIImageLoader.nifti.loadHeader(sliceImageId);
    myCustomProvider.add(sliceImageId, {
      ...sliceImage,
      ...sliceHeader,
    });
    imageIds.push(sliceImageId);
  }
  return imageIds;
};
export {
createImageIdsAndCacheMetaData
}
总结
通过以上方式只是简单实现了pet-ct融合,还存在许多bug,如果你有更好的实现方式,欢迎分享给我!
2023-10-31更新
在最近的cornerstone.js中更新了nifti的例子,有需要的朋友推荐参考官方例子哦!
地址:
https://www.cornerstonejs.org/live-examples/niftiwithtools
https://www.cornerstonejs.org/live-examples/niftibasic