diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index ff7fe4ac8..f08b668dd 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -3,23 +3,23 @@ name: Build
 on: [push, pull_request]
 
 jobs:
-  client:
+  build-and-deploy:
+    permissions:
+      contents: 'read'
+      id-token: 'write'
+      deployments: 'write'
     strategy:
-      matrix:
-        node-version:
-          - '16.x'
-        os:
-          - 'ubuntu-latest'
-          - 'windows-latest'
-          - 'macos-latest'
-    runs-on: ${{ matrix.os }}
+      matrix:
+        node-version:
+          - '16.x'
+    runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v2
       - name: Use Node.js ${{ matrix.node-version }}
         uses: actions/setup-node@v1
         with:
           node-version: ${{ matrix.node-version }}
-      - uses: actions/cache@v2
+      - uses: actions/cache@v3
         with:
           path: "**/node_modules"
           key: ${{ runner.os }}-${{ matrix.node-version }}-node_modules-${{ hashFiles('**/package-lock.json') }}
@@ -27,226 +27,43 @@ jobs:
       - name: Typecheck with TypeScript
         run: npm run typecheck
       - name: Build client bundles
-        run: npm run build -- --no-typecheck
-      - name: Build JavaScript module
-        run: npm run build-module -- --no-typecheck
-      - name: Build Python client bundles
-        run: npm run build-python -- --no-typecheck
-      - uses: ./.github/actions/setup-firefox
-      - name: Run JavaScript tests (including WebGL)
-        # Swiftshader, used by Chrome headless, crashes when running Neuroglancer
-        # tests.
-        #
-        # The only reliable headless configuration is Firefox on Linux under
-        # xvfb-run, which uses Mesa software rendering.
-        if: startsWith(runner.os, 'Linux')
-        run: xvfb-run --auto-servernum --server-args='-screen 0 1024x768x24' npm run test -- --browsers Firefox
-      - name: Run JavaScript tests (excluding WebGL)
-        if: ${{ !startsWith(runner.os, 'Linux') }}
-        run: npm run test -- --browsers ChromeHeadless --define=NEUROGLANCER_SKIP_WEBGL_TESTS
-      - name: Run JavaScript benchmarks
-        run: npm run benchmark
-
-  # Builds Python package and runs Python tests
-  #
-  # On ubuntu-latest, this also runs browser-based tests. On Mac OS and
-  # Windows, this only runs tests that do not require a browser, since a working
-  # headless WebGL2 implementation is not available on Github actions.
-  python-tox:
-    strategy:
-      matrix:
-        python-version:
-          - '3.8'
-          - '3.9'
-          - '3.10'
-          - '3.11'
-        node-version:
-          - '16.x'
-        os:
-          - 'ubuntu-latest'
-          - 'windows-latest'
-          - 'macos-latest'
-    runs-on: ${{ matrix.os }}
-    steps:
-      - uses: actions/checkout@v2
-        with:
-          # Need full history to determine version number.
-          fetch-depth: 0
-      - name: Use Node.js ${{ matrix.node-version }}
-        uses: actions/setup-node@v1
-        with:
-          node-version: ${{ matrix.node-version }}
-      - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v2
-        with:
-          python-version: ${{ matrix.python-version }}
-      - uses: actions/cache@v2
-        with:
-          path: "**/node_modules"
-          key: ${{ runner.os }}-${{ matrix.node-version }}-node_modules-${{ hashFiles('**/package-lock.json') }}
-      - name: Get pip cache dir
-        id: pip-cache
-        run: |
-          echo "::set-output name=dir::$(pip cache dir)"
-      - uses: actions/cache@v2
-        with:
-          path: ${{ steps.pip-cache.outputs.dir }}
-          key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('setup.py') }}
-      # Uncomment the action below for an interactive shell
-      # - name: Setup tmate session
-      #   uses: mxschmitt/action-tmate@v3
-      - name: Install Python packaging/test tools
-        run: python -m pip install --upgrade pip tox wheel numpy pytest
-      - uses: ./.github/actions/setup-firefox
-      - name: Test with tox
-        run: tox -e ${{ fromJSON('["skip-browser-tests","firefox-xvfb"]')[runner.os == 'Linux'] }}
-        env:
-          TOX_TESTENV_PASSENV: GH_TOKEN
-          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      # Verify that editable install works
-      - name: Install in editable form
-        run: pip install -e . --config-settings editable_mode=strict
-      - name: Run Python tests against editable install (excluding WebGL)
-        working-directory: python/tests
-        run: pytest -vv --skip-browser-tests
-
-  python-build-package:
-    strategy:
-      matrix:
-        include:
-          - os: 'ubuntu-latest'
-            cibw_build: '*'
-            wheel_identifier: 'linux'
-            node-version: '16.x'
-          - os: 'windows-latest'
-            cibw_build: '*'
-            wheel_identifier: 'windows'
-            node-version: '16.x'
-          - os: 'macos-latest'
-            cibw_build: '*_x86_64'
-            wheel_identifier: 'macos_x86_64'
-            node-version: '16.x'
-          - os: 'macos-latest'
-            cibw_build: '*_arm64'
-            wheel_identifier: 'macos_arm64'
-            node-version: '16.x'
-    runs-on: ${{ matrix.os }}
-    steps:
-      - uses: actions/checkout@v2
-        with:
-          # Need full history to determine version number.
-          fetch-depth: 0
-      - name: Use Node.js
-        uses: actions/setup-node@v1
-        with:
-          node-version: ${{ matrix.node-version }}
-      - name: Set up Python
-        uses: actions/setup-python@v2
-        with:
-          python-version: 3.x
-      - uses: actions/cache@v2
-        with:
-          path: "**/node_modules"
-          key: ${{ runner.os }}-${{ matrix.node-version }}-node_modules-${{ hashFiles('**/package-lock.json') }}
-      - name: Get pip cache dir
-        id: pip-cache
-        run: |
-          echo "::set-output name=dir::$(pip cache dir)"
-      - uses: actions/cache@v2
-        with:
-          path: ${{ steps.pip-cache.outputs.dir }}
-          key: ${{ runner.os }}-buildwheel-${{ hashFiles('setup.py') }}
-      - run: npm install
-      - run: |
-          build_info="{'tag':'$(git describe --always --tags)', 'url':'https://github.com/google/neuroglancer/commit/$(git rev-parse HEAD)', 'timestamp':'$(date)'}"
-          npm run build-python -- --no-typecheck --define NEUROGLANCER_BUILD_INFO="${build_info}"
+        run: node ./config/esbuild-cli.js --config=min --no-typecheck --define STATE_SERVERS=$(cat config/state_servers.json | tr -d " \t\n\r")
+      - run: cp -r ./dist/min appengine/frontend/static/
+      - name: Get branch name (merge)
+        if: github.event_name != 'pull_request'
         shell: bash
-      - name: Check for dirty working directory
-        run: git diff --exit-code
-      - name: Build Python source distribution (sdist)
-        run: python setup.py sdist --format gztar
-        if: ${{ runner.os == 'Linux' }}
-      - name: Install cibuildwheel
-        run: pip install cibuildwheel
-      - name: Build Python wheels
-        run: bash -xve ./python/build_tools/cibuildwheel.sh
-        env:
-          # On Linux, share pip cache with manylinux docker containers
-          CIBW_ENVIRONMENT_LINUX: PIP_CACHE_DIR=/host${{ steps.pip-cache.outputs.dir }}
-          CIBW_BEFORE_ALL_LINUX: /project/python/build_tools/cibuildwheel_linux_cache_setup.sh /host${{ steps.pip-cache.outputs.dir }}
-          CIBW_BUILD: ${{ matrix.cibw_build }}
-      - name: Upload wheels as artifacts
-        uses: actions/upload-artifact@v2
-        with:
-          name: python-wheels-${{ matrix.wheel_identifier }}
-          path: |
-            dist/*.whl
-            dist/*.tar.gz
-
-  python-publish-package:
-    # Only publish package on push to tag or default branch.
-    if: ${{ github.event_name == 'push' && (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/master') }}
-    runs-on: ubuntu-latest
-    needs:
-      - 'python-build-package'
-    steps:
-      - uses: actions/download-artifact@v2
-        with:
-          name: python-wheels-linux
-          path: dist
-      - uses: actions/download-artifact@v2
-        with:
-          name: python-wheels-macos_x86_64
-          path: dist
-      - uses: actions/download-artifact@v2
-        with:
-          name: python-wheels-macos_arm64
-          path: dist
-      - uses: actions/download-artifact@v2
-        with:
-          name: python-wheels-windows
-          path: dist
-      # - name: Publish to PyPI (test server)
-      #   uses: pypa/gh-action-pypi-publish@54b39fb9371c0b3a6f9f14bb8a67394defc7a806 # 2020-09-25
-      #   with:
-      #     user: __token__
-      #     password: ${{ secrets.pypi_test_token }}
-      - name: Publish to PyPI (main server)
-        uses: pypa/gh-action-pypi-publish@54b39fb9371c0b3a6f9f14bb8a67394defc7a806 # 2020-09-25
-        with:
-          user: __token__
-          password: ${{ secrets.pypi_token }}
-        if: ${{ startsWith(github.ref, 'refs/tags/v') }}
-  ngauth:
-    strategy:
-      matrix:
-        go-version: ['1.19']
-        os:
-          - ubuntu-latest
-    runs-on: ${{ matrix.os }}
-    steps:
-      - uses: actions/checkout@v2
-      - name: Setup go ${{ matrix.go-version }}
-        uses: actions/setup-go@v1
-        with:
-          go-version: ${{ matrix.go-version }}
-      - uses: actions/cache@v2
-        with:
-          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-${{ matrix.go-version }}-go-${{ hashFiles('**/go.sum') }}
-      - run: go build .
-        working-directory: ngauth_server
-  wasm:
-    # Ensures that .wasm files are reproducible.
-    strategy:
-      matrix:
-        os:
-          - ubuntu-latest
-    runs-on: ${{ matrix.os }}
-    steps:
-      - uses: actions/checkout@v2
-      - run: ./src/neuroglancer/mesh/draco/build.sh
-      - run: ./src/neuroglancer/sliceview/compresso/build.sh
-      - run: ./src/neuroglancer/sliceview/png/build.sh
-      # Check that there are no differences.
-      - run: git diff --exit-code
+        run: echo "BRANCH_NAME=$(echo ${GITHUB_REF#refs/heads/} | tr / -)" >> $GITHUB_ENV
+      - name: Get branch name (pull request)
+        if: github.event_name == 'pull_request'
+        shell: bash
+        run: echo "BRANCH_NAME=$(echo ${GITHUB_HEAD_REF} | tr / -)" >> $GITHUB_ENV
+      - run: echo "BRANCH_NAME_URL=$(echo ${{ env.BRANCH_NAME }} | tr / - | tr _ -)" >> $GITHUB_ENV
+      - name: start deployment
+        uses: bobheadxi/deployments@v1
+        id: deployment
+        with:
+          step: start
+          token: ${{ secrets.GITHUB_TOKEN }}
+          env: ${{ env.BRANCH_NAME }}
+          desc: Setting up staging deployment for ${{ env.BRANCH_NAME }}
+      - id: 'auth'
+        uses: 'google-github-actions/auth@v1'
+        with:
+          workload_identity_provider: 'projects/483670036293/locations/global/workloadIdentityPools/neuroglancer-github/providers/github'
+          service_account: 'chris-apps-deploy@seung-lab.iam.gserviceaccount.com'
+      - id: deploy
+        uses: google-github-actions/deploy-appengine@main
+        with:
+          version: ${{ env.BRANCH_NAME_URL }}
+          deliverables: appengine/frontend/app.yaml
+          promote: false
+      - name: update deployment status
+        uses: bobheadxi/deployments@v1
+        if: always()
+        with:
+          step: finish
+          token: ${{ secrets.GITHUB_TOKEN }}
+          env: ${{ steps.deployment.outputs.env }}
+          env_url: ${{ steps.deploy.outputs.url }}
+          status: ${{ job.status }}
+          deployment_id: ${{ steps.deployment.outputs.deployment_id }}
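Note on the deployment steps above: the two "Get branch name" steps plus BRANCH_NAME_URL reduce the Git ref to a string usable as an App Engine version id by mapping slashes and underscores to dashes. A minimal TypeScript sketch of the same mapping, for reference only (the helper name is illustrative, not part of this change):

```ts
// Mirrors the `tr / -` and `tr _ -` steps in the workflow above.
function branchToAppEngineVersion(ref: string): string {
  const branch = ref.replace(/^refs\/heads\//, '');  // refs/heads/feat/foo_bar -> feat/foo_bar
  return branch.replace(/[/_]/g, '-');               // feat/foo_bar -> feat-foo-bar
}
```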
diff --git a/appengine/frontend/app.yaml b/appengine/frontend/app.yaml
new file mode 100644
index 000000000..7c9baf75a
--- /dev/null
+++ b/appengine/frontend/app.yaml
@@ -0,0 +1,32 @@
+runtime: python27
+api_version: 1
+threadsafe: true
+
+# The skip_files element specifies which files
+# in the application directory are not to be
+# uploaded to App Engine.
+skip_files:
+- ^(.*/)?#.*#$
+- ^(.*/)?.*~$
+- ^(.*/)?.*\.py[co]$
+- ^(.*/)?.*/RCS/.*$
+- ^(.*/)?\..*$
+- ^node_modules$
+
+service: neuroglancer
+
+handlers:
+# Handle the main page by serving the index page.
+# Note the $ to specify the end of the path, since app.yaml does prefix matching.
+- url: /$
+  static_files: static/index.html
+  upload: static/index.html
+  login: optional
+  secure: always
+  redirect_http_response_code: 301
+
+- url: /
+  static_dir: static
+  login: optional
+  secure: always
+  redirect_http_response_code: 301
diff --git a/config/state_servers.json b/config/state_servers.json
new file mode 100644
index 000000000..c61efbd86
--- /dev/null
+++ b/config/state_servers.json
@@ -0,0 +1,6 @@
+{
+  "cave": {
+    "url": "middleauth+https://global.daf-apis.com/nglstate/api/v1/post",
+    "default": true
+  }
+}
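The build step earlier in the workflow inlines config/state_servers.json into the client as a compile-time STATE_SERVERS define (with whitespace stripped). The code that consumes the define is not part of this diff; a hedged sketch of the expected shape, with hypothetical names:

```ts
// Assumed shape of the injected define (names below are illustrative only).
interface StateServerEntry {
  url: string;
  default?: boolean;
}
declare const STATE_SERVERS: Record<string, StateServerEntry>;

// Pick the entry flagged "default", falling back to the first one.
function pickDefaultStateServer(): StateServerEntry|undefined {
  const entries = Object.values(STATE_SERVERS);
  return entries.find(s => s.default) ?? entries[0];
}
```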
diff --git a/src/neuroglancer/datasource/graphene/backend.ts b/src/neuroglancer/datasource/graphene/backend.ts
index f1f84fa70..4f84fa977 100644
--- a/src/neuroglancer/datasource/graphene/backend.ts
+++ b/src/neuroglancer/datasource/graphene/backend.ts
@@ -16,8 +16,8 @@ import {WithParameters} from 'neuroglancer/chunk_manager/backend';
 import {WithSharedCredentialsProviderCounterpart} from 'neuroglancer/credentials_provider/shared_counterpart';
-import {assignMeshFragmentData, FragmentChunk, ManifestChunk, MeshSource} from 'neuroglancer/mesh/backend';
-import {getGrapheneFragmentKey, responseIdentity} from 'neuroglancer/datasource/graphene/base';
+import {assignMeshFragmentData, assignMultiscaleMeshFragmentData, FragmentChunk, FragmentId, ManifestChunk, MeshSource, MultiscaleFragmentChunk, MultiscaleManifestChunk, MultiscaleMeshSource} from 'neuroglancer/mesh/backend';
+import {getGrapheneFragmentKey, MultiscaleMeshSourceParameters, responseIdentity} from 'neuroglancer/datasource/graphene/base';
 import {CancellationToken} from 'neuroglancer/util/cancellation';
 import {isNotFoundError, responseArrayBuffer, responseJson} from 'neuroglancer/util/http_request';
 import {cancellableFetchSpecialOk, SpecialProtocolCredentials, SpecialProtocolCredentialsProvider} from 'neuroglancer/util/special_protocol_request';
@@ -44,14 +44,15 @@ import { SharedWatchableValue } from 'neuroglancer/shared_watchable_value';
 import { DisplayDimensionRenderInfo } from 'neuroglancer/navigation_state';
 import { forEachVisibleSegment } from 'neuroglancer/segmentation_display_state/base';
 import { computeChunkBounds } from 'neuroglancer/sliceview/volume/backend';
+import { verifyObject } from 'neuroglancer/util/json';
 
 function getVerifiedFragmentPromise(
     credentialsProvider: SpecialProtocolCredentialsProvider,
-    chunk: FragmentChunk,
-    parameters: MeshSourceParameters,
+    fragmentId: string|null,
+    parameters: MeshSourceParameters|MultiscaleMeshSourceParameters,
     cancellationToken: CancellationToken) {
-  if (chunk.fragmentId && chunk.fragmentId.charAt(0) === '~') {
-    let parts = chunk.fragmentId.substr(1).split(':');
+  if (fragmentId && fragmentId.charAt(0) === '~') {
+    let parts = fragmentId.substr(1).split(':');
     let startOffset: Uint64|number, endOffset: Uint64|number;
     startOffset = Number(parts[1]);
     endOffset = startOffset+Number(parts[2]);
@@ -64,22 +65,22 @@ function getVerifiedFragmentPromise(
   }
   return cancellableFetchSpecialOk(
       credentialsProvider,
-      `${parameters.fragmentUrl}/dynamic/${chunk.fragmentId}`, {}, responseArrayBuffer,
+      `${parameters.fragmentUrl}/dynamic/${fragmentId}`, {}, responseArrayBuffer,
       cancellationToken);
 }
 
 function getFragmentDownloadPromise(
     credentialsProvider: SpecialProtocolCredentialsProvider,
-    chunk: FragmentChunk,
-    parameters: MeshSourceParameters,
+    fragmentId: string|null,
+    parameters: MeshSourceParameters|MultiscaleMeshSourceParameters,
     cancellationToken: CancellationToken) {
   let fragmentDownloadPromise;
   if (parameters.sharding){
-    fragmentDownloadPromise = getVerifiedFragmentPromise(credentialsProvider, chunk, parameters, cancellationToken);
+    fragmentDownloadPromise = getVerifiedFragmentPromise(credentialsProvider, fragmentId, parameters, cancellationToken);
   } else {
     fragmentDownloadPromise = cancellableFetchSpecialOk(
         credentialsProvider,
-        `${parameters.fragmentUrl}/${chunk.fragmentId}`, {}, responseArrayBuffer,
+        `${parameters.fragmentUrl}/${fragmentId}`, {}, responseArrayBuffer,
         cancellationToken);
   }
   return fragmentDownloadPromise;
@@ -111,7 +112,7 @@ async function decodeDracoFragmentChunk(
   try {
     const response = await getFragmentDownloadPromise(
-        undefined, chunk, parameters, cancellationToken);
+        undefined, chunk.fragmentId, parameters, cancellationToken);
     await decodeDracoFragmentChunk(chunk, response);
   } catch (e) {
     if (isNotFoundError(e)) {
@@ -127,6 +128,86 @@ async function decodeDracoFragmentChunk(
   }
 }
 
+interface ShardInfo {
+  shardUrl: string;
+  offset: Uint64;
+}
+
+interface GrapheneMultiscaleManifestChunk extends MultiscaleManifestChunk {
+  fragmentIds: FragmentId[]|null;
+  shardInfo?: ShardInfo;
+}
+
+function decodeMultiscaleManifestChunk(chunk: GrapheneMultiscaleManifestChunk, response: any) {
+  verifyObject(response);
+  chunk.manifest = {
+    chunkShape: vec3.clone(response.chunkShape),
+    chunkGridSpatialOrigin: vec3.clone(response.chunkGridSpatialOrigin),
+    lodScales: new Float32Array(response.lodScales),
+    octree: new Uint32Array(response.octree),
+    vertexOffsets: new Float32Array(response.lodScales.length * 3),
+    clipLowerBound: vec3.clone(response.clipLowerBound),
+    clipUpperBound: vec3.clone(response.clipUpperBound),
+  };
+  chunk.fragmentIds = response.fragments;
+  chunk.manifest.clipLowerBound.fill(0);
+  chunk.manifest.clipUpperBound.fill(100000);
+  chunk.manifest.octree[5*(response.fragments.length-1) + 4] &= 0x7FFFFFFF;
+  chunk.manifest.octree[5*(response.fragments.length-1) + 3] |= 0x80000000;
+}
+
+async function decodeMultiscaleFragmentChunk(
+    chunk: MultiscaleFragmentChunk, response: ArrayBuffer) {
+  const {lod} = chunk;
+  const source = chunk.manifestChunk!.source! as GrapheneMultiscaleMeshSource;
+  const m = await import(/* webpackChunkName: "draco" */ 'neuroglancer/mesh/draco');
+  const rawMesh = await m.decodeDracoPartitioned(new Uint8Array(response), 0, lod !== 0, false);
+  assignMultiscaleMeshFragmentData(chunk, rawMesh, source.format.vertexPositionFormat);
+}
+
+
+@registerSharedObject()
+export class GrapheneMultiscaleMeshSource extends
+(WithParameters(WithSharedCredentialsProviderCounterpart<SpecialProtocolCredentials>()(MultiscaleMeshSource), MultiscaleMeshSourceParameters)) {
+  async download(chunk: GrapheneMultiscaleManifestChunk, cancellationToken: CancellationToken):
+      Promise<void> {
+    const {parameters} = this;
+    let url = `${parameters.manifestUrl}/manifest/multiscale`;
+    let manifestUrl = `${url}/${chunk.objectId}?verify=1&prepend_seg_ids=1`;
+    await cancellableFetchSpecialOk(this.credentialsProvider, manifestUrl, {}, responseJson, cancellationToken)
+        .then(response => decodeMultiscaleManifestChunk(chunk, response));
+  }
+
+  async downloadFragment(
+      chunk: MultiscaleFragmentChunk, cancellationToken: CancellationToken): Promise<void> {
+    const {parameters} = this;
+    const manifestChunk = chunk.manifestChunk! as GrapheneMultiscaleManifestChunk;
+    const chunkIndex = chunk.chunkIndex;
+    const {fragmentIds} = manifestChunk;
+
+    try {
+      let fragmentId = null;
+      if (fragmentIds !== null) {
+        fragmentId = fragmentIds[chunkIndex];
+        fragmentId = fragmentId.substring(fragmentId.indexOf(':') + 1);
+      }
+      const response = await getFragmentDownloadPromise(
+          undefined, fragmentId, parameters, cancellationToken);
+      await decodeMultiscaleFragmentChunk(chunk, response);
+    } catch (e) {
+      if (isNotFoundError(e)) {
+        chunk.source!.removeChunk(chunk);
+      }
+      Promise.reject(e);
+    }
+  }
+
+  getFragmentKey(objectKey: string|null, fragmentId: string) {
+    objectKey;
+    return getGrapheneFragmentKey(fragmentId);
+  }
+}
+
 export class ChunkedGraphChunk extends Chunk {
   backendOnly = true;
   chunkGridPosition: Float32Array;
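For readers of getVerifiedFragmentPromise above: sharded graphene fragment ids come in two forms - a plain id fetched from `<fragmentUrl>/dynamic/<id>`, and a "verified" id of the form `~<shardFile>:<byteOffset>:<byteLength>` that is resolved to a byte range within a shard file. A standalone sketch of that parsing (the helper and interface names are illustrative, not part of the change):

```ts
// "~file:start:size" -> a byte range within a shard file; anything else is a plain fragment id.
interface VerifiedFragmentRef {
  shardFile: string;
  start: number;
  end: number;  // exclusive
}

function parseVerifiedFragmentId(fragmentId: string): VerifiedFragmentRef|undefined {
  if (fragmentId.charAt(0) !== '~') return undefined;
  const [shardFile, offset, length] = fragmentId.substring(1).split(':');
  const start = Number(offset);
  return {shardFile, start, end: start + Number(length)};
}
```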
diff --git a/src/neuroglancer/datasource/graphene/base.ts b/src/neuroglancer/datasource/graphene/base.ts
index 1981464f4..6030205c1 100644
--- a/src/neuroglancer/datasource/graphene/base.ts
+++ b/src/neuroglancer/datasource/graphene/base.ts
@@ -52,6 +52,16 @@ export class MeshSourceParameters {
   static RPC_ID = 'graphene/MeshSource';
 }
 
+export class MultiscaleMeshSourceParameters {
+  manifestUrl: string;
+  fragmentUrl: string;
+  metadata: MultiscaleMeshMetadata;
+  sharding: Array<ShardingParameters>|undefined;
+  nBitsForLayerId: number;
+
+  static RPC_ID = 'graphene/MultiscaleMeshSource';
+}
+
 export class MultiscaleMeshMetadata {
   transform: mat4;
   lodScaleMultiplier: number;
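The multiscale manifest decoding in graphene/backend.ts above, together with the multiscale.ts change further down, relies on the packed octree layout used by neuroglancer's multiscale meshes: five uint32 values per node, where bit 31 of the childBegin column marks a "virtual" node (traversed but not rendered at that LOD) and bit 31 of the childEnd column marks an empty node. A small sketch of that layout (constant and helper names are illustrative, not part of the diff):

```ts
// Octree rows are [x, y, z, childBeginAndVirtual, childEndAndEmpty].
const OCTREE_ROW_STRIDE = 5;
const HIGH_BIT = 0x80000000;

function isVirtual(octree: Uint32Array, row: number): boolean {
  return (octree[row * OCTREE_ROW_STRIDE + 3] & HIGH_BIT) !== 0;
}

function isEmpty(octree: Uint32Array, row: number): boolean {
  return (octree[row * OCTREE_ROW_STRIDE + 4] & HIGH_BIT) !== 0;
}

function childRange(octree: Uint32Array, row: number): [number, number] {
  const begin = (octree[row * OCTREE_ROW_STRIDE + 3] & 0x7fffffff) >>> 0;
  const end = (octree[row * OCTREE_ROW_STRIDE + 4] & 0x7fffffff) >>> 0;
  return [begin, end];
}
```

decodeMultiscaleManifestChunk uses exactly these bit positions when it sets the virtual bit and clears the empty bit on the last octree row.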
diff --git a/src/neuroglancer/datasource/graphene/frontend.ts b/src/neuroglancer/datasource/graphene/frontend.ts
index 5efd642eb..408ece3ce 100644
--- a/src/neuroglancer/datasource/graphene/frontend.ts
+++ b/src/neuroglancer/datasource/graphene/frontend.ts
@@ -24,12 +24,12 @@ import {makeIdentityTransform} from 'neuroglancer/coordinate_transform';
 import {CredentialsManager} from 'neuroglancer/credentials_provider';
 import {WithCredentialsProvider} from 'neuroglancer/credentials_provider/chunk_source_frontend';
 import {DataSource, DataSubsourceEntry, GetDataSourceOptions, RedirectError} from 'neuroglancer/datasource';
-import {CHUNKED_GRAPH_LAYER_RPC_ID, CHUNKED_GRAPH_RENDER_LAYER_UPDATE_SOURCES_RPC_ID, ChunkedGraphChunkSource as ChunkedGraphChunkSourceInterface, ChunkedGraphChunkSpecification, ChunkedGraphSourceParameters, getGrapheneFragmentKey, isBaseSegmentId, makeChunkedGraphChunkSpecification, MeshSourceParameters, MultiscaleMeshMetadata, PYCG_APP_VERSION, responseIdentity} from 'neuroglancer/datasource/graphene/base';
+import {CHUNKED_GRAPH_LAYER_RPC_ID, CHUNKED_GRAPH_RENDER_LAYER_UPDATE_SOURCES_RPC_ID, ChunkedGraphChunkSource as ChunkedGraphChunkSourceInterface, ChunkedGraphChunkSpecification, ChunkedGraphSourceParameters, getGrapheneFragmentKey, isBaseSegmentId, makeChunkedGraphChunkSpecification, MeshSourceParameters, MultiscaleMeshMetadata, PYCG_APP_VERSION, responseIdentity, MultiscaleMeshSourceParameters} from 'neuroglancer/datasource/graphene/base';
 import {DataEncoding, ShardingHashFunction, ShardingParameters} from 'neuroglancer/datasource/precomputed/base';
 import {getSegmentPropertyMap, MultiscaleVolumeInfo, parseMultiscaleVolumeInfo, parseProviderUrl, PrecomputedDataSource, PrecomputedMultiscaleVolumeChunkSource, resolvePath} from 'neuroglancer/datasource/precomputed/frontend';
 import {LayerView, MouseSelectionState, VisibleLayerInfo} from 'neuroglancer/layer';
 import {LoadedDataSubsource} from 'neuroglancer/layer_data_source';
-import {MeshSource} from 'neuroglancer/mesh/frontend';
+import {MeshSource, MultiscaleMeshSource} from 'neuroglancer/mesh/frontend';
 import {DisplayDimensionRenderInfo} from 'neuroglancer/navigation_state';
 import {ChunkTransformParameters, getChunkPositionFromCombinedGlobalLocalPositions, getChunkTransformParameters, RenderLayerTransformOrError} from 'neuroglancer/render_coordinate_transform';
 import {RenderLayer, RenderLayerRole} from 'neuroglancer/renderlayer';
@@ -61,6 +61,7 @@ import {Uint64} from 'neuroglancer/util/uint64';
 import {makeDeleteButton} from 'neuroglancer/widget/delete_button';
 import {DependentViewContext} from 'neuroglancer/widget/dependent_view_widget';
 import {makeIcon} from 'neuroglancer/widget/icon';
+import {VertexPositionFormat} from 'neuroglancer/mesh/base';
 
 function vec4FromVec3(vec: vec3, alpha = 0) {
   const res = vec4.clone([...vec]);
@@ -88,6 +89,9 @@ class GrapheneMeshSource extends
   }
 }
 
+class GrapheneMultiscaleMeshSource extends
+(WithParameters(WithCredentialsProvider<SpecialProtocolCredentials>()(MultiscaleMeshSource), MultiscaleMeshSourceParameters)) {}
+
 class AppInfo {
   segmentationUrl: string;
   meshingUrl: string;
@@ -311,17 +315,43 @@ async function getMeshSource(
     url: string, fragmentUrl: string, nBitsForLayerId: number) {
   const {metadata, segmentPropertyMap} =
       await getMeshMetadata(chunkManager, undefined, fragmentUrl);
-  const parameters: MeshSourceParameters = {
+  if (metadata === undefined) {
+    throw new Error('Mesh metadata is missing');
+  }
+
+  if (metadata.lodScaleMultiplier === 0) {
+    const parameters: MeshSourceParameters = {
+      manifestUrl: url,
+      fragmentUrl: fragmentUrl,
+      lod: 0,
+      sharding: metadata.sharding,
+      nBitsForLayerId,
+    };
+    const transform = metadata?.transform || mat4.create();
+    return {
+      source: getShardedMeshSource(chunkManager, parameters, credentialsProvider),
+      transform,
+      segmentPropertyMap,
+    };
+  }
+
+  const parameters: MultiscaleMeshSourceParameters = {
     manifestUrl: url,
     fragmentUrl: fragmentUrl,
-    lod: 0,
-    sharding: metadata?.sharding,
-    nBitsForLayerId,
+    metadata: metadata,
+    sharding: metadata.sharding,
+    nBitsForLayerId: nBitsForLayerId,
   };
-  const transform = metadata?.transform || mat4.create();
   return {
-    source: getShardedMeshSource(chunkManager, parameters, credentialsProvider),
-    transform,
+    source: chunkManager.getChunkSource(GrapheneMultiscaleMeshSource, {
+      credentialsProvider,
+      parameters: parameters,
+      format: {
+        fragmentRelativeVertices: false,
+        vertexPositionFormat: VertexPositionFormat.float32,
+      }
+    }),
+    transform: metadata.transform,
     segmentPropertyMap,
   };
 }
@@ -1478,7 +1508,7 @@ const synchronizeAnnotationSource = (source: WatchableSet<SegmentSelection>, sta
   annotationSource.childDeleted.add(annotationId => {
     const selection =
         [...source].find(selection => selection.annotationReference?.id === annotationId)
-    if (selection) source.delete(selection);
+      if (selection) source.delete(selection);
   });
 
   const addSelection = (selection: SegmentSelection) => {
diff --git a/src/neuroglancer/datasource/precomputed/backend.ts b/src/neuroglancer/datasource/precomputed/backend.ts
index ce0162cd2..c0ab0b719 100644
--- a/src/neuroglancer/datasource/precomputed/backend.ts
+++ b/src/neuroglancer/datasource/precomputed/backend.ts
@@ -538,7 +538,7 @@ async function decodeMultiscaleFragmentChunk(
   const source = chunk.manifestChunk!.source! as PrecomputedMultiscaleMeshSource;
   const m = await import(/* webpackChunkName: "draco" */ 'neuroglancer/mesh/draco');
   const rawMesh = await m.decodeDracoPartitioned(
-      new Uint8Array(response), source.parameters.metadata.vertexQuantizationBits, lod !== 0);
+      new Uint8Array(response), source.parameters.metadata.vertexQuantizationBits, lod !== 0, true);
   assignMultiscaleMeshFragmentData(chunk, rawMesh, source.format.vertexPositionFormat);
 }
 
diff --git a/src/neuroglancer/mesh/draco/index.ts b/src/neuroglancer/mesh/draco/index.ts
index 8f6a69ef8..ad8ac0604 100644
--- a/src/neuroglancer/mesh/draco/index.ts
+++ b/src/neuroglancer/mesh/draco/index.ts
@@ -61,14 +61,14 @@ const dracoModulePromise = (async () => {
 
 export async function decodeDracoPartitioned(
     buffer: Uint8Array, vertexQuantizationBits: number,
-    partition: boolean): Promise<RawPartitionedMeshData> {
+    partition: boolean, skipDequantization: boolean): Promise<RawPartitionedMeshData> {
   const m = await dracoModulePromise;
   const offset = (m.instance.exports.malloc as Function)(buffer.byteLength);
   const heap = new Uint8Array((m.instance.exports.memory as WebAssembly.Memory).buffer);
   heap.set(buffer, offset);
   numPartitions = partition ? 8 : 1;
   const code = (m.instance.exports.neuroglancer_draco_decode as Function)(
-      offset, buffer.byteLength, partition, vertexQuantizationBits, true);
+      offset, buffer.byteLength, partition, vertexQuantizationBits, skipDequantization);
   if (code === 0) {
     const r = decodeResult;
     decodeResult = undefined;
diff --git a/src/neuroglancer/mesh/multiscale.ts b/src/neuroglancer/mesh/multiscale.ts
index 3f4e9dad6..64250efbb 100644
--- a/src/neuroglancer/mesh/multiscale.ts
+++ b/src/neuroglancer/mesh/multiscale.ts
@@ -134,7 +134,7 @@ export function getDesiredMultiscaleMeshChunks(
       const size = 1 << lod;
       const rowOffset = row * 5;
       const gridX = octree[rowOffset], gridY = octree[rowOffset + 1], gridZ = octree[rowOffset + 2],
-            childBegin = octree[rowOffset + 3], childEndAndEmpty = octree[rowOffset + 4];
+            childBeginAndVirtual = octree[rowOffset + 3], childEndAndEmpty = octree[rowOffset + 4];
       let xLower = gridX * size * chunkShape[0] + chunkGridSpatialOrigin[0],
           yLower = gridY * size * chunkShape[1] + chunkGridSpatialOrigin[1],
           zLower = gridZ * size * chunkShape[2] + chunkGridSpatialOrigin[2];
@@ -152,13 +152,19 @@ export function getDesiredMultiscaleMeshChunks(
       const pixelSize = minW / scaleFactor;
 
       if (priorLodScale === 0 || pixelSize * detailCutoff < priorLodScale) {
-        const lodScale = lodScales[lod];
+        let lodScale = lodScales[lod];
         if (lodScale !== 0) {
-          callback(lod, row, lodScale / pixelSize, (childEndAndEmpty >>> 31));
+          const virtual = (childBeginAndVirtual >>> 31);
+          if (virtual) {
+            lodScale = 0;
+          }
+          const empty = (childEndAndEmpty >>> 31);
+          callback(lod, row, lodScale / pixelSize, empty | virtual);
         }
 
         if (lod > 0 && (lodScale === 0 || pixelSize * detailCutoff < lodScale)) {
           const nextPriorLodScale = lodScale === 0 ? priorLodScale : lodScale;
+          const childBegin = (childBeginAndVirtual & 0x7FFFFFFF) >>> 0;
           const childEnd = (childEndAndEmpty & 0x7FFFFFFF) >>> 0;
           for (let childRow = childBegin; childRow < childEnd; ++childRow) {
             handleChunk(lod - 1, childRow, nextPriorLodScale);
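The draco change above threads the previously hard-coded dequantization flag out to the callers: the precomputed backend keeps passing `true` (vertices stay quantized, as before), while the new graphene multiscale source passes `false` so the decoder emits float32 positions, matching the `VertexPositionFormat.float32` declared on the frontend. Illustrative wrappers showing the two call patterns (the function names are not part of the diff, and the real call sites load the draco module dynamically):

```ts
import {decodeDracoPartitioned} from 'neuroglancer/mesh/draco';

// Precomputed multiscale fragments: unchanged behavior, dequantization skipped.
async function decodePrecomputedFragment(
    response: ArrayBuffer, vertexQuantizationBits: number, lod: number) {
  return decodeDracoPartitioned(
      new Uint8Array(response), vertexQuantizationBits, lod !== 0,
      /*skipDequantization=*/ true);
}

// Graphene multiscale fragments: decoder dequantizes to absolute float32 positions.
async function decodeGrapheneMultiscaleFragment(response: ArrayBuffer, lod: number) {
  return decodeDracoPartitioned(
      new Uint8Array(response), /*vertexQuantizationBits=*/ 0, lod !== 0,
      /*skipDequantization=*/ false);
}
```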