diff --git a/.github/workflows/package-linux-anaconda.yml b/.github/workflows/package-linux-anaconda.yml
index ab66b33371..a9985b40e1 100644
--- a/.github/workflows/package-linux-anaconda.yml
+++ b/.github/workflows/package-linux-anaconda.yml
@@ -12,7 +12,7 @@ jobs:
     steps:

    - name: install conda-build # and anaconda-client
-      run: $CONDA/bin/conda install -y conda-build #anaconda-client
+      run: $CONDA/bin/conda install -c conda-forge -y conda-build #anaconda-client

    - name: fetch recipe
      run: |
@@ -28,6 +28,6 @@ jobs:
    - name: Upload package to GitHub Release
      uses: AButler/upload-release-assets@v2.0
      with:
-        files: '*.tar.bz2'
+        files: '*.conda*'
        repo-token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/package-macos-anaconda.yml b/.github/workflows/package-macos-anaconda.yml
index 68189eaa63..c44fa9d9cf 100644
--- a/.github/workflows/package-macos-anaconda.yml
+++ b/.github/workflows/package-macos-anaconda.yml
@@ -6,33 +6,34 @@ on:
 jobs:
   package:
-    runs-on: macos-latest
     steps:
-
-    - name: fetch MacOSX 10.11 SDK
-      run: curl -L https://github.com/phracker/MacOSX-SDKs/releases/download/MacOSX10.11.sdk/MacOSX10.11.sdk.tar.xz | sudo tar xf - -C /opt/
-
-    - name: install conda-build # and anaconda-client
-      run: sudo $CONDA/bin/conda install -y conda-build # anaconda-client
-
-    - name: fetch recipe
-      run: |
-        git clone https://github.com/MRtrix3/conda-build.git
-        mv conda-build/* .
-        { echo "CONDA_BUILD_SYSROOT:"; echo " - /opt/MacOSX10.11.sdk # [osx]"; } > conda_build_config.yaml
-
-    - name: build package
-      run: sudo CONDA="$CONDA" ./run.sh ${GITHUB_REF#refs/*/} ${GITHUB_REPOSITORY%/*} conda-macos
-
-#    - name: upload package
-#      run: $CONDA/bin/anaconda -t ${{ secrets.ANACONDA_TOKEN }} upload -u MRtrix3 $(sudo $CONDA/bin/conda build conda-build/ --output)
-    - name: Upload package to GitHub Release
-      uses: AButler/upload-release-assets@v2.0
-      with:
-        files: '*.tar.bz2'
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
-
-
+    - name: Set up Miniconda
+      uses: conda-incubator/setup-miniconda@v2
+      with:
+        miniconda-version: "latest"
+
+    - name: Install conda-build
+      run: conda install -c conda-forge -y conda-build
+
+    - name: fetch MacOSX 10.11 SDK
+      run: curl -L https://github.com/phracker/MacOSX-SDKs/releases/download/MacOSX10.11.sdk/MacOSX10.11.sdk.tar.xz | sudo tar xf - -C /opt/
+
+    - name: fetch recipe
+      run: |
+        git clone https://github.com/MRtrix3/conda-build.git
+        mv conda-build/* .
+        { echo "CONDA_BUILD_SYSROOT:"; echo " - /opt/MacOSX10.11.sdk # [osx]"; } > conda_build_config.yaml
+
+    - name: build package
+      run: CONDA="$CONDA" ./run.sh ${GITHUB_REF#refs/*/} ${GITHUB_REPOSITORY%/*} conda-macos
+
+      # - name: upload package
+      # run: $CONDA/bin/anaconda -t ${{ secrets.ANACONDA_TOKEN }} upload -u MRtrix3 $(sudo $CONDA/bin/conda build conda-build/ --output)
+
+    - name: Upload package to GitHub Release
+      uses: AButler/upload-release-assets@v2.0
+      with:
+        files: "*.conda*"
+        repo-token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/package-macos-native.yml b/.github/workflows/package-macos-native.yml
index bd31b64e87..0c7686ad54 100644
--- a/.github/workflows/package-macos-native.yml
+++ b/.github/workflows/package-macos-native.yml
@@ -11,11 +11,16 @@ jobs:
     steps:

+    - name: Symlink Python3
+      run: |
+        mkdir -p python_dir
+        ln -s $(which python3) python_dir/python
+
    - name: fetch recipe
      run: git clone https://github.com/MRtrix3/macos-installer.git

    - name: build packages
-      run: ./build ${GITHUB_REF#refs/*/}
+      run: ./build ${GITHUB_REF#refs/*/} 5.15.16 ${{ github.workspace }}/python_dir
      working-directory: ./macos-installer

    - name: Upload package to GitHub Release
diff --git a/.github/workflows/package-windows-msys2.yml b/.github/workflows/package-windows-msys2.yml
index 7ac55ecbe3..ea95e6a97f 100644
--- a/.github/workflows/package-windows-msys2.yml
+++ b/.github/workflows/package-windows-msys2.yml
@@ -4,37 +4,45 @@ on:
   release:
     types: [created]
-
 jobs:
   package:
-    runs-on: windows-latest
+    defaults:
+      run:
+        shell: msys2 {0}
     env:
-      MSYSTEM: MINGW64
       MSYSCON: defterm
      CHERE_INVOKING: enabled_from_arguments
      MSYS2_NOSTART: yes
+      MINGW_PACKAGE_PREFIX: mingw-w64-x86_64
    steps:
-
-    - name: fetch and install MSYS2
-      run: bash -c 'curl -sL https://github.com/MRtrix3/MinGW/releases/download/1.0/msys2.tar.{0,1} | tar xf -'
-
-    - name: run qtbinpatcher
-      shell: cmd
-      run: msys64\msys2_shell.cmd -c "qtbinpatcher --qt-dir=$(dirname $(which qmake))"
-
-    - name: fetch PKGBUILD
-      shell: cmd
-      run: msys64\msys2_shell.cmd -c "git clone https://github.com/MRtrix3/MinGW.git && mv MinGW/* ."
-
-    - name: run makepkg
-      shell: cmd
-      run: msys64\msys2_shell.cmd -c "./run.sh ${GITHUB_REF#refs/*/} ${GITHUB_REPOSITORY%%/*}"
-
-    - name: Upload package to GitHub Release
-      uses: AButler/upload-release-assets@v2.0
-      with:
-        files: '*.tar.xz'
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
+    - uses: msys2/setup-msys2@v2
+      with:
+        msystem: MINGW64
+        install: |
+          git
+          pkg-config
+          python
+          ${{env.MINGW_PACKAGE_PREFIX}}-bc
+          ${{env.MINGW_PACKAGE_PREFIX}}-diffutils
+          ${{env.MINGW_PACKAGE_PREFIX}}-eigen3
+          ${{env.MINGW_PACKAGE_PREFIX}}-fftw
+          ${{env.MINGW_PACKAGE_PREFIX}}-gcc
+          ${{env.MINGW_PACKAGE_PREFIX}}-libtiff
+          ${{env.MINGW_PACKAGE_PREFIX}}-qt5-base
+          ${{env.MINGW_PACKAGE_PREFIX}}-qt5-svg
+          ${{env.MINGW_PACKAGE_PREFIX}}-zlib
+
+    - name: fetch PKGBUILD
+      run: git clone https://github.com/MRtrix3/MinGW.git && mv MinGW/* .
+
+    - name: run makepkg
+      run: LINKFLAGS=-lopengl32 ./run.sh ${GITHUB_REF#refs/*/} ${GITHUB_REPOSITORY%%/*}
+
+    - name: Upload package to GitHub Release
+      uses: AButler/upload-release-assets@v2.0
+      with:
+        files: "*.tar.zst"
+        repo-token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4d2fd2ebb8..72ce584ced 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,7 +1,7 @@
 cmake_minimum_required(VERSION 3.16 FATAL_ERROR)
 set(CMAKE_OSX_DEPLOYMENT_TARGET 11.00 CACHE STRING "")

-project(mrtrix3 LANGUAGES CXX VERSION 3.0.4)
+project(mrtrix3 LANGUAGES CXX VERSION 3.0.5)

 if(NOT CMAKE_GENERATOR STREQUAL "Ninja")
   message(WARNING "It is recommended to use the Ninja generator to build MRtrix3. "
diff --git a/Dockerfile b/Dockerfile
index ab43a39499..a4629fc210 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -22,6 +22,7 @@ RUN apt-get -qq update \
     libqt5opengl5-dev \
     libqt5svg5-dev \
     libtiff5-dev \
+    python3 \
     qtbase5-dev \
     zlib1g-dev \
     && rm -rf /var/lib/apt/lists/*
diff --git a/cpp/cmd/amp2sh.cpp b/cpp/cmd/amp2sh.cpp
index 20a65f4faf..561ab3329b 100644
--- a/cpp/cmd/amp2sh.cpp
+++ b/cpp/cmd/amp2sh.cpp
@@ -21,7 +21,7 @@
 #include "file/matrix.h"
 #include "image.h"
 #include "math/SH.h"
-#include "phase_encoding.h"
+#include "metadata/phase_encoding.h"
 #include "progressbar.h"

 using namespace MR;
@@ -183,8 +183,9 @@ class Amp2SH {
 };

 void run() {
-  auto amp = Image::open(argument[0]).with_direct_io(3);
-  Header header(amp);
+  Header header_in(Header::open(argument[0]));
+  Header header_out(header_in);
+  header_out.datatype() = DataType::Float32;

   std::vector bzeros, dwis;
   Eigen::MatrixXd dirs;
@@ -194,8 +195,8 @@ void run() {
     if (dirs.cols() == 3)
       dirs = Math::Sphere::cartesian2spherical(dirs);
   } else {
-    auto hit = header.keyval().find("directions");
-    if (hit != header.keyval().end()) {
+    auto hit = header_in.keyval().find("directions");
+    if (hit != header_in.keyval().end()) {
       std::vector dir_vector;
       for (auto line : split_lines(hit->second)) {
         auto v = parse_floats(line);
@@ -206,20 +207,20 @@ void run() {
         dirs(i / 2, 0) = dir_vector[i];
         dirs(i / 2, 1) = dir_vector[i + 1];
       }
-      header.keyval()["basis_directions"] = hit->second;
-      header.keyval().erase(hit);
+      header_out.keyval()["basis_directions"] = hit->second;
+      header_out.keyval().erase(hit);
     } else {
-      auto grad = DWI::get_DW_scheme(amp);
+      auto grad = DWI::get_DW_scheme(header_in);
       DWI::Shells shells(grad);
       shells.select_shells(true, false, false);
       if (shells.smallest().is_bzero())
         bzeros = shells.smallest().get_volumes();
       dwis = shells.largest().get_volumes();
       dirs = DWI::gen_direction_matrix(grad, dwis);
-      DWI::stash_DW_scheme(header, grad);
+      DWI::stash_DW_scheme(header_out, grad);
     }
   }
-  PhaseEncoding::clear_scheme(header);
+  Metadata::PhaseEncoding::clear_scheme(header_out.keyval());

   auto sh2amp = DWI::compute_SH2amp_mapping(dirs, true, 8);

@@ -227,11 +228,12 @@ void run() {
   if (normalise && bzeros.empty())
     throw Exception("the normalise option is only available if the input data contains b=0 images.");

-  header.size(3) = sh2amp.cols();
-  Stride::set_from_command_line(header);
-  auto SH = Image::create(argument[1], header);
+  header_out.size(3) = sh2amp.cols();
+  Stride::set_from_command_line(header_out);

   Amp2SHCommon common(sh2amp, bzeros, dwis, normalise);
+  auto amp = header_in.get_image().with_direct_io(3);
+  auto SH = Image::create(argument[1], header_out);

   opt = get_options("rician");
   if (!opt.empty()) {
diff --git a/cpp/cmd/dwi2adc.cpp b/cpp/cmd/dwi2adc.cpp
index 268200fc1c..0248cdbfa6 100644
---
a/cpp/cmd/dwi2adc.cpp +++ b/cpp/cmd/dwi2adc.cpp @@ -19,7 +19,7 @@ #include "dwi/gradient.h" #include "image.h" #include "math/least_squares.h" -#include "phase_encoding.h" +#include "metadata/phase_encoding.h" #include "progressbar.h" using namespace MR; @@ -44,7 +44,7 @@ void usage () "adopts a 2-stage fitting strategy, in which the ADC is first estimated based on " "the DWI data with b > cutoff, and the other parameters are estimated subsequently. " "The output consists of 4 volumes, respectively S(0), D, f, and D'." - + + "Note that this command ignores the gradient orientation entirely. This approach is " "therefore only suited for mean DWI (trace-weighted) images or for DWI data collected " "with isotropically-distributed gradient directions."; @@ -60,12 +60,12 @@ void usage () + Argument ("bval").type_integer (0, 1000) + DWI::GradImportOptions(); - + REFERENCES + "Le Bihan, D.; Breton, E.; Lallemand, D.; Aubin, M.L.; Vignaud, J.; Laval-Jeantet, M. " "Separation of diffusion and perfusion in intravoxel incoherent motion MR imaging. " "Radiology, 1988, 168, 497–505." - + + "Jalnefjord, O.; Andersson, M.; Montelius; M.; Starck, G.; Elf, A.; Johanson, V.; Svensson, J.; Ljungberg, M. " "Comparison of methods for estimation of the intravoxel incoherent motion (IVIM) " "diffusion coefficient (D) and perfusion fraction (f). " @@ -144,11 +144,11 @@ class DWI2ADC { }; void run() { - auto dwi = Header::open(argument[0]).get_image(); - auto grad = DWI::get_DW_scheme(dwi); + auto header_in = Header::open(argument[0]); + auto grad = DWI::get_DW_scheme(header_in); size_t dwi_axis = 3; - while (dwi.size(dwi_axis) < 2) + while (header_in.size(dwi_axis) < 2) ++dwi_axis; INFO("assuming DW images are stored along axis " + str(dwi_axis)); @@ -156,14 +156,15 @@ void run() { bool ivim = opt.size(); int bmin = get_option_value("cutoff", 120); - Header header(dwi); - header.datatype() = DataType::Float32; - DWI::stash_DW_scheme(header, grad); - PhaseEncoding::clear_scheme(header); - header.ndim() = 4; - header.size(3) = ivim ? 4 : 2; + Header header_out(header_in); + header_out.datatype() = DataType::Float32; + DWI::stash_DW_scheme(header_out, grad); + Metadata::PhaseEncoding::clear_scheme(header_out.keyval()); + header_out.ndim() = 4; + header_out.size(3) = ivim ? 
4 : 2; - auto adc = Image::create(argument[1], header); + auto dwi = header_in.get_image(); + auto adc = Image::create(argument[1], header_out); ThreadedLoop("computing ADC values", dwi, 0, 3).run(DWI2ADC(grad.col(3), dwi_axis, ivim, bmin), dwi, adc); } diff --git a/cpp/cmd/dwi2fod.cpp b/cpp/cmd/dwi2fod.cpp index a529e07f1f..d927ceb994 100644 --- a/cpp/cmd/dwi2fod.cpp +++ b/cpp/cmd/dwi2fod.cpp @@ -21,7 +21,7 @@ #include "header.h" #include "image.h" #include "math/SH.h" -#include "phase_encoding.h" +#include "metadata/phase_encoding.h" #include "dwi/sdeconv/csd.h" #include "dwi/sdeconv/msmt_csd.h" @@ -258,7 +258,7 @@ void run() { shared.init(); DWI::stash_DW_scheme(header_out, shared.grad); - PhaseEncoding::clear_scheme(header_out); + Metadata::PhaseEncoding::clear_scheme(header_out.keyval()); header_out.size(3) = shared.nSH(); auto fod = Image::create(argument[3], header_out); diff --git a/cpp/cmd/dwi2tensor.cpp b/cpp/cmd/dwi2tensor.cpp index d673a2eec9..bab951df6a 100644 --- a/cpp/cmd/dwi2tensor.cpp +++ b/cpp/cmd/dwi2tensor.cpp @@ -22,7 +22,7 @@ #include "file/matrix.h" #include "image.h" #include "math/constrained_least_squares.h" -#include "phase_encoding.h" +#include "metadata/phase_encoding.h" #include "progressbar.h" using namespace MR; @@ -277,14 +277,14 @@ inline Processor processor(const } void run() { - auto dwi = Header::open(argument[0]).get_image(); - auto grad = DWI::get_DW_scheme(dwi); + auto header_in = Header::open(argument[0]); + auto grad = DWI::get_DW_scheme(header_in); Image mask; auto opt = get_options("mask"); if (!opt.empty()) { mask = Image::open(opt[0][0]); - check_dimensions(dwi, mask, 0, 3); + check_dimensions(header_in, mask, 0, 3); } const bool ols = !get_options("ols").empty(); @@ -292,34 +292,35 @@ void run() { // depending on whether first (initialisation) loop should be considered an iteration auto iter = get_option_value("iter", DEFAULT_NITER); - Header header(dwi); - header.datatype() = DataType::Float32; - header.ndim() = 4; - PhaseEncoding::clear_scheme(header); + Header header_out(header_in); + header_out.datatype() = DataType::Float32; + header_out.ndim() = 4; + DWI::stash_DW_scheme(header_out, grad); + Metadata::PhaseEncoding::clear_scheme(header_out.keyval()); Image predict; opt = get_options("predicted_signal"); if (!opt.empty()) - predict = Image::create(opt[0][0], header); + predict = Image::create(opt[0][0], header_out); - DWI::stash_DW_scheme(header, grad); - header.size(3) = 6; - auto dt = Image::create(argument[1], header); + DWI::stash_DW_scheme(header_out, grad); + header_out.size(3) = 6; + auto dt = Image::create(argument[1], header_out); Image b0; opt = get_options("b0"); if (!opt.empty()) { - header.ndim() = 3; - b0 = Image::create(opt[0][0], header); + header_out.ndim() = 3; + b0 = Image::create(opt[0][0], header_out); } Image dkt; opt = get_options("dkt"); const bool dki = !opt.empty(); if (dki) { - header.ndim() = 4; - header.size(3) = 15; - dkt = Image::create(opt[0][0], header); + header_out.ndim() = 4; + header_out.size(3) = 15; + dkt = Image::create(opt[0][0], header_out); } Eigen::MatrixXd A = -DWI::grad2bmatrix(grad, dki); @@ -346,5 +347,6 @@ void run() { } } + auto dwi = header_in.get_image(); ThreadedLoop("computing tensors", dwi, 0, 3).run(processor(A, Aneq, ols, iter, mask, b0, dt, dkt, predict), dwi); } diff --git a/cpp/cmd/dwiextract.cpp b/cpp/cmd/dwiextract.cpp index e077242afe..afc1458bcd 100644 --- a/cpp/cmd/dwiextract.cpp +++ b/cpp/cmd/dwiextract.cpp @@ -18,8 +18,9 @@ #include "algo/loop.h" #include "command.h" 
#include "dwi/gradient.h" +#include "dwi/shells.h" #include "image.h" -#include "phase_encoding.h" +#include "metadata/phase_encoding.h" #include "progressbar.h" using namespace MR; @@ -65,17 +66,17 @@ void usage() { + DWI::GradImportOptions() + DWI::ShellsOption + DWI::GradExportOptions() - + PhaseEncoding::ImportOptions - + PhaseEncoding::SelectOptions + + Metadata::PhaseEncoding::ImportOptions + + Metadata::PhaseEncoding::SelectOptions + Stride::Options; } // clang-format off void run() { - auto input_image = Image::open(argument[0]); - if (input_image.ndim() < 4) + auto header_in = Header::open(argument[0]); + if (header_in.ndim() < 4) throw Exception("Epected input image to contain more than three dimensions"); - auto grad = DWI::get_DW_scheme(input_image); + auto grad = DWI::get_DW_scheme(header_in); // Want to support non-shell-like data if it's just a straight extraction // of all dwis or all bzeros i.e. don't initialise the Shells class @@ -108,7 +109,7 @@ void run() { } auto opt = get_options("pe"); - const auto pe_scheme = PhaseEncoding::get_scheme(input_image); + const auto pe_scheme = Metadata::PhaseEncoding::get_scheme(header_in); if (!opt.empty()) { if (!pe_scheme.rows()) throw Exception("Cannot filter volumes by phase-encoding: No such information present"); @@ -141,24 +142,25 @@ void run() { std::sort(volumes.begin(), volumes.end()); - Header header(input_image); - Stride::set_from_command_line(header); - header.size(3) = volumes.size(); + Header header_out(header_in); + Stride::set_from_command_line(header_out); + header_out.size(3) = volumes.size(); Eigen::MatrixXd new_grad(volumes.size(), grad.cols()); for (size_t i = 0; i < volumes.size(); i++) new_grad.row(i) = grad.row(volumes[i]); - DWI::set_DW_scheme(header, new_grad); + DWI::set_DW_scheme(header_out, new_grad); if (pe_scheme.rows()) { Eigen::MatrixXd new_scheme(volumes.size(), pe_scheme.cols()); for (size_t i = 0; i != volumes.size(); ++i) new_scheme.row(i) = pe_scheme.row(volumes[i]); - PhaseEncoding::set_scheme(header, new_scheme); + Metadata::PhaseEncoding::set_scheme(header_out.keyval(), new_scheme); } - auto output_image = Image::create(argument[1], header); - DWI::export_grad_commandline(header); + auto input_image = header_in.get_image(); + auto output_image = Image::create(argument[1], header_out); + DWI::export_grad_commandline(header_out); auto input_volumes = Adapter::make(input_image, 3, volumes); threaded_copy_with_progress_message("extracting volumes", input_volumes, output_image); diff --git a/cpp/cmd/mrcalc.cpp b/cpp/cmd/mrcalc.cpp index 3513c708f9..98d9da44c0 100644 --- a/cpp/cmd/mrcalc.cpp +++ b/cpp/cmd/mrcalc.cpp @@ -320,7 +320,6 @@ UNARY_OP( #include "image.h" #include "math/rng.h" #include "memory.h" -#include "phase_encoding.h" using namespace MR; using namespace App; @@ -823,7 +822,7 @@ void get_header(const StackEntry &entry, Header &header) { header.spacing(n) = entry.image->spacing(n); } - header.merge_keyval(*entry.image); + header.merge_keyval(entry.image->keyval()); } class ThreadFunctor { diff --git a/cpp/cmd/mrcat.cpp b/cpp/cmd/mrcat.cpp index 8872cb4a50..08a80ad27d 100644 --- a/cpp/cmd/mrcat.cpp +++ b/cpp/cmd/mrcat.cpp @@ -18,7 +18,6 @@ #include "command.h" #include "dwi/gradient.h" #include "image.h" -#include "phase_encoding.h" #include "progressbar.h" using namespace MR; diff --git a/cpp/cmd/mrconvert.cpp b/cpp/cmd/mrconvert.cpp index f4f60bf762..8286b9ff0c 100644 --- a/cpp/cmd/mrconvert.cpp +++ b/cpp/cmd/mrconvert.cpp @@ -24,7 +24,7 @@ #include "file/ofstream.h" #include 
"header.h" #include "image.h" -#include "phase_encoding.h" +#include "metadata/phase_encoding.h" #include "transform.h" #include "types.h" @@ -244,8 +244,8 @@ void usage() { + DWI::bvalue_scaling_option + DWI::GradExportOptions() - + PhaseEncoding::ImportOptions - + PhaseEncoding::ExportOptions; + + Metadata::PhaseEncoding::ImportOptions + + Metadata::PhaseEncoding::ExportOptions; } // clang-format on @@ -271,7 +271,7 @@ void permute_DW_scheme(Header &H, const std::vector &axes) { } void permute_PE_scheme(Header &H, const std::vector &axes) { - auto in = PhaseEncoding::parse_scheme(H); + auto in = Metadata::PhaseEncoding::parse_scheme(H.keyval(), H); if (!in.rows()) return; @@ -285,16 +285,27 @@ void permute_PE_scheme(Header &H, const std::vector &axes) { for (int row = 0; row != in.rows(); ++row) out.block<1, 3>(row, 0) = in.block<1, 3>(row, 0) * permute; - PhaseEncoding::set_scheme(H, out); + Metadata::PhaseEncoding::set_scheme(H.keyval(), out); } void permute_slice_direction(Header &H, const std::vector &axes) { - auto it = H.keyval().find("SliceEncodingDirection"); - if (it == H.keyval().end()) + using Metadata::BIDS::axis_vector_type; + auto slice_encoding_it = H.keyval().find("SliceEncodingDirection"); + auto slice_timing_it = H.keyval().find("SliceTiming"); + if (slice_encoding_it == H.keyval().end() && slice_timing_it == H.keyval().end()) return; - const Eigen::Vector3d orig_dir = Axes::id2dir(it->second); - const Eigen::Vector3d new_dir(orig_dir[axes[0]], orig_dir[axes[1]], orig_dir[axes[2]]); - it->second = Axes::dir2id(new_dir); + if (slice_encoding_it == H.keyval().end()) { + const axis_vector_type orig_dir({0, 0, 1}); + const axis_vector_type new_dir(orig_dir[axes[0]], orig_dir[axes[1]], orig_dir[axes[2]]); + slice_encoding_it->second = Metadata::BIDS::vector2axisid(new_dir); + WARN("Header field \"SliceEncodingDirection\" inferred to be \"k\" in input image" // + " and then transformed according to axis permutation" // + " in order to preserve validity of existing header field \"SliceTiming\""); // + return; + } + const axis_vector_type orig_dir = Metadata::BIDS::axisid2vector(slice_encoding_it->second); + const axis_vector_type new_dir(orig_dir[axes[0]], orig_dir[axes[1]], orig_dir[axes[2]]); + slice_encoding_it->second = Metadata::BIDS::vector2axisid(new_dir); } template inline std::vector set_header(Header &header, const ImageType &input) { @@ -352,7 +363,7 @@ void copy_permute(const InputType &in, Header &header_out, const std::string &ou const auto axes = set_header(header_out, in); auto out = Image::create(output_filename, header_out, add_to_command_history); DWI::export_grad_commandline(out); - PhaseEncoding::export_commandline(out); + Metadata::PhaseEncoding::export_commandline(out); auto perm = Adapter::make(in, axes); threaded_copy_with_progress(perm, out, 0, std::numeric_limits::max(), 2); } @@ -381,20 +392,17 @@ void run() { throw; e.display(2); } + auto opt = get_options("json_import"); + if (!opt.empty()) + File::JSON::load(header_in, opt[0][0]); + if (!get_options("import_pe_table").empty() || !get_options("import_pe_eddy").empty()) + Metadata::PhaseEncoding::set_scheme(header_in.keyval(), Metadata::PhaseEncoding::get_scheme(header_in)); Header header_out(header_in); header_out.datatype() = DataType::from_command_line(header_out.datatype()); - if (header_in.datatype().is_complex() && !header_out.datatype().is_complex()) WARN("requested datatype is real but input datatype is complex - imaginary component will be ignored"); - if 
(!get_options("import_pe_table").empty() || !get_options("import_pe_eddy").empty()) - PhaseEncoding::set_scheme(header_out, PhaseEncoding::get_scheme(header_in)); - - auto opt = get_options("json_import"); - if (!opt.empty()) - File::JSON::load(header_out, opt[0][0]); - opt = get_options("copy_properties"); if (!opt.empty()) { header_out.keyval().clear(); @@ -466,8 +474,8 @@ void run() { const auto grad = DWI::parse_DW_scheme(header_out); if (grad.rows()) { if ((ssize_t)grad.rows() != header_in.size(3)) { - WARN("Diffusion encoding of input file does not match number of image volumes; omitting gradient " - "information from output image"); + WARN("Diffusion encoding of input file does not match number of image volumes;" // + " omitting gradient information from output image"); // DWI::clear_DW_scheme(header_out); } else { Eigen::MatrixXd extract_grad(pos[3].size(), grad.cols()); @@ -478,17 +486,17 @@ void run() { } Eigen::MatrixXd pe_scheme; try { - pe_scheme = PhaseEncoding::get_scheme(header_in); + pe_scheme = Metadata::PhaseEncoding::get_scheme(header_in); if (pe_scheme.rows()) { Eigen::MatrixXd extract_scheme(pos[3].size(), pe_scheme.cols()); for (size_t vol = 0; vol != pos[3].size(); ++vol) extract_scheme.row(vol) = pe_scheme.row(pos[3][vol]); - PhaseEncoding::set_scheme(header_out, extract_scheme); + Metadata::PhaseEncoding::set_scheme(header_out.keyval(), extract_scheme); } } catch (...) { - WARN("Phase encoding scheme of input file does not match number of image volumes; omitting information from " - "output image"); - PhaseEncoding::set_scheme(header_out, Eigen::MatrixXd()); + WARN("Phase encoding scheme of input file does not match number of image volumes;" // + " omitting information from output image"); // + Metadata::PhaseEncoding::clear_scheme(header_out.keyval()); } } } diff --git a/cpp/cmd/mrdegibbs.cpp b/cpp/cmd/mrdegibbs.cpp index 0d0ec9cc02..a04f678f95 100644 --- a/cpp/cmd/mrdegibbs.cpp +++ b/cpp/cmd/mrdegibbs.cpp @@ -20,6 +20,7 @@ #include "degibbs/degibbs.h" #include "degibbs/unring2d.h" #include "degibbs/unring3d.h" +#include "metadata/bids.h" using namespace MR; using namespace App; @@ -159,11 +160,14 @@ void run() { auto slice_encoding_it = header.keyval().find("SliceEncodingDirection"); if (slice_encoding_it != header.keyval().end()) { if (mode == 1) { - WARN("running 3D volume-wise unringing, but image header contains \"SliceEncodingDirection\" field"); - WARN("If data were acquired using multi-slice encoding, run in default 2D mode."); + WARN("running 3D volume-wise unringing," // + " but image header contains \"SliceEncodingDirection\" field;" // + " if data were acquired using multi-slice encoding," // + " run in default 2D mode."); // } else { try { - const Eigen::Vector3d slice_encoding_axis_onehot = Axes::id2dir(slice_encoding_it->second); + const Metadata::BIDS::axis_vector_type slice_encoding_axis_onehot = + Metadata::BIDS::axisid2vector(slice_encoding_it->second); std::vector auto_slice_axes = {0, 0}; if (slice_encoding_axis_onehot[0]) auto_slice_axes = {1, 2}; diff --git a/cpp/cmd/mrinfo.cpp b/cpp/cmd/mrinfo.cpp index 155811a9b6..098e76f506 100644 --- a/cpp/cmd/mrinfo.cpp +++ b/cpp/cmd/mrinfo.cpp @@ -19,10 +19,11 @@ #include "command.h" #include "dwi/gradient.h" +#include "dwi/shells.h" #include "file/json_utils.h" #include "header.h" #include "image_io/pipe.h" -#include "phase_encoding.h" +#include "metadata/phase_encoding.h" #include "types.h" #include @@ -120,7 +121,7 @@ void usage() { + Option ("shell_sizes", "list the number of volumes in each 
shell") + Option ("shell_indices", "list the image volumes attributed to each b-value shell") - + PhaseEncoding::ExportOptions + + Metadata::PhaseEncoding::ExportOptions + Option ("petable", "print the phase encoding table") + OptionGroup ("Handling of piped images") @@ -253,7 +254,7 @@ void run() { ImageIO::Pipe::delete_piped_images = false; const bool export_grad = check_option_group(GradExportOptions); - const bool export_pe = check_option_group(PhaseEncoding::ExportOptions); + const bool export_pe = check_option_group(Metadata::PhaseEncoding::ExportOptions); if (export_grad && argument.size() > 1) throw Exception("can only export DW gradient table to file if a single input image is provided"); @@ -311,7 +312,7 @@ void run() { if (transform) print_transform(header); if (petable) - std::cout << PhaseEncoding::get_scheme(header) << "\n"; + std::cout << Metadata::PhaseEncoding::get_scheme(header) << "\n"; for (size_t n = 0; n < properties.size(); ++n) print_properties(header, properties[n][0]); @@ -330,7 +331,7 @@ void run() { } DWI::export_grad_commandline(header); - PhaseEncoding::export_commandline(header); + Metadata::PhaseEncoding::export_commandline(header); if (json_keyval) File::JSON::write(header, *json_keyval, (argument.size() > 1 ? std::string("") : std::string(argument[0]))); diff --git a/cpp/cmd/mrmath.cpp b/cpp/cmd/mrmath.cpp index f4b23c21b4..48469b3d47 100644 --- a/cpp/cmd/mrmath.cpp +++ b/cpp/cmd/mrmath.cpp @@ -24,8 +24,8 @@ #include "math/math.h" #include "math/median.h" #include "memory.h" +#include "metadata/phase_encoding.h" #include "misc/voxel2vector.h" -#include "phase_encoding.h" #include "progressbar.h" #include @@ -360,7 +360,7 @@ void run() { } catch (...) { } DWI::clear_DW_scheme(header_out); - PhaseEncoding::clear_scheme(header_out); + Metadata::PhaseEncoding::clear_scheme(header_out.keyval()); } header_out.datatype() = DataType::from_command_line(DataType::Float32); @@ -449,7 +449,7 @@ void run() { throw Exception("Image " + path + " has axis with non-unary dimension beyond first input image " + header.name()); } - header.merge_keyval(temp); + header.merge_keyval(temp.keyval()); } // Instantiate a kernel depending on the operation requested diff --git a/cpp/cmd/mrtransform.cpp b/cpp/cmd/mrtransform.cpp index 9ee295efa3..fac500b89f 100644 --- a/cpp/cmd/mrtransform.cpp +++ b/cpp/cmd/mrtransform.cpp @@ -630,7 +630,7 @@ void run() { if (interp == 0) // nearest output_header.datatype() = DataType::from_command_line(input_header.datatype()); - auto output = Image::create(argument[1], output_header).with_direct_io(); + auto output = Image::create(argument[1], output_header); switch (interp) { case 0: @@ -672,7 +672,7 @@ void run() { output_header.transform() = warp.transform(); } - auto output = Image::create(argument[1], output_header).with_direct_io(); + auto output = Image::create(argument[1], output_header); if (warp.ndim() == 5) { Image warp_deform; @@ -727,7 +727,7 @@ void run() { output_header.transform() = linear_transform; else output_header.transform() = linear_transform.inverse() * output_header.transform(); - auto output = Image::create(argument[1], output_header).with_direct_io(); + auto output = Image::create(argument[1], output_header); copy_with_progress(input, output); if (fod_reorientation || modulate_jac) { diff --git a/cpp/cmd/tckglobal.cpp b/cpp/cmd/tckglobal.cpp index dc03d4aa29..e189bd7db8 100644 --- a/cpp/cmd/tckglobal.cpp +++ b/cpp/cmd/tckglobal.cpp @@ -235,8 +235,7 @@ void run() { using namespace DWI::Tractography::GT; // Inputs 
----------------------------------------------------------------------------- - - auto dwi = Image::open(argument[0]).with_direct_io(3); + Header header_in = Header::open(argument[0]); Properties properties; properties.resp_WM = File::Matrix::load_matrix(argument[1]); @@ -253,7 +252,7 @@ void run() { opt = get_options("mask"); if (!opt.empty()) { mask = Image::open(opt[0][0]); - check_dimensions(dwi, mask, 0, 3); + check_dimensions(header_in, mask, 0, 3); } // Parameters ------------------------------------------------------------------------- @@ -317,9 +316,9 @@ void run() { if (!opt.empty()) stats.open_stream(opt[0][0]); + auto dwi = header_in.get_image().with_direct_io(3); ParticleGrid pgrid(dwi); - - ExternalEnergyComputer *Eext = new ExternalEnergyComputer(stats, dwi, properties); + ExternalEnergyComputer *Eext = new ExternalEnergyComputer(stats, header_in, properties); InternalEnergyComputer *Eint = new InternalEnergyComputer(stats, pgrid); Eint->setConnPot(cpot); EnergySumComputer *Esum = new EnergySumComputer( @@ -362,14 +361,14 @@ void run() { pgrid.exportTracks(writer); // Save fiso, tod and eext - Header header(dwi); - header.datatype() = DataType::Float32; + Header header_out(dwi); + header_out.datatype() = DataType::Float32; opt = get_options("fod"); if (!opt.empty()) { INFO("Saving fODF image to file"); - header.size(3) = Math::SH::NforL(properties.Lmax); - auto fODF = Image::create(opt[0][0], header); + header_out.size(3) = Math::SH::NforL(properties.Lmax); + auto fODF = Image::create(opt[0][0], header_out); auto f = __copy_fod(properties.Lmax, properties.weight, get_options("noapo").empty()); ThreadedLoop(Eext->getTOD(), 0, 3).run(f, Eext->getTOD(), fODF); } @@ -378,8 +377,8 @@ void run() { if (!opt.empty()) { if (!properties.resp_ISO.empty()) { INFO("Saving isotropic fractions to file"); - header.size(3) = properties.resp_ISO.size(); - auto Fiso = Image::create(opt[0][0], header); + header_out.size(3) = properties.resp_ISO.size(); + auto Fiso = Image::create(opt[0][0], header_out); threaded_copy(Eext->getFiso(), Fiso); } else { WARN("Ignore saving file " + opt[0][0] + ", because no isotropic response functions were provided."); @@ -389,8 +388,8 @@ void run() { opt = get_options("eext"); if (!opt.empty()) { INFO("Saving external energy to file"); - header.ndim() = 3; - auto EextI = Image::create(opt[0][0], header); + header_out.ndim() = 3; + auto EextI = Image::create(opt[0][0], header_out); threaded_copy(Eext->getEext(), EextI); } } diff --git a/cpp/cmd/transformconvert.cpp b/cpp/cmd/transformconvert.cpp index 779ad93f4f..d302b693a5 100644 --- a/cpp/cmd/transformconvert.cpp +++ b/cpp/cmd/transformconvert.cpp @@ -14,6 +14,7 @@ * For more details, see http://www.mrtrix.org/. 
*/ +#include "axes.h" #include "command.h" #include "file/key_value.h" #include "file/matrix.h" @@ -64,15 +65,14 @@ void usage() { // clang-format on transform_type get_flirt_transform(const Header &header) { - std::vector axes; - transform_type nifti_transform = File::NIfTI::adjust_transform(header, axes); - if (nifti_transform.matrix().topLeftCorner<3, 3>().determinant() < 0.0) - return nifti_transform; - transform_type coord_switch; - coord_switch.setIdentity(); + const transform_type ondisk_transform = header.realignment().orig_transform(); + if (ondisk_transform.matrix().topLeftCorner<3, 3>().determinant() < 0.0) + return ondisk_transform; + transform_type coord_switch(transform_type::Identity()); coord_switch(0, 0) = -1.0f; - coord_switch(0, 3) = (header.size(axes[0]) - 1) * header.spacing(axes[0]); - return nifti_transform * coord_switch; + coord_switch(0, 3) = + (header.size(header.realignment().permutation(0)) - 1) * header.spacing(header.realignment().permutation(0)); + return ondisk_transform * coord_switch; } // transform_type parse_surfer_transform (const Header& header) { diff --git a/cpp/cmd/voxel2mesh.cpp b/cpp/cmd/voxel2mesh.cpp index ac5800d5cd..82de544eea 100644 --- a/cpp/cmd/voxel2mesh.cpp +++ b/cpp/cmd/voxel2mesh.cpp @@ -65,8 +65,8 @@ void run() { if (get_options("blocky").empty()) { auto input = Image::open(argument[0]); - auto opt = get_options("template"); - const float threshold = opt.empty() ? Filter::estimate_optimal_threshold(input) : float(opt[0][0]); + auto opt = get_options("threshold"); + const float threshold = opt.empty() ? Filter::estimate_optimal_threshold(input) : default_type(opt[0][0]); Surface::Algo::image2mesh_mc(input, mesh, threshold); } else { auto input = Image::open(argument[0]); diff --git a/cpp/core/axes.cpp b/cpp/core/axes.cpp index 39fb023b6d..381a48b889 100644 --- a/cpp/core/axes.cpp +++ b/cpp/core/axes.cpp @@ -16,69 +16,21 @@ #include "axes.h" -#include "exception.h" -#include "mrtrix.h" - namespace MR::Axes { -std::string dir2id(const Eigen::Vector3d &axis) { - if (axis[0] == -1) { - assert(!axis[1]); - assert(!axis[2]); - return "i-"; - } else if (axis[0] == 1) { - assert(!axis[1]); - assert(!axis[2]); - return "i"; - } else if (axis[1] == -1) { - assert(!axis[0]); - assert(!axis[2]); - return "j-"; - } else if (axis[1] == 1) { - assert(!axis[0]); - assert(!axis[2]); - return "j"; - } else if (axis[2] == -1) { - assert(!axis[0]); - assert(!axis[1]); - return "k-"; - } else if (axis[2] == 1) { - assert(!axis[0]); - assert(!axis[1]); - return "k"; - } else { - throw Exception("Malformed axis direction: \"" + str(axis.transpose()) + "\""); - } -} - -Eigen::Vector3d id2dir(const std::string &id) { - if (id == "i-") - return {-1, 0, 0}; - else if (id == "i") - return {1, 0, 0}; - else if (id == "j-") - return {0, -1, 0}; - else if (id == "j") - return {0, 1, 0}; - else if (id == "k-") - return {0, 0, -1}; - else if (id == "k") - return {0, 0, 1}; - else - throw Exception("Malformed image axis identifier: \"" + id + "\""); -} - -void get_shuffle_to_make_axial(const transform_type &T, std::array &perm, std::array &flip) { - perm = closest(T.matrix().topLeftCorner<3, 3>()); +Shuffle get_shuffle_to_make_RAS(const transform_type &T) { + Shuffle result; + result.permutations = closest(T.linear()); // Figure out whether any of the rows of the transform point in the // opposite direction to the MRtrix convention - flip[perm[0]] = T(0, perm[0]) < 0.0; - flip[perm[1]] = T(1, perm[1]) < 0.0; - flip[perm[2]] = T(2, perm[2]) < 0.0; + 
result.flips[result.permutations[0]] = T(0, result.permutations[0]) < 0.0; + result.flips[result.permutations[1]] = T(1, result.permutations[1]) < 0.0; + result.flips[result.permutations[2]] = T(2, result.permutations[2]) < 0.0; + return result; } -std::array closest(const Eigen::Matrix3d &M) { - std::array result{3, 3, 3}; +permutations_type closest(const Eigen::Matrix3d &M) { + permutations_type result; // Find which row of the transform is closest to each scanner axis Eigen::Matrix3d::Index index(0); M.row(0).cwiseAbs().maxCoeff(&index); @@ -87,7 +39,7 @@ std::array closest(const Eigen::Matrix3d &M) { result[1] = index; M.row(2).cwiseAbs().maxCoeff(&index); result[2] = index; - assert(result[0] < 3 && result[1] < 3 && result[2] < 3); + assert(result.valid()); // Disambiguate permutations auto not_any_of = [](size_t a, size_t b) -> size_t { @@ -105,7 +57,7 @@ std::array closest(const Eigen::Matrix3d &M) { result[2] = not_any_of(result[0], result[1]); if (result[1] == result[2]) result[2] = not_any_of(result[0], result[1]); - assert(result[0] != result[1] && result[1] != result[2] && result[2] != result[0]); + assert(result.valid()); return result; } diff --git a/cpp/core/axes.h b/cpp/core/axes.h index eb8d5fe308..001fe59aba 100644 --- a/cpp/core/axes.h +++ b/cpp/core/axes.h @@ -16,25 +16,44 @@ #pragma once -#include +#include +#include +#include #include "types.h" +// TODO Rename to "SpatialAxes"? namespace MR::Axes { -//! convert axis directions between formats -/*! these helper functions convert the definition of - * phase-encoding direction between a 3-vector (e.g. - * [0 1 0] ) and a NIfTI axis identifier (e.g. 'i-') - */ -std::string dir2id(const Eigen::Vector3d &); -Eigen::Vector3d id2dir(const std::string &); +// TODO Change to 8-bit integer & define invalid value +class permutations_type : public std::array { +public: + using BaseType = std::array; + permutations_type() + : std::array{std::numeric_limits::max(), + std::numeric_limits::max(), + std::numeric_limits::max()} {} + bool is_identity() const { return ((*this)[0] == 0 && (*this)[1] == 1 && (*this)[2] == 2); } + bool valid() const { return (std::set(begin(), end()) == std::set({0, 1, 2})); } +}; +using flips_type = std::array; +class Shuffle { +public: + Shuffle() : permutations(), flips({false, false, false}) {} + bool is_identity() const { + return (permutations.is_identity() && // + !flips[0] && !flips[1] && !flips[2]); + } + bool valid() const { return permutations.valid(); } + permutations_type permutations; + flips_type flips; +}; //! determine the axis permutations and flips necessary to make an image //! appear approximately axial -void get_shuffle_to_make_axial(const transform_type &T, std::array &perm, std::array &flip); +Shuffle get_shuffle_to_make_RAS(const transform_type &T); //! 
determine which vectors of a 3x3 transform are closest to the three axis indices -std::array closest(const Eigen::Matrix3d &M); +permutations_type closest(const Eigen::Matrix3d &M); } // namespace MR::Axes diff --git a/cpp/core/dwi/gradient.cpp b/cpp/core/dwi/gradient.cpp index de62290fa8..ec3cfd08cf 100644 --- a/cpp/core/dwi/gradient.cpp +++ b/cpp/core/dwi/gradient.cpp @@ -16,6 +16,7 @@ #include "dwi/gradient.h" #include "dwi/shells.h" +#include "file/config.h" #include "file/matrix.h" #include "file/nifti_utils.h" @@ -82,6 +83,15 @@ const char *const bvalue_scaling_description( " and override MRrtix3's automatic detection."); // clang-format on +// CONF option: BZeroThreshold +// CONF default: 10.0 +// CONF Specifies the b-value threshold for determining those image +// CONF volumes that correspond to b=0. +default_type bzero_threshold() { + static const default_type value = File::Config::get_float("BZeroThreshold", DWI_BZERO_THREHSOLD_DEFAULT); + return value; +} + BValueScalingBehaviour get_cmdline_bvalue_scaling_behaviour() { auto opt = App::get_options("bvalue_scaling"); if (opt.empty()) @@ -105,93 +115,134 @@ Eigen::MatrixXd parse_DW_scheme(const Header &header) { } Eigen::MatrixXd load_bvecs_bvals(const Header &header, const std::string &bvecs_path, const std::string &bvals_path) { + assert(header.realignment().orig_transform().matrix().allFinite()); + Eigen::MatrixXd bvals, bvecs; try { bvals = File::Matrix::load_matrix<>(bvals_path); bvecs = File::Matrix::load_matrix<>(bvecs_path); } catch (Exception &e) { - throw Exception(e, - "Unable to import files \"" + bvecs_path + "\" and \"" + bvals_path + "\" as FSL bvecs/bvals pair"); + // clang-format off + throw Exception(e, "Unable to import files \"" + bvecs_path + "\" and \"" + bvals_path + "\"" + " as FSL bvecs/bvals pair"); + // clang-format on } if (bvals.rows() != 1) { if (bvals.cols() == 1) bvals.transposeInPlace(); // transpose if file contains column vector else - throw Exception("bvals file must contain 1 row or column only (file \"" + bvals_path + "\" has " + - str(bvals.rows()) + ")"); + // clang-format off + throw Exception("bvals file must contain 1 row or column only;" + " file \"" + bvals_path + "\" has " + str(bvals.rows())); + // clang-format on } if (bvecs.rows() != 3) { if (bvecs.cols() == 3) bvecs.transposeInPlace(); else - throw Exception("bvecs file must contain exactly 3 rows or columns (file \"" + bvecs_path + "\" has " + - str(bvecs.rows()) + ")"); + // clang-format off + throw Exception("bvecs file must contain exactly 3 rows or columns;" + " file \"" + bvecs_path + "\" has " + str(bvecs.rows())); + // clang-format on } if (bvals.cols() != bvecs.cols()) - throw Exception("bvecs and bvals files must have same number of diffusion directions (file \"" + bvecs_path + - "\" has " + str(bvecs.cols()) + ", file \"" + bvals_path + "\" has " + str(bvals.cols()) + ")"); + // clang-format off + throw Exception("bvecs and bvals files must have same number of diffusion directions;" + " file \"" + bvecs_path + "\" has " + str(bvecs.cols()) + "," + " file \"" + bvals_path + "\" has " + str(bvals.cols()) + ""); + // clang-format on const size_t num_volumes = header.ndim() < 4 ? 
1 : header.size(3); if (size_t(bvals.cols()) != num_volumes) - throw Exception("bvecs and bvals files must have same number of diffusion directions as DW-image (gradients: " + - str(bvecs.cols()) + ", image: " + str(num_volumes) + ")"); + // clang-format off + throw Exception("bvecs and bvals files do not have same number of diffusion directions as DW-image:" + " gradients: " + str(bvecs.cols()) + "," + " image: " + str(num_volumes)); + // clang-format on // bvecs format actually assumes a LHS coordinate system even if image is - // stored using RHS - x axis is flipped to make linear 3x3 part of + // stored using RHS; first axis is flipped to make linear 3x3 part of // transform have negative determinant: - std::vector order; - auto adjusted_transform = File::NIfTI::adjust_transform(header, order); - if (adjusted_transform.linear().determinant() > 0.0) - bvecs.row(0) = -bvecs.row(0); + if (header.realignment().orig_transform().linear().determinant() > 0.0) + bvecs.row(0) *= -1.0; + Eigen::MatrixXd grad(bvecs.cols(), 4); + grad.leftCols<3>().transpose() = header.realignment().orig_transform().linear() * bvecs; + grad.col(3) = bvals.row(0); - // account for the fact that bvecs are specified wrt original image axes, - // which may have been re-ordered and/or inverted by MRtrix to match the - // expected anatomical frame of reference: - Eigen::MatrixXd G(bvecs.cols(), 3); - for (ssize_t n = 0; n < G.rows(); ++n) { - G(n, order[0]) = header.stride(order[0]) > 0 ? bvecs(0, n) : -bvecs(0, n); - G(n, order[1]) = header.stride(order[1]) > 0 ? bvecs(1, n) : -bvecs(1, n); - G(n, order[2]) = header.stride(order[2]) > 0 ? bvecs(2, n) : -bvecs(2, n); + // Substitute NaNs with b=0 volumes + ssize_t nans_present_bvecs = false; + ssize_t nans_present_bvals = false; + ssize_t nan_linecount = 0; + for (ssize_t n = 0; n != grad.rows(); ++n) { + bool zero_row = false; + if (std::isnan(grad(n, 3))) { + if (grad.block<1, 3>(n, 0).squaredNorm() > 0.0) + // clang-format off + throw Exception("Corrupt content in bvecs/bvals data" + " (" + bvecs_path + " & " + bvals_path + ")" + " (NaN present in bval but valid direction in bvec)"); + // clang-format on + nans_present_bvals = true; + zero_row = true; + } + if (grad.block<1, 3>(n, 0).hasNaN()) { + if (grad(n, 3) > 0.0) + // clang-format off + throw Exception("Corrupt content in bvecs/bvals data" + " (" + bvecs_path + " & " + bvals_path + ")" + " (NaN bvec direction but non-zero value in bval)"); + // clang-format on + nans_present_bvecs = true; + zero_row = true; + } + if (zero_row) { + grad.block<1, 4>(n, 0).setZero(); + ++nan_linecount; + } + } + if (nan_linecount > 0) { + WARN(str(nan_linecount) + " row" + (nan_linecount > 1 ? "s" : "") + " with NaN values detected in " + + (nans_present_bvecs ? "bvecs file " + bvecs_path + (nans_present_bvals ? " and" : "") : "") + + (nans_present_bvals ? 
"bvals file " + bvals_path : "") + + "; these have been interpreted as b=0 volumes by MRtrix"); } - - // rotate gradients into scanner coordinate system: - Eigen::MatrixXd grad(G.rows(), 4); - - grad.leftCols<3>().transpose() = header.transform().rotation() * G.transpose(); - grad.col(3) = bvals.row(0); return grad; } void save_bvecs_bvals(const Header &header, const std::string &bvecs_path, const std::string &bvals_path) { const auto grad = parse_DW_scheme(header); - - // rotate vectors from scanner space to image space - Eigen::MatrixXd G = grad.leftCols<3>() * header.transform().rotation(); - - // deal with FSL requiring gradient directions to coincide with data strides - // also transpose matrices in preparation for file output - std::vector order; - auto adjusted_transform = File::NIfTI::adjust_transform(header, order); - Eigen::MatrixXd bvecs(3, grad.rows()); - Eigen::MatrixXd bvals(1, grad.rows()); - for (ssize_t n = 0; n < G.rows(); ++n) { - bvecs(0, n) = header.stride(order[0]) > 0 ? G(n, order[0]) : -G(n, order[0]); - bvecs(1, n) = header.stride(order[1]) > 0 ? G(n, order[1]) : -G(n, order[1]); - bvecs(2, n) = header.stride(order[2]) > 0 ? G(n, order[2]) : -G(n, order[2]); - bvals(0, n) = grad(n, 3); + Axes::permutations_type order; + const auto adjusted_transform = File::NIfTI::adjust_transform(header, order); + Eigen::MatrixXd bvecs = adjusted_transform.inverse().linear() * grad.leftCols<3>().transpose(); + + Eigen::VectorXd bvals = grad.col(3); + size_t bval_zeroed_count = 0; + for (ssize_t n = 0; n < bvals.size(); ++n) { + if (bvecs.row(n).squaredNorm() > 0.0 && bvals[n] && bvals[n] <= bzero_threshold()) { + ++bval_zeroed_count; + bvals[n] = 0.0; + } } // bvecs format actually assumes a LHS coordinate system even if image is - // stored using RHS - x axis is flipped to make linear 3x3 part of + // stored using RHS; first axis is flipped to make linear 3x3 part of // transform have negative determinant: if (adjusted_transform.linear().determinant() > 0.0) bvecs.row(0) = -bvecs.row(0); + if (bval_zeroed_count) { + WARN("For image \"" + header.name() + "\"," // + + str(bval_zeroed_count) + " volumes had zero gradient direction vector," // + + " but 0.0 < b-value <= BZeroThreshold;" // + + " these are clamped to zero in bvals file \"" + bvals_path + "\"" // + + " for compatibility with external software"); // + } + File::Matrix::save_matrix(bvecs, bvecs_path, KeyValues(), false); - File::Matrix::save_matrix(bvals, bvals_path, KeyValues(), false); + File::Matrix::save_vector(bvals, bvals_path, KeyValues(), false); } void clear_DW_scheme(Header &header) { @@ -248,8 +299,11 @@ Eigen::MatrixXd get_DW_scheme(const Header &header, BValueScalingBehaviour bvalu const bool exceeds_single_precision = max_log_scaling_factor > 1e-5; const bool requires_bvalue_scaling = max_log_scaling_factor > 0.01; - DEBUG("b-value scaling: max scaling factor = exp(" + str(max_log_scaling_factor) + - ") = " + str(max_scaling_factor)); + // clang-format off + DEBUG("b-value scaling:" + " max scaling factor = exp(" + str(max_log_scaling_factor) + ")" + " = " + str(max_scaling_factor)); + // clang-format on if ((requires_bvalue_scaling && bvalue_scaling == BValueScalingBehaviour::Auto) || bvalue_scaling == BValueScalingBehaviour::UserOn) { @@ -271,6 +325,7 @@ Eigen::MatrixXd get_DW_scheme(const Header &header, BValueScalingBehaviour bvalu str(max_scaling_factor) + ")"); } } + assert(grad.allFinite()); // write the scheme as interpreted back into the header if: // - vector normalisation effect is large, 
regardless of whether or not b-value scaling was applied diff --git a/cpp/core/dwi/gradient.h b/cpp/core/dwi/gradient.h index ae6faf0bb5..30ff4024db 100644 --- a/cpp/core/dwi/gradient.h +++ b/cpp/core/dwi/gradient.h @@ -17,19 +17,23 @@ #pragma once #include "app.h" -#include "dwi/shells.h" #include "file/config.h" #include "file/path.h" #include "header.h" #include "math/SH.h" #include "math/condition_number.h" #include "math/sphere.h" +#include "types.h" + +#define DWI_BZERO_THREHSOLD_DEFAULT 10.0 namespace MR { namespace App { class OptionGroup; } +// TODO Move some to metadata + namespace DWI { App::OptionGroup GradImportOptions(); @@ -38,6 +42,8 @@ App::OptionGroup GradExportOptions(); extern App::Option bvalue_scaling_option; extern const char *const bvalue_scaling_description; +default_type bzero_threshold(); + //! check that the DW scheme matches the DWI data in \a header /*! \note This is mostly for internal use. If you have obtained the DW * scheme using DWI::get_DW_scheme(), it should already by guaranteed to @@ -134,6 +140,63 @@ template void set_DW_scheme(Header &header, const MatrixType } } +//! store the DW gradient encoding matrix in a KeyValues structure +/*! this will store the DW gradient encoding matrix under the key 'dw_scheme'. + */ +template void set_DW_scheme(KeyValues &keyval, const MatrixType &G) { + if (!G.rows()) { + auto it = keyval.find("dw_scheme"); + if (it != keyval.end()) + keyval.erase(it); + return; + } + keyval["dw_scheme"] = scheme2str(G); +} + +template +Eigen::MatrixXd resolve_DW_scheme(const MatrixType1 &one, const MatrixType2 &two) { + if (one.rows() != two.rows()) + throw Exception("Unequal numbers of rows between gradient tables"); + if (one.cols() != two.cols()) + throw Exception("Unequal numbers of rows between gradient tables"); + Eigen::MatrixXd result(one.rows(), one.cols()); + // Don't know how to intellegently combine data beyond four columns; + // therefore don't proceed if such data are present and are not precisely equivalent + if (one.cols() > 4) { + if (one.rightCols(one.cols() - 4) != two.rightCols(two.cols() - 4)) + throw Exception("Unequal dw_scheme contents beyond standard four columns"); + result.rightCols(one.cols() - 4) = one.rightCols(one.cols() - 4); + } + for (ssize_t rowindex = 0; rowindex != one.rows(); ++rowindex) { + auto one_dir = one.template block<1, 3>(rowindex, 0); + auto two_dir = two.template block<1, 3>(rowindex, 0); + const default_type one_bvalue = one(rowindex, 3); + const default_type two_bvalue = two(rowindex, 3); + const bool is_bzero = std::max(one_bvalue, two_bvalue) <= bzero_threshold(); + if (one_dir == two_dir) { + result.block<1, 3>(rowindex, 0) = one_dir; + } else { + const Eigen::Vector3d mean_dir = (one_dir + two_dir).normalized(); + if (!is_bzero && mean_dir.dot(one_dir) < 1.0 - 1e-3) { + throw Exception( // + std::string("Diffusion vector directions not equal within permissible imprecision") // + + " (row " + str(rowindex) + ":" // + + " " + str(one_dir.transpose()) + " <--> " + str(two_dir.transpose()) + ";" // + + " dot product " + str(mean_dir.dot(one_dir)) + ")"); // + } + result.block<1, 3>(rowindex, 0) = mean_dir; + } + if (one_bvalue == two_bvalue) { + result(rowindex, 3) = one_bvalue; + } else if (is_bzero || abs(one_bvalue - two_bvalue) <= 1.0) { + result(rowindex, 3) = 0.5 * (one_bvalue + two_bvalue); + } else { + throw Exception("Diffusion gradient table b-values not equivalent"); + } + } + return result; +} + //! parse the DW gradient encoding matrix from a header /*! 
extract the DW gradient encoding matrix stored in the \a header if one * is present. This is expected to be stored in the Header::keyval() diff --git a/cpp/core/dwi/sdeconv/csd.h b/cpp/core/dwi/sdeconv/csd.h index 528372a3f9..d81c403f10 100644 --- a/cpp/core/dwi/sdeconv/csd.h +++ b/cpp/core/dwi/sdeconv/csd.h @@ -19,6 +19,7 @@ #include "app.h" #include "dwi/directions/predefined.h" #include "dwi/gradient.h" +#include "dwi/shells.h" #include "file/matrix.h" #include "header.h" #include "math/SH.h" diff --git a/cpp/core/dwi/shells.h b/cpp/core/dwi/shells.h index 323954ab76..c5a0ace63b 100644 --- a/cpp/core/dwi/shells.h +++ b/cpp/core/dwi/shells.h @@ -20,6 +20,7 @@ #include #include "app.h" +#include "dwi/gradient.h" #include "file/config.h" #include "misc/bitset.h" #include "types.h" @@ -34,13 +35,6 @@ // Default number of volumes necessary for a shell to be retained // (note: only applies if function reject_small_shells() is called explicitly) #define DWI_SHELLS_MIN_DIRECTIONS 6 -// Default b-value threshold for a shell to be classified as "b=0" -#define DWI_SHELLS_BZERO_THREHSOLD 10.0 - -// CONF option: BZeroThreshold -// CONF default: 10.0 -// CONF Specifies the b-value threshold for determining those image -// CONF volumes that correspond to b=0. // CONF option: BValueEpsilon // CONF default: 80.0 @@ -57,11 +51,6 @@ namespace DWI { extern const App::OptionGroup ShellsOption; -FORCE_INLINE default_type bzero_threshold() { - static const default_type value = File::Config::get_float("BZeroThreshold", DWI_SHELLS_BZERO_THREHSOLD); - return value; -} - class Shell { public: @@ -76,7 +65,7 @@ class Shell { default_type get_min() const { return min; } default_type get_max() const { return max; } - bool is_bzero() const { return (mean < bzero_threshold()); } + bool is_bzero() const { return (mean < MR::DWI::bzero_threshold()); } bool operator<(const Shell &rhs) const { return (mean < rhs.mean); } diff --git a/cpp/core/dwi/tractography/GT/externalenergy.cpp b/cpp/core/dwi/tractography/GT/externalenergy.cpp index 3915866dd1..6f804534f9 100644 --- a/cpp/core/dwi/tractography/GT/externalenergy.cpp +++ b/cpp/core/dwi/tractography/GT/externalenergy.cpp @@ -24,10 +24,10 @@ namespace MR::DWI::Tractography::GT { -ExternalEnergyComputer::ExternalEnergyComputer(Stats &stat, const Image &dwimage, const Properties &props) +ExternalEnergyComputer::ExternalEnergyComputer(Stats &stat, Header &dwiheader, const Properties &props) : EnergyComputer(stat), - dwi(dwimage), - T(Transform(dwimage).scanner2voxel), + dwi(dwiheader.get_image().with_direct_io(3)), + T(Transform(dwiheader).scanner2voxel), lmax(props.Lmax), ncols(Math::SH::NforL(lmax)), nf(props.resp_ISO.size()), @@ -37,7 +37,7 @@ ExternalEnergyComputer::ExternalEnergyComputer(Stats &stat, const Image & DEBUG("Initialise computation of external energy."); // Create images -------------------------------------------------------------- - Header header(dwimage); + Header header(dwiheader); header.datatype() = DataType::Float32; header.size(3) = ncols; tod = Image::scratch(header, "TOD image"); @@ -53,7 +53,7 @@ ExternalEnergyComputer::ExternalEnergyComputer(Stats &stat, const Image & eext = Image::scratch(header, "external energy"); // Set kernel matrices -------------------------------------------------------- - auto grad = DWI::get_DW_scheme(dwimage); + auto grad = DWI::get_DW_scheme(dwiheader); nrows = grad.rows(); DWI::Shells shells(grad); diff --git a/cpp/core/dwi/tractography/GT/externalenergy.h b/cpp/core/dwi/tractography/GT/externalenergy.h index 
d819253db2..c86828cd71 100644 --- a/cpp/core/dwi/tractography/GT/externalenergy.h +++ b/cpp/core/dwi/tractography/GT/externalenergy.h @@ -16,6 +16,7 @@ #pragma once +#include "header.h" #include "image.h" #include "math/constrained_least_squares.h" #include "transform.h" @@ -28,7 +29,7 @@ namespace MR::DWI::Tractography::GT { class ExternalEnergyComputer : public EnergyComputer { public: - ExternalEnergyComputer(Stats &stat, const Image &dwimage, const Properties &props); + ExternalEnergyComputer(Stats &stat, Header &dwimage, const Properties &props); Image &getTOD() { return tod; } Image &getFiso() { return fiso; } diff --git a/cpp/core/dwi/tractography/GT/particlegrid.cpp b/cpp/core/dwi/tractography/GT/particlegrid.cpp index 721dea563a..ae07d38310 100644 --- a/cpp/core/dwi/tractography/GT/particlegrid.cpp +++ b/cpp/core/dwi/tractography/GT/particlegrid.cpp @@ -18,6 +18,22 @@ namespace MR::DWI::Tractography::GT { +ParticleGrid::ParticleGrid(const Header &H) { + DEBUG("Initialise particle grid."); + dims[0] = Math::ceil(H.size(0) * H.spacing(0) / (2.0 * Particle::L)); + dims[1] = Math::ceil(H.size(1) * H.spacing(1) / (2.0 * Particle::L)); + dims[2] = Math::ceil(H.size(2) * H.spacing(2) / (2.0 * Particle::L)); + grid.resize(dims[0] * dims[1] * dims[2]); + + // Initialise scanner-to-grid transform + Eigen::DiagonalMatrix newspacing(2.0 * Particle::L, 2.0 * Particle::L, 2.0 * Particle::L); + Eigen::Vector3d shift(H.spacing(0) / 2.0 - Particle::L, // + H.spacing(1) / 2.0 - Particle::L, // + H.spacing(2) / 2.0 - Particle::L); // + T_s2g = H.transform() * newspacing; + T_s2g = T_s2g.inverse().translate(shift); +} + void ParticleGrid::add(const Point_t &pos, const Point_t &dir) { Particle *p = pool.create(pos, dir); size_t gidx = pos2idx(pos); diff --git a/cpp/core/dwi/tractography/GT/particlegrid.h b/cpp/core/dwi/tractography/GT/particlegrid.h index 13cda97e0c..69ad6bc1c1 100644 --- a/cpp/core/dwi/tractography/GT/particlegrid.h +++ b/cpp/core/dwi/tractography/GT/particlegrid.h @@ -35,22 +35,7 @@ class ParticleGrid { public: using ParticleVectorType = std::vector; - template ParticleGrid(const HeaderType &image) { - DEBUG("Initialise particle grid."); - dims[0] = Math::ceil(image.size(0) * image.spacing(0) / (2.0 * Particle::L)); - dims[1] = Math::ceil(image.size(1) * image.spacing(1) / (2.0 * Particle::L)); - dims[2] = Math::ceil(image.size(2) * image.spacing(2) / (2.0 * Particle::L)); - grid.resize(dims[0] * dims[1] * dims[2]); - - // Initialise scanner-to-grid transform - Eigen::DiagonalMatrix newspacing(2.0 * Particle::L, 2.0 * Particle::L, 2.0 * Particle::L); - Eigen::Vector3d shift(image.spacing(0) / 2.0 - Particle::L, - image.spacing(1) / 2.0 - Particle::L, - image.spacing(2) / 2.0 - Particle::L); - T_s2g = image.transform() * newspacing; - T_s2g = T_s2g.inverse().translate(shift); - } - + ParticleGrid(const Header &H); ParticleGrid(const ParticleGrid &) = delete; ParticleGrid &operator=(const ParticleGrid &) = delete; diff --git a/cpp/core/dwi/tractography/algorithms/tensor_det.h b/cpp/core/dwi/tractography/algorithms/tensor_det.h index e9aa189de4..90260df7d0 100644 --- a/cpp/core/dwi/tractography/algorithms/tensor_det.h +++ b/cpp/core/dwi/tractography/algorithms/tensor_det.h @@ -53,7 +53,7 @@ class Tensor_Det : public MethodBase { properties["method"] = "TensorDet"; try { - auto grad = DWI::get_DW_scheme(source); + auto grad = DWI::get_DW_scheme(source_header); auto bmat_double = grad2bmatrix(grad); binv = Math::pinv(bmat_double).cast(); bmat = bmat_double.cast(); diff --git 
a/cpp/core/dwi/tractography/file.h b/cpp/core/dwi/tractography/file.h index 8376bee944..1796e8fbae 100644 --- a/cpp/core/dwi/tractography/file.h +++ b/cpp/core/dwi/tractography/file.h @@ -344,6 +344,11 @@ template class Writer : public WriterUnbuffered buffer_capacity) commit(); + if (tck.size() + 1 >= buffer_capacity) { + buffer_capacity = tck.size() + 1; + buffer.reset(new vector_type[buffer_capacity]); + } + for (const auto &i : tck) { assert(i.allFinite()); add_point(i); @@ -359,7 +364,7 @@ template class Writer : public WriterUnbuffered buffer; size_t buffer_size; std::string weights_buffer; diff --git a/cpp/core/dwi/tractography/tracking/shared.cpp b/cpp/core/dwi/tractography/tracking/shared.cpp index 056f256c39..6ad21c6a28 100644 --- a/cpp/core/dwi/tractography/tracking/shared.cpp +++ b/cpp/core/dwi/tractography/tracking/shared.cpp @@ -19,7 +19,8 @@ namespace MR::DWI::Tractography::Tracking { SharedBase::SharedBase(const std::string &diff_path, Properties &property_set) - : source(Image::open(diff_path).with_direct_io(3)), + : source_header(Header::open(diff_path)), + source(source_header.get_image().with_direct_io(3)), properties(property_set), init_dir({NaN, NaN, NaN}), min_num_points_preds(0), @@ -54,7 +55,7 @@ SharedBase::SharedBase(const std::string &diff_path, Properties &property_set) properties.set(rk4, "rk4"); properties.set(stop_on_all_include, "stop_on_all_include"); - properties["source"] = source.name(); + properties["source"] = source_header.name(); max_num_seeds = Defaults::seed_to_select_ratio * max_num_tracks; properties.set(max_num_seeds, "max_num_seeds"); diff --git a/cpp/core/dwi/tractography/tracking/shared.h b/cpp/core/dwi/tractography/tracking/shared.h index cb3832aaad..1f0b417b80 100644 --- a/cpp/core/dwi/tractography/tracking/shared.h +++ b/cpp/core/dwi/tractography/tracking/shared.h @@ -42,6 +42,7 @@ class SharedBase { virtual ~SharedBase(); + Header source_header; Image source; Properties &properties; Eigen::Vector3f init_dir; @@ -60,7 +61,9 @@ class SharedBase { bool is_act() const { return bool(act_shared_additions); } const ACT::ACT_Shared_additions &act() const { return *act_shared_additions; } - float vox() const { return std::pow(source.spacing(0) * source.spacing(1) * source.spacing(2), float(1.0 / 3.0)); } + float vox() const { + return std::pow(source_header.spacing(0) * source_header.spacing(1) * source_header.spacing(2), float(1.0 / 3.0)); + } void set_step_and_angle(const float stepsize, const float angle, bool is_higher_order); void set_num_points(); diff --git a/cpp/core/file/dicom/image.cpp b/cpp/core/file/dicom/image.cpp index ae531a2697..b306409216 100644 --- a/cpp/core/file/dicom/image.cpp +++ b/cpp/core/file/dicom/image.cpp @@ -153,6 +153,9 @@ void Image::parse_item(Element &item, const std::string &dirname) { } return; case 0x0020U: + for (const auto &p : item.parents) + if (p.group == 0x2005) + return; // ignore if nested within Philips private sequence switch (item.element) { case 0x000EU: ignore_series_num = item.is_in_series_ref_sequence(); diff --git a/cpp/core/file/dicom/mapper.cpp b/cpp/core/file/dicom/mapper.cpp index b9ca1ed2cf..e2c037c57a 100644 --- a/cpp/core/file/dicom/mapper.cpp +++ b/cpp/core/file/dicom/mapper.cpp @@ -25,8 +25,9 @@ #include "header.h" #include "image_io/default.h" #include "image_io/mosaic.h" +#include "image_io/null.h" #include "image_io/variable_scaling.h" -#include "phase_encoding.h" +#include "metadata/phase_encoding.h" namespace MR::File::Dicom { @@ -62,6 +63,8 @@ std::unique_ptr 
dicom_to_mapper(MR::Header &H, std::vector frames; + bool transfer_syntax_supported = true; + // loop over series list: for (const auto &series_it : series) { try { @@ -75,15 +78,8 @@ std::unique_ptr dicom_to_mapper(MR::Header &H, std::vectortransfer_syntax_supported) { - Exception E("unsupported transfer syntax found in DICOM data"); - E.push_back("consider using third-party tools to convert your data to standard uncompressed encoding"); - E.push_back("See the MRtrix3 documentation on DICOM handling for details:"); - E.push_back(" " - "http://mrtrix.readthedocs.io/en/latest/tips_and_tricks/" - "dicom_handling.html#error-unsupported-transfer-syntax"); - throw E; - } + if (!image_it->transfer_syntax_supported) + transfer_syntax_supported = false; // if multi-frame, loop over frames in image: if (!image_it->frames.empty()) { std::sort(image_it->frames.begin(), image_it->frames.end(), compare_ptr_contents()); @@ -206,7 +202,7 @@ std::unique_ptr dicom_to_mapper(MR::Header &H, std::vector dicom_to_mapper(MR::Header &H, std::vector dicom_to_mapper(MR::Header &H, std::vectoris_boolean()) { result.insert(std::make_pair(i.key(), i.value() ? "true" : "false")); - } else if (i->is_number_integer()) { - result.insert(std::make_pair(i.key(), str(i.value()))); - } else if (i->is_number_float()) { - result.insert(std::make_pair(i.key(), str(i.value()))); + } else if (i->is_number_integer() || i->is_number_float()) { + result.insert(std::make_pair(i.key(), i->dump())); } else if (i->is_string()) { const std::string s = unquote(i.value()); result.insert(std::make_pair(i.key(), s)); @@ -103,56 +105,16 @@ KeyValues read(const nlohmann::json &json, const KeyValues &preexisting) { throw Exception("JSON entry \"" + i.key() + "\" contains mixture of elements and arrays"); } } - for (const auto &kv : preexisting) { - if (kv.first == "comments" && result.find("comments") != result.end()) { - add_line(result["comments"], kv.second); - } else { - // Will not overwrite existing entries - result.insert(kv); - } - } return result; } -void read(const nlohmann::json &json, Header &header, const bool realign) { - header.keyval() = read(json, header.keyval()); - const bool do_realign = realign && Header::do_realign_transform; - - // The corresponding header may have been rotated on image load prior to the JSON - // being loaded. If this is the case, any fields that indicate an image axis - // number / direction need to be correspondingly modified. - std::array perm; - std::array flip; - header.realignment(perm, flip); - if (perm[0] == 0 && perm[1] == 1 && perm[2] == 2 && !flip[0] && !flip[1] && !flip[2]) - return; - - auto pe_scheme = PhaseEncoding::get_scheme(header); - if (pe_scheme.rows()) { - if (do_realign) { - PhaseEncoding::set_scheme(header, PhaseEncoding::transform_for_image_load(pe_scheme, header)); - INFO("Phase encoding information read from JSON file modified to conform to prior MRtrix3 internal transform " - "realignment of image \"" + - header.name() + "\""); - } else { - INFO("Phase encoding information read from JSON file not modified"); - } - } - - auto slice_encoding_it = header.keyval().find("SliceEncodingDirection"); - if (slice_encoding_it != header.keyval().end()) { - if (do_realign) { - const Eigen::Vector3d orig_dir(Axes::id2dir(slice_encoding_it->second)); - Eigen::Vector3d new_dir; - for (size_t axis = 0; axis != 3; ++axis) - new_dir[axis] = flip[perm[axis]] ? 
-orig_dir[perm[axis]] : orig_dir[perm[axis]]; - slice_encoding_it->second = Axes::dir2id(new_dir); - INFO("Slice encoding direction read from JSON file modified to conform to prior MRtrix3 internal transform " - "realignment of input image"); - } else { - INFO("Slice encoding information read from JSON file not modified"); - } - } +void read(const nlohmann::json &json, Header &header) { + KeyValues keyval = read(json); + // Reorientation based on image load should be applied + // exclusively to metadata loaded via JSON; not anything pre-existing + Metadata::PhaseEncoding::transform_for_image_load(keyval, header); + Metadata::SliceEncoding::transform_for_image_load(keyval, header); + header.merge_keyval(keyval); } namespace { @@ -247,40 +209,14 @@ void write(const KeyValues &keyval, nlohmann::json &json) { void write(const Header &header, nlohmann::json &json, const std::string &image_path) { Header H_adj(header); H_adj.name() = image_path; + if (!App::get_options("export_grad_fsl").empty() || !App::get_options("export_grad_mrtrix").empty()) + DWI::clear_DW_scheme(H_adj); if (!Path::has_suffix(image_path, {".nii", ".nii.gz", ".img"})) { write(H_adj.keyval(), json); return; } - - std::vector order; - std::array flip; - File::NIfTI::axes_on_write(header, order, flip); - if (order[0] == 0 && order[1] == 1 && order[2] == 2 && !flip[0] && !flip[1] && !flip[2]) { - INFO( - "No need to transform orientation-based information written to JSON file to match image: image is already RAS"); - write(H_adj.keyval(), json); - return; - } - - auto pe_scheme = PhaseEncoding::get_scheme(header); - if (pe_scheme.rows()) { - // Assume that image being written to disk is going to have its transform adjusted, - // so modify the phase encoding scheme appropriately before writing to JSON - PhaseEncoding::set_scheme(H_adj, PhaseEncoding::transform_for_nifti_write(pe_scheme, header)); - INFO("Phase encoding information written to JSON file modified according to expected output NIfTI header transform " - "realignment"); - } - auto slice_encoding_it = H_adj.keyval().find("SliceEncodingDirection"); - if (slice_encoding_it != H_adj.keyval().end()) { - const Eigen::Vector3d orig_dir(Axes::id2dir(slice_encoding_it->second)); - Eigen::Vector3d new_dir; - for (size_t axis = 0; axis != 3; ++axis) - new_dir[axis] = flip[axis] ? 
orig_dir[order[axis]] : -orig_dir[order[axis]]; - slice_encoding_it->second = Axes::dir2id(new_dir); - INFO("Slice encoding direction written to JSON file modified according to expected output NIfTI header transform " - "realignment"); - } - + Metadata::PhaseEncoding::transform_for_nifti_write(H_adj.keyval(), H_adj); + Metadata::SliceEncoding::transform_for_nifti_write(H_adj.keyval(), H_adj); write(H_adj.keyval(), json); } diff --git a/cpp/core/file/json_utils.h b/cpp/core/file/json_utils.h index bf45eb2a10..be8ffd782f 100644 --- a/cpp/core/file/json_utils.h +++ b/cpp/core/file/json_utils.h @@ -27,8 +27,8 @@ namespace File::JSON { void load(Header &H, const std::string &path); void save(const Header &H, const std::string &json_path, const std::string &image_path); -KeyValues read(const nlohmann::json &json, const KeyValues &preexisting = KeyValues()); -void read(const nlohmann::json &json, Header &header, const bool realign); +KeyValues read(const nlohmann::json &json); +void read(const nlohmann::json &json, Header &header); void write(const KeyValues &keyval, nlohmann::json &json); void write(const Header &header, nlohmann::json &json, const std::string &image_path); diff --git a/cpp/core/file/mgh.h b/cpp/core/file/mgh.h index 4981d3e554..ff87cb2d4d 100644 --- a/cpp/core/file/mgh.h +++ b/cpp/core/file/mgh.h @@ -542,7 +542,7 @@ template void write_header(const Header &H, Output &out) { if (ndim > 4) throw Exception("MGH file format does not support images of more than 4 dimensions"); - std::vector axes; + Axes::permutations_type axes; auto M = File::NIfTI::adjust_transform(H, axes); store(1, out); // version diff --git a/cpp/core/file/nifti_utils.cpp b/cpp/core/file/nifti_utils.cpp index af7cac8d56..a531f5134e 100644 --- a/cpp/core/file/nifti_utils.cpp +++ b/cpp/core/file/nifti_utils.cpp @@ -358,7 +358,7 @@ template void store(NiftiHeader &NH, const Header &H, const bool is_BE = H.datatype().is_big_endian(); - std::vector axes; + Axes::permutations_type axes; auto M = File::NIfTI::adjust_transform(H, axes); memset(&NH, 0, sizeof(NH)); @@ -545,18 +545,22 @@ template void store(NiftiHeader &NH, const Header &H, const } } -void axes_on_write(const Header &H, std::vector &order, std::array &flip) { +Axes::Shuffle axes_on_write(const Header &H) { Stride::List strides = Stride::get(H); strides.resize(3); - order = Stride::order(strides); - flip = {strides[order[0]] < 0, strides[order[1]] < 0, strides[order[2]] < 0}; + auto order = Stride::order(strides); + Axes::Shuffle result; + result.permutations[0] = order[0]; + result.permutations[1] = order[1]; + result.permutations[2] = order[2]; + result.flips = {strides[order[0]] < 0, strides[order[1]] < 0, strides[order[2]] < 0}; + return result; } -transform_type adjust_transform(const Header &H, std::vector &axes) { - std::array flip; - axes_on_write(H, axes, flip); - - if (axes[0] == 0 && axes[1] == 1 && axes[2] == 2 && !flip[0] && !flip[1] && !flip[2]) +transform_type adjust_transform(const Header &H, Axes::permutations_type &axes) { + const Axes::Shuffle shuffle = axes_on_write(H); + axes = shuffle.permutations; + if (shuffle.is_identity()) return H.transform(); const auto &M_in = H.transform().matrix(); @@ -568,7 +572,7 @@ transform_type adjust_transform(const Header &H, std::vector &axes) { auto translation = M_out.col(3); for (size_t i = 0; i < 3; ++i) { - if (flip[i]) { + if (shuffle.flips[i]) { auto length = default_type(H.size(axes[i]) - 1) * H.spacing(axes[i]); auto axis = M_out.col(i); axis = -axis; diff --git a/cpp/core/file/nifti_utils.h 
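For orientation before the nifti_utils.h declarations that follow: the axes_on_write() refactor above folds the old order/flip output parameters into a single returned Axes::Shuffle. Below is a minimal sketch of the new calling pattern, mirroring how the phase- and slice-encoding code later in this diff consumes it; it assumes axes.h defines Axes::Shuffle with the permutations, flips and is_identity() members used here, and that types.h provides the usual Eigen aliases.

    #include "file/nifti_utils.h"
    #include "header.h"
    #include "types.h"

    // Re-express a per-axis vector (e.g. a phase-encoding direction) in the
    // axis order / polarity that will be used when writing the image to NIfTI.
    Eigen::Vector3d remap_for_nifti_write(const MR::Header &H, const Eigen::Vector3d &dir) {
      const MR::Axes::Shuffle shuffle = MR::File::NIfTI::axes_on_write(H);
      if (shuffle.is_identity())
        return dir; // output strides are already RAS; nothing to remap
      Eigen::Vector3d result;
      for (size_t axis = 0; axis != 3; ++axis)
        result[axis] = shuffle.flips[axis] ? -dir[shuffle.permutations[axis]] : dir[shuffle.permutations[axis]];
      return result;
    }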
b/cpp/core/file/nifti_utils.h index d094b6539f..b57da8f541 100644 --- a/cpp/core/file/nifti_utils.h +++ b/cpp/core/file/nifti_utils.h @@ -29,8 +29,8 @@ class Header; namespace File::NIfTI { extern bool right_left_warning_issued; -void axes_on_write(const Header &H, std::vector &order, std::array &flip); -transform_type adjust_transform(const Header &H, std::vector &order); +Axes::Shuffle axes_on_write(const Header &H); +transform_type adjust_transform(const Header &H, Axes::permutations_type &order); bool check(int VERSION, Header &H, const size_t num_axes, const std::vector &suffixes); diff --git a/cpp/core/header.cpp b/cpp/core/header.cpp index 3aa1e0489d..5df701d79c 100644 --- a/cpp/core/header.cpp +++ b/cpp/core/header.cpp @@ -26,8 +26,10 @@ #include "formats/list.h" #include "image_io/default.h" #include "image_io/scratch.h" +#include "math/math.h" +#include "metadata/phase_encoding.h" +#include "metadata/slice_encoding.h" #include "mrtrix.h" -#include "phase_encoding.h" #include "stride.h" #include "transform.h" @@ -63,38 +65,7 @@ void Header::check(const Header &H) const { throw Exception("scaling coefficients differ between image files for \"" + name() + "\""); } -namespace { -std::string resolve_slice_timing(const std::string &one, const std::string &two) { - if (one == "variable" || two == "variable") - return "variable"; - std::vector one_split = split(one, ","); - std::vector two_split = split(two, ","); - if (one_split.size() != two_split.size()) { - DEBUG("Slice timing vectors of inequal length"); - return "invalid"; - } - // Siemens CSA reports with 2.5ms precision = 0.0025s - // Allow slice times to vary by 1.5x this amount, but no more - for (size_t i = 0; i != one_split.size(); ++i) { - default_type f_one, f_two; - try { - f_one = to(one_split[i]); - f_two = to(two_split[i]); - } catch (Exception &e) { - DEBUG("Error converting slice timing vector to floating-point"); - return "invalid"; - } - const default_type diff = abs(f_two - f_one); - if (diff > 0.00375) { - DEBUG("Supra-threshold difference of " + str(diff) + "s in slice times"); - return "variable"; - } - } - return one; -} -} // namespace - -void Header::merge_keyval(const Header &H) { +void Header::merge_keyval(const KeyValues &in) { std::map new_keyval; std::set unique_comments; for (const auto &item : keyval()) { @@ -107,7 +78,7 @@ void Header::merge_keyval(const Header &H) { new_keyval.insert(item); } } - for (const auto &item : H.keyval()) { + for (const auto &item : in) { if (item.first == "comments") { const auto comments = split_lines(item.second); for (const auto &c : comments) { @@ -118,12 +89,21 @@ void Header::merge_keyval(const Header &H) { } } else { auto it = keyval().find(item.first); - if (it == keyval().end() || it->second == item.second) + if (it == keyval().end() || it->second == item.second) { new_keyval.insert(item); - else if (item.first == "SliceTiming") - new_keyval["SliceTiming"] = resolve_slice_timing(item.second, it->second); - else + } else if (item.first == "SliceTiming") { + new_keyval["SliceTiming"] = Metadata::SliceEncoding::resolve_slice_timing(item.second, it->second); + } else if (item.first == "dw_scheme") { + try { + auto scheme = DWI::resolve_DW_scheme(parse_matrix(item.second), parse_matrix(it->second)); + DWI::set_DW_scheme(new_keyval, scheme); + } catch (Exception &e) { + WARN("Error merging DW gradient tables between headers"); + new_keyval["dw_scheme"] = "variable"; + } + } else { new_keyval[item.first] = "variable"; + } } } std::swap(keyval_, new_keyval); @@ -247,8 
+227,7 @@ Header Header::open(const std::string &image_name) { } // End branching for [] notation H.sanitise(); - if (do_realign_transform) - H.realign_transform(); + H.realign_transform(); } catch (CancelException &e) { throw; } catch (Exception &E) { @@ -349,9 +328,9 @@ Header Header::create(const std::string &image_name, const Header &template_head DWI::clear_DW_scheme(H); } try { - pe_scheme = PhaseEncoding::parse_scheme(template_header); + pe_scheme = Metadata::PhaseEncoding::parse_scheme(template_header.keyval(), template_header); } catch (Exception &) { - PhaseEncoding::clear_scheme(H); + Metadata::PhaseEncoding::clear_scheme(H.keyval()); } if (split_4d_schemes) { try { @@ -362,11 +341,11 @@ Header Header::create(const std::string &image_name, const Header &template_head DWI::clear_DW_scheme(H); } try { - PhaseEncoding::check(pe_scheme, template_header); - PhaseEncoding::set_scheme(H, pe_scheme.row(0)); + Metadata::PhaseEncoding::check(pe_scheme, template_header); + Metadata::PhaseEncoding::set_scheme(H.keyval(), pe_scheme.row(0)); } catch (Exception &) { pe_scheme.resize(0, 0); - PhaseEncoding::clear_scheme(H); + Metadata::PhaseEncoding::clear_scheme(H.keyval()); } } @@ -400,7 +379,7 @@ Header Header::create(const std::string &image_name, const Header &template_head if (dw_scheme.rows()) DWI::set_DW_scheme(header, dw_scheme.row(counter)); if (pe_scheme.rows()) - PhaseEncoding::set_scheme(header, pe_scheme.row(counter)); + Metadata::PhaseEncoding::set_scheme(header.keyval(), pe_scheme.row(counter)); } std::shared_ptr io_handler((*format_handler)->create(header)); assert(io_handler); @@ -431,7 +410,7 @@ Header Header::create(const std::string &image_name, const Header &template_head if (split_4d_schemes) { DWI::set_DW_scheme(H, dw_scheme); - PhaseEncoding::set_scheme(H, pe_scheme); + Metadata::PhaseEncoding::set_scheme(H.keyval(), pe_scheme); } H.io->set_image_is_new(true); H.io->set_readwrite(true); @@ -610,12 +589,19 @@ void Header::sanitise_transform() { } void Header::realign_transform() { + realignment_.orig_transform_ = transform(); + realignment_.applied_transform_ = Realignment::applied_transform_type::Identity(); + realignment_.orig_strides_ = Stride::get(*this); + realignment_.orig_keyval_ = keyval(); + + if (!do_realign_transform) + return; + // find which row of the transform is closest to each scanner axis: - Axes::get_shuffle_to_make_axial(transform(), realign_perm_, realign_flip_); + realignment_.shuffle_ = Axes::get_shuffle_to_make_RAS(transform()); // check if image is already near-axial, return if true: - if (realign_perm_[0] == 0 && realign_perm_[1] == 1 && realign_perm_[2] == 2 && !realign_flip_[0] && - !realign_flip_[1] && !realign_flip_[2]) + if (realignment_.is_identity()) return; auto M(transform()); @@ -623,22 +609,30 @@ void Header::realign_transform() { // modify translation vector: for (size_t i = 0; i < 3; ++i) { - if (realign_flip_[i]) { + if (realignment_.flip(i)) { const default_type length = (size(i) - 1) * spacing(i); auto axis = M.matrix().col(i); for (size_t n = 0; n < 3; ++n) { axis[n] = -axis[n]; translation[n] -= length * axis[n]; } + realignment_.applied_transform_.row(i) *= -1.0; } } // switch and/or invert rows if needed: for (size_t i = 0; i < 3; ++i) { - auto row = M.matrix().row(i).head<3>(); - row = Eigen::RowVector3d(row[realign_perm_[0]], row[realign_perm_[1]], row[realign_perm_[2]]); + auto row_transform = M.matrix().row(i).head<3>(); + row_transform = Eigen::RowVector3d(row_transform[realignment_.permutation(0)], + 
row_transform[realignment_.permutation(1)], + row_transform[realignment_.permutation(2)]); - if (realign_flip_[i]) + auto col_applied = realignment_.applied_transform_.matrix().col(i); + col_applied = Eigen::RowVector3i(col_applied[realignment_.permutation(0)], + col_applied[realignment_.permutation(1)], + col_applied[realignment_.permutation(2)]); + + if (realignment_.flip(i)) stride(i) = -stride(i); } @@ -646,42 +640,17 @@ void Header::realign_transform() { transform() = std::move(M); // switch axes to match: - Axis a[] = {axes_[realign_perm_[0]], axes_[realign_perm_[1]], axes_[realign_perm_[2]]}; + const std::array a = {axes_[realignment_.permutation(0)], // + axes_[realignment_.permutation(1)], // + axes_[realignment_.permutation(2)]}; // axes_[0] = a[0]; axes_[1] = a[1]; axes_[2] = a[2]; INFO("Axes and transform of image \"" + name() + "\" altered to approximate RAS coordinate system"); - // If there's any phase encoding direction information present in the - // header, it's necessary here to update it according to the - // flips / permutations that have taken place - auto pe_scheme = PhaseEncoding::get_scheme(*this); - if (pe_scheme.rows()) { - for (ssize_t row = 0; row != pe_scheme.rows(); ++row) { - Eigen::VectorXd new_line(pe_scheme.row(row)); - for (ssize_t axis = 0; axis != 3; ++axis) { - new_line[axis] = pe_scheme(row, realign_perm_[axis]); - if (new_line[axis] && realign_flip_[realign_perm_[axis]]) - new_line[axis] = -new_line[axis]; - } - pe_scheme.row(row) = new_line; - } - PhaseEncoding::set_scheme(*this, pe_scheme); - INFO("Phase encoding scheme modified to conform to MRtrix3 internal header transform realignment"); - } - - // If there's any slice encoding direction information present in the - // header, that's also necessary to update here - auto slice_encoding_it = keyval().find("SliceEncodingDirection"); - if (slice_encoding_it != keyval().end()) { - const Eigen::Vector3d orig_dir(Axes::id2dir(slice_encoding_it->second)); - Eigen::Vector3d new_dir; - for (size_t axis = 0; axis != 3; ++axis) - new_dir[axis] = orig_dir[realign_perm_[axis]] * (realign_flip_[realign_perm_[axis]] ? -1.0 : 1.0); - slice_encoding_it->second = Axes::dir2id(new_dir); - INFO("Slice encoding direction has been modified to conform to MRtrix3 internal header transform realignment"); - } + Metadata::PhaseEncoding::transform_for_image_load(keyval(), *this); + Metadata::SliceEncoding::transform_for_image_load(keyval(), *this); } Header @@ -752,7 +721,7 @@ concatenate(const std::vector
&headers, const size_t axis_to_concat, con } catch (Exception &) { } try { - pe_scheme = PhaseEncoding::get_scheme(result); + pe_scheme = Metadata::PhaseEncoding::get_scheme(result); } catch (Exception &) { } } @@ -781,7 +750,7 @@ concatenate(const std::vector
&headers, const size_t axis_to_concat, con dw_scheme.resize(0, 0); } try { - const auto extra_pe = PhaseEncoding::get_scheme(H); + const auto extra_pe = Metadata::PhaseEncoding::get_scheme(H); concat_scheme(pe_scheme, extra_pe); } catch (Exception &) { pe_scheme.resize(0, 0); @@ -789,7 +758,7 @@ concatenate(const std::vector
&headers, const size_t axis_to_concat, con } // Resolve key-value pairs - result.merge_keyval(H); + result.merge_keyval(H.keyval()); // Resolve discrepancies in datatype; // also throw an exception if such mismatch is not permitted @@ -805,9 +774,13 @@ concatenate(const std::vector
&headers, const size_t axis_to_concat, con if (axis_to_concat == 3) { DWI::set_DW_scheme(result, dw_scheme); - PhaseEncoding::set_scheme(result, pe_scheme); + Metadata::PhaseEncoding::set_scheme(result.keyval(), pe_scheme); } return result; } +Header::Realignment::Realignment() : applied_transform_(applied_transform_type::Identity()), orig_keyval_() { + orig_transform_.matrix().fill(std::numeric_limits::quiet_NaN()); +} + } // namespace MR diff --git a/cpp/core/header.h b/cpp/core/header.h index 99ea597a3f..b4c0bb2f42 100644 --- a/cpp/core/header.h +++ b/cpp/core/header.h @@ -20,6 +20,7 @@ #include #include "app.h" +#include "axes.h" #include "datatype.h" #include "debug.h" #include "file/mmap.h" @@ -53,12 +54,10 @@ class Header { }; Header() - : transform_(Eigen::Matrix::Constant(NaN)), - format_(nullptr), - offset_(0.0), - scale_(1.0), - realign_perm_{{0, 1, 2}}, - realign_flip_{{false, false, false}} {} + : transform_(Eigen::Matrix::Constant(NaN)), // + format_(nullptr), // + offset_(0.0), // + scale_(1.0) {} // explicit Header(Header &&H) noexcept : axes_(std::move(H.axes_)), @@ -70,8 +69,7 @@ class Header { datatype_(std::move(H.datatype_)), offset_(H.offset_), scale_(H.scale_), - realign_perm_{{0, 1, 2}}, - realign_flip_{{false, false, false}} {} + realignment_(H.realignment_) {} Header &operator=(Header &&H) noexcept { axes_ = std::move(H.axes_); @@ -83,8 +81,7 @@ class Header { datatype_ = std::move(H.datatype_); offset_ = H.offset_; scale_ = H.scale_; - realign_perm_ = H.realign_perm_; - realign_flip_ = H.realign_flip_; + realignment_ = H.realignment_; return *this; } @@ -100,8 +97,7 @@ class Header { datatype_(H.datatype_), offset_(datatype().is_integer() ? H.offset_ : 0.0), scale_(datatype().is_integer() ? H.scale_ : 1.0), - realign_perm_(H.realign_perm_), - realign_flip_(H.realign_flip_) {} + realignment_(H.realignment_) {} //! copy constructor from type of class derived from Header /*! This invokes the standard Header(const Header&) copy-constructor. */ @@ -110,7 +106,9 @@ class Header { Header(const HeaderType &original) : Header(static_cast(original)) {} //! copy constructor from type of class other than Header - /*! This copies all relevant parameters over from \a original. */ + /*! This copies all relevant parameters over from \a original. + * Note that information about transform realignment on image load will not be available. + */ template ::value, void *>::type = nullptr> Header(const HeaderType &original) @@ -120,9 +118,7 @@ class Header { format_(nullptr), datatype_(DataType::from()), offset_(0.0), - scale_(1.0), - realign_perm_{{0, 1, 2}}, - realign_flip_{{false, false, false}} { + scale_(1.0) { axes_.resize(original.ndim()); for (size_t n = 0; n < original.ndim(); ++n) { size(n) = original.size(n); @@ -143,8 +139,7 @@ class Header { datatype_ = H.datatype_; offset_ = datatype().is_integer() ? H.offset_ : 0.0; scale_ = datatype().is_integer() ? H.scale_ : 1.0; - realign_perm_ = H.realign_perm_; - realign_flip_ = H.realign_flip_; + realignment_ = H.realignment_; io.reset(); return *this; } @@ -175,8 +170,7 @@ class Header { datatype_ = DataType::from(); offset_ = 0.0; scale_ = 1.0; - realign_perm_ = {{0, 1, 2}}; - realign_flip_ = {{false, false, false}}; + realignment_ = Realignment(); io.reset(); return *this; } @@ -207,11 +201,42 @@ class Header { //! 
get/set the 4x4 affine transformation matrix mapping image to world coordinates transform_type &transform() { return transform_; } + // Class to store all information relating to internal transform realignment + class Realignment { + public: + // From one image space to another image space; + // linear component is permutations & flips only, + // transformation is in voxel count, + // therefore can store as integer + using applied_transform_type = Eigen::Matrix; + Realignment(); + Realignment(Header &); + bool is_identity() const { return shuffle_.is_identity(); } + bool valid() const { return shuffle_.valid(); } + const Axes::permutations_type &permutations() const { return shuffle_.permutations; } + size_t permutation(const size_t axis) const { + assert(axis < 3); + return shuffle_.permutations[axis]; + } + const Axes::flips_type &flips() const { return shuffle_.flips; } + bool flip(const size_t axis) const { + assert(axis < 3); + return shuffle_.flips[axis]; + } + const transform_type &orig_transform() const { return orig_transform_; } + const applied_transform_type &applied_transform() const { return applied_transform_; } + KeyValues &orig_keyval() { return orig_keyval_; } + + private: + Axes::Shuffle shuffle_; + transform_type orig_transform_; + Stride::List orig_strides_; + applied_transform_type applied_transform_; + KeyValues orig_keyval_; + friend class Header; + }; //! get information on how the transform was modified on image load - void realignment(std::array &perm, std::array &flip) const { - perm = realign_perm_; - flip = realign_flip_; - } + const Realignment &realignment() const { return realignment_; } class NDimProxy { public: @@ -359,8 +384,8 @@ class Header { const KeyValues &keyval() const { return keyval_; } //! get/set generic key/value text attributes KeyValues &keyval() { return keyval_; } - //! merge key/value entries from another header - void merge_keyval(const Header &H); + //! merge key/value entries from another dictionary + void merge_keyval(const KeyValues &); static Header open(const std::string &image_name); static Header @@ -397,8 +422,7 @@ class Header { void realign_transform(); /*! store information about how image was * realigned via realign_transform(). */ - std::array realign_perm_; - std::array realign_flip_; + Realignment realignment_; void sanitise_voxel_sizes(); void sanitise_transform(); diff --git a/cpp/core/image_io/null.cpp b/cpp/core/image_io/null.cpp new file mode 100644 index 0000000000..a0b1bc6dbc --- /dev/null +++ b/cpp/core/image_io/null.cpp @@ -0,0 +1,32 @@ +/* Copyright (c) 2008-2025 the MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. + * + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. + * + * For more details, see http://www.mrtrix.org/. 
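The Header::Realignment record introduced above replaces the loose realign_perm_ / realign_flip_ pair; its applied_transform() is the integer permutation-and-flip matrix applied to the first three axes on image load. A minimal sketch of how axis-encoded metadata can be re-oriented with it, mirroring the SliceEncodingDirection handling added later in this diff; Metadata::BIDS::axis_vector_type is assumed to be the small integer 3-vector declared in metadata/bids.h (its template arguments are elided in this extract).

    #include <string>
    #include "header.h"
    #include "metadata/bids.h"

    // Re-express a BIDS axis identifier (e.g. "j-") in the axes of the realigned image.
    std::string realign_axisid(const MR::Header &H, const std::string &axisid) {
      if (H.realignment().is_identity())
        return axisid; // image was already near-RAS; nothing was permuted or flipped on load
      const MR::Metadata::BIDS::axis_vector_type orig = MR::Metadata::BIDS::axisid2vector(axisid);
      const MR::Metadata::BIDS::axis_vector_type rotated = H.realignment().applied_transform() * orig;
      return MR::Metadata::BIDS::vector2axisid(rotated);
    }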
+ */ + +#include "image_io/null.h" +#include "header.h" + +namespace MR { +namespace ImageIO { + +void Null::load(const Header &header, size_t) { + throw Exception("No suitable handler to access data in \"" + header.name() + "\""); +} + +void Null::unload(const Header &header) { + throw Exception("No suitable handler to access data in \"" + header.name() + "\""); +} + +} // namespace ImageIO +} // namespace MR diff --git a/cpp/core/image_io/null.h b/cpp/core/image_io/null.h new file mode 100644 index 0000000000..565e4c6e59 --- /dev/null +++ b/cpp/core/image_io/null.h @@ -0,0 +1,37 @@ +/* Copyright (c) 2008-2025 the MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. + * + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. + * + * For more details, see http://www.mrtrix.org/. + */ + +#ifndef __image_io_null_h__ +#define __image_io_null_h__ + +#include "image_io/base.h" + +namespace MR { +namespace ImageIO { + +class Null : public Base { +public: + Null(const Header &header) : Base(header) {} + +protected: + virtual void load(const Header &, size_t); + virtual void unload(const Header &); +}; + +} // namespace ImageIO +} // namespace MR + +#endif diff --git a/cpp/core/math/average_space.cpp b/cpp/core/math/average_space.cpp index d9eb75c7db..c22d4cd841 100644 --- a/cpp/core/math/average_space.cpp +++ b/cpp/core/math/average_space.cpp @@ -231,7 +231,7 @@ void compute_average_voxel2scanner( break; case avgspace_voxspacing_t::MIN_NEAREST: case avgspace_voxspacing_t::MEAN_NEAREST: { - std::array perm = Axes::closest(M); + const Axes::permutations_type perm = Axes::closest(M); for (size_t axis = 0; axis != 3; ++axis) rot_vox_size(axis, itrafo) = input_headers[itrafo].spacing(perm[axis]); } break; diff --git a/cpp/core/metadata/bids.cpp b/cpp/core/metadata/bids.cpp new file mode 100644 index 0000000000..93fd45f83a --- /dev/null +++ b/cpp/core/metadata/bids.cpp @@ -0,0 +1,71 @@ +/* Copyright (c) 2008-2025 the MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. + * + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. + * + * For more details, see http://www.mrtrix.org/. 
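The ImageIO::Null handler above pairs with the mapper change earlier in this diff, which now records an unsupported transfer syntax in a flag rather than throwing immediately. The sketch below illustrates the apparent intent only: the hunk that actually installs the Null handler is truncated in this extract, and get_image<float>() is an assumed signature. Header fields of such a series would remain readable, with the error surfacing only when voxel data are requested.

    #include <iostream>
    #include <string>
    #include "header.h"
    #include "image.h"

    void demo_deferred_error(const std::string &dicom_dir) {
      MR::Header H = MR::Header::open(dicom_dir); // metadata alone can still be read
      std::cout << H.name() << ": " << H.ndim() << " axes\n";
      auto data = H.get_image<float>(); // ImageIO::Null::load() throws:
                                        // "No suitable handler to access data in ..."
    }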
+ */ + +#include "metadata/bids.h" + +#include "exception.h" +#include "mrtrix.h" + +namespace MR::Metadata::BIDS { + +std::string vector2axisid(const axis_vector_type &dir) { + if (dir[0] == -1) { + assert(!dir[1]); + assert(!dir[2]); + return "i-"; + } else if (dir[0] == 1) { + assert(!dir[1]); + assert(!dir[2]); + return "i"; + } else if (dir[1] == -1) { + assert(!dir[0]); + assert(!dir[2]); + return "j-"; + } else if (dir[1] == 1) { + assert(!dir[0]); + assert(!dir[2]); + return "j"; + } else if (dir[2] == -1) { + assert(!dir[0]); + assert(!dir[1]); + return "k-"; + } else if (dir[2] == 1) { + assert(!dir[0]); + assert(!dir[1]); + return "k"; + } else { + throw Exception("Malformed image axis vector: \"" + str(dir.transpose()) + "\""); + } +} + +axis_vector_type axisid2vector(const std::string &id) { + if (id == "i-") + return {-1, 0, 0}; + else if (id == "i") + return {1, 0, 0}; + else if (id == "j-") + return {0, -1, 0}; + else if (id == "j") + return {0, 1, 0}; + else if (id == "k-") + return {0, 0, -1}; + else if (id == "k") + return {0, 0, 1}; + else + throw Exception("Malformed image axis identifier: \"" + id + "\""); +} + +} // namespace MR::Metadata::BIDS diff --git a/cpp/core/metadata/bids.h b/cpp/core/metadata/bids.h new file mode 100644 index 0000000000..abd8cf1f51 --- /dev/null +++ b/cpp/core/metadata/bids.h @@ -0,0 +1,33 @@ +/* Copyright (c) 2008-2025 the MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. + * + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. + * + * For more details, see http://www.mrtrix.org/. + */ + +#pragma once + +#include +#include + +namespace MR::Metadata::BIDS { + +//! convert axis directions between formats +/*! these helper functions convert the definition of + * phase-encoding direction between a 3-vector (e.g. + * [0 1 0] ) and a BIDS NIfTI axis identifier (e.g. 'i-') + */ +using axis_vector_type = Eigen::Matrix; +std::string vector2axisid(const axis_vector_type &); +axis_vector_type axisid2vector(const std::string &); + +} // namespace MR::Metadata::BIDS diff --git a/cpp/core/metadata/phase_encoding.cpp b/cpp/core/metadata/phase_encoding.cpp new file mode 100644 index 0000000000..32691ceef7 --- /dev/null +++ b/cpp/core/metadata/phase_encoding.cpp @@ -0,0 +1,347 @@ +/* Copyright (c) 2008-2025 the MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. + * + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. + * + * For more details, see http://www.mrtrix.org/. 
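The Metadata::BIDS helpers above convert between signed unit axis vectors and BIDS axis identifiers, and throw on anything else. A short usage sketch; axis_vector_type is assumed to be a fixed-size 3-vector of int, consistent with the brace-initialised returns above.

    #include <cassert>
    #include "metadata/bids.h"

    void demo_axis_identifiers() {
      using namespace MR::Metadata::BIDS;
      assert(vector2axisid(axis_vector_type(0, -1, 0)) == "j-"); // -j maps to "j-"
      assert(vector2axisid(axisid2vector("k")) == "k");          // round trip is lossless
      // non-unit or oblique vectors (e.g. {1, 1, 0}) and unknown identifiers ("x")
      // are rejected with an MR::Exception rather than silently mapped
    }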
+ */ + +#include "metadata/phase_encoding.h" + +#include "exception.h" + +namespace MR::Metadata::PhaseEncoding { + +// clang-format off +using namespace App; +const OptionGroup ImportOptions = + OptionGroup("Options for importing phase-encode tables") + + Option("import_pe_table", "import a phase-encoding table from file") + + Argument("file").type_file_in() + + Option("import_pe_eddy", "import phase-encoding information from an EDDY-style config / index file pair") + + Argument("config").type_file_in() + + Argument("indices").type_file_in(); + +const OptionGroup SelectOptions = + OptionGroup("Options for selecting volumes based on phase-encoding") + + Option("pe", + "select volumes with a particular phase encoding;" + " this can be three comma-separated values" + " (for i,j,k components of vector direction)" + " or four (direction & total readout time)") + + Argument("desc").type_sequence_float(); + +const OptionGroup ExportOptions = + OptionGroup("Options for exporting phase-encode tables") + + Option("export_pe_table", "export phase-encoding table to file") + + Argument("file").type_file_out() + + Option("export_pe_eddy", "export phase-encoding information to an EDDY-style config / index file pair") + + Argument("config").type_file_out() + + Argument("indices").type_file_out(); +// clang-format on + +void check(const scheme_type &PE) { + if (!PE.rows()) + throw Exception("No valid phase encoding table found"); + + if (PE.cols() < 3) + throw Exception("Phase-encoding matrix must have at least 3 columns"); + + for (ssize_t row = 0; row != PE.rows(); ++row) { + for (ssize_t axis = 0; axis != 3; ++axis) { + if (std::round(PE(row, axis)) != PE(row, axis)) + throw Exception("Phase-encoding matrix contains non-integral axis designation"); + } + } +} + +void check(const scheme_type &PE, const Header &header) { + check(PE); + const ssize_t num_volumes = (header.ndim() > 3) ? 
header.size(3) : 1; + if (num_volumes != PE.rows()) + throw Exception("Number of volumes in image \"" + header.name() + "\"" // + + " (" + str(num_volumes) + ")" // + + " does not match that in phase encoding table" // + + " (" + str(PE.rows()) + ")"); // +} + +namespace { +void erase(KeyValues &keyval, const std::string &s) { + auto it = keyval.find(s); + if (it != keyval.end()) + keyval.erase(it); +}; +} // namespace +void set_scheme(KeyValues &keyval, const scheme_type &PE) { + if (!PE.rows()) { + erase(keyval, "pe_scheme"); + erase(keyval, "PhaseEncodingDirection"); + erase(keyval, "TotalReadoutTime"); + return; + } + std::string pe_scheme; + std::string first_line; + bool variation = false; + for (ssize_t row = 0; row < PE.rows(); ++row) { + std::string line = str(PE(row, 0)); + for (ssize_t col = 1; col < PE.cols(); ++col) + line += "," + str(PE(row, col), 3); + add_line(pe_scheme, line); + if (first_line.empty()) + first_line = line; + else if (line != first_line) + variation = true; + } + if (variation) { + keyval["pe_scheme"] = pe_scheme; + erase(keyval, "PhaseEncodingDirection"); + erase(keyval, "TotalReadoutTime"); + } else { + erase(keyval, "pe_scheme"); + const Metadata::BIDS::axis_vector_type dir{int(PE(0, 0)), int(PE(0, 1)), int(PE(0, 2))}; + keyval["PhaseEncodingDirection"] = Metadata::BIDS::vector2axisid(dir); + if (PE.cols() >= 4) + keyval["TotalReadoutTime"] = str(PE(0, 3), 3); + else + erase(keyval, "TotalReadoutTime"); + } +} + +void clear_scheme(KeyValues &keyval) { + auto erase = [&](const std::string &s) { + auto it = keyval.find(s); + if (it != keyval.end()) + keyval.erase(it); + }; + erase("pe_scheme"); + erase("PhaseEncodingDirection"); + erase("TotalReadoutTime"); +} + +scheme_type parse_scheme(const KeyValues &keyval, const Header &header) { + scheme_type PE; + const auto it = keyval.find("pe_scheme"); + if (it != keyval.end()) { + try { + PE = MR::parse_matrix(it->second); + } catch (Exception &e) { + throw Exception(e, "malformed PE scheme associated with image \"" + header.name() + "\""); + } + if (ssize_t(PE.rows()) != ((header.ndim() > 3) ? header.size(3) : 1)) + throw Exception("malformed PE scheme associated with image \"" + header.name() + "\":" + // + " number of rows does not equal number of volumes"); + } else { + const auto it_dir = keyval.find("PhaseEncodingDirection"); + if (it_dir != keyval.end()) { + const auto it_time = keyval.find("TotalReadoutTime"); + const size_t cols = it_time == keyval.end() ? 3 : 4; + Eigen::Matrix row(cols); + try { + row.head(3) = BIDS::axisid2vector(it_dir->second).cast(); + } catch (Exception &e) { + throw Exception( // + e, // + std::string("malformed phase encoding direction") // + + " associated with image \"" + header.name() + "\""); // + } + if (it_time != keyval.end()) { + try { + row[3] = to(it_time->second); + } catch (Exception &e) { + throw Exception(e, "Error adding readout time to phase encoding table"); + } + } + PE.resize((header.ndim() > 3) ? 
header.size(3) : 1, cols); + PE.rowwise() = row.transpose(); + } + } + return PE; +} + +scheme_type get_scheme(const Header &header) { + DEBUG("searching for suitable phase encoding data..."); + using namespace App; + scheme_type result; + + try { + const auto opt_table = get_options("import_pe_table"); + if (!opt_table.empty()) + result = load(opt_table[0][0], header); + const auto opt_eddy = get_options("import_pe_eddy"); + if (!opt_eddy.empty()) { + if (!opt_table.empty()) + throw Exception("Phase encoding table can be provided" // + " using either -import_pe_table or -import_pe_eddy option," // + " but NOT both"); // + result = load_eddy(opt_eddy[0][0], opt_eddy[0][1], header); + } + if (opt_table.empty() && opt_eddy.empty()) + result = parse_scheme(header.keyval(), header); + } catch (Exception &e) { + throw Exception(e, "error importing phase encoding table for image \"" + header.name() + "\""); + } + + if (result.rows() == 0) + return result; + + if (result.cols() < 3) + throw Exception("unexpected phase encoding table matrix dimensions"); + + INFO("found " + str(result.rows()) + "x" + str(result.cols()) + " phase encoding table"); + + return result; +} + +void transform_for_image_load(KeyValues &keyval, const Header &H) { + scheme_type pe_scheme; + try { + pe_scheme = parse_scheme(keyval, H); + } catch (Exception &e) { + WARN(std::string("Unable to conform phase encoding information to image realignment ") // + + " for image \"" + H.name() + "\"; erasing"); // + clear_scheme(keyval); + return; + } + if (pe_scheme.rows() == 0) { + DEBUG("No phase encoding information found for transformation with load of image \"" + H.name() + "\""); + return; + } + if (H.realignment().is_identity()) { + INFO("No transformation of phase encoding data for load of image \"" + H.name() + "\" required"); + return; + } + set_scheme(keyval, transform_for_image_load(pe_scheme, H)); + INFO("Phase encoding data transformed to match RAS realignment of image \"" + H.name() + "\""); +} + +scheme_type transform_for_image_load(const scheme_type &pe_scheme, const Header &H) { + if (H.realignment().is_identity()) + return pe_scheme; + scheme_type result(pe_scheme.rows(), pe_scheme.cols()); + for (ssize_t row = 0; row != pe_scheme.rows(); ++row) { + Eigen::VectorXd new_line = pe_scheme.row(row); + new_line.head<3>() = (H.realignment().applied_transform() * new_line.head<3>().cast()).cast(); + result.row(row) = new_line; + } + return result; +} + +void transform_for_nifti_write(KeyValues &keyval, const Header &H) { + scheme_type pe_scheme = parse_scheme(keyval, H); + if (pe_scheme.rows() == 0) { + DEBUG("No phase encoding information found for transformation with save of NIfTI image \"" + H.name() + "\""); + return; + } + set_scheme(keyval, transform_for_nifti_write(pe_scheme, H)); +} + +scheme_type transform_for_nifti_write(const scheme_type &pe_scheme, const Header &H) { + if (pe_scheme.rows() == 0) + return pe_scheme; + Axes::Shuffle shuffle = File::NIfTI::axes_on_write(H); + if (shuffle.is_identity()) { + INFO("No transformation of phase encoding data required for export to file:" + " output image will be RAS"); + return pe_scheme; + } + scheme_type result(pe_scheme.rows(), pe_scheme.cols()); + for (ssize_t row = 0; row != pe_scheme.rows(); ++row) { + Eigen::VectorXd new_line = pe_scheme.row(row); + for (ssize_t axis = 0; axis != 3; ++axis) + new_line[axis] = // + pe_scheme(row, shuffle.permutations[axis]) && shuffle.flips[axis] // + ? 
-pe_scheme(row, shuffle.permutations[axis]) // + : pe_scheme(row, shuffle.permutations[axis]); // + result.row(row) = new_line; + } + INFO("Phase encoding data transformed to match NIfTI / MGH image export prior to writing to file"); + return result; +} + +void scheme2eddy(const scheme_type &PE, Eigen::MatrixXd &config, Eigen::Array &indices) { + try { + check(PE); + } catch (Exception &e) { + throw Exception(e, "Cannot convert phase-encoding scheme to eddy format"); + } + if (PE.cols() != 4) + throw Exception("Phase-encoding matrix requires 4 columns to convert to eddy format"); + config.resize(0, 0); + indices = Eigen::Array::Constant(PE.rows(), PE.rows()); + for (ssize_t PE_row = 0; PE_row != PE.rows(); ++PE_row) { + for (ssize_t config_row = 0; config_row != config.rows(); ++config_row) { + bool dir_match = PE.template block<1, 3>(PE_row, 0).isApprox(config.block<1, 3>(config_row, 0)); + bool time_match = abs(PE(PE_row, 3) - config(config_row, 3)) < 1e-3; + if (dir_match && time_match) { + // FSL-style index file indexes from 1 + indices[PE_row] = config_row + 1; + break; + } + } + if (indices[PE_row] == PE.rows()) { + // No corresponding match found in config matrix; create a new entry + config.conservativeResize(config.rows() + 1, 4); + config.row(config.rows() - 1) = PE.row(PE_row); + indices[PE_row] = config.rows(); + } + } +} + +scheme_type eddy2scheme(const Eigen::MatrixXd &config, const Eigen::Array &indices) { + if (config.cols() != 4) + throw Exception("Expected 4 columns in EDDY-format phase-encoding config file"); + scheme_type result(indices.size(), 4); + for (ssize_t row = 0; row != indices.size(); ++row) { + if (indices[row] > config.rows()) + throw Exception("Malformed EDDY-style phase-encoding information:" + " index exceeds number of config entries"); + result.row(row) = config.row(indices[row] - 1); + } + return result; +} + +void export_commandline(const Header &header) { + auto check = [&](const scheme_type &m) -> const scheme_type & { + if (m.rows() == 0) + throw Exception("no phase-encoding information found within image \"" + header.name() + "\""); + return m; + }; + + auto scheme = parse_scheme(header.keyval(), header); + + auto opt = get_options("export_pe_table"); + if (!opt.empty()) + save(check(scheme), header, opt[0][0]); + + opt = get_options("export_pe_eddy"); + if (!opt.empty()) + save_eddy(check(scheme), header, opt[0][0], opt[0][1]); +} + +scheme_type load(const std::string &path, const Header &header) { + const scheme_type PE = File::Matrix::load_matrix(path); + check(PE, header); + // As with JSON import, need to query the header to discover if the + // strides / transform were modified on image load to make the image + // data appear approximately axial, in which case we need to apply the + // same transforms to the phase encoding data on load + return transform_for_image_load(PE, header); +} + +scheme_type load_eddy(const std::string &config_path, const std::string &index_path, const Header &header) { + const Eigen::MatrixXd config = File::Matrix::load_matrix(config_path); + const Eigen::Array indices = File::Matrix::load_vector(index_path); + const scheme_type PE = eddy2scheme(config, indices); + check(PE, header); + return transform_for_image_load(PE, header); +} + +} // namespace MR::Metadata::PhaseEncoding diff --git a/cpp/core/metadata/phase_encoding.h b/cpp/core/metadata/phase_encoding.h new file mode 100644 index 0000000000..4762c506f2 --- /dev/null +++ b/cpp/core/metadata/phase_encoding.h @@ -0,0 +1,145 @@ +/* Copyright (c) 2008-2025 the 
MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. + * + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. + * + * For more details, see http://www.mrtrix.org/. + */ + +#pragma once + +#include +#include + +#include "app.h" +#include "axes.h" +#include "file/matrix.h" +#include "file/nifti_utils.h" +#include "file/ofstream.h" +#include "header.h" +#include "metadata/bids.h" +#include "types.h" + +namespace MR::Metadata::PhaseEncoding { + +extern const App::OptionGroup ImportOptions; +extern const App::OptionGroup SelectOptions; +extern const App::OptionGroup ExportOptions; + +using scheme_type = Eigen::MatrixXd; + +//! check that a phase-encoding table is valid +void check(const scheme_type &PE); + +//! check that the PE scheme matches the DWI data in \a header +void check(const scheme_type &PE, const Header &header); + +//! store the phase encoding matrix in a header +/*! this will store the phase encoding matrix into the + * Header::keyval() structure of \a header. + * - If the phase encoding direction and/or total readout + * time varies between volumes, then the information + * will be stored in field "pe_scheme"; if not, it + * will instead be stored in fields "PhaseEncodingDirection" + * and "TotalReadoutTime" + */ +void set_scheme(KeyValues &keyval, const scheme_type &PE); + +//! clear the phase encoding matrix from a key-value dictionary +/*! this will delete any trace of phase encoding information + * from the dictionary. + */ +void clear_scheme(KeyValues &keyval); + +//! parse the phase encoding matrix from a key-value dictionary +/*! extract the phase encoding matrix stored in the key-value dictionary + * if one is present. The key-value dictionary is not in all use cases + * the "keyval" member of the Header class. + */ +Eigen::MatrixXd parse_scheme(const KeyValues &, const Header &); + +//! get a phase encoding matrix +/*! get a valid phase-encoding matrix, either from files specified at + * the command-line that exclusively provide phase encoding information + * (i.e. NOT from .json; that is handled elsewhere), + * or from the contents of the image header. + */ +Eigen::MatrixXd get_scheme(const Header &); + +//! Convert a phase-encoding scheme into the EDDY config / indices format +void scheme2eddy(const scheme_type &PE, Eigen::MatrixXd &config, Eigen::Array &indices); + +//! Convert phase-encoding information from the EDDY config / indices format into a standard scheme +scheme_type eddy2scheme(const Eigen::MatrixXd &, const Eigen::Array &); + +//! Modifies a phase encoding scheme if being imported alongside a non-RAS image +// and internal header realignment is performed +void transform_for_image_load(KeyValues &keyval, const Header &H); +scheme_type transform_for_image_load(const scheme_type &pe_scheme, const Header &H); + +//! 
Modifies a phase encoding scheme if being exported alongside a NIfTI image +void transform_for_nifti_write(KeyValues &keyval, const Header &H); +scheme_type transform_for_nifti_write(const scheme_type &pe_scheme, const Header &H); + +namespace { +void __save(const scheme_type &PE, const std::string &path) { + File::OFStream out(path); + for (ssize_t row = 0; row != PE.rows(); ++row) { + // Write phase-encode direction as integers; other information as floating-point + out << PE.template block<1, 3>(row, 0).template cast(); + if (PE.cols() > 3) + out << " " << PE.block(row, 3, 1, PE.cols() - 3); + out << "\n"; + } +} +} // namespace + +//! Save a phase-encoding scheme to file +// Note that because the output table requires permutation / sign flipping +// only if the output target image is a NIfTI, the output file name must have +// already been set +template void save(const scheme_type &PE, const HeaderType &header, const std::string &path) { + try { + check(PE, header); + } catch (Exception &e) { + throw Exception(e, "Cannot export phase-encoding table to file \"" + path + "\""); + } + + if (Path::has_suffix(header.name(), {".mgh", ".mgz", ".nii", ".nii.gz", ".img"})) { + __save(transform_for_nifti_write(PE, header), path); + } else { + __save(PE, path); + } +} + +//! Save a phase-encoding scheme to EDDY format config / index files +template +void save_eddy(const scheme_type &PE, + const HeaderType &header, + const std::string &config_path, + const std::string &index_path) { + Eigen::MatrixXd config; + Eigen::Array indices; + scheme2eddy(transform_for_nifti_write(PE, header), config, indices); + File::Matrix::save_matrix(config, config_path, KeyValues(), false); + File::Matrix::save_vector(indices, index_path, KeyValues(), false); +} + +//! Save the phase-encoding scheme from a header to file depending on command-line options +void export_commandline(const Header &); + +//! Load a phase-encoding scheme from a matrix text file +scheme_type load(const std::string &path, const Header &header); + +//! Load a phase-encoding scheme from an EDDY-format config / indices file pair +scheme_type load_eddy(const std::string &config_path, const std::string &index_path, const Header &header); + +} // namespace MR::Metadata::PhaseEncoding diff --git a/cpp/core/metadata/slice_encoding.cpp b/cpp/core/metadata/slice_encoding.cpp new file mode 100644 index 0000000000..8836b1f5b2 --- /dev/null +++ b/cpp/core/metadata/slice_encoding.cpp @@ -0,0 +1,155 @@ +/* Copyright (c) 2008-2025 the MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. + * + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. + * + * For more details, see http://www.mrtrix.org/. 
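Returning to the phase-encoding code above: set_scheme() collapses a table whose rows are identical into the BIDS-style "PhaseEncodingDirection" / "TotalReadoutTime" fields, and only falls back to a per-volume "pe_scheme" entry when rows differ; parse_scheme() reverses this. A minimal sketch, assuming KeyValues is the string-to-string map from types.h and treating the exact numeric formatting of TotalReadoutTime as approximate.

    #include "metadata/phase_encoding.h"
    #include "types.h"

    void demo_set_scheme() {
      MR::KeyValues keyval;
      MR::Metadata::PhaseEncoding::scheme_type scheme(2, 4);
      scheme << 0, 1, 0, 0.05,  // volume 0: phase encoding along +j, ~50 ms total readout
                0, 1, 0, 0.05;  // volume 1: identical
      MR::Metadata::PhaseEncoding::set_scheme(keyval, scheme);
      // keyval now holds "PhaseEncodingDirection" -> "j" and "TotalReadoutTime" -> "0.05"

      scheme.row(1) << 0, -1, 0, 0.05; // make volume 1 differ (phase encoding along -j)
      MR::Metadata::PhaseEncoding::set_scheme(keyval, scheme);
      // the two BIDS fields are erased and a two-row "pe_scheme" entry is stored instead
    }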
+ */ + +#include "metadata/slice_encoding.h" + +#include "axes.h" +#include "file/nifti_utils.h" +#include "header.h" +#include "metadata/bids.h" + +namespace MR::Metadata::SliceEncoding { + +void transform_for_image_load(KeyValues &keyval, const Header &header) { + // If there's any slice encoding direction information present in the + // header, that's also necessary to update here + auto slice_encoding_it = keyval.find("SliceEncodingDirection"); + auto slice_timing_it = keyval.find("SliceTiming"); + if (!(slice_encoding_it == keyval.end() && slice_timing_it == keyval.end())) { + if (header.realignment().is_identity()) { + INFO("No transformation of slice encoding direction for load of image \"" + header.name() + "\" required"); + return; + } + Metadata::BIDS::axis_vector_type orig_dir = Metadata::BIDS::axis_vector_type({0, 0, 1}); + if (slice_encoding_it != keyval.end()) { + try { + orig_dir = Metadata::BIDS::axisid2vector(slice_encoding_it->second); + } catch (Exception &e) { + // clang-format off + INFO("Unable to conform slice encoding direction \"" + slice_encoding_it->second + "\"" + " to image realignment for image \"" + header.name() + "\";" + " erasing"); + // clang-format on + clear(keyval); + return; + } + } + const Metadata::BIDS::axis_vector_type new_dir = header.realignment().applied_transform() * orig_dir; + if (slice_encoding_it != keyval.end()) { + slice_encoding_it->second = Metadata::BIDS::vector2axisid(new_dir); + // clang-format off + INFO("Slice encoding direction has been modified" + " to conform to MRtrix3 internal header transform realignment" + " of image \"" + header.name() + "\""); + // clang-format on + } else if ((new_dir * -1).dot(orig_dir) == 1) { + auto slice_timing = parse_floats(slice_timing_it->second); + std::reverse(slice_timing.begin(), slice_timing.end()); + slice_timing_it->second = join(slice_timing, ","); + // clang-format off + INFO("Slice timing vector reversed" + " to conform to MRtrix3 internal transform realignment" + " of image \"" + header.name() + "\""); + // clang-format on + } else { + keyval["SliceEncodingDirection"] = Metadata::BIDS::vector2axisid(new_dir); + // clang-format off + WARN("Slice encoding direction of image \"" + header.name() + "\"" + " inferred to be \"k\"" + " in order to preserve interpretation of existing \"SliceTiming\" field" + " after MRtrix3 internal transform realignment"); + // clang-format on + } + } +} + +void transform_for_nifti_write(KeyValues &keyval, const Header &H) { + auto slice_encoding_it = keyval.find("SliceEncodingDirection"); + auto slice_timing_it = keyval.find("SliceTiming"); + if (slice_encoding_it == keyval.end() && slice_timing_it == keyval.end()) + return; + + const Axes::Shuffle shuffle = File::NIfTI::axes_on_write(H); + if (shuffle.is_identity()) { + INFO("No need to transform slice encoding information for NIfTI image write:" + " image is already RAS"); + return; + } + + const Metadata::BIDS::axis_vector_type // + orig_dir(slice_encoding_it == keyval.end() // + ? Metadata::BIDS::axis_vector_type({0, 0, 1}) // + : Metadata::BIDS::axisid2vector(slice_encoding_it->second)); // + Metadata::BIDS::axis_vector_type new_dir; + for (size_t axis = 0; axis != 3; ++axis) + new_dir[axis] = shuffle.flips[axis] ? 
-orig_dir[shuffle.permutations[axis]] : orig_dir[shuffle.permutations[axis]]; + + if (slice_encoding_it != keyval.end()) { + slice_encoding_it->second = Metadata::BIDS::vector2axisid(new_dir); + INFO("Slice encoding direction modified according to output NIfTI strides"); + } else if ((new_dir * -1).dot(orig_dir) == 1) { + auto slice_timing = parse_floats(slice_timing_it->second); + std::reverse(slice_timing.begin(), slice_timing.end()); + slice_timing_it->second = join(slice_timing, ","); + INFO("Slice timing vector reversed to conform to output NIfTI strides"); + } else { + keyval["SliceEncodingDirection"] = Metadata::BIDS::vector2axisid(new_dir); + WARN("Slice encoding direction added to metadata" + " in order to preserve interpretation of existing \"SliceTiming\" field" + " following output NIfTI strides"); + } +} + +std::string resolve_slice_timing(const std::string &one, const std::string &two) { + if (one == "variable" || two == "variable") + return "variable"; + std::vector one_split = split(one, ","); + std::vector two_split = split(two, ","); + if (one_split.size() != two_split.size()) { + DEBUG("Slice timing vectors of inequal length"); + return "invalid"; + } + // Siemens CSA reports with 2.5ms precision = 0.0025s + // Allow slice times to vary by 1.5x this amount, but no more + for (size_t i = 0; i != one_split.size(); ++i) { + default_type f_one, f_two; + try { + f_one = to(one_split[i]); + f_two = to(two_split[i]); + } catch (Exception &e) { + DEBUG("Error converting slice timing vector to floating-point"); + return "invalid"; + } + const default_type diff = abs(f_two - f_one); + if (diff > 0.00375) { + DEBUG("Supra-threshold difference of " + str(diff) + "s in slice times"); + return "variable"; + } + } + return one; +} + +void clear(KeyValues &keyval) { + auto erase = [](KeyValues &keyval, const std::string &s) { + auto it = keyval.find(s); + if (it != keyval.end()) + keyval.erase(it); + }; + erase(keyval, "SliceEncodingDirection"); + erase(keyval, "SliceTiming"); +} + +} // namespace MR::Metadata::SliceEncoding diff --git a/cpp/core/metadata/slice_encoding.h b/cpp/core/metadata/slice_encoding.h new file mode 100644 index 0000000000..462a55e399 --- /dev/null +++ b/cpp/core/metadata/slice_encoding.h @@ -0,0 +1,37 @@ +/* Copyright (c) 2008-2025 the MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. + * + * Covered Software is provided under this License on an "as is" + * basis, without warranty of any kind, either expressed, implied, or + * statutory, including, without limitation, warranties that the + * Covered Software is free of defects, merchantable, fit for a + * particular purpose or non-infringing. + * See the Mozilla Public License v. 2.0 for more details. + * + * For more details, see http://www.mrtrix.org/. 
+ */ + +#pragma once + +#include + +#include "types.h" + +namespace MR { +class Header; +} // namespace MR + +namespace MR::Metadata::SliceEncoding { + +void transform_for_image_load(KeyValues &keyval, const Header &H); + +void transform_for_nifti_write(KeyValues &keyval, const Header &H); + +std::string resolve_slice_timing(const std::string &one, const std::string &two); + +void clear(KeyValues &); + +} // namespace MR::Metadata::SliceEncoding diff --git a/cpp/core/phase_encoding.cpp b/cpp/core/phase_encoding.cpp deleted file mode 100644 index ac7f7dfec8..0000000000 --- a/cpp/core/phase_encoding.cpp +++ /dev/null @@ -1,153 +0,0 @@ -/* Copyright (c) 2008-2025 the MRtrix3 contributors. - * - * This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. - * - * Covered Software is provided under this License on an "as is" - * basis, without warranty of any kind, either expressed, implied, or - * statutory, including, without limitation, warranties that the - * Covered Software is free of defects, merchantable, fit for a - * particular purpose or non-infringing. - * See the Mozilla Public License v. 2.0 for more details. - * - * For more details, see http://www.mrtrix.org/. - */ - -#include "phase_encoding.h" - -namespace MR::PhaseEncoding { - -// clang-format off -using namespace App; -const OptionGroup ImportOptions = - OptionGroup("Options for importing phase-encode tables") - + Option("import_pe_table", "import a phase-encoding table from file") - + Argument("file").type_file_in() - + Option("import_pe_eddy", "import phase-encoding information from an EDDY-style config / index file pair") - + Argument("config").type_file_in() - + Argument("indices").type_file_in(); - -const OptionGroup SelectOptions = - OptionGroup("Options for selecting volumes based on phase-encoding") - + Option("pe", - "select volumes with a particular phase encoding;" - " this can be three comma-separated values" - " (for i,j,k components of vector direction)" - " or four (direction & total readout time)") - + Argument("desc").type_sequence_float(); - -const OptionGroup ExportOptions = - OptionGroup("Options for exporting phase-encode tables") - + Option("export_pe_table", "export phase-encoding table to file") - + Argument("file").type_file_out() - + Option("export_pe_eddy", "export phase-encoding information to an EDDY-style config / index file pair") - + Argument("config").type_file_out() - + Argument("indices").type_file_out(); -// clang-format on - -void clear_scheme(Header &header) { - auto erase = [&](const std::string &s) { - auto it = header.keyval().find(s); - if (it != header.keyval().end()) - header.keyval().erase(it); - }; - erase("pe_scheme"); - erase("PhaseEncodingDirection"); - erase("TotalReadoutTime"); -} - -Eigen::MatrixXd parse_scheme(const Header &header) { - Eigen::MatrixXd PE; - const auto it = header.keyval().find("pe_scheme"); - if (it != header.keyval().end()) { - try { - PE = MR::parse_matrix(it->second); - } catch (Exception &e) { - throw Exception(e, "malformed PE scheme in image \"" + header.name() + "\""); - } - if (ssize_t(PE.rows()) != ((header.ndim() > 3) ? 
header.size(3) : 1)) - throw Exception("malformed PE scheme in image \"" + header.name() + "\":" + // - " number of rows does not equal number of volumes"); - } else { - const auto it_dir = header.keyval().find("PhaseEncodingDirection"); - if (it_dir != header.keyval().end()) { - const auto it_time = header.keyval().find("TotalReadoutTime"); - const size_t cols = it_time == header.keyval().end() ? 3 : 4; - Eigen::Matrix row(cols); - row.head(3) = Axes::id2dir(it_dir->second); - if (it_time != header.keyval().end()) - row[3] = to(it_time->second); - PE.resize((header.ndim() > 3) ? header.size(3) : 1, cols); - PE.rowwise() = row.transpose(); - } - } - return PE; -} - -Eigen::MatrixXd get_scheme(const Header &header) { - DEBUG("searching for suitable phase encoding data..."); - using namespace App; - Eigen::MatrixXd result; - - try { - const auto opt_table = get_options("import_pe_table"); - if (!opt_table.empty()) - result = load(opt_table[0][0], header); - const auto opt_eddy = get_options("import_pe_eddy"); - if (!opt_eddy.empty()) { - if (!opt_table.empty()) - throw Exception("Phase encoding table can be provided" - " using either -import_pe_table or -import_pe_eddy option," - " but NOT both"); - result = load_eddy(opt_eddy[0][0], opt_eddy[0][1], header); - } - if (opt_table.empty() && opt_eddy.empty()) - result = parse_scheme(header); - } catch (Exception &e) { - throw Exception(e, "error importing phase encoding table for image \"" + header.name() + "\""); - } - - if (!result.rows()) - return result; - - if (result.cols() < 3) - throw Exception("unexpected phase encoding table matrix dimensions"); - - INFO("found " + str(result.rows()) + "x" + str(result.cols()) + " phase encoding table"); - - return result; -} - -Eigen::MatrixXd eddy2scheme(const Eigen::MatrixXd &config, const Eigen::Array &indices) { - if (config.cols() != 4) - throw Exception("Expected 4 columns in EDDY-format phase-encoding config file"); - Eigen::MatrixXd result(indices.size(), 4); - for (ssize_t row = 0; row != indices.size(); ++row) { - if (indices[row] > config.rows()) - throw Exception("Malformed EDDY-style phase-encoding information:" - " index exceeds number of config entries"); - result.row(row) = config.row(indices[row] - 1); - } - return result; -} - -void export_commandline(const Header &header) { - auto check = [&](const Eigen::MatrixXd &m) -> const Eigen::MatrixXd & { - if (!m.rows()) - throw Exception("no phase-encoding information found within image \"" + header.name() + "\""); - return m; - }; - - auto scheme = parse_scheme(header); - - auto opt = get_options("export_pe_table"); - if (!opt.empty()) - save(check(scheme), header, opt[0][0]); - - opt = get_options("export_pe_eddy"); - if (!opt.empty()) - save_eddy(check(scheme), header, opt[0][0], opt[0][1]); -} - -} // namespace MR::PhaseEncoding diff --git a/cpp/core/phase_encoding.h b/cpp/core/phase_encoding.h deleted file mode 100644 index eb12d79591..0000000000 --- a/cpp/core/phase_encoding.h +++ /dev/null @@ -1,281 +0,0 @@ -/* Copyright (c) 2008-2025 the MRtrix3 contributors. - * - * This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
- * - * Covered Software is provided under this License on an "as is" - * basis, without warranty of any kind, either expressed, implied, or - * statutory, including, without limitation, warranties that the - * Covered Software is free of defects, merchantable, fit for a - * particular purpose or non-infringing. - * See the Mozilla Public License v. 2.0 for more details. - * - * For more details, see http://www.mrtrix.org/. - */ - -#pragma once - -#include -#include - -#include "app.h" -#include "axes.h" -#include "file/matrix.h" -#include "file/nifti_utils.h" -#include "file/ofstream.h" -#include "header.h" - -namespace MR::PhaseEncoding { - -extern const App::OptionGroup ImportOptions; -extern const App::OptionGroup SelectOptions; -extern const App::OptionGroup ExportOptions; - -//! check that a phase-encoding table is valid -template void check(const MatrixType &PE) { - if (!PE.rows()) - throw Exception("No valid phase encoding table found"); - - if (PE.cols() < 3) - throw Exception("Phase-encoding matrix must have at least 3 columns"); - - for (ssize_t row = 0; row != PE.rows(); ++row) { - for (ssize_t axis = 0; axis != 3; ++axis) { - if (std::round(PE(row, axis)) != PE(row, axis)) - throw Exception("Phase-encoding matrix contains non-integral axis designation"); - } - } -} - -//! check that the PE scheme matches the DWI data in \a header -template void check(const MatrixType &PE, const HeaderType &header) { - check(PE); - const ssize_t num_volumes = (header.ndim() > 3) ? header.size(3) : 1; - if (num_volumes != PE.rows()) - throw Exception("Number of volumes in image \"" + header.name() + "\" (" + str(num_volumes) + - ") does not match that in phase encoding table (" + str(PE.rows()) + ")"); -} - -//! store the phase encoding matrix in a header -/*! this will store the phase encoding matrix into the - * Header::keyval() structure of \a header. - * - If the phase encoding direction and/or total readout - * time varies between volumes, then the information - * will be stored in field "pe_scheme"; if not, it - * will instead be stored in fields "PhaseEncodingDirection" - * and "TotalReadoutTime" - */ -template void set_scheme(Header &header, const MatrixType &PE) { - auto erase = [&](const std::string &s) { - auto it = header.keyval().find(s); - if (it != header.keyval().end()) - header.keyval().erase(it); - }; - if (!PE.rows()) { - erase("pe_scheme"); - erase("PhaseEncodingDirection"); - erase("TotalReadoutTime"); - return; - } - check(PE, header); - std::string pe_scheme; - std::string first_line; - bool variation = false; - for (ssize_t row = 0; row < PE.rows(); ++row) { - std::string line = str(PE(row, 0)); - for (ssize_t col = 1; col < PE.cols(); ++col) - line += "," + str(PE(row, col), 3); - add_line(pe_scheme, line); - if (first_line.empty()) - first_line = line; - else if (line != first_line) - variation = true; - } - if (variation) { - header.keyval()["pe_scheme"] = pe_scheme; - erase("PhaseEncodingDirection"); - erase("TotalReadoutTime"); - } else { - erase("pe_scheme"); - const Eigen::Vector3d dir{PE(0, 0), PE(0, 1), PE(0, 2)}; - header.keyval()["PhaseEncodingDirection"] = Axes::dir2id(dir); - if (PE.cols() >= 4) - header.keyval()["TotalReadoutTime"] = str(PE(0, 3), 3); - else - erase("TotalReadoutTime"); - } -} - -//! clear the phase encoding matrix from a header -/*! this will delete any trace of phase encoding information - * from the Header::keyval() structure of \a header. - */ -void clear_scheme(Header &header); - -//! parse the phase encoding matrix from a header -/*! 
extract the phase encoding matrix stored in the \a header if one - * is present. This is expected to be stored in the Header::keyval() - * structure, under the key 'pe_scheme'. Alternatively, if the phase - * encoding direction and bandwidth is fixed for all volumes in the - * series, this information may be stored using the keys - * 'PhaseEncodingDirection' and 'TotalReadoutTime'. - */ -Eigen::MatrixXd parse_scheme(const Header &); - -//! get a phase encoding matrix -/*! get a valid phase-encoding matrix, either from files specified at - * the command-line, or from the contents of the image header. - */ -Eigen::MatrixXd get_scheme(const Header &); - -//! Convert a phase-encoding scheme into the EDDY config / indices format -template -void scheme2eddy(const MatrixType &PE, Eigen::MatrixXd &config, Eigen::Array &indices) { - try { - check(PE); - } catch (Exception &e) { - throw Exception(e, "Cannot convert phase-encoding scheme to eddy format"); - } - if (PE.cols() != 4) - throw Exception("Phase-encoding matrix requires 4 columns to convert to eddy format"); - config.resize(0, 0); - indices = Eigen::Array::Constant(PE.rows(), PE.rows()); - for (ssize_t PE_row = 0; PE_row != PE.rows(); ++PE_row) { - for (ssize_t config_row = 0; config_row != config.rows(); ++config_row) { - bool dir_match = PE.template block<1, 3>(PE_row, 0).isApprox(config.block<1, 3>(config_row, 0)); - bool time_match = abs(PE(PE_row, 3) - config(config_row, 3)) < 1e-3; - if (dir_match && time_match) { - // FSL-style index file indexes from 1 - indices[PE_row] = config_row + 1; - break; - } - } - if (indices[PE_row] == PE.rows()) { - // No corresponding match found in config matrix; create a new entry - config.conservativeResize(config.rows() + 1, 4); - config.row(config.rows() - 1) = PE.row(PE_row); - indices[PE_row] = config.rows(); - } - } -} - -//! Convert phase-encoding infor from the EDDY config / indices format into a standard scheme -Eigen::MatrixXd eddy2scheme(const Eigen::MatrixXd &, const Eigen::Array &); - -//! Modifies a phase encoding scheme if being imported alongside a non-RAS image -template -Eigen::MatrixXd transform_for_image_load(const MatrixType &pe_scheme, const HeaderType &H) { - std::array perm; - std::array flip; - H.realignment(perm, flip); - if (perm[0] == 0 && perm[1] == 1 && perm[2] == 2 && !flip[0] && !flip[1] && !flip[2]) { - INFO("No transformation of external phase encoding data required to accompany image \"" + H.name() + "\""); - return pe_scheme; - } - Eigen::MatrixXd result(pe_scheme.rows(), pe_scheme.cols()); - for (ssize_t row = 0; row != pe_scheme.rows(); ++row) { - Eigen::VectorXd new_line = pe_scheme.row(row); - for (ssize_t axis = 0; axis != 3; ++axis) { - new_line[axis] = pe_scheme(row, perm[axis]); - if (new_line[axis] && flip[perm[axis]]) - new_line[axis] = -new_line[axis]; - } - result.row(row) = new_line; - } - INFO("External phase encoding data transformed to match RAS realignment of image \"" + H.name() + "\""); - return result; -} - -//! 
Modifies a phase encoding scheme if being exported alongside a NIfTI image -template -Eigen::MatrixXd transform_for_nifti_write(const MatrixType &pe_scheme, const HeaderType &H) { - std::vector order; - std::array flip; - File::NIfTI::axes_on_write(H, order, flip); - if (order[0] == 0 && order[1] == 1 && order[2] == 2 && !flip[0] && !flip[1] && !flip[2]) { - INFO("No transformation of phase encoding data required for export to file"); - return pe_scheme; - } - Eigen::Matrix result(pe_scheme.rows(), pe_scheme.cols()); - for (ssize_t row = 0; row != pe_scheme.rows(); ++row) { - Eigen::VectorXd new_line = pe_scheme.row(row); - for (ssize_t axis = 0; axis != 3; ++axis) - new_line[axis] = - pe_scheme(row, order[axis]) && flip[axis] ? -pe_scheme(row, order[axis]) : pe_scheme(row, order[axis]); - result.row(row) = new_line; - } - INFO("Phase encoding data transformed to match NIfTI / MGH image export prior to writing to file"); - return result; -} - -namespace { -template void __save(const MatrixType &PE, const std::string &path) { - File::OFStream out(path); - for (ssize_t row = 0; row != PE.rows(); ++row) { - // Write phase-encode direction as integers; other information as floating-point - out << PE.template block<1, 3>(row, 0).template cast(); - if (PE.cols() > 3) - out << " " << PE.block(row, 3, 1, PE.cols() - 3); - out << "\n"; - } -} -} // namespace - -//! Save a phase-encoding scheme to file -// Note that because the output table requires permutation / sign flipping -// only if the output target image is a NIfTI, the output file name must have -// already been set -template -void save(const MatrixType &PE, const HeaderType &header, const std::string &path) { - try { - check(PE, header); - } catch (Exception &e) { - throw Exception(e, "Cannot export phase-encoding table to file \"" + path + "\""); - } - - if (Path::has_suffix(header.name(), {".mgh", ".mgz", ".nii", ".nii.gz", ".img"})) { - __save(transform_for_nifti_write(PE, header), path); - } else { - __save(PE, path); - } -} - -//! Save a phase-encoding scheme to EDDY format config / index files -template -void save_eddy(const MatrixType &PE, - const HeaderType &header, - const std::string &config_path, - const std::string &index_path) { - Eigen::MatrixXd config; - Eigen::Array indices; - scheme2eddy(transform_for_nifti_write(PE, header), config, indices); - File::Matrix::save_matrix(config, config_path, KeyValues(), false); - File::Matrix::save_vector(indices, index_path, KeyValues(), false); -} - -//! Save the phase-encoding scheme from a header to file depending on command-line options -void export_commandline(const Header &); - -//! Load a phase-encoding scheme from a matrix text file -template Eigen::MatrixXd load(const std::string &path, const HeaderType &header) { - const Eigen::MatrixXd PE = File::Matrix::load_matrix(path); - check(PE, header); - // As with JSON import, need to query the header to discover if the - // strides / transform were modified on image load to make the image - // data appear approximately axial, in which case we need to apply the - // same transforms to the phase encoding data on load - return transform_for_image_load(PE, header); -} - -//! 
Load a phase-encoding scheme from an EDDY-format config / indices file pair -template -Eigen::MatrixXd load_eddy(const std::string &config_path, const std::string &index_path, const HeaderType &header) { - const Eigen::MatrixXd config = File::Matrix::load_matrix(config_path); - const Eigen::Array indices = File::Matrix::load_vector(index_path); - const Eigen::MatrixXd PE = eddy2scheme(config, indices); - check(PE, header); - return transform_for_image_load(PE, header); -} - -} // namespace MR::PhaseEncoding diff --git a/cpp/core/surface/algo/image2mesh.h b/cpp/core/surface/algo/image2mesh.h index f3c9087d44..a074e20a06 100644 --- a/cpp/core/surface/algo/image2mesh.h +++ b/cpp/core/surface/algo/image2mesh.h @@ -102,6 +102,7 @@ template void image2mesh_blocky(const ImageType &input_image, const auto existing = vox2vertindex.find(voxels[in_vertex]); if (existing == vox2vertindex.end()) { triangle_vertices[out_vertex] = vertices.size(); + vox2vertindex.insert(std::make_pair(voxels[in_vertex], vertices.size())); Eigen::Vector3d pos_voxelspace(default_type(voxels[in_vertex][0]) - 0.5, default_type(voxels[in_vertex][1]) - 0.5, default_type(voxels[in_vertex][2]) - 0.5); diff --git a/cpp/core/surface/filter/vertex_transform.cpp b/cpp/core/surface/filter/vertex_transform.cpp index ab80f71f33..4317156116 100644 --- a/cpp/core/surface/filter/vertex_transform.cpp +++ b/cpp/core/surface/filter/vertex_transform.cpp @@ -17,6 +17,7 @@ #include "surface/filter/vertex_transform.h" #include "file/nifti_utils.h" +#include "axes.h" #include "exception.h" namespace MR::Surface::Filter { @@ -82,14 +83,13 @@ void VertexTransform::operator()(const Mesh &in, Mesh &out) const { break; case transform_t::FS2REAL: - std::vector axes(3); - auto M = File::NIfTI::adjust_transform(header, axes); + auto M = header.realignment().orig_transform(); + const Axes::permutations_type &axes = header.realignment().permutations(); Eigen::Vector3d cras(3, 1); for (size_t i = 0; i < 3; i++) { cras[i] = M(i, 3); - for (size_t j = 0; j < 3; j++) { + for (size_t j = 0; j < 3; j++) cras[i] += 0.5 * header.size(axes[j]) * header.spacing(axes[j]) * M(i, j); - } } for (size_t i = 0; i != V; ++i) { vertices.push_back(in.vert(i) + cras); diff --git a/docs/reference/commands/5ttgen.rst b/docs/reference/commands/5ttgen.rst index b7834d795b..075581b127 100644 --- a/docs/reference/commands/5ttgen.rst +++ b/docs/reference/commands/5ttgen.rst @@ -513,7 +513,7 @@ Options - **-thalami choice** Select method to be used for thalamic segmentation; options are: nuclei,first,aseg -- **-white_stem** Classify the brainstem as white matter +- **-white_stem** Classify the brainstem as white matter; streamlines will not be permitted to terminate within this region Options common to all 5ttgen algorithms ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/docs/reference/commands/afdconnectivity.rst b/docs/reference/commands/afdconnectivity.rst index 2d627cd637..086b0b0a6e 100644 --- a/docs/reference/commands/afdconnectivity.rst +++ b/docs/reference/commands/afdconnectivity.rst @@ -31,7 +31,7 @@ For valid comparisons of AFD connectivity across scans, images MUST be intensity Note that the sum of the AFD is normalised by streamline length to account for subject differences in fibre bundle length. This normalisation results in a measure that is more related to the cross-sectional volume of the tract (and therefore 'connectivity'). Note that SIFT-ed tract count is a superior measure because it is unaffected by tangential yet unrelated fibres. 
However, AFD connectivity may be used as a substitute when Anatomically Constrained Tractography is not possible due to uncorrectable EPI distortions, and SIFT may therefore not be as effective. -Longer discussion regarding this command can additionally be found at: https://mrtrix.readthedocs.io/en/3.0.4/concepts/afd_connectivity.html (as well as in the relevant reference). +Longer discussion regarding this command can additionally be found at: https://mrtrix.readthedocs.io/en/3.0.5/concepts/afd_connectivity.html (as well as in the relevant reference). Options ------- diff --git a/docs/reference/commands/amp2sh.rst b/docs/reference/commands/amp2sh.rst index 704d111637..d23d614979 100644 --- a/docs/reference/commands/amp2sh.rst +++ b/docs/reference/commands/amp2sh.rst @@ -26,7 +26,7 @@ The spherical harmonic decomposition is calculated by least-squares linear fitti The directions can be defined either as a DW gradient scheme (for example to compute the SH representation of the DW signal), a set of [az el] pairs as output by the dirgen command, or a set of [ x y z ] directions in Cartesian coordinates. The DW gradient scheme or direction set can be supplied within the input image header or using the -gradient or -directions option. Note that if a direction set and DW gradient scheme can be found, the direction set will be used by default. The spherical harmonic coefficients are stored according to the conventions described in the main documentation, which can be found at the following link: |br| -https://mrtrix.readthedocs.io/en/3.0.4/concepts/spherical_harmonics.html +https://mrtrix.readthedocs.io/en/3.0.5/concepts/spherical_harmonics.html Options ------- diff --git a/docs/reference/commands/dwi2fod.rst b/docs/reference/commands/dwi2fod.rst index e917ad9b99..297f1f6e71 100644 --- a/docs/reference/commands/dwi2fod.rst +++ b/docs/reference/commands/dwi2fod.rst @@ -23,7 +23,7 @@ Description ----------- The spherical harmonic coefficients are stored according to the conventions described in the main documentation, which can be found at the following link: |br| -https://mrtrix.readthedocs.io/en/3.0.4/concepts/spherical_harmonics.html +https://mrtrix.readthedocs.io/en/3.0.5/concepts/spherical_harmonics.html Example usages -------------- diff --git a/docs/reference/commands/dwi2mask.rst b/docs/reference/commands/dwi2mask.rst index a4ea9e84a9..d920a9727d 100644 --- a/docs/reference/commands/dwi2mask.rst +++ b/docs/reference/commands/dwi2mask.rst @@ -23,7 +23,7 @@ Description This script serves as an interface for many different algorithms that generate a binary mask from DWI data in different ways. Each algorithm available has its own help page, including necessary references; e.g. to see the help page of the "fslbet" algorithm, type "dwi2mask fslbet". More information on mask derivation from DWI data can be found at the following link: -https://mrtrix.readthedocs.io/en/3.0.4/dwi_preprocessing/masking.html +https://mrtrix.readthedocs.io/en/3.0.5/dwi_preprocessing/masking.html Options ------- diff --git a/docs/reference/commands/dwi2response.rst b/docs/reference/commands/dwi2response.rst index 6d45fecc30..1ee9ded94e 100644 --- a/docs/reference/commands/dwi2response.rst +++ b/docs/reference/commands/dwi2response.rst @@ -25,10 +25,10 @@ dwi2response offers different algorithms for performing various types of respons Each algorithm available has its own help page, including necessary references; e.g. to see the help page of the "fa" algorithm, type "dwi2response fa". 
More information on response function estimation for spherical deconvolution can be found at the following link: -https://mrtrix.readthedocs.io/en/3.0.4/constrained_spherical_deconvolution/response_function_estimation.html +https://mrtrix.readthedocs.io/en/3.0.5/constrained_spherical_deconvolution/response_function_estimation.html Note that if the -mask command-line option is not specified, the MRtrix3 command dwi2mask will automatically be called to derive an initial voxel exclusion mask. More information on mask derivation from DWI data can be found at: -https://mrtrix.readthedocs.io/en/3.0.4/dwi_preprocessing/masking.html +https://mrtrix.readthedocs.io/en/3.0.5/dwi_preprocessing/masking.html Options ------- diff --git a/docs/reference/commands/dwibiascorrect.rst b/docs/reference/commands/dwibiascorrect.rst index badacebdf9..d9618a7dea 100644 --- a/docs/reference/commands/dwibiascorrect.rst +++ b/docs/reference/commands/dwibiascorrect.rst @@ -21,7 +21,7 @@ Description ----------- Note that if the -mask command-line option is not specified, the MRtrix3 command dwi2mask will automatically be called to derive a mask that will be passed to the relevant bias field estimation command. More information on mask derivation from DWI data can be found at the following link: -https://mrtrix.readthedocs.io/en/3.0.4/dwi_preprocessing/masking.html +https://mrtrix.readthedocs.io/en/3.0.5/dwi_preprocessing/masking.html Options ------- diff --git a/docs/reference/commands/dwifslpreproc.rst b/docs/reference/commands/dwifslpreproc.rst index bc047ea0b0..cafaaabf98 100644 --- a/docs/reference/commands/dwifslpreproc.rst +++ b/docs/reference/commands/dwifslpreproc.rst @@ -24,10 +24,10 @@ Description This script is intended to provide convenience of use of the FSL software tools topup and eddy for performing DWI pre-processing, by encapsulating some of the surrounding image data and metadata processing steps. It is intended to simply these processing steps for most commonly-used DWI acquisition strategies, whilst also providing support for some more exotic acquisitions. The "example usage" section demonstrates the ways in which the script can be used based on the (compulsory) -rpe_* command-line options. More information on use of the dwifslpreproc command can be found at the following link: -https://mrtrix.readthedocs.io/en/3.0.4/dwi_preprocessing/dwifslpreproc.html +https://mrtrix.readthedocs.io/en/3.0.5/dwi_preprocessing/dwifslpreproc.html Note that the MRtrix3 command dwi2mask will automatically be called to derive a processing mask for the FSL command "eddy", which determines which voxels contribute to the estimation of geometric distortion parameters and possibly also the classification of outlier slices. If FSL command "topup" is used to estimate a susceptibility field, then dwi2mask will be executed on the resuts of running FSL command "applytopup" to the input DWIs; otherwise it will be executed directly on the input DWIs. Alternatively, the -eddy_mask option can be specified in order to manually provide such a processing mask. More information on mask derivation from DWI data can be found at: -https://mrtrix.readthedocs.io/en/3.0.4/dwi_preprocessing/masking.html +https://mrtrix.readthedocs.io/en/3.0.5/dwi_preprocessing/masking.html The "-topup_options" and "-eddy_options" command-line options allow the user to pass desired command-line options directly to the FSL commands topup and eddy. 
The available options for those commands may vary between versions of FSL; users can interrogate such by querying the help pages of the installed software, and/or the FSL online documentation: (topup) https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/topup/TopupUsersGuide ; diff --git a/docs/reference/commands/dwigradcheck.rst b/docs/reference/commands/dwigradcheck.rst index 7b8aea82da..d2ef3f73fe 100644 --- a/docs/reference/commands/dwigradcheck.rst +++ b/docs/reference/commands/dwigradcheck.rst @@ -23,7 +23,7 @@ Description Note that the corrected gradient table can be output using the -export_grad_{mrtrix,fsl} option. Note that if the -mask command-line option is not specified, the MRtrix3 command dwi2mask will automatically be called to derive a binary mask image to be used for streamline seeding and to constrain streamline propagation. More information on mask derivation from DWI data can be found at the following link: -https://mrtrix.readthedocs.io/en/3.0.4/dwi_preprocessing/masking.html +https://mrtrix.readthedocs.io/en/3.0.5/dwi_preprocessing/masking.html Options ------- diff --git a/docs/reference/commands/fixel2peaks.rst b/docs/reference/commands/fixel2peaks.rst index bd626309cf..380fe7548e 100644 --- a/docs/reference/commands/fixel2peaks.rst +++ b/docs/reference/commands/fixel2peaks.rst @@ -24,7 +24,7 @@ Description If a fixel data file is provided as input, then the 3-vectors in the output image will be scaled based on the data in that file. If the input is instead the fixel directory, or the index or directions file, then all output 3-vectors will possess unit norm. Fixel data are stored utilising the fixel directory format described in the main documentation, which can be found at the following link: |br| -https://mrtrix.readthedocs.io/en/3.0.4/fixel_based_analysis/fixel_directory_format.html +https://mrtrix.readthedocs.io/en/3.0.5/fixel_based_analysis/fixel_directory_format.html Options ------- diff --git a/docs/reference/commands/fixel2sh.rst b/docs/reference/commands/fixel2sh.rst index b2ee6ba62f..06b89fe214 100644 --- a/docs/reference/commands/fixel2sh.rst +++ b/docs/reference/commands/fixel2sh.rst @@ -24,10 +24,10 @@ Description This command generates spherical harmonic data from fixels that can be visualised using the ODF tool in MRview. The output ODF lobes are scaled according to the values in the input fixel image. The spherical harmonic coefficients are stored according to the conventions described in the main documentation, which can be found at the following link: |br| -https://mrtrix.readthedocs.io/en/3.0.4/concepts/spherical_harmonics.html +https://mrtrix.readthedocs.io/en/3.0.5/concepts/spherical_harmonics.html Fixel data are stored utilising the fixel directory format described in the main documentation, which can be found at the following link: |br| -https://mrtrix.readthedocs.io/en/3.0.4/fixel_based_analysis/fixel_directory_format.html +https://mrtrix.readthedocs.io/en/3.0.5/fixel_based_analysis/fixel_directory_format.html Options ------- diff --git a/docs/reference/commands/fixel2tsf.rst b/docs/reference/commands/fixel2tsf.rst index b8e9d14798..1723a25a77 100644 --- a/docs/reference/commands/fixel2tsf.rst +++ b/docs/reference/commands/fixel2tsf.rst @@ -25,7 +25,7 @@ Description This command is useful for visualising all brain fixels (e.g. the output from fixelcfestats) in 3D. 
Fixel data are stored utilising the fixel directory format described in the main documentation, which can be found at the following link: |br| -https://mrtrix.readthedocs.io/en/3.0.4/fixel_based_analysis/fixel_directory_format.html +https://mrtrix.readthedocs.io/en/3.0.5/fixel_based_analysis/fixel_directory_format.html Options ------- diff --git a/docs/reference/commands/fixel2voxel.rst b/docs/reference/commands/fixel2voxel.rst index 1b61405df3..f8a9ed99d2 100644 --- a/docs/reference/commands/fixel2voxel.rst +++ b/docs/reference/commands/fixel2voxel.rst @@ -37,7 +37,7 @@ Fixel data can be reduced to voxel data in a number of ways: The -weighted option deals with the case where there is some per-fixel metric of interest that you wish to collapse into a single scalar measure per voxel, but each fixel possesses a different volume, and you wish for those fixels with greater volume to have a greater influence on the calculation than fixels with lesser volume. For instance, when estimating a voxel-based measure of mean axon diameter from per-fixel mean axon diameters, a fixel's mean axon diameter should be weigthed by its relative volume within the voxel in the calculation of that voxel mean. Fixel data are stored utilising the fixel directory format described in the main documentation, which can be found at the following link: |br| -https://mrtrix.readthedocs.io/en/3.0.4/fixel_based_analysis/fixel_directory_format.html +https://mrtrix.readthedocs.io/en/3.0.5/fixel_based_analysis/fixel_directory_format.html Options ------- diff --git a/docs/reference/commands/fixelcfestats.rst b/docs/reference/commands/fixelcfestats.rst index 2425a6c3fe..44285f2036 100644 --- a/docs/reference/commands/fixelcfestats.rst +++ b/docs/reference/commands/fixelcfestats.rst @@ -32,7 +32,7 @@ Note that if the -mask option is used, the output fixel directory will still con In some software packages, a column of ones is automatically added to the GLM design matrix; the purpose of this column is to estimate the "global intercept", which is the predicted value of the observed variable if all explanatory variables were to be zero. However there are rare situations where including such a column would not be appropriate for a particular experimental design. Hence, in MRtrix3 statistical inference commands, it is up to the user to determine whether or not this column of ones should be included in their design matrix, and add it explicitly if necessary. The contrast matrix must also reflect the presence of this additional column. Fixel data are stored utilising the fixel directory format described in the main documentation, which can be found at the following link: |br| -https://mrtrix.readthedocs.io/en/3.0.4/fixel_based_analysis/fixel_directory_format.html +https://mrtrix.readthedocs.io/en/3.0.5/fixel_based_analysis/fixel_directory_format.html Options ------- diff --git a/docs/reference/commands/fixelconnectivity.rst b/docs/reference/commands/fixelconnectivity.rst index 48a284f902..3012671bdc 100644 --- a/docs/reference/commands/fixelconnectivity.rst +++ b/docs/reference/commands/fixelconnectivity.rst @@ -25,7 +25,7 @@ Description This command will generate a directory containing three images, which encodes the fixel-fixel connectivity matrix. Documentation regarding this format and how to use it will come in the future. 
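A minimal sketch of the global-intercept point made in the fixelcfestats description above, assuming NumPy is available; the numbers and names are purely illustrative and not part of any MRtrix3 interface:

.. code-block:: python

   import numpy as np

   # Observed metric for four subjects, and one explanatory variable (e.g. age).
   y = np.array([10.0, 12.0, 9.0, 13.0])
   age = np.array([20.0, 40.0, 10.0, 50.0])

   # Without a column of ones, the fitted model is forced through the origin.
   X_no_intercept = age.reshape(-1, 1)

   # With a column of ones, the extra coefficient is the "global intercept":
   # the predicted value of y when all explanatory variables are zero.
   X_with_intercept = np.column_stack([np.ones_like(age), age])

   beta_no, *_ = np.linalg.lstsq(X_no_intercept, y, rcond=None)
   beta_with, *_ = np.linalg.lstsq(X_with_intercept, y, rcond=None)
   print(beta_no)    # [slope]
   print(beta_with)  # [intercept, slope]

   # The contrast matrix must match the design: testing the age effect is
   # [1] without the intercept column, but [0, 1] with it.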
Fixel data are stored utilising the fixel directory format described in the main documentation, which can be found at the following link: |br| -https://mrtrix.readthedocs.io/en/3.0.4/fixel_based_analysis/fixel_directory_format.html +https://mrtrix.readthedocs.io/en/3.0.5/fixel_based_analysis/fixel_directory_format.html Options ------- diff --git a/docs/reference/commands/fixelconvert.rst b/docs/reference/commands/fixelconvert.rst index 92f2fe0820..2c01afb669 100644 --- a/docs/reference/commands/fixelconvert.rst +++ b/docs/reference/commands/fixelconvert.rst @@ -22,7 +22,7 @@ Description ----------- Fixel data are stored utilising the fixel directory format described in the main documentation, which can be found at the following link: |br| -https://mrtrix.readthedocs.io/en/3.0.4/fixel_based_analysis/fixel_directory_format.html +https://mrtrix.readthedocs.io/en/3.0.5/fixel_based_analysis/fixel_directory_format.html Example usages -------------- diff --git a/docs/reference/commands/fixelcorrespondence.rst b/docs/reference/commands/fixelcorrespondence.rst index 884ff1965c..30aba7c81e 100644 --- a/docs/reference/commands/fixelcorrespondence.rst +++ b/docs/reference/commands/fixelcorrespondence.rst @@ -26,7 +26,7 @@ Description It is assumed that the subject image has already been spatially normalised and is aligned with the template. The output fixel image will have the same fixels (and directions) of the template. Fixel data are stored utilising the fixel directory format described in the main documentation, which can be found at the following link: |br| -https://mrtrix.readthedocs.io/en/3.0.4/fixel_based_analysis/fixel_directory_format.html +https://mrtrix.readthedocs.io/en/3.0.5/fixel_based_analysis/fixel_directory_format.html Options ------- diff --git a/docs/reference/commands/fixelcrop.rst b/docs/reference/commands/fixelcrop.rst index 317f77da10..7969a39ea5 100644 --- a/docs/reference/commands/fixelcrop.rst +++ b/docs/reference/commands/fixelcrop.rst @@ -25,7 +25,7 @@ Description The mask must be input as a fixel data file the same dimensions as the fixel data file(s) to be cropped. Fixel data are stored utilising the fixel directory format described in the main documentation, which can be found at the following link: |br| -https://mrtrix.readthedocs.io/en/3.0.4/fixel_based_analysis/fixel_directory_format.html +https://mrtrix.readthedocs.io/en/3.0.5/fixel_based_analysis/fixel_directory_format.html Options ------- diff --git a/docs/reference/commands/fixelfilter.rst b/docs/reference/commands/fixelfilter.rst index b764258940..bf01871b85 100644 --- a/docs/reference/commands/fixelfilter.rst +++ b/docs/reference/commands/fixelfilter.rst @@ -25,7 +25,7 @@ Description If the first input to the command is a specific fixel data file, then a filtered version of only that file will be generated by the command. Alternatively, if the input is the location of a fixel directory, then the command will create a duplicate of the fixel directory, and apply the specified filter operation to all fixel data files within the directory. 
Fixel data are stored utilising the fixel directory format described in the main documentation, which can be found at the following link: |br| -https://mrtrix.readthedocs.io/en/3.0.4/fixel_based_analysis/fixel_directory_format.html +https://mrtrix.readthedocs.io/en/3.0.5/fixel_based_analysis/fixel_directory_format.html Options ------- diff --git a/docs/reference/commands/fixelreorient.rst b/docs/reference/commands/fixelreorient.rst index c8d99b4143..950814f121 100644 --- a/docs/reference/commands/fixelreorient.rst +++ b/docs/reference/commands/fixelreorient.rst @@ -25,7 +25,7 @@ Description Reorientation is performed by transforming the vector representing the fixel direction with the Jacobian (local affine transform) computed at each voxel in the warp, then re-normalising the vector. Fixel data are stored utilising the fixel directory format described in the main documentation, which can be found at the following link: |br| -https://mrtrix.readthedocs.io/en/3.0.4/fixel_based_analysis/fixel_directory_format.html +https://mrtrix.readthedocs.io/en/3.0.5/fixel_based_analysis/fixel_directory_format.html Options ------- diff --git a/docs/reference/commands/for_each.rst b/docs/reference/commands/for_each.rst index 95f7900c49..d9dcbce432 100644 --- a/docs/reference/commands/for_each.rst +++ b/docs/reference/commands/for_each.rst @@ -25,7 +25,7 @@ Description This script greatly simplifies various forms of batch processing by enabling the execution of a command (or set of commands) independently for each of a set of inputs. More information on use of the for_each command can be found at the following link: -https://mrtrix.readthedocs.io/en/3.0.4/tips_and_tricks/batch_processing_with_foreach.html +https://mrtrix.readthedocs.io/en/3.0.5/tips_and_tricks/batch_processing_with_foreach.html The way that this batch processing capability is achieved is by providing basic text substitutions, which simplify the formation of valid command strings based on the unique components of the input strings on which the script is instructed to execute. This does however mean that the items to be passed as inputs to the for_each command (e.g. file / directory names) MUST NOT contain any instances of these substitution strings, as otherwise those paths will be corrupted during the course of the substitution. diff --git a/docs/reference/commands/labelconvert.rst b/docs/reference/commands/labelconvert.rst index 38a20d580f..08eb6bfba9 100644 --- a/docs/reference/commands/labelconvert.rst +++ b/docs/reference/commands/labelconvert.rst @@ -26,7 +26,7 @@ Description Typical usage is to convert a parcellation image provided by some other software, based on the lookup table provided by that software, to conform to a new lookup table, particularly one where the node indices increment from 1, in preparation for connectome construction; examples of such target lookup table files are provided in share//mrtrix3//labelconvert//, but can be created by the user to provide the desired node set // ordering // colours. 
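A minimal sketch of the relabelling operation described above, under the assumption that both lookup tables map integer node indices to anatomical names (toy tables; not the command's actual file parsing):

.. code-block:: python

   # Toy source and target lookup tables: node index -> anatomical name.
   source_lut = {10: 'Left-Thalamus', 17: 'Left-Hippocampus', 53: 'Right-Hippocampus'}
   target_lut = {1: 'Left-Thalamus', 2: 'Left-Hippocampus', 3: 'Right-Hippocampus'}

   # Build an old-index -> new-index map by matching names between the two tables.
   name_to_new = {name: idx for idx, name in target_lut.items()}
   remap = {old: name_to_new[name] for old, name in source_lut.items() if name in name_to_new}

   # Apply to a (flattened) parcellation image; labels absent from the target table become 0.
   parcellation = [0, 17, 53, 10, 99]
   converted = [remap.get(value, 0) for value in parcellation]
   print(converted)   # [0, 2, 3, 1, 0]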
A more thorough description of the operation and purpose of the labelconvert command can be found in the online documentation: |br| -https://mrtrix.readthedocs.io/en/3.0.4/quantitative_structural_connectivity/labelconvert_tutorial.html +https://mrtrix.readthedocs.io/en/3.0.5/quantitative_structural_connectivity/labelconvert_tutorial.html Example usages -------------- diff --git a/docs/reference/commands/peaks2fixel.rst b/docs/reference/commands/peaks2fixel.rst index 58a3274509..d1ecfd50c4 100644 --- a/docs/reference/commands/peaks2fixel.rst +++ b/docs/reference/commands/peaks2fixel.rst @@ -22,7 +22,7 @@ Description ----------- Fixel data are stored utilising the fixel directory format described in the main documentation, which can be found at the following link: |br| -https://mrtrix.readthedocs.io/en/3.0.4/fixel_based_analysis/fixel_directory_format.html +https://mrtrix.readthedocs.io/en/3.0.5/fixel_based_analysis/fixel_directory_format.html Options ------- diff --git a/docs/reference/commands/sh2amp.rst b/docs/reference/commands/sh2amp.rst index 6e0ecaa141..570bdd94c7 100644 --- a/docs/reference/commands/sh2amp.rst +++ b/docs/reference/commands/sh2amp.rst @@ -35,7 +35,7 @@ If a full DW encoding is provided, the number of shells needs to match those fou If the input image contains multiple shells (its size along the 5th dimension is greater than one), the program will expect the direction set to contain multiple shells, which can only be provided as a full DW encodings (the last two options in the list above). The spherical harmonic coefficients are stored according to the conventions described in the main documentation, which can be found at the following link: |br| -https://mrtrix.readthedocs.io/en/3.0.4/concepts/spherical_harmonics.html +https://mrtrix.readthedocs.io/en/3.0.5/concepts/spherical_harmonics.html Options ------- diff --git a/docs/reference/commands/sh2peaks.rst b/docs/reference/commands/sh2peaks.rst index 3123a9df7a..933eed90dd 100644 --- a/docs/reference/commands/sh2peaks.rst +++ b/docs/reference/commands/sh2peaks.rst @@ -26,7 +26,7 @@ Peaks of the spherical harmonic function in each voxel are located by commencing Within the output image, each successive triplet of volumes encodes the x, y & z components of a 3-vector; their directions in 3D space encode the orientation of the identified peaks, while the norm of each vector encodes the magnitude of the peaks. The spherical harmonic coefficients are stored according to the conventions described in the main documentation, which can be found at the following link: |br| -https://mrtrix.readthedocs.io/en/3.0.4/concepts/spherical_harmonics.html +https://mrtrix.readthedocs.io/en/3.0.5/concepts/spherical_harmonics.html Options ------- diff --git a/docs/reference/commands/sh2power.rst b/docs/reference/commands/sh2power.rst index 1cf911c0ea..ea4cdcf4d9 100644 --- a/docs/reference/commands/sh2power.rst +++ b/docs/reference/commands/sh2power.rst @@ -24,7 +24,7 @@ Description This command computes the sum of squared SH coefficients, which equals the mean-squared amplitude of the spherical function it represents. 
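A minimal sketch of that computation for a single voxel, assuming the even-degree, l-major coefficient packing described on the spherical_harmonics documentation page linked above (helper names are illustrative only):

.. code-block:: python

   import math

   def sh_power(coefficients):
       """Sum of squared SH coefficients: the mean-squared amplitude of the
       represented spherical function (up to the basis normalisation convention)."""
       return sum(c * c for c in coefficients)

   def power_per_degree(coefficients):
       """Break the power down by harmonic degree, assuming coefficients are stored
       for even degrees only, ordered l = 0, 2, 4, ... with all m within each degree."""
       # Invert N = (lmax + 1) * (lmax + 2) / 2 to recover lmax from the coefficient count.
       lmax = int(round((math.sqrt(8 * len(coefficients) + 1) - 3) / 2))
       result, offset = {}, 0
       for l in range(0, lmax + 1, 2):
           block = coefficients[offset:offset + 2 * l + 1]
           result[l] = sum(c * c for c in block)
           offset += 2 * l + 1
       return result

   coefs = [1.0, 0.0, 0.1, 0.5, -0.1, 0.0]   # 6 coefficients -> lmax = 2
   print(sh_power(coefs), power_per_degree(coefs))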
The spherical harmonic coefficients are stored according to the conventions described in the main documentation, which can be found at the following link: |br| -https://mrtrix.readthedocs.io/en/3.0.4/concepts/spherical_harmonics.html +https://mrtrix.readthedocs.io/en/3.0.5/concepts/spherical_harmonics.html Options ------- diff --git a/docs/reference/commands/sh2response.rst b/docs/reference/commands/sh2response.rst index e84c5f3fdd..49c90a81ab 100644 --- a/docs/reference/commands/sh2response.rst +++ b/docs/reference/commands/sh2response.rst @@ -24,7 +24,7 @@ Description ----------- The spherical harmonic coefficients are stored according to the conventions described in the main documentation, which can be found at the following link: |br| -https://mrtrix.readthedocs.io/en/3.0.4/concepts/spherical_harmonics.html +https://mrtrix.readthedocs.io/en/3.0.5/concepts/spherical_harmonics.html Options ------- diff --git a/docs/reference/commands/shbasis.rst b/docs/reference/commands/shbasis.rst index fbb2e1ec34..08af6f7eb1 100644 --- a/docs/reference/commands/shbasis.rst +++ b/docs/reference/commands/shbasis.rst @@ -27,7 +27,7 @@ This command provides a mechanism for testing the basis used in storage of image Note that the "force_*" conversion choices should only be used in cases where this command has previously been unable to automatically determine the SH basis from the image data, but the user themselves are confident of the SH basis of the data. The spherical harmonic coefficients are stored according to the conventions described in the main documentation, which can be found at the following link: |br| -https://mrtrix.readthedocs.io/en/3.0.4/concepts/spherical_harmonics.html +https://mrtrix.readthedocs.io/en/3.0.5/concepts/spherical_harmonics.html Options ------- diff --git a/docs/reference/commands/shconv.rst b/docs/reference/commands/shconv.rst index a70da08bf1..a8dfe4ef19 100644 --- a/docs/reference/commands/shconv.rst +++ b/docs/reference/commands/shconv.rst @@ -28,10 +28,10 @@ If multiple pairs of inputs are provided, their contributions will be summed int If the responses are multi-shell (with one line of coefficients per shell), the output will be a 5-dimensional image, with the SH coefficients of the signal in each shell stored at different indices along the 5th dimension. The spherical harmonic coefficients are stored according to the conventions described in the main documentation, which can be found at the following link: |br| -https://mrtrix.readthedocs.io/en/3.0.4/concepts/spherical_harmonics.html +https://mrtrix.readthedocs.io/en/3.0.5/concepts/spherical_harmonics.html The spherical harmonic coefficients are stored according to the conventions described in the main documentation, which can be found at the following link: |br| -https://mrtrix.readthedocs.io/en/3.0.4/concepts/spherical_harmonics.html +https://mrtrix.readthedocs.io/en/3.0.5/concepts/spherical_harmonics.html Options ------- diff --git a/docs/reference/commands/tckglobal.rst b/docs/reference/commands/tckglobal.rst index 25d3ef08c7..ffac4d3b79 100644 --- a/docs/reference/commands/tckglobal.rst +++ b/docs/reference/commands/tckglobal.rst @@ -25,7 +25,7 @@ Description This command will reconstruct the global white matter fibre tractogram that best explains the input DWI data, using a multi-tissue spherical convolution model. 
A more thorough description of the operation of global tractography in MRtrix3 can be found in the online documentation: |br| -https://mrtrix.readthedocs.io/en/3.0.4/quantitative_structural_connectivity/global_tractography.html +https://mrtrix.readthedocs.io/en/3.0.5/quantitative_structural_connectivity/global_tractography.html Example usages -------------- diff --git a/docs/reference/commands/voxel2fixel.rst b/docs/reference/commands/voxel2fixel.rst index f30030fa75..6392145aa5 100644 --- a/docs/reference/commands/voxel2fixel.rst +++ b/docs/reference/commands/voxel2fixel.rst @@ -26,7 +26,7 @@ Description This command is designed to enable CFE-based statistical analysis to be performed on voxel-wise measures. Fixel data are stored utilising the fixel directory format described in the main documentation, which can be found at the following link: |br| -https://mrtrix.readthedocs.io/en/3.0.4/fixel_based_analysis/fixel_directory_format.html +https://mrtrix.readthedocs.io/en/3.0.5/fixel_based_analysis/fixel_directory_format.html Options ------- diff --git a/docs/reference/commands/warp2metric.rst b/docs/reference/commands/warp2metric.rst index 84baa5ab2c..0976846e9c 100644 --- a/docs/reference/commands/warp2metric.rst +++ b/docs/reference/commands/warp2metric.rst @@ -21,7 +21,7 @@ Description ----------- Fixel data are stored utilising the fixel directory format described in the main documentation, which can be found at the following link: |br| -https://mrtrix.readthedocs.io/en/3.0.4/fixel_based_analysis/fixel_directory_format.html +https://mrtrix.readthedocs.io/en/3.0.5/fixel_based_analysis/fixel_directory_format.html Options ------- diff --git a/docs/tips_and_tricks/dicom_handling.rst b/docs/tips_and_tricks/dicom_handling.rst index af9ff389b5..a9c7cee3d9 100644 --- a/docs/tips_and_tricks/dicom_handling.rst +++ b/docs/tips_and_tricks/dicom_handling.rst @@ -375,44 +375,71 @@ these are: - Explicit VR Big Endian (``1.2.840.10008.1.2.2``) -Any other transfer syntax will be flagged as unsupported, and *MRtrix3* will be -unable to read the data, providing an error message similar to this: +Any other transfer syntax will be flagged as unsupported. When this occurs, +whether or not the *MRtrix3* command can proceed will depend on whether that +command requires access to the underlying image data: + +- If *only header information* is required, then the command will be able to proceed, + albeit with a warning issued: .. code-block:: console - $ mrinfo DICOM - mrinfo: [done] scanning DICOM folder "DICOM" - mrinfo: [ERROR] unable to read DICOM images in "DICOM": - mrinfo: [ERROR] unsupported transfer syntax found in DICOM data - mrinfo: [ERROR] consider using third-party tools to convert your data to standard uncompressed encoding - mrinfo: [ERROR] See the MRtrix3 documentation on DICOM handling for details: - mrinfo: [ERROR] http://mrtrix.readthedocs.io/en/latest/tips_and_tricks/dicom_handling.html#error-unsupported-transfer-syntax - mrinfo: [ERROR] error opening image "DICOM" - -Thankfully, other tools exist that should be able to convert the data to a -format that *MRtrix3* (and other DICOM tools) will read. The `dcmtk -`__ DICOM toolkit in particular provides -the ``dcmdjpeg`` command to decompress data stored using JPEG transfer syntax. 
-On Linux, a directory of such files can be decompressed as follows (amend the -various ``PATH`` as required for your system): + $ mrinfo DICOM/ + mrinfo: [done] scanning folder "DICOM/" for DICOM data + mrinfo: [100%] Reading DICOM series "SeriesDescription" + mrinfo: [WARNING] unable to read DICOM images in "DICOM/": + mrinfo: [WARNING] unsupported transfer syntax found in DICOM data + mrinfo: [WARNING] consider using third-party tools to convert your data to standard uncompressed encoding + mrinfo: [WARNING] See the MRtrix3 documentation on DICOM handling for details: + mrinfo: [WARNING] http://mrtrix.readthedocs.io/en/latest/tips_and_tricks/dicom_handling.html#error-unsupported-transfer-syntax + ************************************************ + Image name: "SURNAME^FIRSTNAME [MR] SeriesDescription" + ************************************************ + Dimensions: 96 x 96 x 60 x 7 + Voxel size: 2.5 x 2.5 x 2.5 x ? + Data strides: [ -1 -2 3 4 ] + .... + +- If however a command requires *access to the underlying image intensities*, + then *MRtrix3* will be unable to read from such: + .. code-block:: console + + $ mrconvert DICOM/ data.mif + mrconvert: [done] scanning folder "DICOM/" for DICOM data + mrconvert: [100%] Reading DICOM series "SeriesDescription" + mrconvert: [WARNING] unable to read DICOM images in "DICOM/": + mrconvert: [WARNING] unsupported transfer syntax found in DICOM data + mrconvert: [WARNING] consider using third-party tools to convert your data to standard uncompressed encoding + mrconvert: [WARNING] See the MRtrix3 documentation on DICOM handling for details: + mrconvert: [WARNING] http://mrtrix.readthedocs.io/en/latest/tips_and_tricks/dicom_handling.html#error-unsupported-transfer-syntax + mrconvert: [ERROR] No suitable handler to access data in "SURNAME^FIRSTNAME [MR] SeriesDescription" + + Thankfully, other tools exist that should be able to convert the data to a + format that *MRtrix3* (and other DICOM tools) will read. The `dcmtk + `__ DICOM toolkit in particular provides + the ``dcmdjpeg`` command to decompress data stored using JPEG transfer syntax. + On Linux, a directory of such files can be decompressed as follows (amend the + various ``PATH`` as required for your system): + + .. code-block:: console - $ export PATH=/opt/dcmtk/bin:$PATH - $ export DCMDICTPATH=/opt/dcmtk/share/dcmtk/dicom.dic + $ export PATH=/opt/dcmtk/bin:$PATH + $ export DCMDICTPATH=/opt/dcmtk/share/dcmtk/dicom.dic - $ for img in dcmdir/* - > do - > dcmdjpeg $img ${img}.tmp - > mv ${img}.tmp $img - > done + $ for img in dcmdir/* + > do + > dcmdjpeg $img ${img}.tmp + > mv ${img}.tmp $img + > done -*MRtrix3* commands should now be able to read the directory successfully: + *MRtrix3* commands should now be able to read the directory successfully: -.. code-block:: console + .. code-block:: console - $ mrinfo dcmdir - mrinfo: [done] scanning DICOM folder "data/driss/t1" - mrinfo: [100%] reading DICOM series "AX FSPGR 3D ASSET C+" - ... + $ mrinfo dcmdir + mrinfo: [done] scanning DICOM folder "data/driss/t1" + mrinfo: [100%] reading DICOM series "AX FSPGR 3D ASSET C+" + ... 
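Before falling back to ``dcmdjpeg``, it can be useful to confirm which transfer syntaxes a directory actually contains. A minimal sketch using the third-party ``pydicom`` package (an assumption; it is not part of MRtrix3 and must be installed separately):

.. code-block:: python

   import collections
   import os
   import pydicom

   def transfer_syntaxes(dicom_dir):
       """Count the transfer syntax UIDs used by the DICOM files in a directory."""
       counts = collections.Counter()
       for name in os.listdir(dicom_dir):
           path = os.path.join(dicom_dir, name)
           try:
               # Header only: pixel data is not needed to inspect the file meta information.
               dataset = pydicom.dcmread(path, stop_before_pixels=True)
               uid = dataset.file_meta.TransferSyntaxUID
           except Exception:
               continue   # skip non-DICOM files, or files lacking file meta information
           counts[f'{uid}  ({uid.name})'] += 1
       return counts

   if __name__ == '__main__':
       for syntax, count in transfer_syntaxes('DICOM/').items():
           print(f'{count:6d}  {syntax}')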
diff --git a/python/mrtrix3/commands/5ttgen/fsl.py b/python/mrtrix3/commands/5ttgen/fsl.py index 081909a447..949be18fb5 100644 --- a/python/mrtrix3/commands/5ttgen/fsl.py +++ b/python/mrtrix3/commands/5ttgen/fsl.py @@ -205,11 +205,11 @@ def execute(): #pylint: disable=unused-variable first_brain_extracted_option = ['-b'] if app.ARGS.premasked else [] first_debug_option = [] if app.DO_CLEANUP else ['-d'] first_verbosity_option = ['-v'] if app.VERBOSITY == 3 else [] - run.command([first_cmd, '-m', 'none', '-s', ','.join(sgm_structures), '-i', first_input, '-o', 'first'] - + first_brain_extracted_option - + first_debug_option - + first_verbosity_option) - fsl.check_first('first', sgm_structures) + first_stdout = run.command([first_cmd, '-m', 'none', '-s', ','.join(sgm_structures), '-i', first_input, '-o', 'first'] + + first_brain_extracted_option + + first_debug_option + + first_verbosity_option).stdout + fsl.check_first('first', structures=sgm_structures, first_stdout=first_stdout) # Convert FIRST meshes to partial volume images pve_image_list = [ ] diff --git a/python/mrtrix3/commands/5ttgen/hsvs.py b/python/mrtrix3/commands/5ttgen/hsvs.py index dee1ae1237..843430bcd8 100644 --- a/python/mrtrix3/commands/5ttgen/hsvs.py +++ b/python/mrtrix3/commands/5ttgen/hsvs.py @@ -58,7 +58,8 @@ def usage(base_parser, subparsers): #pylint: disable=unused-variable parser.add_argument('-white_stem', action='store_true', default=None, - help='Classify the brainstem as white matter') + help='Classify the brainstem as white matter; ' + 'streamlines will not be permitted to terminate within this region') parser.add_citation('Smith, R.; Skoch, A.; Bajada, C.; Caspers, S.; Connelly, A. ' 'Hybrid Surface-Volume Segmentation for improved Anatomically-Constrained Tractography. 
' 'In Proc OHBM 2020') @@ -579,8 +580,8 @@ def execute(): #pylint: disable=unused-variable from_first = { key: value for key, value in from_first.items() if 'Hippocampus' not in value and 'Amygdala' not in value } if thalami_method != 'first': from_first = { key: value for key, value in from_first.items() if 'Thalamus' not in value } - run.command([first_cmd, '-s', ','.join(from_first.keys()), '-i', 'T1.nii', '-b', '-o', 'first']) - fsl.check_first('first', from_first.keys()) + first_stdout = run.command([first_cmd, '-s', ','.join(from_first.keys()), '-i', 'T1.nii', '-b', '-o', 'first']).stdout + fsl.check_first('first', structures=from_first.keys(), first_stdout=first_stdout) app.cleanup(glob.glob('T1_to_std_sub.*')) progress = app.ProgressBar('Mapping FIRST segmentations to image', 2*len(from_first)) for key, value in from_first.items(): diff --git a/python/mrtrix3/commands/dwifslpreproc.py b/python/mrtrix3/commands/dwifslpreproc.py index 73fcc56782..48362b85ac 100644 --- a/python/mrtrix3/commands/dwifslpreproc.py +++ b/python/mrtrix3/commands/dwifslpreproc.py @@ -724,7 +724,9 @@ def scheme_times_match(one, two): overwrite_dwi_pe_scheme = True if manual_trt: # Compare manual specification to that read from the header - if not scheme_times_match(dwi_pe_scheme, dwi_manual_pe_scheme): + if len(dwi_pe_scheme[0]) == 4 \ + and app.ARGS.readout_time is not None \ + and not scheme_times_match(dwi_pe_scheme, dwi_manual_pe_scheme): app.warn('User-defined total readout time does not match what is stored in DWI image header; ' 'proceeding with user specification') overwrite_dwi_pe_scheme = True @@ -801,7 +803,8 @@ def scheme_times_match(one, two): 'proceeding with user specification') overwrite_se_epi_pe_scheme = True if manual_trt: - if not scheme_times_match(se_epi_pe_scheme, se_epi_manual_pe_scheme): + if len(se_epi_pe_scheme[0]) == 4 \ + and not scheme_times_match(se_epi_pe_scheme, se_epi_manual_pe_scheme): app.warn('User-defined total readout time does not match what is stored in SE EPI image header; ' 'proceeding with user specification') overwrite_se_epi_pe_scheme = True @@ -1416,31 +1419,51 @@ def scheme_times_match(one, two): bvecs = matrix.load_matrix(bvecs_path) bvecs_combined_transpose = [ ] bvals_combined = [ ] + nans_present_bvecs = 0 for pair in volume_pairs: bvec_mean = [ 0.5*(bvecs[0][pair[0]] + bvecs[0][pair[1]]), 0.5*(bvecs[1][pair[0]] + bvecs[1][pair[1]]), 0.5*(bvecs[2][pair[0]] + bvecs[2][pair[1]]) ] - norm2 = matrix.dot(bvec_mean, bvec_mean) - - # If one diffusion sensitisation gradient direction is reversed with respect to - # the other, still want to enable their recombination; but need to explicitly - # account for this when averaging the two directions - if norm2 < 0.5: - bvec_mean = [ 0.5*(bvecs[0][pair[0]] - bvecs[0][pair[1]]), - 0.5*(bvecs[1][pair[0]] - bvecs[1][pair[1]]), - 0.5*(bvecs[2][pair[0]] - bvecs[2][pair[1]]) ] - norm2 = matrix.dot(bvec_mean, bvec_mean) - - # Occasionally a b=0 volume can have a zero vector - if norm2: - factor = 1.0 / math.sqrt(norm2) - new_vec = [ bvec_mean[0]*factor, bvec_mean[1]*factor, bvec_mean[2]*factor ] + if any(math.isnan(value) for value in bvec_mean): + nans_present_bvecs += 1 + new_bvec = [0.0, 0.0, 0.0] + new_bval = 0.0 else: - new_vec = [ 0.0, 0.0, 0.0 ] - bvecs_combined_transpose.append(new_vec) - bvals_combined.append(0.5 * (grad[pair[0]][3] + grad[pair[1]][3])) + norm2 = matrix.dot(bvec_mean, bvec_mean) + # If one diffusion sensitisation gradient direction is reversed with respect to + # the other, still want to enable their 
recombination; but need to explicitly + # account for this when averaging the two directions + if norm2 < 0.5: + bvec_mean = [ 0.5*(bvecs[0][pair[0]] - bvecs[0][pair[1]]), + 0.5*(bvecs[1][pair[0]] - bvecs[1][pair[1]]), + 0.5*(bvecs[2][pair[0]] - bvecs[2][pair[1]]) ] + norm2 = matrix.dot(bvec_mean, bvec_mean) + + # Occasionally a b=0 volume can have a zero vector + if norm2: + factor = 1.0 / math.sqrt(norm2) + new_bvec = [ bvec_mean[0]*factor, bvec_mean[1]*factor, bvec_mean[2]*factor ] + else: + new_bvec = [ 0.0, 0.0, 0.0 ] + + new_bval = 0.5 * (grad[pair[0]][3] + grad[pair[1]][3]) + # End check for NaN values in bvec + + bvecs_combined_transpose.append(new_bvec) + bvals_combined.append(new_bval) + + if nans_present_bvecs: + app.warn('FSL "eddy"\'s rotated bvecs contained' + + (f'{nans_present_bvecs} entries with NaN values;' + 'these have been read as zero-length vectors,' + 'and will be saved as b=0 volumes' + if nans_present_bvecs > 1 + else 'an entry with NaN values; ' + 'this has been read as a zero-length vector, ' + 'and will be saved as a b=0 volume') + + ' in the output series') bvecs_combined = matrix.transpose(bvecs_combined_transpose) matrix.save_matrix('bvecs_combined', bvecs_combined, add_to_command_history=False) matrix.save_vector('bvals_combined', bvals_combined, add_to_command_history=False) diff --git a/python/mrtrix3/commands/labelsgmfirst.py b/python/mrtrix3/commands/labelsgmfirst.py index 218ca9569e..d75a8ef4b3 100644 --- a/python/mrtrix3/commands/labelsgmfirst.py +++ b/python/mrtrix3/commands/labelsgmfirst.py @@ -119,8 +119,8 @@ def execute(): #pylint: disable=unused-variable if app.ARGS.premasked: first_input_is_brain_extracted = ' -b' structures_string = ','.join(structure_map.keys()) - run.command(f'{first_cmd} -m none -s {structures_string} -i T1.nii {first_input_is_brain_extracted} -o first') - fsl.check_first('first', structure_map.keys()) + first_stdout = run.command(f'{first_cmd} -m none -s {structures_string} -i T1.nii {first_input_is_brain_extracted} -o first').stdout + fsl.check_first('first', structures=structure_map.keys(), first_stdout=first_stdout) # Generate an empty image that will be used to construct the new SGM nodes run.command('mrcalc parc.mif 0 -min sgm.mif') diff --git a/python/mrtrix3/fsl.py b/python/mrtrix3/fsl.py index 10d5bd85d9..096cd8a34a 100644 --- a/python/mrtrix3/fsl.py +++ b/python/mrtrix3/fsl.py @@ -13,7 +13,7 @@ # # For more details, see http://www.mrtrix.org/. -import os, shutil +import os, shutil, subprocess from mrtrix3 import MRtrixError @@ -25,25 +25,79 @@ # Functions that may be useful for scripts that interface with FMRIB FSL tools -# FSL's run_first_all script can be difficult to wrap, since it does not provide -# a meaningful return code, and may run via SGE, which then requires waiting for -# the output files to appear. 
-def check_first(prefix, structures): #pylint: disable=unused-variable - from mrtrix3 import app, path #pylint: disable=import-outside-toplevel +# FSL's run_first_all script can be difficult to wrap, since: +# - It may or may not run via SGE or SLURM, and therefore execution control will +# return to Python even though those jobs have not yet executed / completed +# - The return code of run_first_all may be a job ID that can possibly be used +# to query whether or not jobs have completed +# - Sometimes a subset of the segmentation jobs may fail; when this happens, +# ideally the script should report an outright failure and raise an MRtrixError; +# but in some circumstances, it's possible that the requisite files may appear +# later because eg. they are being executed via SGE +# This function attempts to provide a unified interface for querying whether or not +# FIRST was successful, taking all of these into account +def check_first(prefix, structures=None, first_stdout=None): #pylint: disable=unused-variable + from mrtrix3 import app, path, utils #pylint: disable=import-outside-toplevel + job_id = None + if first_stdout: + try: + job_id = int(first_stdout.rstrip().splitlines()[-1]) + except ValueError: + app.debug('Unable to convert FIRST stdout contents to integer job ID') + execution_verified = False + if job_id: + # Eventually modify on dev to reflect Python3 prerequisite + # Create dummy fsl_sub job, use to monitor for completion + flag_file = utils.name_temporary('txt') + try: + with subprocess.Popen(['fsl_sub', + '-j', str(job_id), + '-T', '1', + 'touch', flag_file], + stdout=subprocess.PIPE) as proc: + (fslsub_stdout, _) = proc.communicate() + returncode = proc.returncode + if returncode: + app.debug(f'fsl_sub executed successfully, but returned error code {returncode}') + else: + app.debug('fsl_sub successfully executed; awaiting completion flag') + path.wait_for(flag_file) + execution_verified = True + app.debug('Flag file identified indicating completion of all run_first_all tasks') + try: + flag_jobid = int(fslsub_stdout.rstrip().splitlines()[-1]) + app.cleanup(['touch.' 
+ stream + str(flag_jobid) for stream in ['o', 'e']]) + except ValueError: + app.debug('Unable to parse Job ID for fsl_sub "touch" job; could not erase stream files') + except OSError: + app.debug('Unable to execute fsl_sub to check status of FIRST execution') + finally: + app.cleanup(flag_file) + if not structures: + app.warn('No way to verify up-front whether FSL FIRST was successful' + ' due to no knowledge of set of structures to be segmented;' + ' execution will continue,' + 'but script may subsequently fail' + ' if an expected structure was not segmented successfully') + return vtk_files = [ prefix + '-' + struct + '_first.vtk' for struct in structures ] existing_file_count = sum(os.path.exists(filename) for filename in vtk_files) - if existing_file_count != len(vtk_files): - if 'SGE_ROOT' in os.environ and os.environ['SGE_ROOT']: - app.console('FSL FIRST job may have been run via SGE; ' - 'awaiting completion') - app.console('(note however that FIRST may fail silently, ' - 'and hence this script may hang indefinitely)') - path.wait_for(vtk_files) - else: - app.DO_CLEANUP = False - raise MRtrixError('FSL FIRST has failed; ' - f'{"only " if existing_file_count else ""}{existing_file_count} of {len(vtk_files)} structures were segmented successfully ' - f'(check {os.path.join(app.SCRATCH_DIR, "first.logs")})') + if existing_file_count == len(vtk_files): + app.debug(f'All {existing_file_count} expected FIRST .vtk files found') + return + if not execution_verified and 'SGE_ROOT' in os.environ and os.environ['SGE_ROOT']: + app.console('FSL FIRST job PID not found,' + ' but job may nevertheless complete later via SGE') + app.console('Script will wait to see if the expected .vtk files are generated later') + app.console('(note however that FIRST may fail silently,' + ' and hence this script may hang indefinitely)') + path.wait_for(vtk_files) + return + app.DO_CLEANUP = False + raise MRtrixError('FSL FIRST has failed; ' + f'{"only " if existing_file_count else ""}{existing_file_count} of {len(vtk_files)} structures ' + 'were segmented successfully ' + f'(check {os.path.join(app.SCRATCH_DIR, "first.logs")})') diff --git a/python/mrtrix3/path.py b/python/mrtrix3/path.py index 35e26f453d..e4a2877083 100644 --- a/python/mrtrix3/path.py +++ b/python/mrtrix3/path.py @@ -95,7 +95,7 @@ def in_use(path): # A fatal error will result in a non-zero code -> in_use() = False, so wait_for() can return return not subprocess.call(['fuser', '-s', path], shell=False, stdin=None, stdout=None, stderr=None) - def num_exit(data): + def num_exist(data): count = 0 for entry in data: if os.path.exists(entry): @@ -127,22 +127,22 @@ def num_in_use(data): app.debug(str(paths)) # Wait until all files exist - num_exist = num_exit(paths) - if num_exist != len(paths): + current_num_exist = num_exist(paths) + if current_num_exist != len(paths): item_message = f'new item "{paths[0]}"' if len(paths) == 1 else f'{len(paths)} new items' progress = app.ProgressBar(f'Waiting for creation of {item_message}', len(paths)) for _ in range(num_exist): progress.increment() delay = 1.0/1024.0 - while not num_exist == len(paths): + while not current_num_exist == len(paths): time.sleep(delay) - new_num_exist = num_exit(paths) - if new_num_exist == num_exist: - delay = max(60.0, delay*2.0) - elif new_num_exist > num_exist: - for _ in range(new_num_exist - num_exist): + new_num_exist = num_exist(paths) + if new_num_exist == current_num_exist: + delay = min(60.0, delay*2.0) + elif new_num_exist > current_num_exist: + for _ in 
range(new_num_exist - current_num_exist): progress.increment() - num_exist = new_num_exist + current_num_exist = new_num_exist delay = 1.0/1024.0 progress.done() else: @@ -160,29 +160,29 @@ def num_in_use(data): return # Can we query the in-use status of any of these paths - num_in_use = num_in_use(paths) - if num_in_use is None: + current_num_in_use = num_in_use(paths) + if current_num_in_use is None: app.debug('Unable to test for finalization of new files') return # Wait until all files are not in use - if not num_in_use: + if not current_num_in_use: app.debug(f'{"Items" if len(paths) > 1 else "Item"} immediately ready') return item_message = f'new file "{paths[0]}"' if len(paths) == 1 else f'{len(paths)} new files' progress = app.ProgressBar('Waiting for finalization of {item_message}') - for _ in range(len(paths) - num_in_use): + for _ in range(len(paths) - current_num_in_use): progress.increment() delay = 1.0/1024.0 - while num_in_use: + while current_num_in_use: time.sleep(delay) - new_num_in_use = num_in_use(paths) - if new_num_in_use == num_in_use: - delay = max(60.0, delay*2.0) - elif new_num_in_use < num_in_use: - for _ in range(num_in_use - new_num_in_use): + new_num_in_use = current_num_in_use(paths) + if new_num_in_use == current_num_in_use: + delay = min(60.0, delay*2.0) + elif new_num_in_use < current_num_in_use: + for _ in range(current_num_in_use - new_num_in_use): progress.increment() - num_in_use = new_num_in_use + current_num_in_use = new_num_in_use delay = 1.0/1024.0 progress.done() diff --git a/testing/binaries/CMakeLists.txt b/testing/binaries/CMakeLists.txt index b637065f07..9b47cb1bfb 100644 --- a/testing/binaries/CMakeLists.txt +++ b/testing/binaries/CMakeLists.txt @@ -202,6 +202,7 @@ add_bash_binary_test(mrcalc/expression_2) add_bash_binary_test(mrcalc/expression_3) add_bash_binary_test(mrcalc/expression_4) add_bash_binary_test(mrcalc/expression_5) +add_bash_binary_test(mrcalc/dwscheme_preservation_imprecision) add_bash_binary_test(mrcat/axis) add_bash_binary_test(mrcat/single_input) diff --git a/testing/binaries/tests/mrcalc/dwscheme_preservation_imprecision b/testing/binaries/tests/mrcalc/dwscheme_preservation_imprecision new file mode 100644 index 0000000000..d293b74e08 --- /dev/null +++ b/testing/binaries/tests/mrcalc/dwscheme_preservation_imprecision @@ -0,0 +1,30 @@ +# Tests pertaining to the preservation or omission of diffusion gradient tables +# upon merging of image headers +# Related to #3027. + +# If two diffusion gradient tables are not an exact string match, +# but are more-or-less identical, +# then it should be possible to merge those two headers and preserve dw_scheme +# This is simulated here by truncating ust the last character +# from the last entry in the table +mrinfo dwi.mif -dwgrad > tmp.txt +truncate -s-1 tmp.txt +mrconvert dwi.mif -grad tmp.txt - | \ + mrcalc - dwi.mif -add 0.5 -mult - | \ + mrinfo - -dwgrad + +# If, when merging two image headers, +# they both contain a diffusion gradient table, +# but are distinctly different from one another, +# then the contents of the gradient table should be lost +# when that merge takes place. 
+# That is simulated here by flipping an image along one axis
+# (which causes a corresponding reflection of the gradient table),
+# then merging the resulting header back with the original image
+if mrtransform dwi.mif -flip 0 - | \
+   mrcalc - dwi.mif -add 0.5 -mult - | \
+   mrinfo - -property dw_scheme; then
+  exit 1
+else
+  exit 0
+fi
diff --git a/testing/scripts/CMakeLists.txt b/testing/scripts/CMakeLists.txt
index 280daaccab..e3863508dd 100644
--- a/testing/scripts/CMakeLists.txt
+++ b/testing/scripts/CMakeLists.txt
@@ -192,6 +192,7 @@ add_bash_script_test(dwicat/whitespace)
 
 add_bash_script_test(dwifslpreproc/axis_padding)
 add_bash_script_test(dwifslpreproc/eddyqc)
+add_bash_script_test(dwifslpreproc/header_pe_dir_no_trt)
 add_bash_script_test(dwifslpreproc/permuted_volumes)
 add_bash_script_test(dwifslpreproc/piping)
 add_bash_script_test(dwifslpreproc/rpeall)
diff --git a/testing/scripts/tests/dwifslpreproc/header_pe_dir_no_trt b/testing/scripts/tests/dwifslpreproc/header_pe_dir_no_trt
new file mode 100644
index 0000000000..a63400cb94
--- /dev/null
+++ b/testing/scripts/tests/dwifslpreproc/header_pe_dir_no_trt
@@ -0,0 +1,16 @@
+# Test scenario where:
+# - The image header contains phase encoding information,
+#   but only the phase encoding direction, not the readout time
+# - The user specifies a total readout time at the command-line
+# Prior to #2722, this would cause an unhandled exception,
+# due to attempting to read from the absent fourth column of the phase encoding table
+
+mrconvert BIDS/sub-04/dwi/sub-04_dwi.nii.gz \
+  -fslgrad BIDS/sub-04/dwi/sub-04_dwi.bvec BIDS/sub-04/dwi/sub-04_dwi.bval \
+  -json_import BIDS/sub-04/dwi/sub-04_dwi.json tmp-sub-04_dwi.mif \
+  -strides 0,0,0,1 \
+  -clear_property TotalReadoutTime \
+  -force
+
+dwifslpreproc tmp-sub-04_dwi.mif ../tmp/dwifslpreproc/no_trt_header.mif \
+  -pe_dir ap -readout_time 0.1 -force
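For context on the dwifslpreproc change exercised by the header_pe_dir_no_trt test above: the new guard only compares a user-supplied total readout time against the header when the header's phase-encoding table actually carries a fourth (readout time) column. The following is a minimal self-contained sketch of that check; the table layouts and the ``needs_trt_warning`` helper are illustrative only, not the actual dwifslpreproc internals.

.. code-block:: python

    # Sketch of the added guard: skip the readout-time comparison whenever the
    # header phase-encoding table has only three columns (direction, no time).
    def needs_trt_warning(header_pe_scheme, manual_pe_scheme, readout_time_arg,
                          scheme_times_match):
        return (len(header_pe_scheme[0]) == 4
                and readout_time_arg is not None
                and not scheme_times_match(header_pe_scheme, manual_pe_scheme))

    # A three-column header scheme no longer trips the comparison that
    # previously indexed the missing fourth column:
    three_col = [[0, 1, 0], [0, 1, 0]]
    manual = [[0, 1, 0, 0.1], [0, 1, 0, 0.1]]
    assert not needs_trt_warning(
        three_col, manual, 0.1,
        lambda a, b: all(abs(x[3] - y[3]) < 1e-3 for x, y in zip(a, b)))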
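The dwifslpreproc volume-recombination change can likewise be summarised outside the script. This standalone sketch mirrors the averaging logic added in the patch (flip anti-parallel directions, renormalise, and fall back to a b=0 entry whenever eddy's rotated bvecs contain NaN values); the ``combine_pair`` helper exists only for illustration.

.. code-block:: python

    # Standalone sketch of the bvec recombination logic: average paired
    # gradient directions, flip if anti-parallel, renormalise, and store a
    # b=0 entry whenever eddy emitted NaN values for a pair.
    import math

    def combine_pair(bvec_a, bvec_b, bval_a, bval_b):
        mean = [0.5 * (a + b) for a, b in zip(bvec_a, bvec_b)]
        if any(math.isnan(value) for value in mean):
            return [0.0, 0.0, 0.0], 0.0          # NaN from eddy -> store as b=0
        norm2 = sum(value * value for value in mean)
        if norm2 < 0.5:                          # directions were anti-parallel
            mean = [0.5 * (a - b) for a, b in zip(bvec_a, bvec_b)]
            norm2 = sum(value * value for value in mean)
        if norm2:
            factor = 1.0 / math.sqrt(norm2)
            mean = [value * factor for value in mean]
        else:
            mean = [0.0, 0.0, 0.0]               # genuine zero-length vector
        return mean, 0.5 * (bval_a + bval_b)

    # e.g. an anti-parallel pair averages to the shared axis:
    print(combine_pair([1, 0, 0], [-1, 0, 0], 1000, 1000))  # ([1.0, 0.0, 0.0], 1000.0)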
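Finally, the rewritten ``fsl.check_first()`` monitors run_first_all completion by submitting a held ``touch`` job through ``fsl_sub`` and waiting for the resulting flag file. A simplified standalone rendering of that idea, assuming ``fsl_sub`` is on PATH and using a plain polling loop in place of ``mrtrix3.path.wait_for()``, might look like:

.. code-block:: python

    # Simplified sketch of the completion check: submit a trivial "touch" job
    # that is held until the FIRST job (job_id) finishes, then poll for the
    # flag file that it creates.
    import os
    import subprocess
    import tempfile
    import time

    def wait_for_fsl_job(job_id, timeout=3600.0):
        flag_file = os.path.join(tempfile.mkdtemp(), 'first_done.txt')
        subprocess.run(['fsl_sub', '-j', str(job_id), '-T', '1', 'touch', flag_file],
                       check=True, stdout=subprocess.PIPE)
        delay, waited = 1.0 / 64.0, 0.0
        while not os.path.exists(flag_file):
            if waited >= timeout:
                raise RuntimeError(f'Job {job_id} did not complete within {timeout} s')
            time.sleep(delay)
            waited += delay
            delay = min(60.0, delay * 2.0)   # exponential back-off, capped at one minute
        os.remove(flag_file)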