#2488 : In/out extraction by examination id endpoint added
DuckflipXYZ committed Dec 2, 2024
1 parent c3f87d6 commit e2ab702
Showing 6 changed files with 184 additions and 79 deletions.
@@ -1132,4 +1132,31 @@ public boolean HasRightOnEveryDatasetOfProcessings(List<Long> processingIds, Str
}
return hasRight;
}

/**
* Check that the connected user has the given right for the given examination.
*
* @param examinationIds the examination ids
* @param rightStr the right
* @return true if the connected user has the given right on every examination, false otherwise
* @throws EntityNotFoundException if one of the examinations cannot be found
*/
public boolean hasRightOnExaminations(List<Long> examinationIds, String rightStr) throws EntityNotFoundException {
if (KeycloakUtil.getTokenRoles().contains(ROLE_ADMIN)) {
return true;
}
for (Long examinationId : examinationIds) {
Examination exam = examinationRepository.findById(examinationId).orElse(null);
if (exam == null) {
throw new EntityNotFoundException("Cannot find examination with id " + examinationId);
}
if (exam.getStudyId() == null) {
return false;
}
if (!this.hasRightOnStudyCenter(exam.getCenterId(), exam.getStudyId(), rightStr)) {
return false;
}
}
return true;
}
}
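
For orientation, this new check is consumed below through Spring Security's @PreAuthorize SpEL on the download endpoint. The sketch that follows only illustrates how a service could invoke the same check imperatively; the wrapper class, the package locations of DatasetSecurityService and EntityNotFoundException, and the AccessDeniedException handling are assumptions, not part of this commit:

// Illustrative sketch only (not part of this commit); imports for
// DatasetSecurityService and EntityNotFoundException are assumed to resolve.
import java.util.List;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.security.access.AccessDeniedException;
import org.springframework.stereotype.Service;

@Service
public class ExaminationDownloadGuard {

    @Autowired
    private DatasetSecurityService datasetSecurityService;

    /** Fails fast when the connected user lacks the download right on one of the examinations. */
    public void assertCanDownload(List<Long> examinationIds) throws EntityNotFoundException {
        if (!datasetSecurityService.hasRightOnExaminations(examinationIds, "CAN_DOWNLOAD")) {
            throw new AccessDeniedException("Missing CAN_DOWNLOAD right on at least one examination: " + examinationIds);
        }
    }
}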
@@ -137,4 +137,19 @@ void massiveDownloadByProcessingId(
@Parameter(description = "outputs to extract") @Valid
@RequestParam(value = "resultOnly") boolean resultOnly, HttpServletResponse response) throws RestServiceException;

@Operation(summary = "massiveDownloadProcessingDatasetsByExaminationIds", description = "If exists, returns a zip file of the inputs/outputs per processing corresponding to the given examination IDs")
@ApiResponses(value = {
@ApiResponse(responseCode = "200", description = "zip file"),
@ApiResponse(responseCode = "401", description = "unauthorized"),
@ApiResponse(responseCode = "403", description = "forbidden"),
@ApiResponse(responseCode = "404", description = "no dataset found"),
@ApiResponse(responseCode = "500", description = "unexpected error") })
@GetMapping(value = "/massiveDownloadProcessingByExamination")
@PreAuthorize("hasRole('ADMIN') or (hasAnyRole('EXPERT', 'USER') and @datasetSecurityService.hasRightOnExaminations(#examinationIds, 'CAN_DOWNLOAD'))")
void massiveDownloadProcessingByExaminationId(
@Parameter(description = "id of the examination", required=true) @Valid
@RequestParam(value = "examiantionIds") List<Long> examinationIds,
@Parameter(description = "outputs to extract") @Valid
@RequestParam(value = "resultOnly") boolean resultOnly, HttpServletResponse response) throws RestServiceException;

}
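
To give a sense of how the new endpoint is called, here is a minimal client-side sketch. The host and base path are assumptions (they depend on the gateway configuration), authentication is omitted, and the example ids are made up; the query parameter names follow the mapping above, with examinationIds bound from a comma-separated value:

// Minimal client sketch for the new endpoint (host, base path and ids are assumed).
import java.nio.file.Files;
import java.nio.file.Paths;
import org.springframework.web.client.RestTemplate;
import org.springframework.web.util.UriComponentsBuilder;

public class MassiveDownloadByExaminationClient {

    public static void main(String[] args) throws Exception {
        String url = UriComponentsBuilder
                .fromHttpUrl("https://shanoir.example.org/datasets/massiveDownloadProcessingByExamination")
                .queryParam("examinationIds", "12,34")   // a comma-separated value binds to List<Long>
                .queryParam("resultOnly", "false")       // false: keep inputs and outputs, not only result.yaml
                .toUriString();
        // A Keycloak bearer token would normally be attached here; omitted for brevity.
        byte[] zip = new RestTemplate().getForObject(url, byte[].class);
        Files.write(Paths.get("processings-by-examination.zip"), zip);
    }
}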
@@ -24,6 +24,8 @@
import org.shanoir.ng.dataset.model.Dataset;
import org.shanoir.ng.dataset.model.DatasetExpressionFormat;
import org.shanoir.ng.dataset.service.DatasetService;
import org.shanoir.ng.examination.model.Examination;
import org.shanoir.ng.examination.service.ExaminationService;
import org.shanoir.ng.processing.dto.DatasetProcessingDTO;
import org.shanoir.ng.processing.dto.mapper.DatasetProcessingMapper;
import org.shanoir.ng.processing.model.DatasetProcessing;
@@ -70,8 +72,8 @@ public class DatasetProcessingApiController implements DatasetProcessingApi {
@Autowired
private ProcessingDownloaderServiceImpl processingDownloaderService;

/** Number of downloadable datasets. */
private static final int DATASET_LIMIT = 500;
@Autowired
private ExaminationService examinationService;

public DatasetProcessingApiController(){

@@ -201,4 +203,33 @@ public void massiveDownloadByProcessingId(

processingDownloaderService.massiveDownload(processingList, resultOnly, "dcm" , response, false, null);
}

@Override
public void massiveDownloadProcessingByExaminationId(
@Parameter(description = "ids of examination", required=true) @Valid
@RequestParam(value = "examinationIds") List<Long> examinationIds,
@Parameter(description = "outputs to extract") @Valid
@RequestParam(value = "resultOnly") boolean resultOnly,
HttpServletResponse response) throws RestServiceException {

List<Examination> examinationList = new ArrayList<>();
for (Long examinationId : examinationIds) {
Examination examination = null;
try {
if (examinationId != null) {
examination = examinationService.findById(examinationId);
}
} catch (Exception e) {
// Lookup failures are reported below as an invalid examination id.
}
if (Objects.isNull(examination)) {
throw new RestServiceException(
new ErrorModel(HttpStatus.FORBIDDEN.value(), examinationId + " is not a valid examination id."));
}
examinationList.add(examination);
}
processingDownloaderService.massiveDownloadByExamination(examinationList, resultOnly, "dcm" , response, false, null);
}
}
@@ -17,6 +17,7 @@
import java.util.List;
import java.util.Optional;
import org.shanoir.ng.processing.model.DatasetProcessing;
import org.springframework.data.jpa.repository.Query;
import org.springframework.data.repository.CrudRepository;

/**
@@ -43,4 +44,17 @@ public interface DatasetProcessingRepository extends CrudRepository<DatasetProce
List<DatasetProcessing> findAllByInputDatasets_Id(Long datasetId);

List<DatasetProcessing> findAllByParentId(Long id);

/**
* Find the ids of all processings whose input datasets belong to the given examinations.
*
* @param examinationIds the examination ids
* @return the ids of the matching dataset processings
*/
@Query(value="SELECT processing.id FROM dataset_processing as processing " +
"INNER JOIN input_of_dataset_processing as input ON processing.id=input.processing_id " +
"INNER JOIN dataset as dataset ON dataset.id=input.dataset_id " +
"INNER JOIN dataset_acquisition as acquisition ON acquisition.id=dataset.dataset_acquisition_id " +
"WHERE acquisition.examination_id IN (:examinationIds)", nativeQuery = true)
List<Long> findAllIdsByExaminationIds(List<Long> examinationIds);
}
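
The native query above reaches the examination through the chain dataset_processing → input_of_dataset_processing → dataset → dataset_acquisition. For readability, a roughly equivalent JPQL declaration is sketched below; the entity property names (inputDatasets, datasetAcquisition, examination) are assumptions inferred from the derived queries above, the DISTINCT merely guards against duplicate ids when a processing has several input datasets in the same examination, and the native query remains the authoritative version:

// Illustrative JPQL counterpart (not part of this commit); entity property names are assumed.
// It would sit in the same DatasetProcessingRepository interface, next to the native query,
// and requires org.springframework.data.repository.query.Param for the @Param annotation.
@Query("SELECT DISTINCT p.id FROM DatasetProcessing p " +
        "JOIN p.inputDatasets d " +
        "WHERE d.datasetAcquisition.examination.id IN :examinationIds")
List<Long> findAllIdsByExaminationIdsJpql(@Param("examinationIds") List<Long> examinationIds);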
@@ -84,6 +84,10 @@ public Optional<DatasetProcessing> findById(final Long id) {
public List<DatasetProcessing> findAll() {
return Utils.toList(repository.findAll());
}

/**
* Find the existing processings for the given ids; ids without a matching processing are silently skipped.
*/
public List<DatasetProcessing> findAllById(List<Long> idList) {
return idList.stream().flatMap(it -> findById(it).stream()).toList();
}

@Override
public DatasetProcessing create(final DatasetProcessing entity) {
@@ -12,6 +12,8 @@
import org.shanoir.ng.download.WADODownloaderService;
import org.shanoir.ng.examination.model.Examination;
import org.shanoir.ng.processing.model.DatasetProcessing;
import org.shanoir.ng.processing.model.DatasetProcessingType;
import org.shanoir.ng.processing.repository.DatasetProcessingRepository;
import org.shanoir.ng.shared.event.ShanoirEvent;
import org.shanoir.ng.shared.event.ShanoirEventType;
import org.shanoir.ng.shared.exception.ErrorModel;
@@ -22,6 +24,7 @@
import org.springframework.http.HttpStatus;
import org.springframework.stereotype.Service;

import java.io.IOException;
import java.net.URL;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
@@ -39,6 +42,10 @@ public class ProcessingDownloaderServiceImpl extends DatasetDownloaderServiceImp

@Autowired
private WADODownloaderService downloader;
@Autowired
private DatasetProcessingRepository datasetProcessingRepository;
@Autowired
private DatasetProcessingServiceImpl datasetProcessingService;

public void massiveDownload(List<DatasetProcessing> processingList, boolean resultOnly, String format, HttpServletResponse response, boolean withManifest, Long converterId) throws RestServiceException {
manageResultOnly(processingList, resultOnly);
@@ -49,75 +56,7 @@ public void massiveDownload(List<DatasetProcessing> processingList, boolean resu
Map<Long, List<String>> filesByAcquisitionId = new HashMap<>();

try (ZipOutputStream zipOutputStream = new ZipOutputStream(response.getOutputStream())) {
for (DatasetProcessing processing : processingList) {
String processingFilePath = getExecFilepath(processing.getId(), getExaminationDatas(processing.getInputDatasets()));
String subjectName = getProcessingSubject(processing);
for (Dataset dataset : Stream.concat(processing.getInputDatasets().stream(), processing.getOutputDatasets().stream()).toList()) {
if (!dataset.isDownloadable()) {
downloadResults.put(dataset.getId(), new DatasetDownloadError("Dataset not downloadable", DatasetDownloadError.ERROR));
continue;
}
DatasetDownloadError downloadResult = new DatasetDownloadError();
downloadResults.put(dataset.getId(), downloadResult);

List<URL> pathURLs = new ArrayList<>();

if (dataset.getDatasetProcessing() != null) {
// DOWNLOAD PROCESSED DATASET
DatasetFileUtils.getDatasetFilePathURLs(dataset, pathURLs, DatasetExpressionFormat.NIFTI_SINGLE_FILE, downloadResult);
DatasetFileUtils.copyNiftiFilesForURLs(pathURLs, zipOutputStream, dataset, subjectName, true, processingFilePath);
} else if (dataset instanceof EegDataset) {
// DOWNLOAD EEG
DatasetFileUtils.getDatasetFilePathURLs(dataset, pathURLs, DatasetExpressionFormat.EEG, downloadResult);
DatasetFileUtils.copyNiftiFilesForURLs(pathURLs, zipOutputStream, dataset, subjectName, false, processingFilePath);
} else if (dataset instanceof BidsDataset) {
// DOWNLOAD BIDS
DatasetFileUtils.getDatasetFilePathURLs(dataset, pathURLs, DatasetExpressionFormat.BIDS, downloadResult);
DatasetFileUtils.copyNiftiFilesForURLs(pathURLs, zipOutputStream, dataset, subjectName, true, processingFilePath);
// Manage errors here
} else if (Objects.equals("dcm", format)) {
// DOWNLOAD DICOM
DatasetFileUtils.getDatasetFilePathURLs(dataset, pathURLs, DatasetExpressionFormat.DICOM, downloadResult);
List<String> files = downloader.downloadDicomFilesForURLsAsZip(pathURLs, zipOutputStream, subjectName, dataset, processingFilePath + "/" + shapeForPath(dataset.getName()), downloadResult);
if (withManifest) {
filesByAcquisitionId.putIfAbsent(dataset.getDatasetAcquisition().getId(), new ArrayList<>());
filesByAcquisitionId.get(dataset.getDatasetAcquisition().getId()).addAll(files);
}
} else if (Objects.equals("nii", format)) {
// Check if we have a specific converter -> nifti reconversion
if (converterId != null) {
reconvertToNifti(format, converterId, dataset, pathURLs, downloadResult, subjectName, zipOutputStream);
} else {
// Check that we have existing nifti, otherwise reconvert using dcm2niix by default.
DatasetFileUtils.getDatasetFilePathURLs(dataset, pathURLs, DatasetExpressionFormat.NIFTI_SINGLE_FILE, downloadResult);
if (!pathURLs.isEmpty()) {
List<String> files = DatasetFileUtils.copyNiftiFilesForURLs(pathURLs, zipOutputStream, dataset, subjectName, false, processingFilePath + "/" + shapeForPath(dataset.getName()));
} else {
// Reconvert using dcm2niix by default.
reconvertToNifti(format, DEFAULT_NIFTI_CONVERTER_ID, dataset, pathURLs, downloadResult, subjectName, zipOutputStream);
}
}
} else {
downloadResult.update("Dataset format was not adapted to dataset download choosen", DatasetDownloadError.ERROR);
}

if (downloadResult.getStatus() == null) {
downloadResults.remove(dataset.getId());
}
}
}
if(!filesByAcquisitionId.isEmpty()){
DatasetFileUtils.writeManifestForExport(zipOutputStream, filesByAcquisitionId);
}

// Write errors to the file
if (!downloadResults.isEmpty()) {
ZipEntry zipEntry = new ZipEntry(JSON_RESULT_FILENAME);
zipEntry.setTime(System.currentTimeMillis());
zipOutputStream.putNextEntry(zipEntry);
zipOutputStream.write(objectMapper.writeValueAsString(downloadResults).getBytes());
zipOutputStream.closeEntry();
}
manageProcessingsDownload(processingList, downloadResults, zipOutputStream, format, withManifest, filesByAcquisitionId, converterId);

String ids = String.join(",", Stream.concat(processingList.stream().map(DatasetProcessing::getInputDatasets), processingList.stream().map(DatasetProcessing::getOutputDatasets)).map(dataset -> ((Dataset) dataset).getId().toString()).collect(Collectors.toList()));
ShanoirEvent event = new ShanoirEvent(ShanoirEventType.DOWNLOAD_DATASET_EVENT, ids,
@@ -133,6 +72,89 @@ public void massiveDownload(List<DatasetProcessing> processingList, boolean resu
}
}

private void manageProcessingsDownload(List<DatasetProcessing> processingList, Map<Long, DatasetDownloadError> downloadResults, ZipOutputStream zipOutputStream, String format, boolean withManifest, Map<Long, List<String>> filesByAcquisitionId, Long converterId) throws RestServiceException, IOException {
for (DatasetProcessing processing : processingList) {
String processingFilePath = getExecFilepath(processing.getId(), getExaminationDatas(processing.getInputDatasets()));
String subjectName = getProcessingSubject(processing);
for (Dataset dataset : Stream.concat(processing.getInputDatasets().stream(), processing.getOutputDatasets().stream()).toList()) {
manageDatasetDownload(dataset, downloadResults, zipOutputStream, subjectName, processingFilePath, format, withManifest, filesByAcquisitionId, converterId);

}
}
if(!filesByAcquisitionId.isEmpty()){
DatasetFileUtils.writeManifestForExport(zipOutputStream, filesByAcquisitionId);
}

// Write errors to the file
if (!downloadResults.isEmpty()) {
ZipEntry zipEntry = new ZipEntry(JSON_RESULT_FILENAME);
zipEntry.setTime(System.currentTimeMillis());
zipOutputStream.putNextEntry(zipEntry);
zipOutputStream.write(objectMapper.writeValueAsString(downloadResults).getBytes());
zipOutputStream.closeEntry();
}
}

private void manageDatasetDownload(Dataset dataset, Map<Long, DatasetDownloadError> downloadResults, ZipOutputStream zipOutputStream, String subjectName, String processingFilePath, String format, boolean withManifest, Map<Long, List<String>> filesByAcquisitionId, Long converterId) throws IOException, RestServiceException {
if (!dataset.isDownloadable()) {
downloadResults.put(dataset.getId(), new DatasetDownloadError("Dataset not downloadable", DatasetDownloadError.ERROR));
return;
}
DatasetDownloadError downloadResult = new DatasetDownloadError();
downloadResults.put(dataset.getId(), downloadResult);

List<URL> pathURLs = new ArrayList<>();

if (dataset.getDatasetProcessing() != null) {
// DOWNLOAD PROCESSED DATASET
DatasetFileUtils.getDatasetFilePathURLs(dataset, pathURLs, DatasetExpressionFormat.NIFTI_SINGLE_FILE, downloadResult);
DatasetFileUtils.copyNiftiFilesForURLs(pathURLs, zipOutputStream, dataset, subjectName, true, processingFilePath);
} else if (dataset instanceof EegDataset) {
// DOWNLOAD EEG
DatasetFileUtils.getDatasetFilePathURLs(dataset, pathURLs, DatasetExpressionFormat.EEG, downloadResult);
DatasetFileUtils.copyNiftiFilesForURLs(pathURLs, zipOutputStream, dataset, subjectName, false, processingFilePath);
} else if (dataset instanceof BidsDataset) {
// DOWNLOAD BIDS
DatasetFileUtils.getDatasetFilePathURLs(dataset, pathURLs, DatasetExpressionFormat.BIDS, downloadResult);
DatasetFileUtils.copyNiftiFilesForURLs(pathURLs, zipOutputStream, dataset, subjectName, true, processingFilePath);
// Manage errors here
} else if (Objects.equals("dcm", format)) {
// DOWNLOAD DICOM
DatasetFileUtils.getDatasetFilePathURLs(dataset, pathURLs, DatasetExpressionFormat.DICOM, downloadResult);
List<String> files = downloader.downloadDicomFilesForURLsAsZip(pathURLs, zipOutputStream, subjectName, dataset, processingFilePath + "/" + shapeForPath(dataset.getName()), downloadResult);
if (withManifest) {
filesByAcquisitionId.putIfAbsent(dataset.getDatasetAcquisition().getId(), new ArrayList<>());
filesByAcquisitionId.get(dataset.getDatasetAcquisition().getId()).addAll(files);
}
} else if (Objects.equals("nii", format)) {
// Check if we have a specific converter -> nifti reconversion
if (converterId != null) {
reconvertToNifti(format, converterId, dataset, pathURLs, downloadResult, subjectName, zipOutputStream);
} else {
// Check that we have existing nifti, otherwise reconvert using dcm2niix by default.
DatasetFileUtils.getDatasetFilePathURLs(dataset, pathURLs, DatasetExpressionFormat.NIFTI_SINGLE_FILE, downloadResult);
if (!pathURLs.isEmpty()) {
List<String> files = DatasetFileUtils.copyNiftiFilesForURLs(pathURLs, zipOutputStream, dataset, subjectName, false, processingFilePath + "/" + shapeForPath(dataset.getName()));
} else {
// Reconvert using dcm2niix by default.
reconvertToNifti(format, DEFAULT_NIFTI_CONVERTER_ID, dataset, pathURLs, downloadResult, subjectName, zipOutputStream);
}
}
} else {
downloadResult.update("Dataset format was not adapted to dataset download choosen", DatasetDownloadError.ERROR);
}

if (downloadResult.getStatus() == null) {
downloadResults.remove(dataset.getId());
}
}

public void massiveDownloadByExamination(List<Examination> examinationList, boolean resultOnly, String format, HttpServletResponse response, boolean withManifest, Long converterId) throws RestServiceException {
List<Long> processingIdsList = datasetProcessingRepository.findAllIdsByExaminationIds(examinationList.stream().map(Examination::getId).toList());
List<DatasetProcessing> processingList = datasetProcessingService.findAllById(processingIdsList).stream().filter(it -> Objects.equals(it.getDatasetProcessingType(), DatasetProcessingType.SEGMENTATION)).toList();
massiveDownload(processingList, resultOnly, format, response, withManifest, converterId);
}

private void manageResultOnly(List<DatasetProcessing> processingList, boolean resultOnly) {
if(resultOnly){
processingList.forEach(it -> {it.setOutputDatasets(it.getOutputDatasets().stream().filter(file -> Objects.equals(file.getName(), "result.yaml")).toList()); it.setInputDatasets(new ArrayList<>());});
@@ -153,14 +175,6 @@ private String getProcessingSubject(DatasetProcessing processing) {
return "noSubject";
}

private void checkSize(List<Dataset> inputs, List<Dataset> outputs) throws RestServiceException {
int size = inputs.size() + outputs.size();
if (size > DATASET_LIMIT) {
throw new RestServiceException(
new ErrorModel(HttpStatus.FORBIDDEN.value(), "This processing has " + size + " datasets. You can't download more than " + DATASET_LIMIT + " datasets." ));
}
}

private Pair<Long, String> getExaminationDatas(List<Dataset> inputs) {
Examination exam = null;
for (Dataset dataset : inputs){
