diff --git a/shanoir-ng-datasets/src/main/java/org/shanoir/ng/dataset/security/DatasetSecurityService.java b/shanoir-ng-datasets/src/main/java/org/shanoir/ng/dataset/security/DatasetSecurityService.java
index 4df61c6de8..6d793093a4 100644
--- a/shanoir-ng-datasets/src/main/java/org/shanoir/ng/dataset/security/DatasetSecurityService.java
+++ b/shanoir-ng-datasets/src/main/java/org/shanoir/ng/dataset/security/DatasetSecurityService.java
@@ -1132,4 +1132,31 @@ public boolean HasRightOnEveryDatasetOfProcessings(List<Long> processingIds, Str
         }
         return hasRight;
     }
+
+    /**
+     * Check that the connected user has the given right on each of the given examinations.
+     *
+     * @param examinationIds the examination ids
+     * @param rightStr the right
+     * @return true or false
+     * @throws EntityNotFoundException if one of the examinations cannot be found
+     */
+    public boolean hasRightOnExaminations(List<Long> examinationIds, String rightStr) throws EntityNotFoundException {
+        if (KeycloakUtil.getTokenRoles().contains(ROLE_ADMIN)) {
+            return true;
+        }
+        for (Long examinationId : examinationIds) {
+            Examination exam = examinationRepository.findById(examinationId).orElse(null);
+            if (exam == null) {
+                throw new EntityNotFoundException("Cannot find examination with id " + examinationId);
+            }
+            if (exam.getStudyId() == null) {
+                return false;
+            }
+            if (!this.hasRightOnStudyCenter(exam.getCenterId(), exam.getStudyId(), rightStr)) {
+                return false;
+            }
+        }
+        return true;
+    }
 }
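The check is all or nothing: an unknown id fails fast with an EntityNotFoundException instead of returning false, and a single examination lacking the requested right (or attached to no study) rejects the whole list. A caller-side sketch of these semantics (the ids are hypothetical, and securityService stands for the injected DatasetSecurityService bean):

    List<Long> examinationIds = List.of(101L, 102L);
    try {
        // false as soon as one examination's (center, study) pair lacks the right
        boolean canDownload = securityService.hasRightOnExaminations(examinationIds, "CAN_DOWNLOAD");
    } catch (EntityNotFoundException e) {
        // at least one id matched no examination: reported as an error, not as a denial
    }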
diff --git a/shanoir-ng-datasets/src/main/java/org/shanoir/ng/processing/controler/DatasetProcessingApi.java b/shanoir-ng-datasets/src/main/java/org/shanoir/ng/processing/controler/DatasetProcessingApi.java
index 6c5241ecec..361a859f9d 100644
--- a/shanoir-ng-datasets/src/main/java/org/shanoir/ng/processing/controler/DatasetProcessingApi.java
+++ b/shanoir-ng-datasets/src/main/java/org/shanoir/ng/processing/controler/DatasetProcessingApi.java
@@ -137,4 +137,19 @@ void massiveDownloadByProcessingId(
             @Parameter(description = "outputs to extract") @Valid
             @RequestParam(value = "resultOnly") boolean resultOnly, HttpServletResponse response) throws RestServiceException;
 
+    @Operation(summary = "massiveDownloadProcessingDatasetsByExaminationIds", description = "If any exist, returns a zip file of the inputs/outputs of each processing linked to the given examination IDs")
+    @ApiResponses(value = {
+            @ApiResponse(responseCode = "200", description = "zip file"),
+            @ApiResponse(responseCode = "401", description = "unauthorized"),
+            @ApiResponse(responseCode = "403", description = "forbidden"),
+            @ApiResponse(responseCode = "404", description = "no dataset found"),
+            @ApiResponse(responseCode = "500", description = "unexpected error") })
+    @GetMapping(value = "/massiveDownloadProcessingByExamination")
+    @PreAuthorize("hasRole('ADMIN') or (hasAnyRole('EXPERT', 'USER') and @datasetSecurityService.hasRightOnExaminations(#examinationIds, 'CAN_DOWNLOAD'))")
+    void massiveDownloadProcessingByExaminationId(
+            @Parameter(description = "ids of the examinations", required = true) @Valid
+            @RequestParam(value = "examinationIds") List<Long> examinationIds,
+            @Parameter(description = "outputs to extract") @Valid
+            @RequestParam(value = "resultOnly") boolean resultOnly, HttpServletResponse response) throws RestServiceException;
+
 }
diff --git a/shanoir-ng-datasets/src/main/java/org/shanoir/ng/processing/controler/DatasetProcessingApiController.java b/shanoir-ng-datasets/src/main/java/org/shanoir/ng/processing/controler/DatasetProcessingApiController.java
index 0dcc882e9a..385f533ba4 100644
--- a/shanoir-ng-datasets/src/main/java/org/shanoir/ng/processing/controler/DatasetProcessingApiController.java
+++ b/shanoir-ng-datasets/src/main/java/org/shanoir/ng/processing/controler/DatasetProcessingApiController.java
@@ -24,6 +24,8 @@
 import org.shanoir.ng.dataset.model.Dataset;
 import org.shanoir.ng.dataset.model.DatasetExpressionFormat;
 import org.shanoir.ng.dataset.service.DatasetService;
+import org.shanoir.ng.examination.model.Examination;
+import org.shanoir.ng.examination.service.ExaminationService;
 import org.shanoir.ng.processing.dto.DatasetProcessingDTO;
 import org.shanoir.ng.processing.dto.mapper.DatasetProcessingMapper;
 import org.shanoir.ng.processing.model.DatasetProcessing;
@@ -70,8 +72,8 @@ public class DatasetProcessingApiController implements DatasetProcessingApi {
     @Autowired
     private ProcessingDownloaderServiceImpl processingDownloaderService;
 
-    /** Number of downloadable datasets. */
-    private static final int DATASET_LIMIT = 500;
+    @Autowired
+    private ExaminationService examinationService;
 
     public DatasetProcessingApiController(){
 
@@ -201,4 +203,33 @@ public void massiveDownloadByProcessingId(
         processingDownloaderService.massiveDownload(processingList, resultOnly, "dcm" , response, false, null);
     }
+
+    @Override
+    public void massiveDownloadProcessingByExaminationId(
+            @Parameter(description = "ids of the examinations", required = true) @Valid
+            @RequestParam(value = "examinationIds") List<Long> examinationIds,
+            @Parameter(description = "outputs to extract") @Valid
+            @RequestParam(value = "resultOnly") boolean resultOnly,
+            HttpServletResponse response) throws RestServiceException {
+
+        List<Examination> examinationList = new ArrayList<>();
+        for (Long examinationId : examinationIds) {
+            Examination examination = examinationId != null ? examinationService.findById(examinationId) : null;
+            if (examination == null) {
+                throw new RestServiceException(
+                        new ErrorModel(HttpStatus.FORBIDDEN.value(), examinationId + " is not a valid examination id."));
+            }
+            examinationList.add(examination);
+        }
+        processingDownloaderService.massiveDownloadByExamination(examinationList, resultOnly, "dcm", response, false, null);
+    }
 }
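Seen from a client, the new endpoint behaves like the other massive-download routes: a GET carrying the examination ids and the resultOnly flag, answered with a zip stream, or a 403 as soon as one id is invalid for that user. A call sketch using java.net.http; the host and the path prefix in front of /massiveDownloadProcessingByExamination are assumptions, as is the way the Keycloak token is obtained:

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;
    import java.nio.file.Path;

    public class ProcessingDownloadClient {
        public static void main(String[] args) throws Exception {
            String token = args[0]; // assumption: a valid Keycloak access token
            HttpRequest request = HttpRequest.newBuilder()
                    .uri(URI.create("https://shanoir.example.org/shanoir-ng/datasets"
                            + "/massiveDownloadProcessingByExamination?examinationIds=101,102&resultOnly=false"))
                    .header("Authorization", "Bearer " + token)
                    .GET()
                    .build();
            // The response body is the zip assembled by ProcessingDownloaderServiceImpl.
            HttpResponse<Path> response = HttpClient.newHttpClient()
                    .send(request, HttpResponse.BodyHandlers.ofFile(Path.of("processings.zip")));
            System.out.println("HTTP " + response.statusCode() + " -> " + response.body());
        }
    }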
diff --git a/shanoir-ng-datasets/src/main/java/org/shanoir/ng/processing/repository/DatasetProcessingRepository.java b/shanoir-ng-datasets/src/main/java/org/shanoir/ng/processing/repository/DatasetProcessingRepository.java
index 549d8f31f0..99b1808ec9 100644
--- a/shanoir-ng-datasets/src/main/java/org/shanoir/ng/processing/repository/DatasetProcessingRepository.java
+++ b/shanoir-ng-datasets/src/main/java/org/shanoir/ng/processing/repository/DatasetProcessingRepository.java
@@ -17,6 +17,7 @@
 import java.util.List;
 import java.util.Optional;
 
 import org.shanoir.ng.processing.model.DatasetProcessing;
+import org.springframework.data.jpa.repository.Query;
 import org.springframework.data.repository.CrudRepository;
 
 /**
@@ -43,4 +44,17 @@ public interface DatasetProcessingRepository extends CrudRepository<DatasetProcessing, Long> {
 
     List<DatasetProcessing> findAllByInputDatasets_Id(Long datasetId);
 
     List<DatasetProcessing> findAllByParentId(Long id);
+
+    /**
+     * Find all processings that are linked to the given examinations.
+     *
+     * @param examinationIds the examination ids
+     * @return the ids of the matching processings
+     */
+    @Query(value="SELECT processing.id FROM dataset_processing as processing "
+            + "INNER JOIN input_of_dataset_processing as input ON processing.id=input.processing_id "
+            + "INNER JOIN dataset as dataset ON dataset.id=input.dataset_id "
+            + "INNER JOIN dataset_acquisition as acquisition ON acquisition.id=dataset.dataset_acquisition_id "
+            + "WHERE acquisition.examination_id IN (:examinationIds)", nativeQuery = true)
+    List<Long> findAllIdsByExaminationIds(List<Long> examinationIds);
 }
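Because the query walks through input_of_dataset_processing, a processing with several input datasets under the same examination produces one row per dataset, so the returned id list can contain duplicates that the callers below do not deduplicate; a SELECT DISTINCT avoids that. For comparison, a roughly equivalent JPQL version, a sketch assuming the entity mappings match the tables above (inputDatasets, datasetAcquisition, examination) and untested:

    @Query("SELECT DISTINCT p.id FROM DatasetProcessing p "
            + "JOIN p.inputDatasets d "
            + "WHERE d.datasetAcquisition.examination.id IN :examinationIds")
    List<Long> findAllIdsByExaminationIds(@Param("examinationIds") List<Long> examinationIds);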
diff --git a/shanoir-ng-datasets/src/main/java/org/shanoir/ng/processing/service/DatasetProcessingServiceImpl.java b/shanoir-ng-datasets/src/main/java/org/shanoir/ng/processing/service/DatasetProcessingServiceImpl.java
index 7ddcbb78f1..052b1cef8d 100644
--- a/shanoir-ng-datasets/src/main/java/org/shanoir/ng/processing/service/DatasetProcessingServiceImpl.java
+++ b/shanoir-ng-datasets/src/main/java/org/shanoir/ng/processing/service/DatasetProcessingServiceImpl.java
@@ -84,6 +84,10 @@ public Optional<DatasetProcessing> findById(final Long id) {
     public List<DatasetProcessing> findAll() {
         return Utils.toList(repository.findAll());
     }
+
+    public List<DatasetProcessing> findAllById(List<Long> idList) {
+        return idList.stream().flatMap(it -> findById(it).stream()).toList();
+    }
 
     @Override
     public DatasetProcessing create(final DatasetProcessing entity) {
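As written, findAllById issues one findById lookup per id. CrudRepository already declares findAllById(Iterable), which Spring Data JPA typically turns into a single IN query; a drop-in alternative (a sketch, assuming no per-id post-processing is needed here):

    public List<DatasetProcessing> findAllById(List<Long> idList) {
        // Utils.toList is the same helper findAll() already uses for the Iterable result.
        return Utils.toList(repository.findAllById(idList));
    }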
diff --git a/shanoir-ng-datasets/src/main/java/org/shanoir/ng/processing/service/ProcessingDownloaderServiceImpl.java b/shanoir-ng-datasets/src/main/java/org/shanoir/ng/processing/service/ProcessingDownloaderServiceImpl.java
index bdff9e7944..97612ce472 100644
--- a/shanoir-ng-datasets/src/main/java/org/shanoir/ng/processing/service/ProcessingDownloaderServiceImpl.java
+++ b/shanoir-ng-datasets/src/main/java/org/shanoir/ng/processing/service/ProcessingDownloaderServiceImpl.java
@@ -12,6 +12,8 @@
 import org.shanoir.ng.download.WADODownloaderService;
 import org.shanoir.ng.examination.model.Examination;
 import org.shanoir.ng.processing.model.DatasetProcessing;
+import org.shanoir.ng.processing.model.DatasetProcessingType;
+import org.shanoir.ng.processing.repository.DatasetProcessingRepository;
 import org.shanoir.ng.shared.event.ShanoirEvent;
 import org.shanoir.ng.shared.event.ShanoirEventType;
 import org.shanoir.ng.shared.exception.ErrorModel;
@@ -22,6 +24,7 @@
 import org.springframework.http.HttpStatus;
 import org.springframework.stereotype.Service;
 
+import java.io.IOException;
 import java.net.URL;
 import java.time.LocalDateTime;
 import java.time.format.DateTimeFormatter;
@@ -39,6 +42,10 @@ public class ProcessingDownloaderServiceImpl extends DatasetDownloaderServiceImp
 
     @Autowired
     private WADODownloaderService downloader;
+    @Autowired
+    private DatasetProcessingRepository datasetProcessingRepository;
+    @Autowired
+    private DatasetProcessingServiceImpl datasetProcessingService;
 
     public void massiveDownload(List<DatasetProcessing> processingList, boolean resultOnly, String format, HttpServletResponse response, boolean withManifest, Long converterId) throws RestServiceException {
         manageResultOnly(processingList, resultOnly);
@@ -49,75 +56,7 @@ public void massiveDownload(List<DatasetProcessing> processingList, boolean resu
         Map<Long, DatasetDownloadError> downloadResults = new HashMap<>();
         Map<Long, List<String>> filesByAcquisitionId = new HashMap<>();
 
         try (ZipOutputStream zipOutputStream = new ZipOutputStream(response.getOutputStream())) {
-            for (DatasetProcessing processing : processingList) {
-                String processingFilePath = getExecFilepath(processing.getId(), getExaminationDatas(processing.getInputDatasets()));
-                String subjectName = getProcessingSubject(processing);
-                for (Dataset dataset : Stream.concat(processing.getInputDatasets().stream(), processing.getOutputDatasets().stream()).toList()) {
-                    if (!dataset.isDownloadable()) {
-                        downloadResults.put(dataset.getId(), new DatasetDownloadError("Dataset not downloadable", DatasetDownloadError.ERROR));
-                        continue;
-                    }
-                    DatasetDownloadError downloadResult = new DatasetDownloadError();
-                    downloadResults.put(dataset.getId(), downloadResult);
-
-                    List<URL> pathURLs = new ArrayList<>();
-
-                    if (dataset.getDatasetProcessing() != null) {
-                        // DOWNLOAD PROCESSED DATASET
-                        DatasetFileUtils.getDatasetFilePathURLs(dataset, pathURLs, DatasetExpressionFormat.NIFTI_SINGLE_FILE, downloadResult);
-                        DatasetFileUtils.copyNiftiFilesForURLs(pathURLs, zipOutputStream, dataset, subjectName, true, processingFilePath);
-                    } else if (dataset instanceof EegDataset) {
-                        // DOWNLOAD EEG
-                        DatasetFileUtils.getDatasetFilePathURLs(dataset, pathURLs, DatasetExpressionFormat.EEG, downloadResult);
-                        DatasetFileUtils.copyNiftiFilesForURLs(pathURLs, zipOutputStream, dataset, subjectName, false, processingFilePath);
-                    } else if (dataset instanceof BidsDataset) {
-                        // DOWNLOAD BIDS
-                        DatasetFileUtils.getDatasetFilePathURLs(dataset, pathURLs, DatasetExpressionFormat.BIDS, downloadResult);
-                        DatasetFileUtils.copyNiftiFilesForURLs(pathURLs, zipOutputStream, dataset, subjectName, true, processingFilePath);
-                        // Manage errors here
-                    } else if (Objects.equals("dcm", format)) {
-                        // DOWNLOAD DICOM
-                        DatasetFileUtils.getDatasetFilePathURLs(dataset, pathURLs, DatasetExpressionFormat.DICOM, downloadResult);
-                        List<String> files = downloader.downloadDicomFilesForURLsAsZip(pathURLs, zipOutputStream, subjectName, dataset, processingFilePath + "/" + shapeForPath(dataset.getName()), downloadResult);
-                        if (withManifest) {
-                            filesByAcquisitionId.putIfAbsent(dataset.getDatasetAcquisition().getId(), new ArrayList<>());
-                            filesByAcquisitionId.get(dataset.getDatasetAcquisition().getId()).addAll(files);
-                        }
-                    } else if (Objects.equals("nii", format)) {
-                        // Check if we have a specific converter -> nifti reconversion
-                        if (converterId != null) {
-                            reconvertToNifti(format, converterId, dataset, pathURLs, downloadResult, subjectName, zipOutputStream);
-                        } else {
-                            // Check that we have existing nifti, otherwise reconvert using dcm2niix by default.
-                            DatasetFileUtils.getDatasetFilePathURLs(dataset, pathURLs, DatasetExpressionFormat.NIFTI_SINGLE_FILE, downloadResult);
-                            if (!pathURLs.isEmpty()) {
-                                List<String> files = DatasetFileUtils.copyNiftiFilesForURLs(pathURLs, zipOutputStream, dataset, subjectName, false, processingFilePath + "/" + shapeForPath(dataset.getName()));
-                            } else {
-                                // Reconvert using dcm2niix by default.
-                                reconvertToNifti(format, DEFAULT_NIFTI_CONVERTER_ID, dataset, pathURLs, downloadResult, subjectName, zipOutputStream);
-                            }
-                        }
-                    } else {
-                        downloadResult.update("Dataset format was not adapted to dataset download choosen", DatasetDownloadError.ERROR);
-                    }
-
-                    if (downloadResult.getStatus() == null) {
-                        downloadResults.remove(dataset.getId());
-                    }
-                }
-            }
-            if(!filesByAcquisitionId.isEmpty()){
-                DatasetFileUtils.writeManifestForExport(zipOutputStream, filesByAcquisitionId);
-            }
-
-            // Write errors to the file
-            if (!downloadResults.isEmpty()) {
-                ZipEntry zipEntry = new ZipEntry(JSON_RESULT_FILENAME);
-                zipEntry.setTime(System.currentTimeMillis());
-                zipOutputStream.putNextEntry(zipEntry);
-                zipOutputStream.write(objectMapper.writeValueAsString(downloadResults).getBytes());
-                zipOutputStream.closeEntry();
-            }
+            manageProcessingsDownload(processingList, downloadResults, zipOutputStream, format, withManifest, filesByAcquisitionId, converterId);
 
             String ids = String.join(",", Stream.concat(processingList.stream().map(DatasetProcessing::getInputDatasets), processingList.stream().map(DatasetProcessing::getOutputDatasets)).map(dataset -> ((Dataset) dataset).getId().toString()).collect(Collectors.toList()));
             ShanoirEvent event = new ShanoirEvent(ShanoirEventType.DOWNLOAD_DATASET_EVENT, ids,
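The extraction keeps the error contract of the old inline loop: a failing dataset never aborts the zip, it is recorded in downloadResults and, if anything failed, the map is serialized into the archive under JSON_RESULT_FILENAME. A consumer-side sketch for that entry; the constant's value is assumed here to be "ERRORS.json" and should be checked against DatasetDownloaderServiceImpl:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.nio.charset.StandardCharsets;
    import java.util.zip.ZipEntry;
    import java.util.zip.ZipFile;

    public class DownloadErrorReport {
        public static void main(String[] args) throws Exception {
            try (ZipFile zip = new ZipFile("processings.zip")) {
                ZipEntry errors = zip.getEntry("ERRORS.json"); // assumed value of JSON_RESULT_FILENAME
                if (errors == null) {
                    System.out.println("no dataset-level errors");
                    return;
                }
                try (BufferedReader reader = new BufferedReader(
                        new InputStreamReader(zip.getInputStream(errors), StandardCharsets.UTF_8))) {
                    reader.lines().forEach(System.out::println); // JSON map: dataset id -> error
                }
            }
        }
    }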
@@ -133,6 +72,89 @@ public void massiveDownload(List<DatasetProcessing> processingList, boolean resu
         }
     }
 
+    private void manageProcessingsDownload(List<DatasetProcessing> processingList, Map<Long, DatasetDownloadError> downloadResults, ZipOutputStream zipOutputStream, String format, boolean withManifest, Map<Long, List<String>> filesByAcquisitionId, Long converterId) throws RestServiceException, IOException {
+        for (DatasetProcessing processing : processingList) {
+            String processingFilePath = getExecFilepath(processing.getId(), getExaminationDatas(processing.getInputDatasets()));
+            String subjectName = getProcessingSubject(processing);
+            for (Dataset dataset : Stream.concat(processing.getInputDatasets().stream(), processing.getOutputDatasets().stream()).toList()) {
+                manageDatasetDownload(dataset, downloadResults, zipOutputStream, subjectName, processingFilePath, format, withManifest, filesByAcquisitionId, converterId);
+            }
+        }
+        if (!filesByAcquisitionId.isEmpty()) {
+            DatasetFileUtils.writeManifestForExport(zipOutputStream, filesByAcquisitionId);
+        }
+
+        // Write errors to the file
+        if (!downloadResults.isEmpty()) {
+            ZipEntry zipEntry = new ZipEntry(JSON_RESULT_FILENAME);
+            zipEntry.setTime(System.currentTimeMillis());
+            zipOutputStream.putNextEntry(zipEntry);
+            zipOutputStream.write(objectMapper.writeValueAsString(downloadResults).getBytes());
+            zipOutputStream.closeEntry();
+        }
+    }
+
+    private void manageDatasetDownload(Dataset dataset, Map<Long, DatasetDownloadError> downloadResults, ZipOutputStream zipOutputStream, String subjectName, String processingFilePath, String format, boolean withManifest, Map<Long, List<String>> filesByAcquisitionId, Long converterId) throws IOException, RestServiceException {
+        if (!dataset.isDownloadable()) {
+            downloadResults.put(dataset.getId(), new DatasetDownloadError("Dataset not downloadable", DatasetDownloadError.ERROR));
+            return;
+        }
+        DatasetDownloadError downloadResult = new DatasetDownloadError();
+        downloadResults.put(dataset.getId(), downloadResult);
+
+        List<URL> pathURLs = new ArrayList<>();
+
+        if (dataset.getDatasetProcessing() != null) {
+            // DOWNLOAD PROCESSED DATASET
+            DatasetFileUtils.getDatasetFilePathURLs(dataset, pathURLs, DatasetExpressionFormat.NIFTI_SINGLE_FILE, downloadResult);
+            DatasetFileUtils.copyNiftiFilesForURLs(pathURLs, zipOutputStream, dataset, subjectName, true, processingFilePath);
+        } else if (dataset instanceof EegDataset) {
+            // DOWNLOAD EEG
+            DatasetFileUtils.getDatasetFilePathURLs(dataset, pathURLs, DatasetExpressionFormat.EEG, downloadResult);
+            DatasetFileUtils.copyNiftiFilesForURLs(pathURLs, zipOutputStream, dataset, subjectName, false, processingFilePath);
+        } else if (dataset instanceof BidsDataset) {
+            // DOWNLOAD BIDS
+            DatasetFileUtils.getDatasetFilePathURLs(dataset, pathURLs, DatasetExpressionFormat.BIDS, downloadResult);
+            DatasetFileUtils.copyNiftiFilesForURLs(pathURLs, zipOutputStream, dataset, subjectName, true, processingFilePath);
+            // Manage errors here
+        } else if (Objects.equals("dcm", format)) {
+            // DOWNLOAD DICOM
+            DatasetFileUtils.getDatasetFilePathURLs(dataset, pathURLs, DatasetExpressionFormat.DICOM, downloadResult);
+            List<String> files = downloader.downloadDicomFilesForURLsAsZip(pathURLs, zipOutputStream, subjectName, dataset, processingFilePath + "/" + shapeForPath(dataset.getName()), downloadResult);
+            if (withManifest) {
+                filesByAcquisitionId.putIfAbsent(dataset.getDatasetAcquisition().getId(), new ArrayList<>());
+                filesByAcquisitionId.get(dataset.getDatasetAcquisition().getId()).addAll(files);
+            }
+        } else if (Objects.equals("nii", format)) {
+            // Check if we have a specific converter -> nifti reconversion
+            if (converterId != null) {
+                reconvertToNifti(format, converterId, dataset, pathURLs, downloadResult, subjectName, zipOutputStream);
+            } else {
+                // Check that we have an existing nifti, otherwise reconvert using dcm2niix by default.
+                DatasetFileUtils.getDatasetFilePathURLs(dataset, pathURLs, DatasetExpressionFormat.NIFTI_SINGLE_FILE, downloadResult);
+                if (!pathURLs.isEmpty()) {
+                    DatasetFileUtils.copyNiftiFilesForURLs(pathURLs, zipOutputStream, dataset, subjectName, false, processingFilePath + "/" + shapeForPath(dataset.getName()));
+                } else {
+                    // Reconvert using dcm2niix by default.
+                    reconvertToNifti(format, DEFAULT_NIFTI_CONVERTER_ID, dataset, pathURLs, downloadResult, subjectName, zipOutputStream);
+                }
+            }
+        } else {
+            downloadResult.update("Dataset format was not adapted to the chosen download format", DatasetDownloadError.ERROR);
+        }
+
+        if (downloadResult.getStatus() == null) {
+            downloadResults.remove(dataset.getId());
+        }
+    }
+
+    public void massiveDownloadByExamination(List<Examination> examinationList, boolean resultOnly, String format, HttpServletResponse response, boolean withManifest, Long converterId) throws RestServiceException {
+        List<Long> processingIdsList = datasetProcessingRepository.findAllIdsByExaminationIds(examinationList.stream().map(Examination::getId).toList());
+        List<DatasetProcessing> processingList = datasetProcessingService.findAllById(processingIdsList).stream().filter(it -> Objects.equals(it.getDatasetProcessingType(), DatasetProcessingType.SEGMENTATION)).toList();
+        massiveDownload(processingList, resultOnly, format, response, withManifest, converterId);
+    }
+
     private void manageResultOnly(List<DatasetProcessing> processingList, boolean resultOnly) {
         if(resultOnly){
             processingList.forEach(it -> {it.setOutputDatasets(it.getOutputDatasets().stream().filter(file -> Objects.equals(file.getName(), "result.yaml")).toList()); it.setInputDatasets(new ArrayList<>());});
@@ -153,14 +175,6 @@ private String getProcessingSubject(DatasetProcessing processing) {
         return "noSubject";
     }
 
-    private void checkSize(List<Dataset> inputs, List<Dataset> outputs) throws RestServiceException {
-        int size = inputs.size() + outputs.size();
-        if (size > DATASET_LIMIT) {
-            throw new RestServiceException(
-                    new ErrorModel(HttpStatus.FORBIDDEN.value(), "This processing has " + size + " datasets. You can't download more than " + DATASET_LIMIT + " datasets." ));
-        }
-    }
-
     private Pair getExaminationDatas(List<Dataset> inputs) {
         Examination exam = null;
         for (Dataset dataset : inputs){
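End to end, the examination-driven path resolves Examinations in the controller, maps them to processing ids through the native query, loads the processings, keeps only the SEGMENTATION ones, and reuses the existing zip pipeline. Condensed into one sequence (names as in the diff, ids hypothetical; note that with checkSize and DATASET_LIMIT removed, no dataset-count cap applies to this path):

    List<Long> examIds = List.of(101L, 102L);
    List<Long> processingIds = datasetProcessingRepository.findAllIdsByExaminationIds(examIds);
    List<DatasetProcessing> toZip = datasetProcessingService.findAllById(processingIds).stream()
            .filter(p -> Objects.equals(p.getDatasetProcessingType(), DatasetProcessingType.SEGMENTATION)) // null-safe enum comparison
            .toList();
    // same zip pipeline as the per-processing endpoint
    massiveDownload(toZip, resultOnly, "dcm", response, false, null);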