diff --git a/README.md b/README.md
index c0b317d..6941dad 100644
--- a/README.md
+++ b/README.md
@@ -27,14 +27,14 @@ The CLI provides options to specify a reference model and validate a DICOM file.
 
 Command Syntax
 
 ```bash
-dcm-check --ref <reference> --type <type> --scan <scan_type> --in <dicom_file> [--fields <fields>] [--out <output>]
+dcm-check --ref <reference> --type <type> --acquisition <acquisition> --in <dicom_file> [--fields <fields>] [--out <output>]
 ```
 
 **Arguments:**
 - `--ref`: Path to the reference file (JSON, DICOM, or Python module).
 - `--type`: Type of reference model (json, dicom, or pydantic).
-- `--scan`: Scan type (e.g., T1, T2w, etc.) when using JSON or Pydantic references; inferred if not given.
+- `--acquisition`: Acquisition type (e.g., T1, T2w, etc.) when using JSON or Pydantic references; inferred if not given.
 - `--in`: Path to the DICOM file to validate.
 - `--fields`: (Optional) List of specific DICOM fields to include in validation for DICOM reference types.
 - `--out`: (Optional) Path to save the compliance report as a JSON file.
@@ -44,7 +44,7 @@
 Validate a DICOM file using a JSON reference model:
 
 ```bash
-dcm-check --ref reference.json --scan T1 --in dicom_file.dcm
+dcm-check --ref reference.json --acquisition T1 --in dicom_file.dcm
 ```
 
 Validate a DICOM file using another DICOM as a reference:
@@ -56,7 +56,7 @@ dcm-check --ref reference.dcm --in dicom_file.dcm --fields EchoTime RepetitionTi
 Validate a DICOM file using a Pydantic model in a Python module:
 
 ```bash
-dcm-check --ref reference.py --scan T1_MPR --in dicom_file.dcm
+dcm-check --ref reference.py --acquisition T1_MPR --in dicom_file.dcm
 ```
 
 **Output**
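For orientation, this is the shape of a JSON reference file under the renamed schema: the former `groups` key is now `series`. A sketch that writes one out (values mirror the updated `dcm_check/tests/ref_json.json` and are illustrative):

```python
import json

# Minimal reference under the renamed schema: "groups" -> "series".
reference = {
    "acquisitions": {
        "T1": {
            "fields": [
                {"field": "EchoTime", "value": 3.0, "tolerance": 0.1},
                {"field": "RepetitionTime", "value": 8.0},
                {"field": "SeriesDescription", "value": "*T1*"},
            ],
            "series": [
                {"name": "Series 1", "fields": [{"field": "ImageType", "contains": "M"}]},
            ],
        }
    }
}

with open("reference.json", "w") as f:
    json.dump(reference, f, indent=2)
```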
diff --git a/dcm_check/cli.py b/dcm_check/cli.py
index 4071bc0..8fe34e4 100644
--- a/dcm_check/cli.py
+++ b/dcm_check/cli.py
@@ -32,8 +32,8 @@ def main():
     parser = argparse.ArgumentParser(description="Check DICOM compliance against a reference model.")
     parser.add_argument("--ref", required=True, help="Reference JSON file, DICOM file, or Python module to use for compliance.")
     parser.add_argument("--type", choices=["json", "dicom", "pydantic"], help="Reference type: 'json', 'dicom', or 'pydantic'.")
-    parser.add_argument("--scan", required=False, help="Scan type when using a JSON or Pydantic reference.")
-    parser.add_argument("--group", required=False, help="Specific group name within the acquisition for JSON references.")
+    parser.add_argument("--acquisition", required=False, help="Acquisition name when using a JSON or Pydantic reference.")
+    parser.add_argument("--series", required=False, help="Specific series name within the acquisition for JSON references.")
     parser.add_argument("--in", dest="in_file", required=True, help="Path to the DICOM file to check.")
     parser.add_argument("--fields", nargs="*", help="Optional: List of DICOM fields to include in validation for DICOM reference.")
     parser.add_argument("--out", required=False, help="Path to save the compliance report in JSON format.")
@@ -43,34 +43,34 @@ def main():
     ref_type = args.type or infer_type_from_extension(args.ref)
 
     if ref_type == "json":
-        # Include group if specified
-        if args.group:
-            reference_model = load_ref_json(args.ref, args.scan, group_name=args.group)
+        # Include series if specified
+        if args.series:
+            reference_model = load_ref_json(args.ref, args.acquisition, series_name=args.series)
         else:
-            reference_model = load_ref_json(args.ref, args.scan)
+            reference_model = load_ref_json(args.ref, args.acquisition)
     elif ref_type == "dicom":
         ref_dicom_values = load_dicom(args.ref)
         reference_model = load_ref_dicom(ref_dicom_values, args.fields)
     elif ref_type == "pydantic":
-        # check if scan is provided
-        if not args.scan:
-            print("Error: Scan type is required (--scan) when using a Pydantic reference.", file=sys.stderr)
+        # check if acquisition is provided
+        if not args.acquisition:
+            print("Error: Acquisition type is required (--acquisition) when using a Pydantic reference.", file=sys.stderr)
             sys.exit(1)
-        reference_model = load_ref_pydantic(args.ref, args.scan)
+        reference_model = load_ref_pydantic(args.ref, args.acquisition)
     else:
        print(f"Error: Unsupported reference type '{ref_type}'", file=sys.stderr)
        sys.exit(1)
 
     in_dicom_values = load_dicom(args.in_file)
-    results = get_compliance_summary(reference_model, in_dicom_values, args.scan, args.group)
+    results = get_compliance_summary(reference_model, in_dicom_values, args.acquisition, args.series)
 
     df = pd.DataFrame(results)
 
-    # remove "Acquisition" and/or "Group" columns if they are empty
+    # remove "Acquisition" and/or "Series" columns if they are empty
     if "Acquisition" in df.columns and df["Acquisition"].isnull().all():
         df.drop(columns=["Acquisition"], inplace=True)
-    if "Group" in df.columns and df["Group"].isnull().all():
-        df.drop(columns=["Group"], inplace=True)
+    if "Series" in df.columns and df["Series"].isnull().all():
+        df.drop(columns=["Series"], inplace=True)
 
     if len(df) == 0:
         print("DICOM file is compliant with the reference model.")
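The renamed flags in practice. A sketch that shells out to the CLI the same way the updated tests do (assumes `dcm-check` is on `PATH`; file names are placeholders):

```python
import subprocess

# Old invocation: --scan T1 --group "Group 1"
# New invocation: --acquisition T1 --series "Series 1"
result = subprocess.run(
    ["dcm-check", "--ref", "reference.json", "--type", "json",
     "--acquisition", "T1", "--series", "Series 1",
     "--in", "dicom_file.dcm"],
    capture_output=True, text=True,
)
# Prints either the compliant message or a tabulated report of mismatches
print(result.stdout)
```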
diff --git a/dcm_check/dcm_check.py b/dcm_check/dcm_check.py
index 6dad906..05811d7 100644
--- a/dcm_check/dcm_check.py
+++ b/dcm_check/dcm_check.py
@@ -103,13 +103,13 @@ def contains_check(cls, v):
     # Create model with dynamically added validators
     return create_model("ReferenceModel", **model_fields, __validators__=validators)
 
-def load_ref_json(json_path: str, scan_type: str, group_name: Optional[str] = None) -> BaseModel:
-    """Load a JSON configuration file and create a reference model for a specified scan type and group.
+def load_ref_json(json_path: str, acquisition: str, series_name: Optional[str] = None) -> BaseModel:
+    """Load a JSON configuration file and create a reference model for a specified acquisition and series.
 
     Args:
         json_path (str): Path to the JSON configuration file.
-        scan_type (str): Acquisition scan type to load (e.g., "T1").
-        group_name (Optional[str]): Specific group name to validate within the acquisition.
+        acquisition (str): Acquisition to load (e.g., "T1").
+        series_name (Optional[str]): Specific series name to validate within the acquisition.
 
     Returns:
         reference_model (BaseModel): A Pydantic model based on the JSON configuration.
@@ -118,9 +118,9 @@ def load_ref_json(json_path: str, scan_type: str, group_name: Optional[str] = No
         config = json.load(f)
 
     # Load acquisition configuration
-    acquisition_config = config.get("acquisitions", {}).get(scan_type)
+    acquisition_config = config.get("acquisitions", {}).get(acquisition)
     if not acquisition_config:
-        raise ValueError(f"Scan type '{scan_type}' not found in JSON configuration.")
+        raise ValueError(f"Acquisition '{acquisition}' not found in JSON configuration.")
 
     # Load the reference DICOM if specified
     ref_file = acquisition_config.get("ref", None)
@@ -133,19 +133,19 @@ def load_ref_json(json_path: str, scan_type: str, group_name: Optional[str] = No
     # Always include acquisition-level fields
     reference_values.update(acquisition_reference)
 
-    # Check if a group_name is specified and retrieve its configuration
-    group_fields = []
-    if group_name:
-        group = next((grp for grp in acquisition_config.get("groups", []) if grp["name"] == group_name), None)
-        if not group:
-            raise ValueError(f"Group '{group_name}' not found in acquisition '{scan_type}'.")
+    # Check if a series_name is specified and retrieve its configuration
+    series_fields = []
+    if series_name:
+        series = next((s for s in acquisition_config.get("series", []) if s["name"] == series_name), None)
+        if not series:
+            raise ValueError(f"Series '{series_name}' not found in acquisition '{acquisition}'.")
 
-        group_fields = group.get("fields", [])
-        group_reference = {field["field"]: field.get("value") for field in group_fields if "value" in field}
-        reference_values.update(group_reference)
+        series_fields = series.get("fields", [])
+        series_reference = {field["field"]: field.get("value") for field in series_fields if "value" in field}
+        reference_values.update(series_reference)
 
-    # Combine acquisition and group fields for the reference model creation
-    combined_fields_config = fields_config + group_fields
+    # Combine acquisition and series fields for the reference model creation
+    combined_fields_config = fields_config + series_fields
 
     return create_reference_model(reference_values, combined_fields_config)
@@ -165,34 +165,34 @@ def load_ref_dicom(dicom_values: Dict[str, Any], fields: Optional[List[str]] = N
     fields_config = [{"field": field} for field in fields] if fields else [{"field": key} for key in dicom_values]
     return create_reference_model(dicom_values, fields_config)
 
-def load_ref_pydantic(module_path: str, scan_type: str) -> BaseModel:
-    """Load a Pydantic model from a specified Python file for the given scan type.
+def load_ref_pydantic(module_path: str, acquisition: str) -> BaseModel:
+    """Load a Pydantic model from a specified Python file for the given acquisition.
 
     Args:
-        module_path (str): Path to the Python file containing the scan models.
-        scan_type (str): The scan type to retrieve (e.g., "T1_MPR").
+        module_path (str): Path to the Python file containing the acquisition models.
+        acquisition (str): The acquisition to retrieve (e.g., "T1_MPR").
 
     Returns:
-        reference_model (BaseModel): The Pydantic model for the specified scan type.
+        reference_model (BaseModel): The Pydantic model for the specified acquisition.
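Equivalent usage through the Python API: `series_name` replaces the old `group_name` keyword. A sketch with placeholder paths:

```python
from dcm_check.dcm_check import load_dicom, load_ref_json, get_compliance_summary

# Build the reference model for one acquisition/series pair
reference_model = load_ref_json("reference.json", acquisition="T1", series_name="Series 1")

# Compare a DICOM file against it; an empty summary means the file is compliant
dicom_values = load_dicom("dicom_file.dcm")
summary = get_compliance_summary(reference_model, dicom_values,
                                 acquisition="T1", series="Series 1")
print(summary)
```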
""" # Load the module from the specified file path spec = importlib.util.spec_from_file_location("ref_module", module_path) ref_module = importlib.util.module_from_spec(spec) spec.loader.exec_module(ref_module) - # Retrieve SCAN_MODELS from the loaded module - scan_models: Dict[str, Any] = getattr(ref_module, "SCAN_MODELS", None) - if not scan_models: - raise ValueError(f"No SCAN_MODELS found in the module '{module_path}'.") + # Retrieve ACQUISITION_MODELS from the loaded module + acquisition_models: Dict[str, Any] = getattr(ref_module, "ACQUISITION_MODELS", None) + if not acquisition_models: + raise ValueError(f"No ACQUISITION_MODELS found in the module '{module_path}'.") - # Retrieve the specific model for the given scan type - reference_model = scan_models.get(scan_type) + # Retrieve the specific model for the given acquisition + reference_model = acquisition_models.get(acquisition) if not reference_model: - raise ValueError(f"Scan type '{scan_type}' is not defined in SCAN_MODELS.") + raise ValueError(f"Acquisition '{acquisition}' is not defined in ACQUISITION_MODELS.") return reference_model -def get_compliance_summary(reference_model: BaseModel, dicom_values: Dict[str, Any], acquisition: str = None, group: str = None, raise_errors: bool = False) -> List[Dict[str, Any]]: +def get_compliance_summary(reference_model: BaseModel, dicom_values: Dict[str, Any], acquisition: str = None, series: str = None, raise_errors: bool = False) -> List[Dict[str, Any]]: """Validate a DICOM file against the reference model.""" compliance_summary = [] @@ -209,7 +209,7 @@ def get_compliance_summary(reference_model: BaseModel, dicom_values: Dict[str, A actual = dicom_values.get(param, "N/A") if param != "Model-Level Error" else "N/A" compliance_summary.append({ "Acquisition": acquisition, - "Group": group, + "Series": series, "Parameter": param, "Value": actual, "Expected": expected diff --git a/dcm_check/generate_json_ref.py b/dcm_check/generate_json_ref.py index d829f10..b65e84a 100755 --- a/dcm_check/generate_json_ref.py +++ b/dcm_check/generate_json_ref.py @@ -90,7 +90,7 @@ def generate_json_ref(in_session_dir, out_json_ref, acquisition_fields, referenc ] if group_fields: groups.append({ - "name": f"Group {group_number}", # Assign default name + "name": f"Series {group_number}", # Assign default name "fields": group_fields, "ref": ref_path }) @@ -120,7 +120,7 @@ def generate_json_ref(in_session_dir, out_json_ref, acquisition_fields, referenc acquisitions[final_series_name] = { "ref": unique_row['dicom_path'], "fields": acquisition_fields_list, - "groups": groups + "series": groups } else: # No changing groups, so we store only the acquisition-level fields diff --git a/dcm_check/read_session.py b/dcm_check/read_session.py index b7e9102..259f8a5 100644 --- a/dcm_check/read_session.py +++ b/dcm_check/read_session.py @@ -70,16 +70,16 @@ def find_closest_matches(session_df, acquisitions_info): acq_name = acq_info["name"] acq_diff_score = calculate_total_difference(acq_info, row) - if not acq_info["groups"]: # Acquisitions without groups (assign group as None) + if not acq_info["series"]: # Acquisitions without groups (assign group as None) row_costs.append(acq_diff_score) row_assignments.append((i, acq_name, None, acq_diff_score)) else: - for group in acq_info["groups"]: - group_name = group["name"] - group_diff_score = calculate_total_difference(group, row) - total_score = acq_diff_score + group_diff_score + for series in acq_info["series"]: + series_name = series["name"] + series_diff_score = 
diff --git a/dcm_check/read_session.py b/dcm_check/read_session.py
index b7e9102..259f8a5 100644
--- a/dcm_check/read_session.py
+++ b/dcm_check/read_session.py
@@ -70,16 +70,16 @@ def find_closest_matches(session_df, acquisitions_info):
         acq_name = acq_info["name"]
         acq_diff_score = calculate_total_difference(acq_info, row)
 
-        if not acq_info["groups"]:  # Acquisitions without groups (assign group as None)
+        if not acq_info["series"]:  # Acquisitions without series (assign series as None)
             row_costs.append(acq_diff_score)
             row_assignments.append((i, acq_name, None, acq_diff_score))
         else:
-            for group in acq_info["groups"]:
-                group_name = group["name"]
-                group_diff_score = calculate_total_difference(group, row)
-                total_score = acq_diff_score + group_diff_score
+            for series in acq_info["series"]:
+                series_name = series["name"]
+                series_diff_score = calculate_total_difference(series, row)
+                total_score = acq_diff_score + series_diff_score
                 row_costs.append(total_score)
-                row_assignments.append((i, acq_name, group_name, total_score))
+                row_assignments.append((i, acq_name, series_name, total_score))
 
         cost_matrix.append(row_costs)
         possible_assignments.append(row_assignments)
@@ -88,16 +88,16 @@ def find_closest_matches(session_df, acquisitions_info):
     row_indices, col_indices = linear_sum_assignment(cost_matrix)
 
     best_acquisitions = [None] * len(session_df)
-    best_groups = [None] * len(session_df)
+    best_series = [None] * len(session_df)
     best_scores = [None] * len(session_df)  # Use NaN for unmatched scores
 
     for row_idx, col_idx in zip(row_indices, col_indices):
-        _, acq_name, group_name, score = possible_assignments[row_idx][col_idx]
+        _, acq_name, series_name, score = possible_assignments[row_idx][col_idx]
         best_acquisitions[row_idx] = acq_name
-        best_groups[row_idx] = group_name
+        best_series[row_idx] = series_name
         best_scores[row_idx] = score if acq_name else None  # Only assign score if acquisition is matched
 
-    return best_acquisitions, best_groups, best_scores
+    return best_acquisitions, best_series, best_scores
 
 
 def read_session(reference_json, session_dir):
@@ -110,21 +110,21 @@ def read_session(reference_json, session_dir):
             "fields": {field["field"]: field.get("value", field.get("contains")) for field in acquisition.get("fields", [])},
             "tolerance": {field["field"]: field["tolerance"] for field in acquisition.get("fields", []) if "tolerance" in field},
             "contains": {field["field"]: field["contains"] for field in acquisition.get("fields", []) if "contains" in field},
-            "groups": [
+            "series": [
                 {
-                    "name": group["name"],
-                    "fields": {field["field"]: field.get("value", field.get("contains")) for field in group.get("fields", [])},
-                    "tolerance": {field["field"]: field["tolerance"] for field in group.get("fields", []) if "tolerance" in field},
-                    "contains": {field["field"]: field["contains"] for field in group.get("fields", []) if "contains" in field}
+                    "name": series["name"],
+                    "fields": {field["field"]: field.get("value", field.get("contains")) for field in series.get("fields", [])},
+                    "tolerance": {field["field"]: field["tolerance"] for field in series.get("fields", []) if "tolerance" in field},
+                    "contains": {field["field"]: field["contains"] for field in series.get("fields", []) if "contains" in field}
                 }
-                for group in acquisition.get("groups", [])
+                for series in acquisition.get("series", [])
             ]
         }
         for acq_name, acquisition in reference_data.get("acquisitions", {}).items()
     ]
 
     all_fields = {field for acq in acquisitions_info for field in acq["fields"].keys()}
-    all_fields.update({field for acq in acquisitions_info for group in acq["groups"] for field in group["fields"].keys()})
+    all_fields.update({field for acq in acquisitions_info for series in acq["series"] for field in series["fields"].keys()})
 
     session_data = []
     for root, _, files in os.walk(session_dir):
@@ -160,18 +160,18 @@ def read_session(reference_json, session_dir):
         .reset_index()
     )
 
-    acquisitions, groups, scores = find_closest_matches(series_count_df, acquisitions_info)
+    acquisitions, series, scores = find_closest_matches(series_count_df, acquisitions_info)
     series_count_df["Acquisition"] = acquisitions
-    series_count_df["Group"] = groups
+    series_count_df["Series"] = series
     series_count_df["Match_Score"] = scores
 
-    series_count_df.sort_values(["Acquisition", "Group", "Match_Score"], inplace=True)
+    series_count_df.sort_values(["Acquisition", "Series", "Match_Score"], inplace=True)
 
     return series_count_df
 
 
 def main():
-    parser = argparse.ArgumentParser(description="Map a DICOM session directory to a JSON reference file and print the closest acquisition and group matches.")
+    parser = argparse.ArgumentParser(description="Map a DICOM session directory to a JSON reference file and print the closest acquisition and series matches.")
     parser.add_argument("--ref", required=True, help="Path to the reference JSON file.")
     parser.add_argument("--session_dir", required=True, help="Directory containing DICOM files for the session.")
     args = parser.parse_args()
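The session reader now returns a `Series` column in place of `Group`. A usage sketch (the import path and directory are assumptions based on the module layout above):

```python
from dcm_check.read_session import read_session

# One row per matched series, sorted by the renamed columns
session_df = read_session("reference.json", "/path/to/session")
print(session_df[["Acquisition", "Series", "Match_Score"]])
```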
diff --git a/dcm_check/session_check.py b/dcm_check/session_check.py
index c70fed7..f689de3 100644
--- a/dcm_check/session_check.py
+++ b/dcm_check/session_check.py
@@ -18,49 +18,48 @@ def get_compliance_summaries_json(json_ref: str, in_session: str, output_json: s
     Returns:
         pd.DataFrame: Compliance summary DataFrame.
     """
-    # Step 1: Identify matched acquisitions and groups in the session
+    # Step 1: Identify matched acquisitions and series in the session
     session_df = read_session(json_ref, in_session)
     grouped_compliance = {}
 
-    # Step 2: Iterate over each matched acquisition-group pair
+    # Step 2: Iterate over each matched acquisition-series pair
     for _, row in session_df.dropna(subset=["Acquisition"]).iterrows():
         acquisition = row["Acquisition"]
-        group = row["Group"]
+        series = row["Series"]
         first_dicom_path = row["First_DICOM"]
 
         try:
-            # Step 3: Load the reference model for the matched acquisition and group
-            reference_model = load_ref_json(json_ref, acquisition, group)
+            # Step 3: Load the reference model for the matched acquisition and series
+            reference_model = load_ref_json(json_ref, acquisition, series)
 
             # Step 4: Load DICOM values for the first DICOM in the series
             dicom_values = load_dicom(first_dicom_path)
 
             # Step 5: Run compliance check and gather results
-            compliance_summary = get_compliance_summary(reference_model, dicom_values, acquisition, group)
-            print(compliance_summary)
+            compliance_summary = get_compliance_summary(reference_model, dicom_values, acquisition, series)
 
             # Organize results in nested format without "Model_Name"
             if acquisition not in grouped_compliance:
-                grouped_compliance[acquisition] = {"Acquisition": acquisition, "Groups": []}
+                grouped_compliance[acquisition] = {"Acquisition": acquisition, "Series": []}
 
-            if group:
-                group_entry = next((g for g in grouped_compliance[acquisition]["Groups"] if g["Name"] == group), None)
-                if not group_entry:
-                    group_entry = {"Name": group, "Parameters": []}
-                    grouped_compliance[acquisition]["Groups"].append(group_entry)
+            if series:
+                series_entry = next((g for g in grouped_compliance[acquisition]["Series"] if g["Name"] == series), None)
+                if not series_entry:
+                    series_entry = {"Name": series, "Parameters": []}
+                    grouped_compliance[acquisition]["Series"].append(series_entry)
                 for entry in compliance_summary:
                     entry.pop("Acquisition", None)
-                    entry.pop("Group", None)
-                group_entry["Parameters"].extend(compliance_summary)
+                    entry.pop("Series", None)
+                series_entry["Parameters"].extend(compliance_summary)
             else:
-                # If no group, add parameters directly under acquisition
+                # If no series, add parameters directly under acquisition
                 for entry in compliance_summary:
                     entry.pop("Acquisition", None)
-                    entry.pop("Group", None)
+                    entry.pop("Series", None)
                 grouped_compliance[acquisition]["Parameters"] = compliance_summary
 
         except Exception as e:
-            print(f"Error processing acquisition '{acquisition}' and group '{group}': {e}")
+            print(f"Error processing acquisition '{acquisition}' and series '{series}': {e}")
 
     # Convert the grouped data to a list for JSON serialization
     grouped_compliance_list = list(grouped_compliance.values())
@@ -72,14 +71,14 @@ def get_compliance_summaries_json(json_ref: str, in_session: str, output_json: s
 
     # Convert the compliance summary to a DataFrame for tabulated output
     compliance_df = pd.json_normalize(
         grouped_compliance_list,
-        record_path=["Groups", "Parameters"],
-        meta=["Acquisition", ["Groups", "Name"]],
+        record_path=["Series", "Parameters"],
+        meta=["Acquisition", ["Series", "Name"]],
         errors="ignore"
     )
 
-    # Rename "Groups.name" to "Group" and reorder columns
-    compliance_df.rename(columns={"Groups.Name": "Group"}, inplace=True)
-    compliance_df = compliance_df[["Acquisition", "Group", "Parameter", "Value", "Expected"]]
+    # Rename "Series.Name" to "Series" and reorder columns
+    compliance_df.rename(columns={"Series.Name": "Series"}, inplace=True)
+    compliance_df = compliance_df[["Acquisition", "Series", "Parameter", "Value", "Expected"]]
 
     return compliance_df
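End-to-end session compliance with the renamed column set. A sketch of the updated entry point (placeholder paths; arguments as they appear in the signature above):

```python
from dcm_check.session_check import get_compliance_summaries_json

compliance_df = get_compliance_summaries_json(
    "reference.json", "/path/to/session", output_json="compliance_report.json"
)
# Columns after the rename:
print(compliance_df[["Acquisition", "Series", "Parameter", "Value", "Expected"]])
```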
diff --git a/dcm_check/tests/ref_json.json b/dcm_check/tests/ref_json.json
index 6863b9c..bbb7a10 100644
--- a/dcm_check/tests/ref_json.json
+++ b/dcm_check/tests/ref_json.json
@@ -6,9 +6,9 @@
         {"field": "RepetitionTime", "value": 8.0},
         {"field": "SeriesDescription", "value": "*T1*"}
       ],
-      "groups": [
+      "series": [
         {
-          "name": "Group 1",
+          "name": "Series 1",
           "fields": [
             {"field": "ImageType", "contains": "M"}
           ]
diff --git a/dcm_check/tests/ref_pydantic.py b/dcm_check/tests/ref_pydantic.py
index 05bcc14..8889584 100644
--- a/dcm_check/tests/ref_pydantic.py
+++ b/dcm_check/tests/ref_pydantic.py
@@ -75,8 +75,8 @@ def validate_pixel_spacing(cls, v):
         raise ValueError("PixelSpacing must have exactly 2 values")
     return v
 
-# Dictionary to map scan types to their respective config models (without instantiation)
-SCAN_MODELS = {
+# Dictionary to map acquisitions to their respective config models (without instantiation)
+ACQUISITION_MODELS = {
     "T1_MPR": T1_MPR_Config,
     "T2w_SPC": T2w_SPC_Config,
     "Diff_1k": DiffusionConfig,
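A Pydantic reference module now registers its models under `ACQUISITION_MODELS`, which `load_ref_pydantic` looks up by acquisition name. A minimal sketch; the config model here is a stripped-down stand-in for the real ones in `ref_pydantic.py`:

```python
from pydantic import BaseModel

class T1_MPR_Config(BaseModel):
    # Stand-in for the real config model; the actual models carry
    # validators and more fields
    RepetitionTime: float
    EchoTime: float

# load_ref_pydantic(module_path, acquisition) reads this dictionary via getattr
ACQUISITION_MODELS = {
    "T1_MPR": T1_MPR_Config,
}
```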
diff --git a/dcm_check/tests/test_cli.py b/dcm_check/tests/test_cli.py
index 5f83362..7d0f8a5 100644
--- a/dcm_check/tests/test_cli.py
+++ b/dcm_check/tests/test_cli.py
@@ -16,13 +16,13 @@ COMPLIANT_MESSAGE = "DICOM file is compliant with the reference model."
 SAVED_MESSAGE = "Compliance report saved to compliance_output.json"
 
-def test_cli_json_reference_with_group():
+def test_cli_json_reference_with_series():
     command = [
         CLI_SCRIPT,
         "--ref", JSON_REF,
         "--type", "json",
-        "--scan", "T1",
-        "--group", "Group 1",
+        "--acquisition", "T1",
+        "--series", "Series 1",
         "--in", DICOM_FILE
     ]
@@ -35,8 +35,8 @@ def test_cli_json_reference_with_group():
 
     assert expected_output in result.stdout
 
-def test_cli_json_reference_compliant_no_group():
-    command = [CLI_SCRIPT, "--ref", JSON_REF, "--type", "json", "--scan", "T1", "--in", DICOM_FILE]
+def test_cli_json_reference_compliant_no_series():
+    command = [CLI_SCRIPT, "--ref", JSON_REF, "--type", "json", "--acquisition", "T1", "--in", DICOM_FILE]
 
     print(f"Running command: {' '.join(command)}")
     result = subprocess.run(command, capture_output=True, text=True)
@@ -44,13 +44,13 @@ def test_cli_json_reference_compliant_no_group():
     assert result.returncode == 0
     assert COMPLIANT_MESSAGE in result.stdout
 
-def test_cli_output_file_compliant_with_group():
+def test_cli_output_file_compliant_with_series():
     command = [
         CLI_SCRIPT,
         "--ref", JSON_REF,
         "--type", "json",
-        "--scan", "T1",
-        "--group", "Group 1",
+        "--acquisition", "T1",
+        "--series", "Series 1",
         "--in", DICOM_FILE,
         "--out", OUTPUT_JSON
     ]
@@ -71,7 +71,7 @@ def test_cli_output_file_compliant_with_group():
 
     os.remove(OUTPUT_JSON)
 
-def test_cli_output_file_not_compliant_with_group():
+def test_cli_output_file_not_compliant_with_series():
     # Modify the DICOM file to make it non-compliant
     dicom = pydicom.dcmread(DICOM_FILE)
     dicom.ImageType = ["ORIGINAL", "PRIMARY", "P", "N"]
@@ -82,8 +82,8 @@ def test_cli_output_file_not_compliant_with_group():
         CLI_SCRIPT,
         "--ref", JSON_REF,
         "--type", "json",
-        "--scan", "T1",
-        "--group", "Group 1",
+        "--acquisition", "T1",
+        "--series", "Series 1",
         "--in", non_compliant_dicom,
         "--out", OUTPUT_JSON
     ]
@@ -93,7 +93,7 @@ def test_cli_output_file_not_compliant_with_group():
     expected_output = tabulate(pd.DataFrame({
         # as above
         "Acquisition": ["T1"],
-        "Group": ["Group 1"],
+        "Series": ["Series 1"],
         "Parameter": ["ImageType"],
         "Value": [['ORIGINAL', 'PRIMARY', 'P', 'N']],
         "Expected": ["Value error, ImageType must contain 'M'"]
@@ -116,7 +116,7 @@ def test_cli_output_file_not_compliant_with_group():
     os.remove(non_compliant_dicom)
 
 def test_cli_json_reference():
-    command = [CLI_SCRIPT, "--ref", JSON_REF, "--type", "json", "--scan", "T1", "--in", DICOM_FILE]
+    command = [CLI_SCRIPT, "--ref", JSON_REF, "--type", "json", "--acquisition", "T1", "--in", DICOM_FILE]
 
     print(f"Running command: {' '.join(command)}")
     result = subprocess.run(command, capture_output=True, text=True)
@@ -125,7 +125,7 @@ def test_cli_json_reference():
     assert COMPLIANT_MESSAGE in result.stdout
 
 def test_cli_json_reference_inferred_type():
-    command = [CLI_SCRIPT, "--ref", JSON_REF, "--scan", "T1", "--in", DICOM_FILE]
+    command = [CLI_SCRIPT, "--ref", JSON_REF, "--acquisition", "T1", "--in", DICOM_FILE]
 
     print(f"Running command: {' '.join(command)}")
     result = subprocess.run(command, capture_output=True, text=True)
@@ -172,7 +172,7 @@ def test_cli_dicom_reference_non_compliant():
     assert expected_output in result.stdout
 
 def test_cli_pydantic_reference():
-    command = [CLI_SCRIPT, "--ref", PYDANTIC_REF, "--type", "pydantic", "--scan", "T1_MPR", "--in", DICOM_FILE]
+    command = [CLI_SCRIPT, "--ref", PYDANTIC_REF, "--type", "pydantic", "--acquisition", "T1_MPR", "--in", DICOM_FILE]
 
     print(f"Running command: {' '.join(command)}")
     result = subprocess.run(command, capture_output=True, text=True)
@@ -187,7 +187,7 @@ def test_cli_pydantic_reference():
     assert expected_output in result.stdout  # Validate that output includes compliance info
 
 def test_cli_pydantic_reference_inferred_type():
-    command = [CLI_SCRIPT, "--ref", PYDANTIC_REF, "--scan", "T1_MPR", "--in", DICOM_FILE]
+    command = [CLI_SCRIPT, "--ref", PYDANTIC_REF, "--acquisition", "T1_MPR", "--in", DICOM_FILE]
 
     print(f"Running command: {' '.join(command)}")
     result = subprocess.run(command, capture_output=True, text=True)
@@ -201,11 +201,11 @@ def test_cli_pydantic_reference_inferred_type():
     assert result.returncode == 0
     assert expected_output in result.stdout  # Validate that output includes compliance info
 
-@pytest.mark.parametrize("ref_type,scan", [("json", "T1"), ("pydantic", "T1_MPR"), ("dicom", DICOM_FILE)])
-def test_cli_output_file_creation(ref_type, scan):
+@pytest.mark.parametrize("ref_type,acquisition", [("json", "T1"), ("pydantic", "T1_MPR"), ("dicom", DICOM_FILE)])
+def test_cli_output_file_creation(ref_type, acquisition):
     ref_path = JSON_REF if ref_type == "json" else PYDANTIC_REF if ref_type == "pydantic" else DICOM_FILE
     subprocess.run(
-        [CLI_SCRIPT, "--ref", ref_path, "--type", ref_type, "--scan", scan, "--in", DICOM_FILE, "--out", OUTPUT_JSON],
+        [CLI_SCRIPT, "--ref", ref_path, "--type", ref_type, "--acquisition", acquisition, "--in", DICOM_FILE, "--out", OUTPUT_JSON],
         check=True
     )
     assert os.path.isfile(OUTPUT_JSON)
diff --git a/dcm_check/tests/test_ref_json.py b/dcm_check/tests/test_ref_json.py
index 784d80d..931acad 100644
--- a/dcm_check/tests/test_ref_json.py
+++ b/dcm_check/tests/test_ref_json.py
@@ -33,9 +33,9 @@ def json_ref_no_dcm(tmp_path_factory):
             {"field": "RepetitionTime", "value": 8.0},
             {"field": "SeriesDescription", "value": "*T1*"}
         ],
-        "groups": [
+        "series": [
             {
-                "name": "Group 1",
+                "name": "Series 1",
                 "fields": [
                     {"field": "ImageType", "contains": "M"}
                 ]
@@ -63,9 +63,9 @@ def json_ref_with_dcm(tmp_path_factory, dicom_test_file):
             {"field": "RepetitionTime"},
             {"field": "SeriesDescription"}
         ],
-        "groups": [
+        "series": [
             {
-                "name": "Group 1",
+                "name": "Series 1",
                 "fields": [
                     {"field": "ImageType", "contains": "M"}
                 ]
@@ -83,7 +83,7 @@ def json_ref_with_dcm(tmp_path_factory, dicom_test_file):
 
 def test_load_ref_json(json_ref_no_dcm):
     """Test that JSON configuration can be loaded and generates a reference model."""
-    reference_model = dcm_check.load_ref_json(json_path=json_ref_no_dcm, scan_type="T1", group_name="Group 1")
+    reference_model = dcm_check.load_ref_json(json_path=json_ref_no_dcm, acquisition="T1", series_name="Series 1")
 
     # Verify that the model was created correctly with exact and pattern matching fields
     assert reference_model is not None
@@ -105,9 +105,9 @@ def test_load_ref_json(json_ref_no_dcm):
     assert reference_model.model_fields["SeriesDescription"].metadata[0].pattern == ".*T1.*"
 
 def test_json_compliance_within_tolerance_with_dcm(json_ref_with_dcm, dicom_test_file):
-    """Test compliance when values are within tolerance for JSON configuration with group."""
+    """Test compliance when values are within tolerance for JSON configuration with series."""
     t1_dicom_values = dcm_check.load_dicom(dicom_test_file)
-    reference_model = dcm_check.load_ref_json(json_path=json_ref_with_dcm, scan_type="T1", group_name="Group 1")
+    reference_model = dcm_check.load_ref_json(json_path=json_ref_with_dcm, acquisition="T1", series_name="Series 1")
 
     # Adjust EchoTime within tolerance (original value is 3.0, tolerance 0.1)
     t1_dicom_values["EchoTime"] = 3.05
@@ -117,9 +117,9 @@ def test_json_compliance_within_tolerance_with_dcm(json_ref_with_dcm, dicom_test
     assert len(compliance_summary) == 0
 
 def test_json_compliance_outside_tolerance_with_dcm(json_ref_with_dcm, dicom_test_file):
-    """Test compliance when values exceed tolerance for JSON configuration with group."""
+    """Test compliance when values exceed tolerance for JSON configuration with series."""
     t1_dicom_values = dcm_check.load_dicom(dicom_test_file)
-    reference_model = dcm_check.load_ref_json(json_path=json_ref_with_dcm, scan_type="T1", group_name="Group 1")
+    reference_model = dcm_check.load_ref_json(json_path=json_ref_with_dcm, acquisition="T1", series_name="Series 1")
 
     # Adjust EchoTime beyond tolerance (original value is 3.0, tolerance 0.1)
     t1_dicom_values["EchoTime"] = 3.2
@@ -130,9 +130,9 @@ def test_json_compliance_outside_tolerance_with_dcm(json_ref_with_dcm, dicom_tes
     assert compliance_summary[0]["Value"] == 3.2
 
 def test_json_compliance_pattern_match(json_ref_no_dcm, dicom_test_file):
-    """Test compliance with a pattern match for SeriesDescription within group."""
+    """Test compliance with a pattern match for SeriesDescription within series."""
     t1_dicom_values = dcm_check.load_dicom(dicom_test_file)
-    reference_model = dcm_check.load_ref_json(json_path=json_ref_no_dcm, scan_type="T1", group_name="Group 1")
+    reference_model = dcm_check.load_ref_json(json_path=json_ref_no_dcm, acquisition="T1", series_name="Series 1")
 
     # Change SeriesDescription to match pattern "*T1*"
     t1_dicom_values["SeriesDescription"] = "Another_T1_Sequence"
diff --git a/dcm_check/tests/test_ref_pydantic.py b/dcm_check/tests/test_ref_pydantic.py
index dced19b..44e10fe 100644
--- a/dcm_check/tests/test_ref_pydantic.py
+++ b/dcm_check/tests/test_ref_pydantic.py
@@ -1,6 +1,6 @@
 import pytest
 import dcm_check.dcm_check as dcm_check
-from dcm_check.tests.ref_pydantic import SCAN_MODELS  # Import the SCAN_MODELS dictionary from the ref_pydantic module
+from dcm_check.tests.ref_pydantic import ACQUISITION_MODELS  # Import the ACQUISITION_MODELS dictionary from the ref_pydantic module
 from pydantic import ValidationError
 
 @pytest.fixture
@@ -39,13 +39,13 @@ def test_load_ref_pydantic_models(t1_mpr_dicom_values):
 
 def test_load_ref_pydantic_invalid_module():
     """Test loading a Pydantic model from an invalid module."""
     module_path = "dcm_check/tests/ref_pydantic.py"
-    with pytest.raises(ValueError, match="Scan type 'FAKE' is not defined in SCAN_MODELS."):
+    with pytest.raises(ValueError, match="Acquisition 'FAKE' is not defined in ACQUISITION_MODELS."):
         dcm_check.load_ref_pydantic(module_path, "FAKE")
 
-def test_load_ref_pydantic_no_scan_models():
-    """Test loading a Pydantic model from a module without SCAN_MODELS."""
+def test_load_ref_pydantic_no_acquisition_models():
+    """Test loading a Pydantic model from a module without ACQUISITION_MODELS."""
     module_path = "dcm_check/tests/ref_pydantic_no_models.py"
-    with pytest.raises(ValueError, match="No SCAN_MODELS found in the module 'dcm_check/tests/ref_pydantic_no_models.py'."):
+    with pytest.raises(ValueError, match="No ACQUISITION_MODELS found in the module 'dcm_check/tests/ref_pydantic_no_models.py'."):
         dcm_check.load_ref_pydantic(module_path, "T1_MPR")
 
 def test_t1_mpr_compliance(t1_mpr_dicom_values):
@@ -104,7 +104,7 @@ def test_t1_mpr_repetition_vs_echo_rule(t1_mpr_dicom_values):
     assert compliance_summary[0]["Value"] == "N/A"
 
 def test_diffusion_config_compliance():
-    """Test DiffusionConfig compliance for a sample diffusion scan."""
+    """Test DiffusionConfig compliance for a sample diffusion acquisition."""
     module_path = "dcm_check/tests/ref_pydantic.py"
     diffusion_model = dcm_check.load_ref_pydantic(module_path, "Diff_1k")
acquisition.""" module_path = "dcm_check/tests/ref_pydantic.py" diffusion_model = dcm_check.load_ref_pydantic(module_path, "Diff_1k") diff --git a/guidelines/hcp/hcp.py b/guidelines/hcp/hcp.py index e023e54..57685e5 100644 --- a/guidelines/hcp/hcp.py +++ b/guidelines/hcp/hcp.py @@ -70,8 +70,8 @@ def validate_pixel_spacing(cls, v): raise ValueError("PixelSpacing must have exactly 2 values") return v -# Dictionary to map scan types to their respective config models (without instantiation) -SCAN_MODELS = { +# Dictionary to map acquisitions to their respective config models (without instantiation) +ACQUISITION_MODELS = { "T1_MPR": T1_MPR_Config, "T2w_SPC": T2w_SPC_Config, "Diff_1k": DiffusionConfig,