From 8622513ab544dfeba3e68976c6af21b614dfa77f Mon Sep 17 00:00:00 2001 From: jungheejung Date: Thu, 30 May 2024 02:37:22 -0400 Subject: [PATCH 1/6] DEV: logger --- spacetop_prep/events/bidsify_dryrun_social.py | 126 ++++++++++-------- 1 file changed, 74 insertions(+), 52 deletions(-) diff --git a/spacetop_prep/events/bidsify_dryrun_social.py b/spacetop_prep/events/bidsify_dryrun_social.py index 9037b66..feca4ce 100644 --- a/spacetop_prep/events/bidsify_dryrun_social.py +++ b/spacetop_prep/events/bidsify_dryrun_social.py @@ -35,6 +35,21 @@ # Functions # ------------------------------------------------------------------------------ +# Configure the logger globally +logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') + +def setup_logger(name, log_file, level=logging.INFO): + """Function to setup as many loggers as you want""" + handler = logging.FileHandler(log_file, mode='w') + handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')) + + logger = logging.getLogger(name) + logger.setLevel(level) + logger.addHandler(handler) + + return logger + + def run_command(command): result = subprocess.run(command, shell=True, capture_output=True, text=True) if result.returncode != 0: @@ -143,7 +158,7 @@ def is_equivalent(val1, val2, tolerance=1): scans_list = sorted(glob.glob('sub-*/**/*scans*.tsv', recursive=True)) for scan_fname in scans_list: # NOTE: Step 1: Get the scans.tsv using datalad - #run_command(f"datalad get {scan_fname}") + run_command(f"datalad get {scan_fname}") print(f"datalad get {scan_fname} ") # Check if scans_file is not empty and unlock it using git annex if os.path.exists(scan_fname) and os.path.getsize(scan_fname) > 0: @@ -236,13 +251,14 @@ def is_equivalent(val1, val2, tolerance=1): # Step 2: Configure the logging system -logging.basicConfig(filename='task-cue_vicarious.log', # Log file path - filemode='w', # Append mode ('w' for overwrite) - level=logging.INFO, # Logging level - format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') # Log message format +# logging.basicConfig(filename='task-cue_vicarious.log', # Log file path +# filemode='w', # Append mode ('w' for overwrite) +# level=logging.INFO, # Logging level +# format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') # Log message format -# Step 3: Create a logger object -logger = logging.getLogger('cognitive') +# # Step 3: Create a logger object +# logger = logging.getLogger('cognitive') +cognitive_logger = setup_logger('cognitive', 'task-cue_cognitive.log') for cognitive_fpath in sorted(filtered_cognitive_flist): @@ -259,7 +275,7 @@ def is_equivalent(val1, val2, tolerance=1): expect = bids_beh.copy(); stim = bids_beh.copy(); outcome = bids_beh.copy(); - logger.info(f"\n\n{cognitive_fpath}") + cognitive_logger.info(f"\n\n{cognitive_fpath}") # 2. 
extract metadata from original behavioral file ________________________ @@ -269,7 +285,7 @@ def is_equivalent(val1, val2, tolerance=1): run_bids = re.search(r'run-\d+', cognitive_fname).group(0) runtype = re.search(r'run-\d+-(\w+?)_', cognitive_fname).group(1) - logger.info(f"_______ {sub_bids} {ses_bids} {run_bids} {runtype} _______") + cognitive_logger.info(f"_______ {sub_bids} {ses_bids} {run_bids} {runtype} _______") beh_savedir = join(bids_dir, sub_bids, ses_bids, 'func') beh_df = pd.read_csv(cognitive_fpath) trigger = beh_df['param_trigger_onset'][0] @@ -286,11 +302,11 @@ def is_equivalent(val1, val2, tolerance=1): raise FileNotFoundError("Trajectory preproc DOES NOT exist") except FileNotFoundError as e: - logger.warning(str(e)) + cognitive_logger.warning(str(e)) continue except Exception as e: # This catches any other exceptions that might occur - logger.error("An error occurred while processing the trajectory file: %s", str(e)) + cognitive_logger.error("An error occurred while processing the trajectory file: %s", str(e)) continue @@ -314,7 +330,7 @@ def is_equivalent(val1, val2, tolerance=1): if expect_overall_flag: discrepancy_indices = traj_df[traj_df['comparison_flag']].index for idx in discrepancy_indices: - logger.info(f"\tExpect Rating {idx}: (traj_df): {traj_df.loc[idx]['adjusted_expectangle_degrees'].round(2)} \t(beh_df): {beh_df.loc[idx]['event02_expect_fillna']}") + cognitive_logger.info(f"\tExpect Rating {idx}: (traj_df): {traj_df.loc[idx]['adjusted_expectangle_degrees'].round(2)} \t(beh_df): {beh_df.loc[idx]['event02_expect_fillna']}") beh_df['event04_outcome_fillna'] = beh_df['event04_actual_angle'].round(2) beh_df['event04_outcome_fillna'].fillna(traj_df['adjusted_outcomeangle_degrees'].round(2), inplace=True) @@ -325,7 +341,7 @@ def is_equivalent(val1, val2, tolerance=1): if outcome_overall_flag: discrepancy_indices = traj_df[traj_df['outcome_comparisonflag']].index for idx in discrepancy_indices: - logger.info(f"\tOutcome Rating {idx} (traj_df): {traj_df.loc[idx]['adjusted_outcomeangle_degrees'].round(2)} \t(beh_df): {beh_df.loc[idx]['event04_outcome_fillna']}") + cognitive_logger.info(f"\tOutcome Rating {idx} (traj_df): {traj_df.loc[idx]['adjusted_outcomeangle_degrees'].round(2)} \t(beh_df): {beh_df.loc[idx]['event04_outcome_fillna']}") # map it to new label @@ -344,7 +360,7 @@ def is_equivalent(val1, val2, tolerance=1): if (beh_df['event01_cue_type'] == beh_df['param_cue_type']).all(): cue['cue'] = beh_df['event01_cue_type'] else: - logger.error(f"4-1. cue parameter does not match") + cognitive_logger.error(f"4-1. 
cue parameter does not match") continue cue['stimulusintensity'] = "n/a" # cue['stim_file'] = beh_df["event01_cue_filename"] @@ -449,7 +465,7 @@ def is_equivalent(val1, val2, tolerance=1): if os.path.exists(beh_savedir) and os.path.isdir(beh_savedir): events_sorted.to_csv(join(beh_savedir, f"{sub_bids}_{ses_bids}_task-social_{run_bids}_events.tsv"), sep='\t', index=False) else: - logger.critical(f"WARNING: The directory {beh_savedir} does not exist.") + cognitive_logger.critical(f"WARNING: The directory {beh_savedir} does not exist.") # extract bids info and save as new file @@ -464,23 +480,27 @@ def is_equivalent(val1, val2, tolerance=1): # %% # Create a custom logger _______________________________________________________ -logger = logging.getLogger(__name__) -logger.setLevel(logging.DEBUG) +# logger = logging.getLogger(__name__) +# logger.setLevel(logging.DEBUG) + +# # Create handlers +# info_handler = logging.FileHandler('task-cue_pain_info.log', mode='w') +# info_handler.setLevel(logging.INFO) -# Create handlers -info_handler = logging.FileHandler('task-cue_pain_info.log', mode='w') -info_handler.setLevel(logging.INFO) +# warning_handler = logging.FileHandler('task-cue_pain_warning.log', mode='w') +# warning_handler.setLevel(logging.WARNING) -warning_handler = logging.FileHandler('task-cue_pain_warning.log', mode='w') -warning_handler.setLevel(logging.WARNING) +# # Create formatters and add them to the handlers +# info_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') +# warning_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') +# info_handler.setFormatter(info_format) +# warning_handler.setFormatter(warning_format) +# logger.addHandler(info_handler) +# logger.addHandler(warning_handler) + +pain_info_logger = setup_logger('pain_info', 'task-cue_pain_info.log', level=logging.INFO) +pain_warning_logger = setup_logger('pain_warning', 'task-cue_pain_warning.log', level=logging.WARNING) -# Create formatters and add them to the handlers -info_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') -warning_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') -info_handler.setFormatter(info_format) -warning_handler.setFormatter(warning_format) -logger.addHandler(info_handler) -logger.addHandler(warning_handler) for pain_fpath in sorted(filtered_pain_flist): @@ -497,7 +517,7 @@ def is_equivalent(val1, val2, tolerance=1): expect = bids_beh.copy(); stim = bids_beh.copy(); outcome = bids_beh.copy(); - logger.info(f"\n\n{pain_fpath}") + pain_info_logger.info(f"\n\n{pain_fpath}") # 2. 
extract metadata from original behavioral file ________________________ @@ -507,7 +527,7 @@ def is_equivalent(val1, val2, tolerance=1): run_bids = re.search(r'run-\d+', pain_fname).group(0) runtype = re.search(r'run-\d+-(\w+?)_', pain_fname).group(1) - logger.info(f"_______ {sub_bids} {ses_bids} {run_bids} {runtype} _______") + pain_info_logger.info(f"_______ {sub_bids} {ses_bids} {run_bids} {runtype} _______") beh_savedir = join(bids_dir, sub_bids, ses_bids, 'func') beh_df = pd.read_csv(pain_fpath) trigger = beh_df['param_trigger_onset'][0] @@ -524,12 +544,12 @@ def is_equivalent(val1, val2, tolerance=1): raise FileNotFoundError("Trajectory preproc DOES NOT EXIST") except FileNotFoundError as e: - logger.warning(str(e)) - logger.warning("Trajectory preproc DOES NOT EXIST") + pain_warning_logger.warning(str(e)) + pain_warning_logger.warning("Trajectory preproc DOES NOT EXIST") continue except Exception as e: # This catches any other exceptions that might occur - logger.error("An error occurred while processing the trajectory file: %s", str(e)) + pain_warning_logger.error("An error occurred while processing the trajectory file: %s", str(e)) continue @@ -550,7 +570,7 @@ def is_equivalent(val1, val2, tolerance=1): if expect_overall_flag: discrepancy_indices = traj_df[traj_df['comparison_flag']].index for idx in discrepancy_indices: - logger.info(f"\tExpect Rating {idx}: (traj_df): {traj_df.loc[idx]['adjusted_expectangle_degrees'].round(2)} \t(beh_df): {beh_df.loc[idx]['event02_expect_fillna']}") + pain_info_logger.info(f"\tExpect Rating {idx}: (traj_df): {traj_df.loc[idx]['adjusted_expectangle_degrees'].round(2)} \t(beh_df): {beh_df.loc[idx]['event02_expect_fillna']}") beh_df['event04_outcome_fillna'] = beh_df['event04_actual_angle'].round(2) beh_df['event04_outcome_fillna'].fillna(traj_df['adjusted_outcomeangle_degrees'].round(2), inplace=True) @@ -561,7 +581,7 @@ def is_equivalent(val1, val2, tolerance=1): if outcome_overall_flag: discrepancy_indices = traj_df[traj_df['outcome_comparisonflag']].index for idx in discrepancy_indices: - logger.info(f"\tOutcome Rating {idx} (traj_df): {traj_df.loc[idx]['adjusted_outcomeangle_degrees'].round(2)} \t(beh_df): {beh_df.loc[idx]['event04_outcome_fillna']}") + pain_info_logger.info(f"\tOutcome Rating {idx} (traj_df): {traj_df.loc[idx]['adjusted_outcomeangle_degrees'].round(2)} \t(beh_df): {beh_df.loc[idx]['event04_outcome_fillna']}") @@ -580,7 +600,7 @@ def is_equivalent(val1, val2, tolerance=1): if (beh_df['event01_cue_type'] == beh_df['param_cue_type']).all(): cue['cue'] = beh_df['event01_cue_type'] else: - logger.error(f"4-1. cue parameter does not match") + pain_info_logger.error(f"4-1. 
cue parameter does not match") continue cue['stimulusintensity'] = "n/a" cue['stim_file'] = beh_df["event01_cue_filename"].apply( @@ -642,7 +662,7 @@ def is_equivalent(val1, val2, tolerance=1): for i, ttl_row in ttl_df.iterrows(): ttl_df.loc[i] = calculate_ttl_values(stimulus_times, ttl_row, beh_df.loc[i]) else: - logger.info("TTL dataframe non existent.") + pain_info_logger.info("TTL dataframe non existent.") beh_df['total_stimulus_time'] = beh_df['event03_stimulus_type'].apply(lambda x: sum(stimulus_times[x].values())) temperature_map = { @@ -717,7 +737,7 @@ def is_equivalent(val1, val2, tolerance=1): if os.path.exists(beh_savedir) and os.path.isdir(beh_savedir): events_sorted.to_csv(join(beh_savedir, f"{sub_bids}_{ses_bids}_task-social_{run_bids}_events.tsv"), sep='\t', index=False) else: - logger.critical(f"WARNING: The directory {beh_savedir} does not exist.") + pain_warning_logger.critical(f"WARNING: The directory {beh_savedir} does not exist.") # extract bids info and save as new file @@ -728,13 +748,15 @@ def is_equivalent(val1, val2, tolerance=1): vicarious_flist = glob.glob(join(beh_inputdir,'sub-*', '**','task-social', '**', f'*{task_name}*.csv'), recursive=True) filtered_vicarious_flist = [file for file in vicarious_flist if "sub-0001" not in file] # 0. Configure the logging system -logging.basicConfig(filename='task-cue_vicarious.log', # Log file path - filemode='w', # Append mode ('w' for overwrite) - level=logging.INFO, # Logging level - format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') # Log message format +# logging.basicConfig(filename='task-cue_vicarious.log', # Log file path +# filemode='w', # Append mode ('w' for overwrite) +# level=logging.INFO, # Logging level +# format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') # Log message format # Step 3: Create a logger object -logger = logging.getLogger('vicarious') +# logger = logging.getLogger('vicarious') +vicarious_logger = setup_logger('vicarious', 'task-cue_vicarious.log') + for vicarious_fpath in sorted(filtered_vicarious_flist): # 1. create an empty dataframe to host new BIDS data _______________________ @@ -749,7 +771,7 @@ def is_equivalent(val1, val2, tolerance=1): expect = bids_beh.copy(); stim = bids_beh.copy(); outcome = bids_beh.copy(); - logger.info(f"\n\n{vicarious_fpath}") + vicarious_logger.info(f"\n\n{vicarious_fpath}") # 2. 
extract metadata from original behavioral file ________________________ vicarious_fname = os.path.basename(vicarious_fpath) sub_bids = re.search(r'sub-\d+', vicarious_fname).group(0) @@ -758,7 +780,7 @@ def is_equivalent(val1, val2, tolerance=1): runtype = re.search(r'run-\d+-(\w+?)_', vicarious_fname).group(1) - logger.info(f"_______ {sub_bids} {ses_bids} {run_bids} {runtype} _______") + vicarious_logger.info(f"_______ {sub_bids} {ses_bids} {run_bids} {runtype} _______") beh_savedir = join(bids_dir, sub_bids, ses_bids, 'func') beh_df = pd.read_csv(vicarious_fpath) trigger = beh_df['param_trigger_onset'][0] @@ -775,11 +797,11 @@ def is_equivalent(val1, val2, tolerance=1): raise FileNotFoundError("Trajectory preproc is empty.") except FileNotFoundError as e: - logger.warning(str(e)) + vicarious_logger.warning(str(e)) continue except Exception as e: # This catches any other exceptions that might occur - logger.error("An error occurred while processing the trajectory file: %s", str(e)) + vicarious_logger.error("An error occurred while processing the trajectory file: %s", str(e)) continue @@ -799,7 +821,7 @@ def is_equivalent(val1, val2, tolerance=1): if expect_overall_flag: discrepancy_indices = traj_df[traj_df['comparison_flag']].index for idx in discrepancy_indices: - logger.info(f"\tExpect Rating {idx}: (traj_df): {traj_df.loc[idx]['adjusted_expectangle_degrees'].round(2)} \t(beh_df): {beh_df.loc[idx]['event02_expect_fillna']}") + vicarious_logger.info(f"\tExpect Rating {idx}: (traj_df): {traj_df.loc[idx]['adjusted_expectangle_degrees'].round(2)} \t(beh_df): {beh_df.loc[idx]['event02_expect_fillna']}") beh_df['event04_outcome_fillna'] = beh_df['event04_actual_angle'].round(2) beh_df['event04_outcome_fillna'].fillna(traj_df['adjusted_outcomeangle_degrees'].round(2), inplace=True) @@ -810,7 +832,7 @@ def is_equivalent(val1, val2, tolerance=1): if outcome_overall_flag: discrepancy_indices = traj_df[traj_df['outcome_comparisonflag']].index for idx in discrepancy_indices: - logger.info(f"\tOutcome Rating {idx} (traj_df): {traj_df.loc[idx]['adjusted_outcomeangle_degrees'].round(2)} \t(beh_df): {beh_df.loc[idx]['event04_outcome_fillna']}") + vicarious_logger.info(f"\tOutcome Rating {idx} (traj_df): {traj_df.loc[idx]['adjusted_outcomeangle_degrees'].round(2)} \t(beh_df): {beh_df.loc[idx]['event04_outcome_fillna']}") # grab the intersection raise warning if dont match @@ -831,7 +853,7 @@ def is_equivalent(val1, val2, tolerance=1): if (beh_df['event01_cue_type'] == beh_df['param_cue_type']).all(): cue['cue'] = beh_df['event01_cue_type'] else: - logger.error(f"4-1. cue parameter does not match") + vicarious_logger.error(f"4-1. 
cue parameter does not match") continue cue['stimulusintensity'] = "n/a" cue['stim_file'] = beh_df["event01_cue_filename"].apply( @@ -931,6 +953,6 @@ def is_equivalent(val1, val2, tolerance=1): if os.path.exists(beh_savedir) and os.path.isdir(beh_savedir): events_sorted.to_csv(join(beh_savedir, f"{sub_bids}_{ses_bids}_task-social_{run_bids}_events.tsv"), sep='\t', index=False) else: - logger.critical(f"WARNING: The directory {beh_savedir} does not exist.") + vicarious_logger.critical(f"WARNING: The directory {beh_savedir} does not exist.") # extract bids info and save as new file \ No newline at end of file From 4a1250f4acb547286bd9deb1361f1fc7914f793d Mon Sep 17 00:00:00 2001 From: jungheejung Date: Thu, 30 May 2024 02:46:57 -0400 Subject: [PATCH 2/6] BUG: wrong column name --- spacetop_prep/events/bidsify_dryrun_social.py | 28 +++++++++++++++---- 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/spacetop_prep/events/bidsify_dryrun_social.py b/spacetop_prep/events/bidsify_dryrun_social.py index feca4ce..62a38b8 100644 --- a/spacetop_prep/events/bidsify_dryrun_social.py +++ b/spacetop_prep/events/bidsify_dryrun_social.py @@ -314,12 +314,18 @@ def is_equivalent(val1, val2, tolerance=1): # 3-2. Calculate the angle in radians and then convert to degrees - traj_df['expectangle_degrees'] = calc_adjusted_angle_df( + traj_df['adjusted_expectangle_degrees'] = calc_adjusted_angle_df( traj_df, 'expectrating_end_x', 'expectrating_end_y', trajectory_x, trajectory_y) - traj_df['outcomeangle_degrees'] = calc_adjusted_angle_df( + traj_df['adjusted_outcomeangle_degrees'] = calc_adjusted_angle_df( traj_df, 'outcomerating_end_x', 'outcomerating_end_y', trajectory_x, trajectory_y) + # traj_df['expectangle_degrees'] = calc_adjusted_angle_df( + # traj_df, 'expectrating_end_x', 'expectrating_end_y', trajectory_x, trajectory_y) + # traj_df['outcomeangle_degrees'] = calc_adjusted_angle_df( + # traj_df, 'outcomerating_end_x', 'outcomerating_end_y', trajectory_x, trajectory_y) + + # 3-3. check if the calculated new degree matches the one in beh_df beh_df['event02_expect_fillna'] = beh_df['event02_expect_angle'].round(2) @@ -555,12 +561,17 @@ def is_equivalent(val1, val2, tolerance=1): # 3-1. calculate degree based on x, y coordinate # 3-2. Calculate the angle in radians and then convert to degrees - traj_df['expectangle_degrees'] = calc_adjusted_angle_df( + # traj_df['expectangle_degrees'] = calc_adjusted_angle_df( + # traj_df, 'expectrating_end_x', 'expectrating_end_y', trajectory_x, trajectory_y) + # traj_df['outcomeangle_degrees'] = calc_adjusted_angle_df( + # traj_df, 'outcomerating_end_x', 'outcomerating_end_y', trajectory_x, trajectory_y) + traj_df['adjusted_expectangle_degrees'] = calc_adjusted_angle_df( traj_df, 'expectrating_end_x', 'expectrating_end_y', trajectory_x, trajectory_y) - traj_df['outcomeangle_degrees'] = calc_adjusted_angle_df( + traj_df['adjusted_outcomeangle_degrees'] = calc_adjusted_angle_df( traj_df, 'outcomerating_end_x', 'outcomerating_end_y', trajectory_x, trajectory_y) + # 3-3. check if the calculated new degree matches the one in beh_df beh_df['event02_expect_fillna'] = beh_df['event02_expect_angle'].round(2) beh_df['event02_expect_fillna'].fillna(traj_df['adjusted_expectangle_degrees'].round(2), inplace=True) @@ -806,12 +817,17 @@ def is_equivalent(val1, val2, tolerance=1): # 3-1. 
calculate degree based on x, y coordinate - traj_df['expectangle_degrees'] = calc_adjusted_angle_df( + # traj_df['expectangle_degrees'] = calc_adjusted_angle_df( + # traj_df, 'expectrating_end_x', 'expectrating_end_y', trajectory_x, trajectory_y) + # traj_df['outcomeangle_degrees'] = calc_adjusted_angle_df( + # traj_df, 'outcomerating_end_x', 'outcomerating_end_y', trajectory_x, trajectory_y) + traj_df['adjusted_expectangle_degrees'] = calc_adjusted_angle_df( traj_df, 'expectrating_end_x', 'expectrating_end_y', trajectory_x, trajectory_y) - traj_df['outcomeangle_degrees'] = calc_adjusted_angle_df( + traj_df['adjusted_outcomeangle_degrees'] = calc_adjusted_angle_df( traj_df, 'outcomerating_end_x', 'outcomerating_end_y', trajectory_x, trajectory_y) + # 3-3. check if the calculated new degree matches the one in beh_df beh_df['event02_expect_fillna'] = beh_df['event02_expect_angle'].round(2) beh_df['event02_expect_fillna'].fillna(traj_df['adjusted_expectangle_degrees'].round(2), inplace=True) From d7fe18de6bcb31c92057d504c3cbd1f06a515b11 Mon Sep 17 00:00:00 2001 From: jungheejung Date: Thu, 30 May 2024 13:25:29 -0400 Subject: [PATCH 3/6] BUG: wrong column name --- spacetop_prep/events/bidsify_dryrun_social.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/spacetop_prep/events/bidsify_dryrun_social.py b/spacetop_prep/events/bidsify_dryrun_social.py index 62a38b8..39c06df 100644 --- a/spacetop_prep/events/bidsify_dryrun_social.py +++ b/spacetop_prep/events/bidsify_dryrun_social.py @@ -706,9 +706,9 @@ def is_equivalent(val1, val2, tolerance=1): stim['pain_onset_ttl4'] = (ttl_df['TTL4']).round(2) else: stim['pain_onset_ttl1'] = (beh_df['event03_stimulus_displayonset'] - trigger).round(2) - stim['pain_onset_ttl2'] = (stim['onset_ttl1'] + beh_df['event03_stimulus_type'].apply(lambda x: stimulus_times[x]['rampup'])).round(2) - stim['pain_onset_ttl3'] = (stim['onset_ttl2'] + beh_df['event03_stimulus_type'].apply(lambda x: stimulus_times[x]['plateau'])).round(2) - stim['pain_onset_ttl4'] = (stim['onset_ttl3'] + beh_df['event03_stimulus_type'].apply(lambda x: stimulus_times[x]['rampdown'])).round(2) + stim['pain_onset_ttl2'] = (stim['pain_onset_ttl1'] + beh_df['event03_stimulus_type'].apply(lambda x: stimulus_times[x]['rampup'])).round(2) + stim['pain_onset_ttl3'] = (stim['pain_onset_ttl2'] + beh_df['event03_stimulus_type'].apply(lambda x: stimulus_times[x]['plateau'])).round(2) + stim['pain_onset_ttl4'] = (stim['pain_onset_ttl3'] + beh_df['event03_stimulus_type'].apply(lambda x: stimulus_times[x]['rampdown'])).round(2) stim['pain_stimulus_delivery_success'] = beh_df['event03_stimulus_P_trigger'].apply(lambda x: "success" if x == "Command Recieved: TRIGGER_AND_Response: RESULT_OK" else "fail") stim['cognitive_correct_response'] = "n/a" stim['cognitive_participant_response'] = "n/a" From 1c1a150250ba62051dee85eba6be47d7cb9dd4af Mon Sep 17 00:00:00 2001 From: jungheejung Date: Thu, 30 May 2024 14:19:17 -0400 Subject: [PATCH 4/6] DEV: bidsify task-social --- spacetop_prep/events/bidsify_social.py | 150 +++++++++++++------------ 1 file changed, 81 insertions(+), 69 deletions(-) diff --git a/spacetop_prep/events/bidsify_social.py b/spacetop_prep/events/bidsify_social.py index 2d179e4..752fdd7 100644 --- a/spacetop_prep/events/bidsify_social.py +++ b/spacetop_prep/events/bidsify_social.py @@ -35,6 +35,21 @@ # Functions # ------------------------------------------------------------------------------ +# Configure the logger globally +logging.basicConfig(level=logging.INFO, 
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') + +def setup_logger(name, log_file, level=logging.INFO): + """Function to setup as many loggers as you want""" + handler = logging.FileHandler(log_file, mode='w') + handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')) + + logger = logging.getLogger(name) + logger.setLevel(level) + logger.addHandler(handler) + + return logger + + def run_command(command): result = subprocess.run(command, shell=True, capture_output=True, text=True) if result.returncode != 0: @@ -144,9 +159,11 @@ def is_equivalent(val1, val2, tolerance=1): for scan_fname in scans_list: # NOTE: Step 1: Get the scans.tsv using datalad run_command(f"datalad get {scan_fname}") + print(f"datalad get {scan_fname} ") # Check if scans_file is not empty and unlock it using git annex if os.path.exists(scan_fname) and os.path.getsize(scan_fname) > 0: run_command(f"git annex unlock {scan_fname}") + print(f"unlock {scan_fname}") scans_df = pd.read_csv(scan_fname, sep='\t') @@ -173,13 +190,14 @@ def is_equivalent(val1, val2, tolerance=1): if orphan_files: for orphan_file in orphan_files: print(f"Removing {orphan_file}") - run_command(f"git rm {orphan_file}") + #run_command(f"git rm {orphan_file}") scans_df = scans_df[scans_df['filename'] != os.path.basename(orphan_file)] # Save the updated DataFrame back to the scans_file scans_df.to_csv(scan_fname, index=False) # Add the updated scans_file back to git annex + print(f"made edits to events file and deleted nifti files if not harmonized: {scan_fname}") run_command(f"git annex add {scan_fname}") run_command(f"git commit -m 'DOC: update scans tsv with task-social runtype metadata and remove orphan NIfTI files'") @@ -190,7 +208,9 @@ def is_equivalent(val1, val2, tolerance=1): for event_fname in cue_event_files: event_fpath = os.path.join(cue_events_dir, event_fname) run_command(f"git rm {event_fpath}") + print(f"remove all the task-cue events files {event_fpath}") run_command(f"git commit -m 'DEP: delete non-bids compliant events file'") + print("run_command(git commit -m DEP: delete non-bids compliant events file") @@ -230,14 +250,7 @@ def is_equivalent(val1, val2, tolerance=1): filtered_cognitive_flist = [file for file in cognitive_flist if "sub-0001" not in file] -# Step 2: Configure the logging system -logging.basicConfig(filename='task-cue_vicarious.log', # Log file path - filemode='w', # Append mode ('w' for overwrite) - level=logging.INFO, # Logging level - format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') # Log message format - -# Step 3: Create a logger object -logger = logging.getLogger('cognitive') +cognitive_logger = setup_logger('cognitive', 'task-cue_cognitive.log') for cognitive_fpath in sorted(filtered_cognitive_flist): @@ -254,7 +267,7 @@ def is_equivalent(val1, val2, tolerance=1): expect = bids_beh.copy(); stim = bids_beh.copy(); outcome = bids_beh.copy(); - logger.info(f"\n\n{cognitive_fpath}") + cognitive_logger.info(f"\n\n{cognitive_fpath}") # 2. 
extract metadata from original behavioral file ________________________ @@ -264,7 +277,7 @@ def is_equivalent(val1, val2, tolerance=1): run_bids = re.search(r'run-\d+', cognitive_fname).group(0) runtype = re.search(r'run-\d+-(\w+?)_', cognitive_fname).group(1) - logger.info(f"_______ {sub_bids} {ses_bids} {run_bids} {runtype} _______") + cognitive_logger.info(f"_______ {sub_bids} {ses_bids} {run_bids} {runtype} _______") beh_savedir = join(bids_dir, sub_bids, ses_bids, 'func') beh_df = pd.read_csv(cognitive_fpath) trigger = beh_df['param_trigger_onset'][0] @@ -281,11 +294,11 @@ def is_equivalent(val1, val2, tolerance=1): raise FileNotFoundError("Trajectory preproc DOES NOT exist") except FileNotFoundError as e: - logger.warning(str(e)) + cognitive_logger.warning(str(e)) continue except Exception as e: # This catches any other exceptions that might occur - logger.error("An error occurred while processing the trajectory file: %s", str(e)) + cognitive_logger.error("An error occurred while processing the trajectory file: %s", str(e)) continue @@ -293,12 +306,18 @@ def is_equivalent(val1, val2, tolerance=1): # 3-2. Calculate the angle in radians and then convert to degrees - traj_df['expectangle_degrees'] = calc_adjusted_angle_df( + traj_df['adjusted_expectangle_degrees'] = calc_adjusted_angle_df( traj_df, 'expectrating_end_x', 'expectrating_end_y', trajectory_x, trajectory_y) - traj_df['outcomeangle_degrees'] = calc_adjusted_angle_df( + traj_df['adjusted_outcomeangle_degrees'] = calc_adjusted_angle_df( traj_df, 'outcomerating_end_x', 'outcomerating_end_y', trajectory_x, trajectory_y) + # traj_df['expectangle_degrees'] = calc_adjusted_angle_df( + # traj_df, 'expectrating_end_x', 'expectrating_end_y', trajectory_x, trajectory_y) + # traj_df['outcomeangle_degrees'] = calc_adjusted_angle_df( + # traj_df, 'outcomerating_end_x', 'outcomerating_end_y', trajectory_x, trajectory_y) + + # 3-3. check if the calculated new degree matches the one in beh_df beh_df['event02_expect_fillna'] = beh_df['event02_expect_angle'].round(2) @@ -309,7 +328,7 @@ def is_equivalent(val1, val2, tolerance=1): if expect_overall_flag: discrepancy_indices = traj_df[traj_df['comparison_flag']].index for idx in discrepancy_indices: - logger.info(f"\tExpect Rating {idx}: (traj_df): {traj_df.loc[idx]['adjusted_expectangle_degrees'].round(2)} \t(beh_df): {beh_df.loc[idx]['event02_expect_fillna']}") + cognitive_logger.info(f"\tExpect Rating {idx}: (traj_df): {traj_df.loc[idx]['adjusted_expectangle_degrees'].round(2)} \t(beh_df): {beh_df.loc[idx]['event02_expect_fillna']}") beh_df['event04_outcome_fillna'] = beh_df['event04_actual_angle'].round(2) beh_df['event04_outcome_fillna'].fillna(traj_df['adjusted_outcomeangle_degrees'].round(2), inplace=True) @@ -320,7 +339,7 @@ def is_equivalent(val1, val2, tolerance=1): if outcome_overall_flag: discrepancy_indices = traj_df[traj_df['outcome_comparisonflag']].index for idx in discrepancy_indices: - logger.info(f"\tOutcome Rating {idx} (traj_df): {traj_df.loc[idx]['adjusted_outcomeangle_degrees'].round(2)} \t(beh_df): {beh_df.loc[idx]['event04_outcome_fillna']}") + cognitive_logger.info(f"\tOutcome Rating {idx} (traj_df): {traj_df.loc[idx]['adjusted_outcomeangle_degrees'].round(2)} \t(beh_df): {beh_df.loc[idx]['event04_outcome_fillna']}") # map it to new label @@ -339,7 +358,7 @@ def is_equivalent(val1, val2, tolerance=1): if (beh_df['event01_cue_type'] == beh_df['param_cue_type']).all(): cue['cue'] = beh_df['event01_cue_type'] else: - logger.error(f"4-1. 
cue parameter does not match") + cognitive_logger.error(f"4-1. cue parameter does not match") continue cue['stimulusintensity'] = "n/a" # cue['stim_file'] = beh_df["event01_cue_filename"] @@ -442,9 +461,9 @@ def is_equivalent(val1, val2, tolerance=1): events_sorted = events.sort_values(by='onset') events_sorted.fillna('n/a', inplace=True) if os.path.exists(beh_savedir) and os.path.isdir(beh_savedir): - events_sorted.to_csv(join(beh_savedir, f"{sub_bids}_{ses_bids}_task-social_{run_bids}_events.tsv"), sep='\t', index=False) + events_sorted.to_csv(join(beh_savedir, f"{sub_bids}_{ses_bids}_task-social_acq-mb8_{run_bids}_events.tsv"), sep='\t', index=False) else: - logger.critical(f"WARNING: The directory {beh_savedir} does not exist.") + cognitive_logger.critical(f"WARNING: The directory {beh_savedir} does not exist.") # extract bids info and save as new file @@ -459,23 +478,11 @@ def is_equivalent(val1, val2, tolerance=1): # %% # Create a custom logger _______________________________________________________ -logger = logging.getLogger(__name__) -logger.setLevel(logging.DEBUG) -# Create handlers -info_handler = logging.FileHandler('task-cue_pain_info.log', mode='w') -info_handler.setLevel(logging.INFO) -warning_handler = logging.FileHandler('task-cue_pain_warning.log', mode='w') -warning_handler.setLevel(logging.WARNING) +pain_info_logger = setup_logger('pain_info', 'task-cue_pain_info.log', level=logging.INFO) +pain_warning_logger = setup_logger('pain_warning', 'task-cue_pain_warning.log', level=logging.WARNING) -# Create formatters and add them to the handlers -info_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') -warning_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') -info_handler.setFormatter(info_format) -warning_handler.setFormatter(warning_format) -logger.addHandler(info_handler) -logger.addHandler(warning_handler) for pain_fpath in sorted(filtered_pain_flist): @@ -492,7 +499,7 @@ def is_equivalent(val1, val2, tolerance=1): expect = bids_beh.copy(); stim = bids_beh.copy(); outcome = bids_beh.copy(); - logger.info(f"\n\n{pain_fpath}") + pain_info_logger.info(f"\n\n{pain_fpath}") # 2. extract metadata from original behavioral file ________________________ @@ -502,7 +509,7 @@ def is_equivalent(val1, val2, tolerance=1): run_bids = re.search(r'run-\d+', pain_fname).group(0) runtype = re.search(r'run-\d+-(\w+?)_', pain_fname).group(1) - logger.info(f"_______ {sub_bids} {ses_bids} {run_bids} {runtype} _______") + pain_info_logger.info(f"_______ {sub_bids} {ses_bids} {run_bids} {runtype} _______") beh_savedir = join(bids_dir, sub_bids, ses_bids, 'func') beh_df = pd.read_csv(pain_fpath) trigger = beh_df['param_trigger_onset'][0] @@ -519,23 +526,28 @@ def is_equivalent(val1, val2, tolerance=1): raise FileNotFoundError("Trajectory preproc DOES NOT EXIST") except FileNotFoundError as e: - logger.warning(str(e)) - logger.warning("Trajectory preproc DOES NOT EXIST") + pain_warning_logger.warning(str(e)) + pain_warning_logger.warning("Trajectory preproc DOES NOT EXIST") continue except Exception as e: # This catches any other exceptions that might occur - logger.error("An error occurred while processing the trajectory file: %s", str(e)) + pain_warning_logger.error("An error occurred while processing the trajectory file: %s", str(e)) continue # 3-1. calculate degree based on x, y coordinate # 3-2. 
Calculate the angle in radians and then convert to degrees - traj_df['expectangle_degrees'] = calc_adjusted_angle_df( + # traj_df['expectangle_degrees'] = calc_adjusted_angle_df( + # traj_df, 'expectrating_end_x', 'expectrating_end_y', trajectory_x, trajectory_y) + # traj_df['outcomeangle_degrees'] = calc_adjusted_angle_df( + # traj_df, 'outcomerating_end_x', 'outcomerating_end_y', trajectory_x, trajectory_y) + traj_df['adjusted_expectangle_degrees'] = calc_adjusted_angle_df( traj_df, 'expectrating_end_x', 'expectrating_end_y', trajectory_x, trajectory_y) - traj_df['outcomeangle_degrees'] = calc_adjusted_angle_df( + traj_df['adjusted_outcomeangle_degrees'] = calc_adjusted_angle_df( traj_df, 'outcomerating_end_x', 'outcomerating_end_y', trajectory_x, trajectory_y) + # 3-3. check if the calculated new degree matches the one in beh_df beh_df['event02_expect_fillna'] = beh_df['event02_expect_angle'].round(2) beh_df['event02_expect_fillna'].fillna(traj_df['adjusted_expectangle_degrees'].round(2), inplace=True) @@ -545,7 +557,7 @@ def is_equivalent(val1, val2, tolerance=1): if expect_overall_flag: discrepancy_indices = traj_df[traj_df['comparison_flag']].index for idx in discrepancy_indices: - logger.info(f"\tExpect Rating {idx}: (traj_df): {traj_df.loc[idx]['adjusted_expectangle_degrees'].round(2)} \t(beh_df): {beh_df.loc[idx]['event02_expect_fillna']}") + pain_info_logger.info(f"\tExpect Rating {idx}: (traj_df): {traj_df.loc[idx]['adjusted_expectangle_degrees'].round(2)} \t(beh_df): {beh_df.loc[idx]['event02_expect_fillna']}") beh_df['event04_outcome_fillna'] = beh_df['event04_actual_angle'].round(2) beh_df['event04_outcome_fillna'].fillna(traj_df['adjusted_outcomeangle_degrees'].round(2), inplace=True) @@ -556,7 +568,7 @@ def is_equivalent(val1, val2, tolerance=1): if outcome_overall_flag: discrepancy_indices = traj_df[traj_df['outcome_comparisonflag']].index for idx in discrepancy_indices: - logger.info(f"\tOutcome Rating {idx} (traj_df): {traj_df.loc[idx]['adjusted_outcomeangle_degrees'].round(2)} \t(beh_df): {beh_df.loc[idx]['event04_outcome_fillna']}") + pain_info_logger.info(f"\tOutcome Rating {idx} (traj_df): {traj_df.loc[idx]['adjusted_outcomeangle_degrees'].round(2)} \t(beh_df): {beh_df.loc[idx]['event04_outcome_fillna']}") @@ -575,7 +587,7 @@ def is_equivalent(val1, val2, tolerance=1): if (beh_df['event01_cue_type'] == beh_df['param_cue_type']).all(): cue['cue'] = beh_df['event01_cue_type'] else: - logger.error(f"4-1. cue parameter does not match") + pain_info_logger.error(f"4-1. 
cue parameter does not match") continue cue['stimulusintensity'] = "n/a" cue['stim_file'] = beh_df["event01_cue_filename"].apply( @@ -637,7 +649,7 @@ def is_equivalent(val1, val2, tolerance=1): for i, ttl_row in ttl_df.iterrows(): ttl_df.loc[i] = calculate_ttl_values(stimulus_times, ttl_row, beh_df.loc[i]) else: - logger.info("TTL dataframe non existent.") + pain_info_logger.info("TTL dataframe non existent.") beh_df['total_stimulus_time'] = beh_df['event03_stimulus_type'].apply(lambda x: sum(stimulus_times[x].values())) temperature_map = { @@ -670,9 +682,9 @@ def is_equivalent(val1, val2, tolerance=1): stim['pain_onset_ttl4'] = (ttl_df['TTL4']).round(2) else: stim['pain_onset_ttl1'] = (beh_df['event03_stimulus_displayonset'] - trigger).round(2) - stim['pain_onset_ttl2'] = (stim['onset_ttl1'] + beh_df['event03_stimulus_type'].apply(lambda x: stimulus_times[x]['rampup'])).round(2) - stim['pain_onset_ttl3'] = (stim['onset_ttl2'] + beh_df['event03_stimulus_type'].apply(lambda x: stimulus_times[x]['plateau'])).round(2) - stim['pain_onset_ttl4'] = (stim['onset_ttl3'] + beh_df['event03_stimulus_type'].apply(lambda x: stimulus_times[x]['rampdown'])).round(2) + stim['pain_onset_ttl2'] = (stim['pain_onset_ttl1'] + beh_df['event03_stimulus_type'].apply(lambda x: stimulus_times[x]['rampup'])).round(2) + stim['pain_onset_ttl3'] = (stim['pain_onset_ttl2'] + beh_df['event03_stimulus_type'].apply(lambda x: stimulus_times[x]['plateau'])).round(2) + stim['pain_onset_ttl4'] = (stim['pain_onset_ttl3'] + beh_df['event03_stimulus_type'].apply(lambda x: stimulus_times[x]['rampdown'])).round(2) stim['pain_stimulus_delivery_success'] = beh_df['event03_stimulus_P_trigger'].apply(lambda x: "success" if x == "Command Recieved: TRIGGER_AND_Response: RESULT_OK" else "fail") stim['cognitive_correct_response'] = "n/a" stim['cognitive_participant_response'] = "n/a" @@ -710,9 +722,9 @@ def is_equivalent(val1, val2, tolerance=1): events_sorted = events.sort_values(by='onset') events_sorted.fillna('n/a', inplace=True) if os.path.exists(beh_savedir) and os.path.isdir(beh_savedir): - events_sorted.to_csv(join(beh_savedir, f"{sub_bids}_{ses_bids}_task-social_{run_bids}_events.tsv"), sep='\t', index=False) + events_sorted.to_csv(join(beh_savedir, f"{sub_bids}_{ses_bids}_task-social_acq-mb8_{run_bids}_events.tsv"), sep='\t', index=False) else: - logger.critical(f"WARNING: The directory {beh_savedir} does not exist.") + pain_warning_logger.critical(f"WARNING: The directory {beh_savedir} does not exist.") # extract bids info and save as new file @@ -723,13 +735,9 @@ def is_equivalent(val1, val2, tolerance=1): vicarious_flist = glob.glob(join(beh_inputdir,'sub-*', '**','task-social', '**', f'*{task_name}*.csv'), recursive=True) filtered_vicarious_flist = [file for file in vicarious_flist if "sub-0001" not in file] # 0. Configure the logging system -logging.basicConfig(filename='task-cue_vicarious.log', # Log file path - filemode='w', # Append mode ('w' for overwrite) - level=logging.INFO, # Logging level - format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') # Log message format -# Step 3: Create a logger object -logger = logging.getLogger('vicarious') +vicarious_logger = setup_logger('vicarious', 'task-cue_vicarious.log') + for vicarious_fpath in sorted(filtered_vicarious_flist): # 1. 
create an empty dataframe to host new BIDS data _______________________ @@ -744,7 +752,7 @@ def is_equivalent(val1, val2, tolerance=1): expect = bids_beh.copy(); stim = bids_beh.copy(); outcome = bids_beh.copy(); - logger.info(f"\n\n{vicarious_fpath}") + vicarious_logger.info(f"\n\n{vicarious_fpath}") # 2. extract metadata from original behavioral file ________________________ vicarious_fname = os.path.basename(vicarious_fpath) sub_bids = re.search(r'sub-\d+', vicarious_fname).group(0) @@ -753,7 +761,7 @@ def is_equivalent(val1, val2, tolerance=1): runtype = re.search(r'run-\d+-(\w+?)_', vicarious_fname).group(1) - logger.info(f"_______ {sub_bids} {ses_bids} {run_bids} {runtype} _______") + vicarious_logger.info(f"_______ {sub_bids} {ses_bids} {run_bids} {runtype} _______") beh_savedir = join(bids_dir, sub_bids, ses_bids, 'func') beh_df = pd.read_csv(vicarious_fpath) trigger = beh_df['param_trigger_onset'][0] @@ -770,21 +778,26 @@ def is_equivalent(val1, val2, tolerance=1): raise FileNotFoundError("Trajectory preproc is empty.") except FileNotFoundError as e: - logger.warning(str(e)) + vicarious_logger.warning(str(e)) continue except Exception as e: # This catches any other exceptions that might occur - logger.error("An error occurred while processing the trajectory file: %s", str(e)) + vicarious_logger.error("An error occurred while processing the trajectory file: %s", str(e)) continue # 3-1. calculate degree based on x, y coordinate - traj_df['expectangle_degrees'] = calc_adjusted_angle_df( + # traj_df['expectangle_degrees'] = calc_adjusted_angle_df( + # traj_df, 'expectrating_end_x', 'expectrating_end_y', trajectory_x, trajectory_y) + # traj_df['outcomeangle_degrees'] = calc_adjusted_angle_df( + # traj_df, 'outcomerating_end_x', 'outcomerating_end_y', trajectory_x, trajectory_y) + traj_df['adjusted_expectangle_degrees'] = calc_adjusted_angle_df( traj_df, 'expectrating_end_x', 'expectrating_end_y', trajectory_x, trajectory_y) - traj_df['outcomeangle_degrees'] = calc_adjusted_angle_df( + traj_df['adjusted_outcomeangle_degrees'] = calc_adjusted_angle_df( traj_df, 'outcomerating_end_x', 'outcomerating_end_y', trajectory_x, trajectory_y) + # 3-3. 
check if the calculated new degree matches the one in beh_df beh_df['event02_expect_fillna'] = beh_df['event02_expect_angle'].round(2) beh_df['event02_expect_fillna'].fillna(traj_df['adjusted_expectangle_degrees'].round(2), inplace=True) @@ -794,7 +807,7 @@ def is_equivalent(val1, val2, tolerance=1): if expect_overall_flag: discrepancy_indices = traj_df[traj_df['comparison_flag']].index for idx in discrepancy_indices: - logger.info(f"\tExpect Rating {idx}: (traj_df): {traj_df.loc[idx]['adjusted_expectangle_degrees'].round(2)} \t(beh_df): {beh_df.loc[idx]['event02_expect_fillna']}") + vicarious_logger.info(f"\tExpect Rating {idx}: (traj_df): {traj_df.loc[idx]['adjusted_expectangle_degrees'].round(2)} \t(beh_df): {beh_df.loc[idx]['event02_expect_fillna']}") beh_df['event04_outcome_fillna'] = beh_df['event04_actual_angle'].round(2) beh_df['event04_outcome_fillna'].fillna(traj_df['adjusted_outcomeangle_degrees'].round(2), inplace=True) @@ -805,7 +818,7 @@ def is_equivalent(val1, val2, tolerance=1): if outcome_overall_flag: discrepancy_indices = traj_df[traj_df['outcome_comparisonflag']].index for idx in discrepancy_indices: - logger.info(f"\tOutcome Rating {idx} (traj_df): {traj_df.loc[idx]['adjusted_outcomeangle_degrees'].round(2)} \t(beh_df): {beh_df.loc[idx]['event04_outcome_fillna']}") + vicarious_logger.info(f"\tOutcome Rating {idx} (traj_df): {traj_df.loc[idx]['adjusted_outcomeangle_degrees'].round(2)} \t(beh_df): {beh_df.loc[idx]['event04_outcome_fillna']}") # grab the intersection raise warning if dont match @@ -826,7 +839,7 @@ def is_equivalent(val1, val2, tolerance=1): if (beh_df['event01_cue_type'] == beh_df['param_cue_type']).all(): cue['cue'] = beh_df['event01_cue_type'] else: - logger.error(f"4-1. cue parameter does not match") + vicarious_logger.error(f"4-1. 
cue parameter does not match") continue cue['stimulusintensity'] = "n/a" cue['stim_file'] = beh_df["event01_cue_filename"].apply( @@ -884,7 +897,7 @@ def is_equivalent(val1, val2, tolerance=1): stim['rating_mousedur'] = "n/a" stim['cue'] = beh_df['event01_cue_type'] # if same as param_cue_type stim['stimulusintensity'] = beh_df['event03_stimulus_type'] - stim['stim_file'] = '/task-social/cue/runtype-{task_name}/' + beh_df['event03_stimulus_V_filename'] + stim['stim_file'] = f'/task-social/cue/runtype-{task_name}/' + beh_df['event03_stimulus_V_filename'] stim['pain_onset_ttl1'] = "n/a" stim['pain_onset_ttl2'] = "n/a" stim['pain_onset_ttl3'] = "n/a" @@ -919,13 +932,12 @@ def is_equivalent(val1, val2, tolerance=1): outcome['cognitive_correct_response'] = "n/a" outcome['cognitive_participant_response'] = "n/a" outcome['cognitive_response_accuracy'] = "n/a" - events = pd.concat([cue, expect, stim, outcome], ignore_index=True) events_sorted = events.sort_values(by='onset') events_sorted.fillna('n/a', inplace=True) if os.path.exists(beh_savedir) and os.path.isdir(beh_savedir): - events_sorted.to_csv(join(beh_savedir, f"{sub_bids}_{ses_bids}_task-social_{run_bids}_events.tsv"), sep='\t', index=False) + events_sorted.to_csv(join(beh_savedir, f"{sub_bids}_{ses_bids}_task-social_acq-mb8_{run_bids}_events.tsv"), sep='\t', index=False) else: - logger.critical(f"WARNING: The directory {beh_savedir} does not exist.") + vicarious_logger.critical(f"WARNING: The directory {beh_savedir} does not exist.") # extract bids info and save as new file \ No newline at end of file From d090ca7f8e05ae54096008c26314c3370dc41362 Mon Sep 17 00:00:00 2001 From: jungheejung Date: Thu, 30 May 2024 14:45:00 -0400 Subject: [PATCH 5/6] BUG: stimulus duration --- spacetop_prep/events/bidsify_social.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/spacetop_prep/events/bidsify_social.py b/spacetop_prep/events/bidsify_social.py index 752fdd7..5e73733 100644 --- a/spacetop_prep/events/bidsify_social.py +++ b/spacetop_prep/events/bidsify_social.py @@ -406,7 +406,7 @@ def is_equivalent(val1, val2, tolerance=1): # 6. stim __________________________________________________________________ stim['onset'] = (beh_df['event03_stimulus_displayonset'] - trigger).round(2) - stim['duration'] = (beh_df['ISI03_onset'] - beh_df['event03_stimulus_displayonset']).round(2) + stim['duration'] = 5 #(beh_df['ISI03_onset'] - beh_df['event03_stimulus_displayonset']).round(2) stim['run_type'] = task_name stim['trial_type'] = 'stimulus' stim['trial_index'] = beh_df.index +1 @@ -653,16 +653,17 @@ def is_equivalent(val1, val2, tolerance=1): beh_df['total_stimulus_time'] = beh_df['event03_stimulus_type'].apply(lambda x: sum(stimulus_times[x].values())) temperature_map = { - 'high_stim': '50c', - 'med_stim': '49c', - 'low_stim': '48c' + 'high_stim': '50_celsius', + 'med_stim': '49_celsius', + 'low_stim': '48_celsius' } stim['onset'] = (beh_df['event03_stimulus_displayonset'] - trigger).round(2) if ttl_glob: stim['duration'] = (ttl_df['TTL4'] - ttl_df['TTL1']).round(2) else: - stim['duration'] = ((beh_df['event03_stimulus_displayonset']-trigger) + beh_df['total_stimulus_time']).round(2) + stim['duration'] = ((beh_df['event03_stimulus_displayonset']-trigger) + beh_df['total_stimulus_time']).round(2) - (beh_df['event03_stimulus_displayonset'] - trigger).round(2) + stim['run_type'] = task_name stim['trial_type'] = 'stimulus' stim['trial_index'] = beh_df.index +1 @@ -885,7 +886,7 @@ def is_equivalent(val1, val2, tolerance=1): # 6. 
stim __________________________________________________________________ stim['onset'] = (beh_df['event03_stimulus_displayonset'] - trigger).round(2) - stim['duration'] = (beh_df['ISI03_onset'] - beh_df['event03_stimulus_displayonset']).round(2) + stim['duration'] = 5 #(beh_df['ISI03_onset'] - beh_df['event03_stimulus_displayonset']).round(2) stim['run_type'] = task_name stim['trial_type'] = 'stimulus' stim['trial_index'] = beh_df.index +1 From 993e1bed4a3abc4c626b1b56165cb5804dd4ec39 Mon Sep 17 00:00:00 2001 From: jungheejung Date: Thu, 30 May 2024 15:21:12 -0400 Subject: [PATCH 6/6] BUG: tab separate --- spacetop_prep/events/bidsify_social.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spacetop_prep/events/bidsify_social.py b/spacetop_prep/events/bidsify_social.py index 5e73733..88924d9 100644 --- a/spacetop_prep/events/bidsify_social.py +++ b/spacetop_prep/events/bidsify_social.py @@ -194,7 +194,7 @@ def is_equivalent(val1, val2, tolerance=1): scans_df = scans_df[scans_df['filename'] != os.path.basename(orphan_file)] # Save the updated DataFrame back to the scans_file - scans_df.to_csv(scan_fname, index=False) + scans_df.to_csv(scan_fname, index=False, sep='\t') # Add the updated scans_file back to git annex print(f"made edits to events file and deleted nifti files if not harmonized: {scan_fname}")
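
As a quick reference for the logging change in PATCH 1/6 and 4/6: the single shared `logger` is replaced by one named logger per task loop, each with its own FileHandler, so cognitive, pain, and vicarious messages no longer land in the same file. The helper below follows the one introduced in the diff; the instantiations and the two demo messages at the end are illustrative usage only.

import logging

def setup_logger(name, log_file, level=logging.INFO):
    """Create an independent named logger that writes to its own file."""
    handler = logging.FileHandler(log_file, mode='w')   # 'w' starts a fresh log each run
    handler.setFormatter(logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    logger = logging.getLogger(name)
    logger.setLevel(level)
    logger.addHandler(handler)
    return logger

# One logger per task loop, as in the patch.
cognitive_logger = setup_logger('cognitive', 'task-cue_cognitive.log')
pain_info_logger = setup_logger('pain_info', 'task-cue_pain_info.log', level=logging.INFO)
pain_warning_logger = setup_logger('pain_warning', 'task-cue_pain_warning.log', level=logging.WARNING)
vicarious_logger = setup_logger('vicarious', 'task-cue_vicarious.log')

cognitive_logger.info("written to task-cue_cognitive.log")
pain_warning_logger.warning("written to task-cue_pain_warning.log")

Because each logger owns its handler, a warning raised in the pain loop can no longer leak into the cognitive or vicarious logs, which was possible when every loop reconfigured the same module-level logger.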
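
The "wrong column name" fix in PATCH 2/6 routes the recomputed trajectory angles into `adjusted_expectangle_degrees` / `adjusted_outcomeangle_degrees`, the names that the later `fillna` calls and discrepancy logging actually index. A toy sketch of that dependency follows; the angle values are made up, and `recomputed_angle` merely stands in for the output of `calc_adjusted_angle_df`, whose body is not part of this diff.

import pandas as pd

# Stand-in values; the real angles come from calc_adjusted_angle_df.
traj_df = pd.DataFrame({'recomputed_angle': [12.34, 56.78, 90.12]})
beh_df = pd.DataFrame({'event02_expect_fillna': [12.34, None, 90.12]})

# Downstream fillna and logging index this exact column name, so the
# computation has to write to 'adjusted_expectangle_degrees', not 'expectangle_degrees'.
traj_df['adjusted_expectangle_degrees'] = traj_df['recomputed_angle'].round(2)
beh_df['event02_expect_fillna'] = beh_df['event02_expect_fillna'].fillna(
    traj_df['adjusted_expectangle_degrees'])
print(beh_df['event02_expect_fillna'].tolist())   # [12.34, 56.78, 90.12]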
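
PATCH 3/6 corrects the no-TTL fallback in the pain loop, which was still chaining off the old `onset_ttl*` names after the columns had been renamed to `pain_onset_ttl*`. A self-contained sketch of the corrected arithmetic is below; the `stimulus_times` durations and behavioral values are assumed for illustration, and only the column names and chained additions mirror the diff.

import pandas as pd

# Assumed ramp/plateau durations in seconds -- illustrative only, not taken from the dataset.
stimulus_times = {
    'high_stim': {'rampup': 2.0, 'plateau': 5.0, 'rampdown': 2.0},
    'med_stim':  {'rampup': 2.0, 'plateau': 5.0, 'rampdown': 2.0},
    'low_stim':  {'rampup': 2.0, 'plateau': 5.0, 'rampdown': 2.0},
}

beh_df = pd.DataFrame({
    'event03_stimulus_displayonset': [102.4, 135.8],
    'event03_stimulus_type': ['high_stim', 'low_stim'],
})
trigger = 100.0
stim = pd.DataFrame(index=beh_df.index)

# Fallback when no TTL recording exists: chain each onset off the previous one,
# writing into the renamed 'pain_onset_ttl*' columns the rest of the script uses.
stim['pain_onset_ttl1'] = (beh_df['event03_stimulus_displayonset'] - trigger).round(2)
stim['pain_onset_ttl2'] = (stim['pain_onset_ttl1'] + beh_df['event03_stimulus_type'].apply(lambda x: stimulus_times[x]['rampup'])).round(2)
stim['pain_onset_ttl3'] = (stim['pain_onset_ttl2'] + beh_df['event03_stimulus_type'].apply(lambda x: stimulus_times[x]['plateau'])).round(2)
stim['pain_onset_ttl4'] = (stim['pain_onset_ttl3'] + beh_df['event03_stimulus_type'].apply(lambda x: stimulus_times[x]['rampdown'])).round(2)
print(stim)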
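
Finally, the one-line change in PATCH 6/6 matters because BIDS `*_scans.tsv` files are tab-separated: the script reads them with `sep='\t'`, but `DataFrame.to_csv` defaults to commas, so the rewritten file was no longer valid TSV. A minimal round trip under an assumed filename and row:

import pandas as pd

scan_fname = 'sub-0002_ses-01_scans.tsv'   # assumed example path
scans_df = pd.DataFrame({
    'filename': ['func/sub-0002_ses-01_task-social_acq-mb8_run-01_bold.nii.gz'],
    'acq_time': ['2021-01-01T10:00:00'],
})
scans_df.to_csv(scan_fname, sep='\t', index=False)   # without sep='\t' this would write CSV into a .tsv file
round_trip = pd.read_csv(scan_fname, sep='\t')       # matches how the script reads scans files
assert list(round_trip.columns) == ['filename', 'acq_time']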