diff --git a/.gitignore b/.gitignore index 62a5837..6eb8900 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ *.DS_Store data/ -Geometry/ \ No newline at end of file +Geometry/ +outputs/ \ No newline at end of file diff --git a/AddBiomechanicsDataset.py b/AddBiomechanicsDataset.py index c82ab28..5376409 100644 --- a/AddBiomechanicsDataset.py +++ b/AddBiomechanicsDataset.py @@ -20,7 +20,7 @@ class OutputDataKeys: class AddBiomechanicsDataset(Dataset): - folder_path: str + data_path: str window_size: int stride: int device: torch.device @@ -29,8 +29,8 @@ class AddBiomechanicsDataset(Dataset): input_dof_indices: List[int] windows: List[Tuple[nimble.biomechanics.SubjectOnDisk, int, int, str]] - def __init__(self, folder_path: str, window_size: int, stride: int, input_dofs: List[str], device: torch.device = torch.device('cpu')): - self.folder_path = folder_path + def __init__(self, data_path: str, window_size: int, stride: int, input_dofs: List[str], device: torch.device = torch.device('cpu')): + self.data_path = data_path self.window_size = window_size self.stride = stride self.input_dofs = input_dofs @@ -40,36 +40,42 @@ def __init__(self, folder_path: str, window_size: int, stride: int, input_dofs: # Walk the folder path, and check for any with the ".bin" extension (indicating that they are AddBiomechanics binary data files) num_skipped = 0 - for root, dirs, files in os.walk(folder_path): - for file in files: - if file.endswith(".bin"): - # Create a subject object for each file. This will load just the header from this file, and keep that around in memory - subject_path = os.path.join(root, file) - subject = nimble.biomechanics.SubjectOnDisk( - subject_path) - # Add the subject to the list of subjects - self.subjects.append(subject) - # Also, count how many random windows we could select from this subject - for trial in range(subject.getNumTrials()): - probably_missing: List[bool] = subject.getProbablyMissingGRF(trial) - - trial_length = subject.getTrialLength(trial) - for window_start in range(max(trial_length - (window_size * stride) + 1, 0)): - # Check if any of the frames in this window are probably missing GRF data - # If so, skip this window - skip = False - for i in range(window_start, window_start + window_size): - if probably_missing[i]: - skip = True - break - if not skip: - self.windows.append( - (subject, trial, window_start, subject_path)) - else: - num_skipped += 1 - - print('Num windows: ' + str(len(self.windows))) - print('Num skipped due to missing GRF: ' + str(num_skipped)) + subject_paths = [] + if os.path.isdir(data_path): + for root, dirs, files in os.walk(data_path): + for file in files: + if file.endswith(".bin"): + subject_paths.append(os.path.join(root, file)) + else: + assert data_path.endswith(".bin") + subject_paths.append(data_path) + + for subject_path in subject_paths: + # Create a subject object for each file. 
This will load just the header from this file, and keep that around in memory + subject = nimble.biomechanics.SubjectOnDisk( + subject_path) + # Add the subject to the list of subjects + self.subjects.append(subject) + # Also, count how many random windows we could select from this subject + for trial in range(subject.getNumTrials()): + probably_missing: List[bool] = subject.getProbablyMissingGRF(trial) + + trial_length = subject.getTrialLength(trial) + # print(trial_length, window_size, stride) + # print(max(trial_length - (window_size * stride) + 1, 0)) + for window_start in range(max(trial_length - (window_size * stride) + 1, 0)): + # Check if any of the frames in this window are probably missing GRF data + # If so, skip this window + skip = False + for i in range(window_start, window_start + window_size): + if probably_missing[i]: + skip = True + break + if not skip: + self.windows.append( + (subject, trial, window_start, subject_path)) + else: + num_skipped += 1 # Read the dofs from the first subject (assuming they are all the same) self.input_dof_indices = [] @@ -113,26 +119,29 @@ def __getitem__(self, index: int): numpy_input_dict[InputDataKeys.ACC] = np.column_stack([frame.acc[self.input_dof_indices] for frame in frames]) numpy_input_dict[InputDataKeys.COM_ACC] = np.column_stack([frame.comAcc for frame in frames]) - # numpy_output_dict[OutputDataKeys.CONTACT] = np.column_stack([np.array(frame.contact, dtype=np.float64) for frame in frames]) - + numpy_output_dict[OutputDataKeys.CONTACT] = np.column_stack([np.array(frame.contact, dtype=np.float64) for frame in frames]) + correct = subject.getContactBodies()[0][-1] == 'l' + left = 0 if correct else 1 + right = 1 - left contact_class = 0 - if frames[-1].contact[0] == 0 and frames[-1].contact[1] == 0: + if frames[-1].contact[left] == 0 and frames[-1].contact[right] == 0: # Flight phase contact_class = 0 - elif frames[-1].contact[0] == 1 and frames[-1].contact[1] == 0: + elif frames[-1].contact[left] == 1 and frames[-1].contact[right] == 0: # Left foot stance contact_class = 1 - elif frames[-1].contact[0] == 0 and frames[-1].contact[1] == 1: + elif frames[-1].contact[left] == 0 and frames[-1].contact[right] == 1: # Right foot stance contact_class = 2 - elif frames[-1].contact[0] == 1 and frames[-1].contact[1] == 1: + elif frames[-1].contact[left] == 1 and frames[-1].contact[right] == 1: # Double stance contact_class = 3 one_hot_contact = np.zeros(4, dtype=np.float32) one_hot_contact[contact_class] = 1 numpy_output_dict[OutputDataKeys.CONTACT] = one_hot_contact - + numpy_output_dict[OutputDataKeys.CONTACT_FORCES] = frames[-1].groundContactForce if correct else frames[-1].groundContactForce[[3,4,5,0,1,2]] + # print(f"{numpy_output_dict[OutputDataKeys.CONTACT_FORCES]=}") # ################################################### # # Plotting # import matplotlib.pyplot as plt @@ -178,3 +187,14 @@ def __getitem__(self, index: int): # Return the input and output dictionaries at this timestep return input_dict, label_dict + +if __name__ == "__main__": + window_size = 50 + stride = 20 + batch_size = 32 + device = 'cpu' + + # Input dofs to train on + input_dofs = ['knee_angle_l', 'knee_angle_r', 'hip_flexion_l', 'hip_flexion_r', 'hip_adduction_l', 'hip_adduction_r'] + data_path = 
"/Users/rishi/Documents/Academics/stanford/human-body-dynamics/InferBiomechanics/data/processed/standardized/rajagopal_no_arms/data/protected/us-west-2:be72ee5a-acdb-4e07-b288-a55886ca1e3b/data/c1ab/5dd9f9149f8e8064442a852d79e77050a17c772bdc1199cfe088177b9387a657/5dd9f9149f8e8064442a852d79e77050a17c772bdc1199cfe088177b9387a657.bin" + AddBiomechanicsDataset(data_path, window_size, stride, input_dofs=input_dofs, device=torch.device(device)) \ No newline at end of file diff --git a/FasterDataset.py b/FasterDataset.py new file mode 100644 index 0000000..cf374de --- /dev/null +++ b/FasterDataset.py @@ -0,0 +1,151 @@ +import nimblephysics as nimble +import torch +from torch.utils.data import Dataset +from typing import List, Dict, Tuple +import os +import numpy as np + + +class InputDataKeys: + POS = 'pos' + VEL = 'vel' + ACC = 'acc' + COM_ACC = 'com_acc' + + +class OutputDataKeys: + CONTACT = 'contact' + COM_ACC = 'com_acc' + CONTACT_FORCES = 'contact_forces' + + +class AddBiomechanicsDataset(Dataset): + data_path: str + window_size: int + stride: int + device: torch.device + subjects: List[nimble.biomechanics.SubjectOnDisk] + input_dofs: List[str] + input_dof_indices: List[int] + windows: List[Tuple[nimble.biomechanics.SubjectOnDisk, int, int, str]] + + def __init__(self, subject_paths: List[str], window_size: int, stride: int, input_dofs: List[str], device: torch.device = torch.device('cpu')): + self.subject_paths = subject_paths + self.window_size = window_size + self.stride = stride + self.input_dofs = input_dofs + self.device = device + self.subjects = [] + self.windows = [] + + for subject_path in subject_paths: + # Create a subject object for each file. This will load just the header from this file, and keep that around in memory + subject = nimble.biomechanics.SubjectOnDisk( + subject_path) + # Add the subject to the list of subjects + self.subjects.append(subject) + + # Read the dofs from the first subject (assuming they are all the same) + self.input_dof_indices = [] + skel = self.subjects[0].readSkel() + dof_names = [] + for i in range(skel.getNumDofs()): + dof_name = skel.getDofByIndex(i).getName() + dof_names.append(dof_name) + + for dof_name in input_dofs: + index = dof_names.index(dof_name) + if index >= 0: + self.input_dof_indices.append(index) + else: + # Throw an exception + raise Exception('Dof ' + dof_name + ' not found in input dofs') + + index = 0 + num_skipped = 0 + for subject in self.subjects: + # Also, count how many random windows we could select from this subject + for trial in range(subject.getNumTrials()): + probably_missing: List[bool] = subject.getProbablyMissingGRF(trial) + + trial_length = subject.getTrialLength(trial) + all_frames: List[nimble.biomechanics.Frame] = subject.readFrames(trial, 0, numFramesToRead=trial_length // self.stride, stride=self.stride, contactThreshold=0.1) + for window_start in range(max(len(all_frames) - (window_size) + 1, 0)): + # Check if any of the frames in this window are probably missing GRF data + # If so, skip this window + skip = False + for i in range(window_start, window_start + window_size): + if probably_missing[i]: + skip = True + break + if not skip: + np.random.seed(index) + frames = all_frames[window_start:window_start+window_size] + # print(f"{len(frames)=}") + # We first assemble the data into numpy arrays, and then convert to tensors, to save from spurious memory copies which slow down data loading + numpy_input_dict: Dict[str, np.ndarray] = {} + numpy_output_dict: Dict[str, np.ndarray] = {} + + 
numpy_input_dict[InputDataKeys.POS] = np.column_stack([frame.pos[self.input_dof_indices] for frame in frames]) + numpy_input_dict[InputDataKeys.VEL] = np.column_stack([frame.vel[self.input_dof_indices] for frame in frames]) + numpy_input_dict[InputDataKeys.ACC] = np.column_stack([frame.acc[self.input_dof_indices] for frame in frames]) + numpy_input_dict[InputDataKeys.COM_ACC] = np.column_stack([frame.comAcc for frame in frames]) + + numpy_output_dict[OutputDataKeys.CONTACT] = np.column_stack([np.array(frame.contact, dtype=np.float64) for frame in frames]) + correct = subject.getContactBodies()[0][-1] == 'l' + left = 0 if correct else 1 + right = 1 - left + contact_class = 0 + if frames[-1].contact[left] == 0 and frames[-1].contact[right] == 0: + # Flight phase + contact_class = 0 + elif frames[-1].contact[left] == 1 and frames[-1].contact[right] == 0: + # Left foot stance + contact_class = 1 + elif frames[-1].contact[left] == 0 and frames[-1].contact[right] == 1: + # Right foot stance + contact_class = 2 + elif frames[-1].contact[left] == 1 and frames[-1].contact[right] == 1: + # Double stance + contact_class = 3 + one_hot_contact = np.zeros(4, dtype=np.float32) + one_hot_contact[contact_class] = 1 + + numpy_output_dict[OutputDataKeys.CONTACT] = one_hot_contact + numpy_output_dict[OutputDataKeys.CONTACT_FORCES] = (frames[-1].groundContactForce if correct else frames[-1].groundContactForce[[3,4,5,0,1,2]]) / (np.array([1.,9.8,1.,1.,9.8,1.]) * subject.getMassKg()) + + # Doing things inside torch.no_grad() suppresses warnings and gradient tracking + with torch.no_grad(): + input_dict: Dict[str, torch.Tensor] = {} + for key in numpy_input_dict: + input_dict[key] = torch.tensor( + numpy_input_dict[key], dtype=torch.float32, device=self.device) + + label_dict: Dict[str, torch.Tensor] = {} + for key in numpy_output_dict: + label_dict[key] = torch.tensor( + numpy_output_dict[key], dtype=torch.float32, device=self.device) + + self.windows.append((input_dict, label_dict)) + index += 1 + else: + num_skipped += 1 + + def __len__(self): + return len(self.windows) + + def __getitem__(self, index: int): + input_dict, label_dict = self.windows[index] + + return input_dict, label_dict + +if __name__ == "__main__": + window_size = 50 + stride = 20 + batch_size = 32 + device = 'cpu' + + # Input dofs to train on + input_dofs = ['knee_angle_l', 'knee_angle_r', 'hip_flexion_l', 'hip_flexion_r', 'hip_adduction_l', 'hip_adduction_r'] + data_path = "/Users/rishi/Documents/Academics/stanford/human-body-dynamics/InferBiomechanics/data/processed/standardized/rajagopal_no_arms/data/protected/us-west-2:be72ee5a-acdb-4e07-b288-a55886ca1e3b/data/c1ab/5dd9f9149f8e8064442a852d79e77050a17c772bdc1199cfe088177b9387a657/5dd9f9149f8e8064442a852d79e77050a17c772bdc1199cfe088177b9387a657.bin" + AddBiomechanicsDataset(data_path, window_size, stride, input_dofs=input_dofs, device=torch.device(device)) \ No newline at end of file diff --git a/RegressionLossEvaluator.py b/RegressionLossEvaluator.py new file mode 100644 index 0000000..baf0497 --- /dev/null +++ b/RegressionLossEvaluator.py @@ -0,0 +1,55 @@ +import torch +from b3dDataset import OutputDataKeys +from typing import Dict +import numpy as np +import logging + +class RegressionLossEvaluator: + num_evaluations: int + sum_loss: float + sum_timesteps: int + sum_correct_foot_classifications: float + sum_com_acc_squared_error: np.ndarray + sum_contact_forces_squared_error: np.ndarray + confusion_matrix: np.ndarray + + def __init__(self, contact_forces_weight=1.0): + 
self.contact_forces_criterion = torch.nn.MSELoss() + self.contact_forces_weight = contact_forces_weight + + self.num_evaluations = 0 + self.sum_timesteps = 0 + self.sum_contact_forces_N_error = np.zeros((1,6)) + self.forces = [] + + def __call__(self, outputs: Dict[str, torch.Tensor], labels: Dict[str, torch.Tensor]) -> torch.Tensor: + # Compute the loss + loss = self.contact_forces_weight * torch.sum((outputs[OutputDataKeys.CONTACT_FORCES] - labels[OutputDataKeys.CONTACT_FORCES]) ** 2, dim=0, keepdim=True) + # Keep track of various performance metrics to report + with torch.no_grad(): + self.num_evaluations += 1 + timesteps = outputs[OutputDataKeys.CONTACT_FORCES].shape[0] + self.sum_timesteps += timesteps + self.sum_contact_forces_N_error += loss.numpy() + self.forces.append(labels[OutputDataKeys.CONTACT_FORCES].numpy()) + return torch.sum(loss) / timesteps + + def print_report(self): + logging.info(f'\tLoss: {np.sqrt(np.sum(self.sum_contact_forces_N_error) / self.sum_timesteps)}') + self.forces = np.abs(np.concatenate(self.forces)) + logging.info(f"max={np.max(self.forces, axis=0)}, min={np.min(self.forces, axis=0)}, mean={np.mean(self.forces, axis=0)}") + logging.info('\tContact force avg N error (per axis), foot 1: ' + + str(np.sqrt(self.sum_contact_forces_N_error[:,:3] / self.sum_timesteps))) + logging.info('\tContact force avg N error (per axis), foot 2: ' + + str(np.sqrt(self.sum_contact_forces_N_error[:,3:] / self.sum_timesteps))) + + # Reset + self.num_evaluations = 0 + # self.sum_loss = 0.0 + self.sum_timesteps = 0 + # self.sum_correct_foot_classifications = 0.0 + # self.sum_com_acc_mpss_error = np.zeros(3) + self.sum_contact_forces_N_error = np.zeros((1,6)) + # self.confusion_matrix = np.zeros((4,4), dtype=np.int64) + self.forces = [] + pass diff --git a/analyse.py b/analyse.py new file mode 100644 index 0000000..4fbdcd6 --- /dev/null +++ b/analyse.py @@ -0,0 +1,57 @@ +import torch +from torch.utils.data import DataLoader +from main import get_model +from AddBiomechanicsDataset import AddBiomechanicsDataset +from LossEvaluator import LossEvaluator +from typing import Dict, Tuple, List +import glob +import pickle + +import warnings +warnings.filterwarnings("ignore") + +window_size = 50 +stride = 20 +batch_size = 32 +device = 'cpu' + +# Input dofs to train on +input_dofs = ['knee_angle_l', 'knee_angle_r', 'hip_flexion_l', 'hip_flexion_r', 'hip_adduction_l', 'hip_adduction_r'] + +# load trained model +model = get_model() +load_epoch = 0 +load_batch = 88000 +model_path = f"./outputs/models/epoch_{load_epoch}_batch_{load_batch}.pt" +checkpoint = torch.load(model_path) +model.load_state_dict(checkpoint["model_state_dict"]) + +# analyze a given file +def analyse_file(file_path): + analyse_dataset = AddBiomechanicsDataset(file_path, window_size, stride, input_dofs=input_dofs, device=torch.device(device)) + analyse_dataloader = DataLoader(analyse_dataset, batch_size=batch_size, shuffle=False) + + analysis_evaluator = LossEvaluator(contact_weight=1.0, com_acc_weight=1e-3, contact_forces_weight=1e-3) + + with torch.no_grad(): + for i, batch in enumerate(analyse_dataloader): + if i % 100 == 0: + print(' - Dev Batch ' + str(i) + '/' + str(len(analyse_dataloader))) + inputs: Dict[str, torch.Tensor] + labels: Dict[str, torch.Tensor] + inputs, labels = batch + outputs = model(inputs) + loss = analysis_evaluator(outputs, labels) + return analysis_evaluator + +def analyse_folder(folder_path): + files = glob.glob(f"{folder_path}/**/*.bin", recursive=True) + for i, file in enumerate(files): + 
analysis_evaluator = analyse_file(file) + pickle.dump((file, analysis_evaluator), open(f"./outputs/analysis/{i}.pkl", "wb")) + +if __name__ == "__main__": + # file_path = "/Users/rishi/Documents/Academics/stanford/human-body-dynamics/InferBiomechanics/data/processed/standardized/rajagopal_no_arms/data/protected/us-west-2:43f17b51-2473-445e-8701-feae8881071f/data/S02/4af1b16b78e1fb1a36964be976ad5bb530b1c9f9e9302a04b5d96282a6d80876/4af1b16b78e1fb1a36964be976ad5bb530b1c9f9e9302a04b5d96282a6d80876.bin" + # analyse_file(file_path) + folder_path = "/Users/rishi/Documents/Academics/stanford/human-body-dynamics/InferBiomechanics/data/processed" + analyse_folder(folder_path) \ No newline at end of file diff --git a/b3dDataset.py b/b3dDataset.py new file mode 100644 index 0000000..d8aef66 --- /dev/null +++ b/b3dDataset.py @@ -0,0 +1,162 @@ +import nimblephysics as nimble +import torch +from torch.utils.data import Dataset +from typing import List, Dict, Tuple +import os +import numpy as np +import argparse + +class InputDataKeys: + POS = 'pos' + VEL = 'vel' + ACC = 'acc' + COM_ACC = 'com_acc' + PELVIS_POS = 'pelvis_pos' + PELVIS_ROT = 'pelvis_rot' + PELVIS_LIN_VEL = 'pelvis_lin_vel' + PELVIS_ANG_VEL = 'pelvis_ang_vel' + + +class OutputDataKeys: + CONTACT = 'contact' + COM_ACC = 'com_acc' + CONTACT_FORCES = 'contact_forces' + + +class AddBiomechanicsDataset(Dataset): + data_path: str + window_size: int + stride: int + device: torch.device + subjects: List[nimble.biomechanics.SubjectOnDisk] + input_dofs: List[str] + input_dof_indices: List[int] + windows: List[Tuple[nimble.biomechanics.SubjectOnDisk, int, int, str]] + + def __init__(self, subject_paths: List[str], args: argparse.Namespace): + self.subject_paths = subject_paths + self.window_size = args.window_size + self.stride = args.stride + self.input_dofs = args.input_dofs + self.device = torch.device(args.device) + self.subjects = [] + self.windows = [] + self.args = args + + for subject_path in subject_paths: + # Create a subject object for each file. 
This will load just the header from this file, and keep that around in memory + subject = nimble.biomechanics.SubjectOnDisk( + subject_path) + # Add the subject to the list of subjects + self.subjects.append(subject) + + # Read the dofs from the first subject (assuming they are all the same) + self.input_dof_indices = [] + skel = self.subjects[0].readSkel(0) # 0 processing pass - kinematics + dof_names = [] + for i in range(skel.getNumDofs()): + dof_name = skel.getDofByIndex(i).getName() + dof_names.append(dof_name) + + for dof_name in self.input_dofs: + index = dof_names.index(dof_name) + if index >= 0: + self.input_dof_indices.append(index) + else: + # Throw an exception + raise Exception('Dof ' + dof_name + ' not found in input dofs') + + index = 0 + num_skipped = 0 + for subject in self.subjects: + # Also, count how many random windows we could select from this subject + for trial in range(subject.getNumTrials()): + probably_missing: List[nimble.biomechanics.MissingGRFReason] = subject.getMissingGRF(trial) + + trial_length = subject.getTrialLength(trial) + all_frames: List[nimble.biomechanics.Frame] = subject.readFrames(trial, 0, numFramesToRead=trial_length // self.stride, stride=self.stride, contactThreshold=0.1) + # print([len(frame.processingPasses) for frame in all_frames]) + all_frames_processing_pass: List[nimble.biomechanics.FrameProcessingPass] = [frame.processingPasses[self.args.processing_pass] for frame in all_frames] + + for window_start in range(max(len(all_frames_processing_pass) - (self.window_size) + 1, 0)): + # Check if any of the frames in this window are probably missing GRF data + # If so, skip this window + skip = False + for i in range(window_start, window_start + self.window_size): + if probably_missing[i] == nimble.biomechanics.MissingGRFReason.notMissingGRF: + skip = True + break + if not skip: + np.random.seed(index) + frames = all_frames_processing_pass[window_start:window_start+self.window_size] + + numpy_input_dict: Dict[str, np.ndarray] = {} + numpy_output_dict: Dict[str, np.ndarray] = {} + + numpy_input_dict[InputDataKeys.POS] = np.column_stack([frame.pos[self.input_dof_indices] for frame in frames]) + numpy_input_dict[InputDataKeys.VEL] = np.column_stack([frame.vel[self.input_dof_indices] for frame in frames]) + numpy_input_dict[InputDataKeys.ACC] = np.column_stack([frame.acc[self.input_dof_indices] for frame in frames]) + numpy_input_dict[InputDataKeys.PELVIS_POS] = np.column_stack([frame.pos[3:6] for frame in frames]) + numpy_input_dict[InputDataKeys.PELVIS_ROT] = np.column_stack([frame.pos[:3] for frame in frames]) + numpy_input_dict[InputDataKeys.PELVIS_LIN_VEL] = np.column_stack([frame.vel[3:6] for frame in frames]) + numpy_input_dict[InputDataKeys.PELVIS_ANG_VEL] = np.column_stack([frame.vel[:3] for frame in frames]) + numpy_input_dict[InputDataKeys.COM_ACC] = np.column_stack([frame.comAcc for frame in frames]) + + numpy_output_dict[OutputDataKeys.CONTACT] = np.column_stack([np.array(frame.contact, dtype=np.float64) for frame in frames]) + correct = subject.getGroundForceBodies()[0][-1] == 'l' + left = 0 if correct else 1 + right = 1 - left + contact_class = 0 + if frames[-1].contact[left] == 0 and frames[-1].contact[right] == 0: + # Flight phase + contact_class = 0 + elif frames[-1].contact[left] == 1 and frames[-1].contact[right] == 0: + # Left foot stance + contact_class = 1 + elif frames[-1].contact[left] == 0 and frames[-1].contact[right] == 1: + # Right foot stance + contact_class = 2 + elif frames[-1].contact[left] == 1 and 
frames[-1].contact[right] == 1: + # Double stance + contact_class = 3 + one_hot_contact = np.zeros(4, dtype=np.float32) + one_hot_contact[contact_class] = 1 + + numpy_output_dict[OutputDataKeys.CONTACT] = one_hot_contact + numpy_output_dict[OutputDataKeys.CONTACT_FORCES] = (frames[-1].groundContactForce if correct else frames[-1].groundContactForce[[3,4,5,0,1,2]]) / (np.array([1.,9.8,1.,1.,9.8,1.]) * subject.getMassKg()) + + # Doing things inside torch.no_grad() suppresses warnings and gradient tracking + with torch.no_grad(): + input_dict: Dict[str, torch.Tensor] = {} + for key in numpy_input_dict: + input_dict[key] = torch.tensor( + numpy_input_dict[key], dtype=torch.float32, device=self.device) + + label_dict: Dict[str, torch.Tensor] = {} + for key in numpy_output_dict: + label_dict[key] = torch.tensor( + numpy_output_dict[key], dtype=torch.float32, device=self.device) + + self.windows.append((input_dict, label_dict)) + index += 1 + else: + num_skipped += 1 + + def __len__(self): + return len(self.windows) + + def __getitem__(self, index: int): + input_dict, label_dict = self.windows[index] + + return input_dict, label_dict + +if __name__ == "__main__": + window_size = 50 + stride = 20 + batch_size = 32 + device = 'cpu' + + # Input dofs to train on + input_dofs = ['knee_angle_l', 'knee_angle_r', 'hip_flexion_l', 'hip_flexion_r', 'hip_adduction_l', 'hip_adduction_r'] + data_path = "/Users/rishi/Documents/Academics/stanford/human-body-dynamics/InferBiomechanics/data/processed/standardized/rajagopal_no_arms/data/protected/us-west-2:be72ee5a-acdb-4e07-b288-a55886ca1e3b/data/c1ab/5dd9f9149f8e8064442a852d79e77050a17c772bdc1199cfe088177b9387a657/5dd9f9149f8e8064442a852d79e77050a17c772bdc1199cfe088177b9387a657.bin" + AddBiomechanicsDataset(data_path, window_size, stride, input_dofs=input_dofs, device=torch.device(device)) \ No newline at end of file diff --git a/main.py b/main.py index 8796786..8eef417 100644 --- a/main.py +++ b/main.py @@ -14,81 +14,94 @@ stride = 20 # The batch size is the number of windows we want to load at once, for parallel training and inference on a GPU batch_size = 32 -# The number of epochs is the number of times we want to iterate over the entire dataset during training -epochs = 40 -# Learning rate -learning_rate = 1e-3 -# learning_rate = 1e-1 + device = 'cpu' # Input dofs to train on input_dofs = ['knee_angle_l', 'knee_angle_r', 'hip_flexion_l', 'hip_flexion_r', 'hip_adduction_l', 'hip_adduction_r'] -# Create an instance of the dataset -train_dataset = AddBiomechanicsDataset( - './data/train', window_size, stride, input_dofs=input_dofs, device=torch.device(device)) -dev_dataset = AddBiomechanicsDataset( - './data/dev', window_size, stride, input_dofs=input_dofs, device=torch.device(device)) - -# Create a DataLoader to load the data in batches -train_dataloader = DataLoader( - train_dataset, batch_size=batch_size, shuffle=True) -dev_dataloader = DataLoader(dev_dataset, batch_size=batch_size, shuffle=True) - -# Define the model -# hidden_size = 2 * ((len(input_dofs) * window_size * 3) + (window_size * 3)) -hidden_size = 256 -model = FeedForwardBaseline(len(input_dofs), window_size, hidden_size, dropout_prob=0.1, device=device) - - -# Define the optimizer -optimizer = torch.optim.Adagrad(model.parameters(), lr=learning_rate) - -for epoch in range(epochs): - # Iterate over the entire training dataset - loss_evaluator = LossEvaluator( - contact_weight=1.0, com_acc_weight=1e-3, contact_forces_weight=1e-3) - for i, batch in enumerate(train_dataloader): - inputs: Dict[str, 
torch.Tensor] - labels: Dict[str, torch.Tensor] - inputs, labels = batch - - # Clear the gradients - optimizer.zero_grad() - - # Forward pass - outputs = model(inputs) - - # Compute the loss - loss = loss_evaluator(outputs, labels) - - if i % 100 == 0: - print(' - Batch '+str(i)+'/'+str(len(train_dataloader))) - if i % 1000 == 0: - loss_evaluator.print_report() - - # Backward pass - loss.backward() - - # Update the model's parameters - optimizer.step() - # Report training loss on this epoch - print('Epoch '+str(epoch)+': ') - print('Training Set Evaluation: ') - loss_evaluator.print_report() - - # At the end of each epoch, evaluate the model on the dev set - dev_loss_evaluator = LossEvaluator( - contact_weight=1.0, com_acc_weight=1e-3, contact_forces_weight=1e-3) - with torch.no_grad(): - for i, batch in enumerate(dev_dataloader): - if i % 100 == 0: - print(' - Dev Batch ' + str(i) + '/' + str(len(dev_dataloader))) +def get_model(): + # Define the model + # hidden_size = 2 * ((len(input_dofs) * window_size * 3) + (window_size * 3)) + hidden_size = 256 + model = FeedForwardBaseline(len(input_dofs), window_size, hidden_size, dropout_prob=0.1, device=device) + + return model + +if __name__ == "__main__": + model = get_model() + + # Create an instance of the dataset + train_dataset = AddBiomechanicsDataset( + './data/train', window_size, stride, input_dofs=input_dofs, device=torch.device(device)) + dev_dataset = AddBiomechanicsDataset( + './data/dev', window_size, stride, input_dofs=input_dofs, device=torch.device(device)) + + # Create a DataLoader to load the data in batches + train_dataloader = DataLoader( + train_dataset, batch_size=batch_size, shuffle=True) + dev_dataloader = DataLoader(dev_dataset, batch_size=batch_size, shuffle=True) + + # The number of epochs is the number of times we want to iterate over the entire dataset during training + epochs = 40 + # Learning rate + learning_rate = 1e-3 + # learning_rate = 1e-1 + + # Define the optimizer + optimizer = torch.optim.Adagrad(model.parameters(), lr=learning_rate) + + for epoch in range(epochs): + # Iterate over the entire training dataset + loss_evaluator = LossEvaluator( + contact_weight=1.0, com_acc_weight=1e-3, contact_forces_weight=1e-3) + for i, batch in enumerate(train_dataloader): inputs: Dict[str, torch.Tensor] labels: Dict[str, torch.Tensor] inputs, labels = batch + + # Clear the gradients + optimizer.zero_grad() + + # Forward pass outputs = model(inputs) - loss = dev_loss_evaluator(outputs, labels) - # Report dev loss on this epoch - print('Dev Set Evaluation: ') - dev_loss_evaluator.print_report() + + # Compute the loss + loss = loss_evaluator(outputs, labels) + + if i % 100 == 0: + print(' - Batch '+str(i)+'/'+str(len(train_dataloader))) + if i % 1000 == 0: + loss_evaluator.print_report() + model_path = f"./outputs/models/epoch_{epoch}_batch_{i}.pt" + torch.save({ + 'epoch': epoch, + 'model_state_dict': model.state_dict(), + 'optimizer_state_dict': optimizer.state_dict(), + }, model_path) + + # Backward pass + loss.backward() + + # Update the model's parameters + optimizer.step() + # Report training loss on this epoch + print('Epoch '+str(epoch)+': ') + print('Training Set Evaluation: ') + loss_evaluator.print_report() + + # At the end of each epoch, evaluate the model on the dev set + dev_loss_evaluator = LossEvaluator( + contact_weight=1.0, com_acc_weight=1e-3, contact_forces_weight=1e-3) + with torch.no_grad(): + for i, batch in enumerate(dev_dataloader): + if i % 100 == 0: + print(' - Dev Batch ' + str(i) + '/' + 
str(len(dev_dataloader))) + inputs: Dict[str, torch.Tensor] + labels: Dict[str, torch.Tensor] + inputs, labels = batch + outputs = model(inputs) + loss = dev_loss_evaluator(outputs, labels) + # Report dev loss on this epoch + print('Dev Set Evaluation: ') + dev_loss_evaluator.print_report() diff --git a/models/FeedForwardRegressionBaseline.py b/models/FeedForwardRegressionBaseline.py new file mode 100644 index 0000000..0f4d9fd --- /dev/null +++ b/models/FeedForwardRegressionBaseline.py @@ -0,0 +1,78 @@ +import torch +import torch.nn as nn +from typing import Dict +from b3dDataset import InputDataKeys, OutputDataKeys +import argparse + +class FeedForwardBaseline(nn.Module): + dofs: int + window_size: int + hidden_size: int + + def __init__(self, args: argparse.Namespace, dofs: int): + super(FeedForwardBaseline, self).__init__() + self.dofs = dofs + self.window_size = args.window_size + self.hidden_size = args.hidden_size + self.feature_names = args.features + # Compute input and output sizes + + # For input, we need each dof, for position and velocity and acceleration, for each frame in the window, and then also the COM acceleration for each frame in the window + input_size = dofs + if InputDataKeys.PELVIS_POS in args.features: + input_size += 3 + input_size *= args.window_size + # For output, we have four foot-ground contact classes (foot 1, foot 2, both, neither) + output_size = 1 + + self.dropout1 = nn.Dropout(args.dropout_prob) + self.fc1 = nn.Linear(input_size, args.hidden_size, dtype=torch.float32, device=args.device) + self.dropout2 = nn.Dropout(args.dropout_prob) + self.relu = nn.ReLU() + self.fc2 = nn.Linear(args.hidden_size, output_size, dtype=torch.float32, device=args.device) + + def forward(self, input: Dict[str, torch.Tensor]) -> torch.Tensor: + # Get the position, velocity, and acceleration tensors + flattened_input_list = [] + for key in input: + if key in self.feature_names: + flattened_input_list.append(input[key].flatten(start_dim=-2)) + flattened_input = torch.cat(flattened_input_list, dim=-1) + # Actually run the forward pass + x = self.dropout1(flattened_input) + x = self.fc1(x) + x = self.dropout2(x) + x = self.relu(x) + x = self.fc2(x) + + return x + +class GRFPredictor(nn.Module): + def __init__(self, args: argparse.Namespace, dofs: int): + super(GRFPredictor, self).__init__() + self.grf = nn.ModuleList([FeedForwardBaseline(args, dofs) for _ in range(6)]) + + def forward(self, input: Dict[str, torch.Tensor]) -> torch.Tensor: + outputs = [] + for i in range(6): + outputs.append(self.grf[i].forward(input)) + output = torch.cat(outputs, dim=1) + return output + +class DynamicsPredictor(nn.Module): + def __init__(self, args: argparse.Namespace, dofs: int): + super(DynamicsPredictor, self).__init__() + self.grf_predictor = GRFPredictor(args, dofs) + # self.com_acc_predictor = + + def forward(self, input: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: + # Now we need to split the output into the different components + output_dict: Dict[str, torch.Tensor] = {} + output_dict[OutputDataKeys.CONTACT_FORCES] = self.grf_predictor(input) + + return output_dict + +if __name__ == "__main__": + print(FeedForwardBaseline(19, 5)) + print(GRFPredictor(19, 5)) + print(DynamicsPredictor(19, 5)) \ No newline at end of file diff --git a/print_files.py b/print_files.py new file mode 100644 index 0000000..86fe57d --- /dev/null +++ b/print_files.py @@ -0,0 +1,12 @@ +import os +import pickle + +files_with_acc = [] +for i in range(35): + file, analysis_evaluator = 
pickle.load(open(f"./outputs/analysis/{i}.pkl", "rb")) + acc = analysis_evaluator.sum_correct_foot_classifications / analysis_evaluator.sum_timesteps if analysis_evaluator.sum_timesteps else -1 + files_with_acc.append((file, acc)) + + +sorted_files = sorted(files_with_acc, key=lambda x: x[1]) +print(sorted_files) \ No newline at end of file diff --git a/regression_main.py b/regression_main.py new file mode 100644 index 0000000..d78d7c9 --- /dev/null +++ b/regression_main.py @@ -0,0 +1,116 @@ +import torch +from torch.utils.data import DataLoader +from AddBiomechanicsDataset import AddBiomechanicsDataset, InputDataKeys, OutputDataKeys +from models.FeedForwardRegressionBaseline import FeedForwardBaseline +from models.TransformerBaseline import TransformerBaseline +from RegressionLossEvaluator import RegressionLossEvaluator +from typing import Dict, Tuple, List +import time + +# The window size is the number of frames we want to have as context for our model to make predictions. +window_size = 5 +# The number of timesteps to skip between each frame in a given window. Data is currently all sampled at 100 Hz, so +# this means 0.2 seconds between each frame. This times window_size is the total time span of each window, which is +# currently 2.0 seconds. +stride = 20 +# The batch size is the number of windows we want to load at once, for parallel training and inference on a GPU +batch_size = 1024 + +device = 'cpu' + +# Input dofs to train on +# input_dofs = ['knee_angle_l', 'knee_angle_r', 'hip_flexion_l', 'hip_flexion_r', 'hip_adduction_l', 'hip_adduction_r'] +input_dofs = ['hip_flexion_r', 'hip_adduction_r', 'hip_rotation_r', 'knee_angle_r', 'ankle_angle_r', 'subtalar_angle_r', 'mtp_angle_r', 'hip_flexion_l', 'hip_adduction_l', 'hip_rotation_l', 'knee_angle_l', 'ankle_angle_l', 'subtalar_angle_l', 'mtp_angle_l'] + +def get_model(): + # Define the model + # hidden_size = 2 * ((len(input_dofs) * window_size * 3) + (window_size * 3)) + hidden_size = 256 + model = FeedForwardBaseline(len(input_dofs), window_size, hidden_size, dropout_prob=0.0, device=device) + + return model + +if __name__ == "__main__": + model = get_model() + + # Create an instance of the dataset + train_dataset = AddBiomechanicsDataset( + './data/train', window_size, stride, input_dofs=input_dofs, device=torch.device(device)) + dev_dataset = AddBiomechanicsDataset( + './data/dev', window_size, stride, input_dofs=input_dofs, device=torch.device(device)) + + # Create a DataLoader to load the data in batches + train_dataloader = DataLoader( + train_dataset, batch_size=batch_size, shuffle=True) + dev_dataloader = DataLoader(dev_dataset, batch_size=batch_size, shuffle=True) + + # The number of epochs is the number of times we want to iterate over the entire dataset during training + epochs = 40 + # Learning rate + learning_rate = 1e-3 + # learning_rate = 1e-1 + + # Define the optimizer + optimizer = torch.optim.Adagrad(model.parameters(), lr=learning_rate) + + for epoch in range(epochs): + # Iterate over the entire training dataset + loss_evaluator = RegressionLossEvaluator(contact_forces_weight=1.) 
+ data_start = time.time() + for i, batch in enumerate(train_dataloader): + data_time = time.time() - data_start + + forward_pass = time.time() + inputs: Dict[str, torch.Tensor] + labels: Dict[str, torch.Tensor] + inputs, labels = batch + + # Clear the gradients + optimizer.zero_grad() + + # Forward pass + outputs = model(inputs) + forward_pass = time.time() - forward_pass + + # Compute the loss + backprop = time.time() + loss = loss_evaluator(outputs, labels) + + if i % 100 == 0: + print(' - Batch '+str(i)+'/'+str(len(train_dataloader))) + if i % 100 == 0: + loss_evaluator.print_report() + model_path = f"./outputs/models/epoch_{epoch}_batch_{i}.pt" + torch.save({ + 'epoch': epoch, + 'model_state_dict': model.state_dict(), + 'optimizer_state_dict': optimizer.state_dict(), + }, model_path) + + # Backward pass + loss.backward() + + # Update the model's parameters + optimizer.step() + backprop = time.time() - backprop + # print(f"{data_time=}, {forward_pass=}, {backprop=}") + data_start = time.time() + # Report training loss on this epoch + print('Epoch '+str(epoch)+': ') + print('Training Set Evaluation: ') + loss_evaluator.print_report() + + # At the end of each epoch, evaluate the model on the dev set + dev_loss_evaluator = RegressionLossEvaluator(contact_forces_weight=1.0) + with torch.no_grad(): + for i, batch in enumerate(dev_dataloader): + if i % 100 == 0: + print(' - Dev Batch ' + str(i) + '/' + str(len(dev_dataloader))) + inputs: Dict[str, torch.Tensor] + labels: Dict[str, torch.Tensor] + inputs, labels = batch + outputs = model(inputs) + loss = dev_loss_evaluator(outputs, labels) + # Report dev loss on this epoch + print('Dev Set Evaluation: ') + dev_loss_evaluator.print_report() diff --git a/regression_main_faster.py b/regression_main_faster.py new file mode 100644 index 0000000..4bb7596 --- /dev/null +++ b/regression_main_faster.py @@ -0,0 +1,233 @@ +import torch +from torch.utils.data import DataLoader +from b3dDataset import AddBiomechanicsDataset, InputDataKeys, OutputDataKeys +from models.FeedForwardRegressionBaseline import DynamicsPredictor +from models.TransformerBaseline import TransformerBaseline +from RegressionLossEvaluator import RegressionLossEvaluator +from typing import Dict, Tuple, List +import time +import numpy as np +import os +import matplotlib.pyplot as plt +import logging +import argparse + +def get_model(args): + # Define the model + # hidden_size = 2 * ((len(input_dofs) * window_size * 3) + (window_size * 3)) + model = DynamicsPredictor(args, len(args.input_dofs)) + + return model + +def get_subject_paths(args): + subject_paths = [] + + data_path = f"{args.dataset}/train" + if os.path.isdir(data_path): + for root, dirs, files in os.walk(data_path): + for file in files: + if file.endswith(".b3d"): + subject_paths.append(os.path.join(root, file)) + np.random.seed(0) + np.random.shuffle(subject_paths) + + train, dev = .8, .2 # test split is 20% + train = int(train * len(subject_paths)) + dev = int(dev * len(subject_paths)) + + return subject_paths[:train], subject_paths[train:train+dev], [] + +class Trainer: + def __init__(self, args: argparse.Namespace, model: torch.nn.Module): + self.train_losses = [] + self.train_steps = [] + self.dev_losses = [] + self.dev_steps = [] + self.global_stepper = 0 + self.epoch = 0 + self.args = args + self.exp_name = args.exp_name + + self.model = model + self.train_subject_paths, self.dev_subject_paths, self.test_subject_paths = get_subject_paths(args) + logging.info(f"train: {len(self.train_subject_paths)}, dev: 
{len(self.dev_subject_paths)}, test: {len(self.test_subject_paths)}") + + exp_dir = f"outputs/{args.exp_name}" + self.model_dir_path = f"{exp_dir}/models" + os.makedirs(self.model_dir_path, exist_ok=True) + + self.plot_dir_path = f"{exp_dir}/plots" + os.makedirs(self.plot_dir_path, exist_ok=True) + + self.pred_dir_path = f"{exp_dir}/pred" + os.makedirs(self.pred_dir_path, exist_ok=True) + + self.direction = {0: 'x-left', 1: 'y-left', 2: 'z-left', 3: 'x-right', 4: 'y-right', 5: 'z-right'} + + def train(self): + # The number of epochs is the number of times we want to iterate over the entire dataset during training + epochs = 40 + # Learning rate + learning_rate = self.args.lr + # learning_rate = 1e-1 + + # Define the optimizer + self.optimizer = torch.optim.Adagrad(self.model.parameters(), lr=learning_rate) + for _ in range(epochs): + self.train_epoch() + + def train_epoch(self): + np.random.seed(self.epoch+9999) + np.random.shuffle(self.train_subject_paths) + + # Iterate over the entire training dataset + for subject_index in range(0, len(self.train_subject_paths), 20): + train_labels = [] + train_preds = [] + + dataset_creation = time.time() + # Create an instance of the dataset + train_dataset = AddBiomechanicsDataset(self.train_subject_paths[subject_index:subject_index+20], self.args) + # Create a DataLoader to load the data in batches + train_dataloader = DataLoader(train_dataset, batch_size=self.args.batch_size, shuffle=True) + dataset_creation = time.time() - dataset_creation + logging.info(f"{dataset_creation=}") + data_start = time.time() + for i, batch in enumerate(train_dataloader): + loss_evaluator = RegressionLossEvaluator(contact_forces_weight=1.) + data_time = time.time() - data_start + + forward_pass = time.time() + inputs: Dict[str, torch.Tensor] + labels: Dict[str, torch.Tensor] + inputs, labels = batch + + # Clear the gradients + self.optimizer.zero_grad() + + # Forward pass + outputs = self.model(inputs) + forward_pass = time.time() - forward_pass + + force_labels = labels[OutputDataKeys.CONTACT_FORCES].numpy() + train_preds.append(outputs[OutputDataKeys.CONTACT_FORCES].detach().numpy()) + train_labels.append(force_labels) + + logging.info(f"Label stats: {np.max(force_labels, axis=0)=}, {np.min(force_labels, axis=0)=}, {np.mean(np.abs(force_labels), axis=0)=}, {np.mean(force_labels, axis=0)=}") + # Compute the loss + backprop = time.time() + loss = loss_evaluator(outputs, labels) + + self.train_losses.append(np.sqrt(loss_evaluator.sum_contact_forces_N_error / loss_evaluator.sum_timesteps)) + self.train_steps.append(self.global_stepper) + + if i % 100 == 0: + logging.info(f' - Batch {subject_index} / {len(self.train_subject_paths)}') + if i % 100 == 0: + loss_evaluator.print_report() + model_path = os.path.join(self.model_dir_path, f"epoch_{self.epoch}_batch_{i}.pt") + torch.save({ + 'args': args, + 'epoch': self.epoch, + 'model_state_dict': self.model.state_dict(), + 'optimizer_state_dict': self.optimizer.state_dict(), + }, model_path) + + self.plot_losses() + self.plot_preds(train_labels, train_preds, split="train") + + # Backward pass + loss.backward() + + # Update the model's parameters + self.optimizer.step() + backprop = time.time() - backprop + logging.info(f"{data_time=}, {forward_pass=}, {backprop=}") + data_start = time.time() + self.global_stepper += 1 + # # Report training loss on this epoch + # print('Epoch '+str(epoch)+': ') + # print('Training Set Evaluation: ') + # loss_evaluator.print_report() + + # At the end of each epoch, evaluate the model on the dev 
set + dev_loss_evaluator = RegressionLossEvaluator(contact_forces_weight=1.0) + + for subject_index in range(0, len(self.dev_subject_paths), 20): + dev_dataset = AddBiomechanicsDataset(self.dev_subject_paths[subject_index:subject_index+20], self.args) + dev_dataloader = DataLoader(dev_dataset, batch_size=self.args.batch_size, shuffle=False) + + with torch.no_grad(): + for i, batch in enumerate(dev_dataloader): + if i % 100 == 0: + logging.info(f' - Dev Batch {subject_index} / {len(self.dev_subject_paths)}') + inputs: Dict[str, torch.Tensor] + labels: Dict[str, torch.Tensor] + inputs, labels = batch + outputs = self.model(inputs) + loss = dev_loss_evaluator(outputs, labels) + # print(f"{labels[OutputDataKeys.CONTACT_FORCES].shape=}, {outputs[OutputDataKeys.CONTACT_FORCES].shape=}") + self.plot_preds([labels[OutputDataKeys.CONTACT_FORCES].numpy()], [outputs[OutputDataKeys.CONTACT_FORCES].numpy()], split="dev") + self.dev_losses.append(np.sqrt(dev_loss_evaluator.sum_contact_forces_N_error / dev_loss_evaluator.sum_timesteps)) + self.dev_steps.append(self.global_stepper) + # Report dev loss on this epoch + logging.info('Dev Set Evaluation: ') + dev_loss_evaluator.print_report() + self.epoch += 1 + + def plot_losses(self): + train_losses = np.concatenate(self.train_losses) + # print(f"{train_losses.shape=}") + dev_losses = None if not self.dev_losses else np.concatenate(self.dev_losses) + + for i in range(6): + plt.clf() + plt.plot(self.train_steps, train_losses[:,i], label='train') + if dev_losses is not None: + plt.plot(self.dev_steps, dev_losses[:,i], label='dev') + plt.legend() + plt.savefig(os.path.join(self.plot_dir_path, f"loss-{self.direction[i]}.png")) + + def plot_preds(self, labels, preds, split="train"): + for i in range(6): + plt.clf() + plt.plot(np.concatenate(labels)[::5,i], label=f'True F{self.direction[i]}') + plt.plot(np.concatenate(preds)[::5,i], label=f'Pred F{self.direction[i]}') + plt.legend() + plt.savefig(os.path.join(self.pred_dir_path, f"{split}-f{self.direction[i]}.png")) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--exp-name", type=str, default="run1") + parser.add_argument("--lr", type=float, default=1e-3) + parser.add_argument("--dataset", type=str, default="data") + parser.add_argument("--features", type=str, nargs='+', default=['pos', 'pelvis_pos']) + parser.add_argument("--device", type=str, default="cpu") + parser.add_argument("--stride", type=int, default=20) + parser.add_argument("--window_size", type=int, default=5) + parser.add_argument("--hidden_size", type=int, default=256) + parser.add_argument("--dropout_prob", type=float, default=0.1) + parser.add_argument("--batch_size", type=int, default=1024) + parser.add_argument("--processing_pass", type=int, default=0) + parser.add_argument("--input_dofs", type=str, nargs='+', default=['hip_flexion_r', 'hip_adduction_r', 'hip_rotation_r', 'knee_angle_r', 'ankle_angle_r', 'subtalar_angle_r', 'mtp_angle_r', 'hip_flexion_l', 'hip_adduction_l', 'hip_rotation_l', 'knee_angle_l', 'ankle_angle_l', 'subtalar_angle_l', 'mtp_angle_l']) # ['knee_angle_l', 'knee_angle_r', 'hip_flexion_l', 'hip_flexion_r', 'hip_adduction_l', 'hip_adduction_r'] + + args = parser.parse_args() + + exp_dir = f"outputs/{args.exp_name}" + os.makedirs(exp_dir, exist_ok=True) + logpath = os.path.join(exp_dir, "log") + # Create and configure logger + logging.basicConfig(filename=logpath, + format='%(asctime)s %(message)s', + filemode='a') + + # Creating an object + logger = logging.getLogger() + + # Setting 
the threshold of logger to INFO + logger.setLevel(logging.INFO) + + model = get_model(args) + + trainer = Trainer(args, model) + trainer.train()
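
Note on the window extraction in the dataset files above: both FasterDataset.py and b3dDataset.py read a trial at the chosen stride and then keep only windows in which no frame is flagged as missing ground-reaction-force data (in b3dDataset.py the check compares against MissingGRFReason.notMissingGRF, which reads as the inverse of its own comment, so it may be worth a second look). A minimal sketch of the intended filtering, with a plain is_missing boolean list standing in for the nimblephysics per-frame flags:

from typing import List

def valid_window_starts(is_missing: List[bool], window_size: int) -> List[int]:
    """Start indices of windows that contain no missing-GRF frames."""
    starts = []
    for start in range(max(len(is_missing) - window_size + 1, 0)):
        if not any(is_missing[start:start + window_size]):
            starts.append(start)
    return starts

# Frames 3 and 4 are flagged, so only windows that avoid them survive.
print(valid_window_starts([False, False, False, True, True, False, False, False], window_size=3))
# [0, 5]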
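
The contact label built in __getitem__ (and inline in the faster datasets) is a four-way one-hot over the last frame of each window: flight, left stance, right stance, or double stance, with the two contact flags swapped whenever the subject's first contact body is not the left foot. A condensed sketch of that mapping, where left_first stands in for subject.getContactBodies()[0][-1] == 'l':

import numpy as np

def contact_one_hot(contact_flags, left_first: bool) -> np.ndarray:
    """Map a per-foot contact pair to a one-hot over
    [flight, left stance, right stance, double stance]."""
    left_idx = 0 if left_first else 1
    left, right = contact_flags[left_idx], contact_flags[1 - left_idx]
    contact_class = {(0, 0): 0, (1, 0): 1, (0, 1): 2, (1, 1): 3}[(left, right)]
    one_hot = np.zeros(4, dtype=np.float32)
    one_hot[contact_class] = 1.0
    return one_hot

print(contact_one_hot((1, 0), left_first=True))   # left stance:  [0. 1. 0. 0.]
print(contact_one_hot((1, 0), left_first=False))  # right stance: [0. 0. 1. 0.]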
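
The CONTACT_FORCES label in FasterDataset.py and b3dDataset.py is the last frame's six ground-force components, reordered to left-then-right when needed and divided by [1, 9.8, 1, 1, 9.8, 1] * mass, so the vertical axes come out in units of body weight and the horizontal ones per kilogram of body mass. A sketch of that scaling and its inverse, assuming the same xyz-per-foot layout as the diff:

import numpy as np

GRAVITY = 9.8  # m/s^2, the constant used in the diff
SCALE_PATTERN = np.array([1.0, GRAVITY, 1.0, 1.0, GRAVITY, 1.0])

def normalize_grf(grf_newtons: np.ndarray, mass_kg: float, left_first: bool) -> np.ndarray:
    """Reorder a 6-vector of ground forces to [left xyz, right xyz] and rescale it."""
    grf = grf_newtons if left_first else grf_newtons[[3, 4, 5, 0, 1, 2]]
    return grf / (SCALE_PATTERN * mass_kg)

def denormalize_grf(grf_scaled: np.ndarray, mass_kg: float) -> np.ndarray:
    """Recover forces in Newtons from the scaled labels (or predictions)."""
    return grf_scaled * SCALE_PATTERN * mass_kg

raw = np.array([12.0, 686.0, -5.0, 0.0, 0.0, 0.0])       # 70 kg subject, ~1 body weight on the left foot
print(normalize_grf(raw, mass_kg=70.0, left_first=True))  # vertical component comes out near 1.0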
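
get_subject_paths in regression_main_faster.py splits at the subject level: it gathers every .b3d file under <dataset>/train, shuffles with a fixed seed, and takes 80% for training and 20% for dev, leaving the test list empty for now. Roughly (a local RandomState replaces the global seeding used in the diff, and the file names below are made up):

import numpy as np
from typing import List, Tuple

def split_subjects(paths: List[str], train_frac: float = 0.8, seed: int = 0) -> Tuple[List[str], List[str]]:
    """Deterministically shuffle subject files and split them into train/dev."""
    paths = sorted(paths)  # fix the input order so the split is reproducible
    np.random.RandomState(seed).shuffle(paths)
    n_train = int(train_frac * len(paths))
    return paths[:n_train], paths[n_train:]

train, dev = split_subjects([f"subject_{i}.b3d" for i in range(10)])
print(len(train), len(dev))  # 8 2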
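
main.py, regression_main.py, and the Trainer class all checkpoint by calling torch.save on a dict holding the epoch plus model and optimizer state dicts, and analyse.py restores the model from the model_state_dict entry before evaluating. A minimal round trip in the same style (the tiny linear model and the path are placeholders):

import torch

model = torch.nn.Linear(4, 6)  # stand-in for FeedForwardBaseline / DynamicsPredictor
optimizer = torch.optim.Adagrad(model.parameters(), lr=1e-3)

# Save a checkpoint the way the training scripts do, e.g. ./outputs/models/epoch_0_batch_0.pt.
checkpoint_path = "checkpoint.pt"
torch.save({
    'epoch': 0,
    'model_state_dict': model.state_dict(),
    'optimizer_state_dict': optimizer.state_dict(),
}, checkpoint_path)

# Restore it, as analyse.py does before running the evaluation loop.
checkpoint = torch.load(checkpoint_path)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
model.eval()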