From 07f79caab67a334f3626b76469929b8a6e162468 Mon Sep 17 00:00:00 2001
From: Mikhmed Nabiev
Date: Tue, 16 Apr 2024 22:09:37 +0300
Subject: [PATCH] added readmes

---
 baseline_tensorflow/README.md              |  10 +
 baseline_tensorflow/baseline_experiment.py | 173 +++++++++++++
 baseline_tensorflow/baseline_model.py      | 130 ++++++++++
 baseline_tensorflow/config.json            |   8 +
 baseline_tensorflow/dataset_generator.py   | 274 +++++++++++++++++++++
 baseline_tensorflow/plot_results.py        |  62 +++++
 code/README.md                             |   6 +-
 proposed_model/README.md                   |   7 +
 proposed_model/align_embeddings.py         |  73 ++++++
 proposed_model/config.json                 |   8 +
 proposed_model/data.py                     |  82 ++++++
 proposed_model/eeg_encoders.py             |  47 ++++
 proposed_model/get_embeddings.py           |  77 ++++++
 proposed_model/models.py                   |  66 +++++
 proposed_model/reduce_emb_dim.py           |  17 ++
 proposed_model/stimulus_encoders.py        |  35 +++
 proposed_model/train.py                    | 128 ++++++++++
 slides/Nabiev2024MiddleTalk.pdf            | Bin 546610 -> 546603 bytes
 slides/Nabiev2024MiddleTalk.tex            | 172 +++++++++++++
 src/README.md                              |   4 +
 src/README.rst                             |  25 --
 src/mylib/train.py                         |   6 +-
 src/setup.py                               |   2 +-
 23 files changed, 1379 insertions(+), 33 deletions(-)
 create mode 100644 baseline_tensorflow/README.md
 create mode 100644 baseline_tensorflow/baseline_experiment.py
 create mode 100644 baseline_tensorflow/baseline_model.py
 create mode 100644 baseline_tensorflow/config.json
 create mode 100644 baseline_tensorflow/dataset_generator.py
 create mode 100644 baseline_tensorflow/plot_results.py
 create mode 100644 proposed_model/README.md
 create mode 100644 proposed_model/align_embeddings.py
 create mode 100644 proposed_model/config.json
 create mode 100644 proposed_model/data.py
 create mode 100644 proposed_model/eeg_encoders.py
 create mode 100644 proposed_model/get_embeddings.py
 create mode 100644 proposed_model/models.py
 create mode 100644 proposed_model/reduce_emb_dim.py
 create mode 100644 proposed_model/stimulus_encoders.py
 create mode 100755 proposed_model/train.py
 create mode 100644 slides/Nabiev2024MiddleTalk.tex
 create mode 100755 src/README.md
 delete mode 100755 src/README.rst

diff --git a/baseline_tensorflow/README.md b/baseline_tensorflow/README.md
new file mode 100644
index 0000000..54b1941
--- /dev/null
+++ b/baseline_tensorflow/README.md
@@ -0,0 +1,10 @@
+# Running the experiment
+
+Download the data from the repository https://github.com/exporl/auditory-eeg-dataset/tree/master.
+
+In `config.json`, replace `--absolute path to dataset folder--` with the absolute path to the dataset folder.
+
+Important: following the dataset repository, the data must first be split into train, validation and test sets.
+
+Running the experiment: `python3 baseline_experiment.py`
+Creating the plots: `plot_results.py`
\ No newline at end of file
diff --git a/baseline_tensorflow/baseline_experiment.py b/baseline_tensorflow/baseline_experiment.py
new file mode 100644
index 0000000..83be01c
--- /dev/null
+++ b/baseline_tensorflow/baseline_experiment.py
@@ -0,0 +1,173 @@
+"""Example experiment for the 2 mismatched segments dilation model."""
+import glob
+import json
+import logging
+import os
+import sys
+
+import tensorflow as tf
+
+# add base path to sys
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
+from baseline_model import dilation_model
+
+from dataset_generator import DataGenerator, batch_equalizer_fn, create_tf_dataset
+
+
+def evaluate_model(model, test_dict):
+    """Evaluate a model.
+
+    Parameters
+    ----------
+    model: tf.keras.Model
+        Model to evaluate.
+ test_dict: dict + Mapping between a subject and a tf.data.Dataset containing the test + set for the subject. + + Returns + ------- + dict + Mapping between a subject and the loss/evaluation score on the test set + """ + evaluation = {} + for subject, ds_test in test_dict.items(): + logging.info(f"Scores for subject {subject}:") + results = model.evaluate(ds_test, verbose=2) + metrics = model.metrics_names + evaluation[subject] = dict(zip(metrics, results)) + return evaluation + + +if __name__ == "__main__": + # Parameters + # Length of the decision window + window_length_s = 5 + fs = 64 + + window_length = window_length_s * fs # 5 seconds + # Hop length between two consecutive decision windows + hop_length = 64 + + epochs = 2 + patience = 5 + batch_size = 16 + only_evaluate = False + number_mismatch = 4 + + training_log_filename = "training_log_{}_{}.csv".format(number_mismatch, window_length_s) + + # Get the path to the config file + experiments_folder = os.path.dirname(__file__) + task_folder = os.path.dirname(experiments_folder) + config_path = os.path.join(experiments_folder, 'config.json') + + # Load the config + with open(config_path) as fp: + config = json.load(fp) + + # Provide the path of the dataset + # which is split already to train, val, test + data_folder = os.path.join(config["dataset_folder"], config['derivatives_folder'], config["split_folder"]) + + # stimulus feature which will be used for training the model. Can be either 'envelope' ( dimension 1) or 'mel' (dimension 28) + stimulus_features = ["envelope"] + stimulus_dimension = 1 + + # uncomment if you want to train with the mel spectrogram stimulus representation + # stimulus_features = ["mel"] + # stimulus_dimension = 10 + + features = ["eeg"] + stimulus_features + + # Create a directory to store (intermediate) results + results_folder = os.path.join(experiments_folder, + "results_dilated_convolutional_model_{}_MM_{}_s_{}".format(number_mismatch, + window_length_s, + stimulus_features[0])) + os.makedirs(results_folder, exist_ok=True) + + # create dilation model + model = dilation_model(time_window=window_length, eeg_input_dimension=64, env_input_dimension=stimulus_dimension, + num_mismatched_segments=number_mismatch) + + model_path = os.path.join(results_folder, + "model_{}_MM_{}_s_{}.keras".format(number_mismatch, window_length_s, + stimulus_features[0])) + + if only_evaluate: + model = tf.keras.models.load_model(model_path) + + else: + + train_files = [x for x in glob.glob(os.path.join(data_folder, "train_-_*")) if + os.path.basename(x).split("_-_")[-1].split(".")[0] in features] + print(features) + # Create list of numpy array files + train_generator = DataGenerator(train_files, window_length) + import pdb + + dataset_train = create_tf_dataset(train_generator, window_length, batch_equalizer_fn, + hop_length, batch_size, + number_mismatch=number_mismatch, + data_types=(tf.float32, tf.float32), + feature_dims=(64, stimulus_dimension)) + + # Create the generator for the validation set + val_files = [x for x in glob.glob(os.path.join(data_folder, "val_-_*")) if + os.path.basename(x).split("_-_")[-1].split(".")[0] in features] + val_generator = DataGenerator(val_files, window_length) + dataset_val = create_tf_dataset(val_generator, window_length, batch_equalizer_fn, + hop_length, batch_size, + number_mismatch=number_mismatch, + data_types=(tf.float32, tf.float32), + feature_dims=(64, stimulus_dimension)) + + # Train the model + model.fit( + dataset_train, + epochs=epochs, + validation_data=dataset_val, + callbacks=[ + 
tf.keras.callbacks.ModelCheckpoint(model_path, save_best_only=True), + tf.keras.callbacks.CSVLogger(os.path.join(results_folder, training_log_filename)), + tf.keras.callbacks.EarlyStopping(patience=patience, restore_best_weights=True), + ], + ) + + test_window_lengths = [2, 4] + number_mismatch_test = [2, 3, 4, 8] + for number_mismatch in number_mismatch_test: + for window_length_s in test_window_lengths: + window_length = window_length_s * fs + results_filename = 'eval_{}_{}_s.json'.format(number_mismatch, window_length_s) + + model = dilation_model(time_window=window_length, eeg_input_dimension=64, + env_input_dimension=stimulus_dimension, num_mismatched_segments=number_mismatch) + + model.load_weights(model_path) + # Evaluate the model on test set + # Create a dataset generator for each test subject + test_files = [x for x in glob.glob(os.path.join(data_folder, "test_-_*")) if + os.path.basename(x).split("_-_")[-1].split(".")[0] in features] + # Get all different subjects from the test set + subjects = list(set([os.path.basename(x).split("_-_")[1] for x in test_files])) + datasets_test = {} + # Create a generator for each subject + for sub in subjects: + files_test_sub = [f for f in test_files if sub in os.path.basename(f)] + test_generator = DataGenerator(files_test_sub, window_length) + datasets_test[sub] = create_tf_dataset(test_generator, window_length, batch_equalizer_fn, + hop_length, batch_size=1, + number_mismatch=number_mismatch, + data_types=(tf.float32, tf.float32), + feature_dims=(64, stimulus_dimension)) + + evaluation = evaluate_model(model, datasets_test) + + # We can save our results in a json encoded file + results_path = os.path.join(results_folder, results_filename) + with open(results_path, "w") as fp: + json.dump(evaluation, fp) + logging.info(f"Results saved at {results_path}") diff --git a/baseline_tensorflow/baseline_model.py b/baseline_tensorflow/baseline_model.py new file mode 100644 index 0000000..9d8c805 --- /dev/null +++ b/baseline_tensorflow/baseline_model.py @@ -0,0 +1,130 @@ +import tensorflow as tf + +def dilation_model( + time_window=None, + eeg_input_dimension=64, + env_input_dimension=1, + layers=3, + kernel_size=3, + spatial_filters=8, + dilation_filters=16, + activation="relu", + compile=True, + num_mismatched_segments=2 +): + """Convolutional dilation model. + + Code was taken and adapted from + https://github.com/exporl/eeg-matching-eusipco2020 + + Parameters + ---------- + time_window : int or None + Segment length. If None, the model will accept every time window input + length. + eeg_input_dimension : int + number of channels of the EEG + env_input_dimension : int + dimemsion of the stimulus representation. + if stimulus == envelope, env_input_dimension =1 + if stimulus == mel, env_input_dimension =28 + layers : int + Depth of the network/Number of layers + kernel_size : int + Size of the kernel for the dilation convolutions + spatial_filters : int + Number of parallel filters to use in the spatial layer + dilation_filters : int + Number of parallel filters to use in the dilation layers + activation : str or list or tuple + Name of the non-linearity to apply after the dilation layers + or list/tuple of different non-linearities + compile : bool + If model should be compiled + inputs : tuple + Alternative inputs + + Returns + ------- + tf.Model + The dilation model + + + References + ---------- + Accou, B., Jalilpour Monesi, M., Montoya, J., Van hamme, H. & Francart, T. 
+ Modeling the relationship between acoustic stimulus and EEG with a dilated + convolutional neural network. In 2020 28th European Signal Processing + Conference (EUSIPCO), 1175–1179, DOI: 10.23919/Eusipco47968.2020.9287417 + (2021). ISSN: 2076-1465. + + Accou, B., Monesi, M. J., hamme, H. V. & Francart, T. + Predicting speech intelligibility from EEG in a non-linear classification + paradigm. J. Neural Eng. 18, 066008, DOI: 10.1088/1741-2552/ac33e9 (2021). + Publisher: IOP Publishing + """ + + eeg = tf.keras.layers.Input(shape=[time_window, eeg_input_dimension]) + stimuli_input = [tf.keras.layers.Input(shape=[time_window, env_input_dimension]) for _ in range(num_mismatched_segments+1)] + + all_inputs = [eeg] + all_inputs.extend(stimuli_input) + + + stimuli_proj = [x for x in stimuli_input] + + # Activations to apply + if isinstance(activation, str): + activations = [activation] * layers + else: + activations = activation + + + # Spatial convolution + eeg_proj_1 = tf.keras.layers.Conv1D(spatial_filters, kernel_size=1)(eeg) + + # Construct dilation layers + for layer_index in range(layers): + # dilation on EEG + eeg_proj_1 = tf.keras.layers.Conv1D( + dilation_filters, + kernel_size=kernel_size, + dilation_rate=kernel_size ** layer_index, + strides=1, + activation=activations[layer_index], + )(eeg_proj_1) + + # Dilation on envelope data, share weights + env_proj_layer = tf.keras.layers.Conv1D( + dilation_filters, + kernel_size=kernel_size, + dilation_rate=kernel_size ** layer_index, + strides=1, + activation=activations[layer_index], + ) + + stimuli_proj = [env_proj_layer(stimulus_proj) for stimulus_proj in stimuli_proj] + + # Comparison + cos = [tf.keras.layers.Dot(1, normalize=True)([eeg_proj_1, stimulus_proj]) for stimulus_proj in stimuli_proj] + + linear_proj_sim = tf.keras.layers.Dense(1, activation="linear") + + # Linear projection of similarity matrices + cos_proj = [linear_proj_sim(tf.keras.layers.Flatten()(cos_i)) for cos_i in cos] + + # Classification + out = tf.keras.activations.softmax((tf.keras.layers.Concatenate()(cos_proj))) + + + + model = tf.keras.Model(inputs=all_inputs, outputs=[out]) + + if compile: + model.compile( + optimizer=tf.keras.optimizers.Adam(), + metrics=["accuracy"], + loss=["categorical_crossentropy"], + ) + print(model.summary()) + return model \ No newline at end of file diff --git a/baseline_tensorflow/config.json b/baseline_tensorflow/config.json new file mode 100644 index 0000000..dbf1b54 --- /dev/null +++ b/baseline_tensorflow/config.json @@ -0,0 +1,8 @@ +{ + "dataset_folder": "/home/bukkacha/Desktop/EEGDataset", + "derivatives_folder": "derivatives", + "preprocessed_eeg_folder": "preprocessed_eeg", + "preprocessed_stimuli_folder": "preprocessed_stimuli", + "split_folder": "split_data", + "stimuli": "stimuli" +} diff --git a/baseline_tensorflow/dataset_generator.py b/baseline_tensorflow/dataset_generator.py new file mode 100644 index 0000000..ee24795 --- /dev/null +++ b/baseline_tensorflow/dataset_generator.py @@ -0,0 +1,274 @@ +"""Code for the dataset_generator for both tasks.""" +import itertools +from math import e +import os +import numpy as np +import tensorflow as tf + + +@tf.function +def batch_equalizer_fn(*args): + """Batch equalizer. + Prepares the inputs for a model to be trained in + match-mismatch task. It makes sure that match_env + and mismatch_env are equally presented as a first + envelope in match-mismatch task. 
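+
+    For example, with one matched and one mismatched stimulus stream
+    (``num_stimuli == 2``), every EEG window appears twice in the output
+    batch: once with the matched segment in the first stimulus slot
+    (label ``[1, 0]``) and once with a mismatched segment there
+    (label ``[0, 1]``).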
+ + Parameters + ---------- + args : Sequence[tf.Tensor] + List of tensors representing feature data + + Returns + ------- + Tuple[Tuple[tf.Tensor], tf.Tensor] + Tuple of the EEG/speech features serving as the input to the model and + the labels for the match/mismatch task + + Notes + ----- + This function will also double the batch size. E.g. if the batch size of + the elements in each of the args was 32, the output features will have + a batch size of 64. + """ + eeg = args[0] + num_stimuli = len(args) - 1 + # repeat eeg num_stimuli times + new_eeg = tf.concat([eeg] * num_stimuli, axis=0) + all_features = [new_eeg] + + # create args + args_to_zip = [args[i::num_stimuli] for i in range(1,num_stimuli+1)] + for stimuli_features in zip(*args_to_zip): + for i in range(num_stimuli): + stimulus_rolled = tf.roll(stimuli_features, shift=i, axis=0) + # reshape stimulus_rolled to merge the first two dimensions + stimulus_rolled = tf.reshape(stimulus_rolled, [tf.shape(stimulus_rolled)[0] * tf.shape(stimulus_rolled)[1], stimuli_features[0].shape[-2], stimuli_features[0].shape[-1]]) + + all_features.append(stimulus_rolled) + labels = tf.concat( + [ + tf.tile(tf.constant([[1 if ii == i else 0 for ii in range(num_stimuli)]]), [tf.shape(eeg)[0], 1]) for i in range(num_stimuli) + ], axis=0 + ) + return tuple(all_features), labels + +def shuffle_fn(args, number_mismatch): + # repeat the last argument number_ mismatch times + args = list(args) + for _ in range(number_mismatch): + args.append(tf.random.shuffle(args[-1])) + return tuple(args) + + + +def create_tf_dataset( + data_generator, + window_length, + batch_equalizer_fn=None, + hop_length=64, + batch_size=64, + data_types=(tf.float32, tf.float32), + feature_dims=(64, 1), + number_mismatch = None # None for regression, 2 or 4 for match-mismatch +): + """Creates a tf.data.Dataset. + + This will be used to create a dataset generator that will + pass windowed data to a model in both tasks. + + Parameters + --------- + data_generator: DataGenerator + A data generator. + window_length: int + Length of the decision window in samples. + batch_equalizer_fn: Callable + Function that will be applied on the data after batching (using + the `map` method from tf.data.Dataset). In the match/mismatch task, + this function creates the imposter segments and labels. + hop_length: int + Hop length between two consecutive decision windows. + batch_size: Optional[int] + If not None, specifies the batch size. In the match/mismatch task, + this amount will be doubled by the default_batch_equalizer_fn + data_types: Union[Sequence[tf.dtype], tf.dtype] + The data types that the individual features of data_generator should + be cast to. If you only specify a single datatype, it will be chosen + for all EEG/speech features. 
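+    feature_dims: Sequence[int]
+        Dimensionality of each feature produced by data_generator,
+        e.g. 64 for the EEG and 1 for the envelope.
+    number_mismatch: Optional[int]
+        Number of mismatched (imposter) segments created per matched
+        segment. None for the regression task, 2 or 4 for match-mismatch.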
+ + Returns + ------- + tf.data.Dataset + A Dataset object that generates data to train/evaluate models + efficiently + """ + # create tf dataset from generator + dataset = tf.data.Dataset.from_generator( + data_generator, + output_signature=tuple( + tf.TensorSpec(shape=(None, x), dtype=data_types[index]) + for index, x in enumerate(feature_dims) + ), + ) + # window dataset + dataset = dataset.map( + lambda *args: [ + tf.signal.frame(arg, window_length, hop_length, axis=0) + for arg in args + ], + num_parallel_calls=tf.data.AUTOTUNE + ) + + # for x, y in dataset: + # print(y.shape) + # break + # exit(0) + + if number_mismatch is not None: + # map second argument to shifted version + + + dataset = dataset.map( lambda *args : shuffle_fn(args, number_mismatch), + num_parallel_calls=tf.data.AUTOTUNE + ) + # batch data + dataset = dataset.interleave( + lambda *args: tf.data.Dataset.from_tensor_slices(args), + cycle_length=8, + block_length=1, + num_parallel_calls=tf.data.AUTOTUNE, + ) + # print(len(list(dataset))) + # exit(0) + if batch_size is not None: + dataset = dataset.batch(batch_size, drop_remainder=True) + + if batch_equalizer_fn is not None: + # Create the labels and make sure classes are balanced + dataset = dataset.map(batch_equalizer_fn, + num_parallel_calls=tf.data.AUTOTUNE) + + return dataset + + +def group_recordings(files): + """Group recordings and corresponding stimuli. + + Parameters + ---------- + files : Sequence[Union[str, pathlib.Path]] + List of filepaths to preprocessed and split EEG and speech features + + Returns + ------- + list + Files grouped by the self.group_key_fn and subsequently sorted + by the self.feature_sort_fn. + """ + new_files = [] + grouped = itertools.groupby(sorted(files), lambda x: "_-_".join(os.path.basename(x).split("_-_")[:3])) + for recording_name, feature_paths in grouped: + new_files += [sorted(feature_paths, key=lambda x: "0" if x == "eeg" else x)] + return new_files + + + +class DataGenerator: + """Generate data for the Match/Mismatch task.""" + + def __init__( + self, + files, + window_length, + ): + """Initialize the DataGenerator. + + Parameters + ---------- + files: Sequence[Union[str, pathlib.Path]] + Files to load. + window_length: int + Length of the decision window. + spacing: int + Spacing between matched and mismatched samples + """ + self.window_length = window_length + self.files = self.group_recordings(files) + + + def group_recordings(self, files): + """Group recordings and corresponding stimuli. + + Parameters + ---------- + files : Sequence[Union[str, pathlib.Path]] + List of filepaths to preprocessed and split EEG and speech features + + Returns + ------- + list + Files grouped by the self.group_key_fn and subsequently sorted + by the self.feature_sort_fn. + """ + new_files = [] + grouped = itertools.groupby(sorted(files), lambda x: "_-_".join(os.path.basename(x).split("_-_")[:3])) + for recording_name, feature_paths in grouped: + new_files += [sorted(feature_paths, key=lambda x: "0" if x == "eeg" else x)] + return new_files + + def __len__(self): + return len(self.files) + + def __getitem__(self, recording_index): + """Get data for a certain recording. 
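+
+        Features are loaded from the recording's grouped .npy files; 1-D
+        features such as the envelope are expanded to shape (time, 1)
+        before being returned as tf.constant tensors.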
+ + Parameters + ---------- + recording_index: int + Index of the recording in this dataset + + Returns + ------- + Union[Tuple[tf.Tensor,...], Tuple[np.ndarray,...]] + The features corresponding to the recording_index recording + """ + data = [] + for feature in self.files[recording_index]: + f = np.load(feature).astype(np.float32) + if f.ndim == 1: + f = f[:,None] + + data += [f] + data = self.prepare_data(data) + #---------- + res = tuple(tf.constant(x) for x in data) + #------------ + return res + + + def __call__(self): + """Load data for the next recording. + + Yields + ------- + Union[Tuple[tf.Tensor,...], Tuple[np.ndarray,...]] + The features corresponding to the recording_index recording + """ + for idx in range(self.__len__()): + yield self.__getitem__(idx) + + if idx == self.__len__() - 1: + self.on_epoch_end() + + def on_epoch_end(self): + """Change state at the end of an epoch.""" + np.random.shuffle(self.files) + + def prepare_data(self, data): + # make sure data has dimensionality of (n_samples, n_features) + + + return data + + diff --git a/baseline_tensorflow/plot_results.py b/baseline_tensorflow/plot_results.py new file mode 100644 index 0000000..8cbee92 --- /dev/null +++ b/baseline_tensorflow/plot_results.py @@ -0,0 +1,62 @@ +# import seaborn as sns +import glob +import json +import os + +import matplotlib.pyplot as plt +import mne +import numpy as np +import pandas as pd +import scipy.stats +import seaborn as sns +from sklearn import base + +# generate plots from all the different results and save them in the figures folder + +# load the results +base_results_folder = os.path.abspath(".") +os.makedirs(os.path.join(base_results_folder, 'figures'), exist_ok=True) +plot_dilation = True + + +if plot_dilation: + # dilation model, match mismatch results + # plot boxplot of the results per window length + # load evaluation results for all window lengths + + files = glob.glob(os.path.join(base_results_folder, "baseline_tensorflow/results_dilated_convolutional_model_4_MM_5_s_envelope/eval_*.json")) + print(os.path.join(base_results_folder, "results_dilated_convolutional_model_4_MM_5_s_envelope/eval_*.json")) + # sort the files + files.sort() + + # create dict to save all results per sub + results = [] + windows = [] + number_mismatch = [] + for f in files: + + # load the results + with open(f, "rb") as ff: + res = json.load(ff) + #loop over res and get accuracy in a list + acc = [] + + for sub, sub_res in res.items(): + if 'compile_metrics' in sub_res: + acc.append(sub_res['compile_metrics']) + + results.append(acc) + + # get the window length + windows.append(int(f.split("_")[-2].split(".")[0])) + number_mismatch.append(int(f.split("_")[-3].split(".")[0])) + + # sort windows and results according to windows + windows, results, number_mismatch = zip(*sorted(zip(windows, results, number_mismatch))) + + #boxplot of the results + plt.boxplot(results, labels=[*zip(windows, number_mismatch)]) + plt.xlabel("(Window length, Number of mismatch)") + plt.ylabel("Accuracy (%)") + plt.title("Accuracy of dilation model, per window length") + plt.savefig(os.path.join(base_results_folder, 'baseline_tensorflow/figures', "boxplot_dilated_conv.pdf")) \ No newline at end of file diff --git a/code/README.md b/code/README.md index 10cb690..0aa1f01 100644 --- a/code/README.md +++ b/code/README.md @@ -1,9 +1,9 @@ -# Запус эксперимента +# Эксперимент Загрузить данные из репозитория https://github.com/exporl/auditory-eeg-dataset/tree/master. 
 В файле `config.json` меняем `--absolute path to dataset folder--` на абсолютный путь к датасету.
 
-Важно согласно репозиторию датасета предварительно разделить данные на тренировочные, валидационные и тестовые.
+Важно согласно репозиторию датасета предварительно разделить данные на тренировочные, валидационные и тестовые. 
 
-Запуск эксперимента: `python3 baseline_experiment.py`
\ No newline at end of file
+Эксперимент был проведен на ноутбуке `main.ipynb` и все результаты там же.
\ No newline at end of file
diff --git a/proposed_model/README.md b/proposed_model/README.md
new file mode 100644
index 0000000..38e31ab
--- /dev/null
+++ b/proposed_model/README.md
@@ -0,0 +1,7 @@
+- `models.py` --- the model for the experiment. By default the baseline model is initialized. With `use_transformer=True` the baseline model with a transformer encoder is initialized, and with `use_embeddings=True` --- the model that uses stimulus embeddings.
+- `eeg_encoders.py`, `stimulus_encoders.py` --- encoders for the EEG and for the stimuli.
+- `train.py` --- the base `Trainer` class for training the models.
+- `data.py` --- the `torch.utils.data.Dataset` subclass that builds the match/mismatch samples.
+- `get_embeddings.py`, `reduce_emb_dim.py`, `align_embeddings.py` --- scripts for preparing the embeddings.
+
+See `code/main.ipynb` for an example of running the model.
\ No newline at end of file
diff --git a/proposed_model/align_embeddings.py b/proposed_model/align_embeddings.py
new file mode 100644
index 0000000..039222d
--- /dev/null
+++ b/proposed_model/align_embeddings.py
@@ -0,0 +1,73 @@
+import os
+import json
+import glob
+import numpy as np
+from mne.filter import resample
+from tqdm import tqdm
+
+file = os.path.abspath('')
+experiment_folder = os.path.dirname(file)
+
+# Load the config file
+with open(os.path.join(file, "src/mylib/utils/config.json")) as file_path:
+    config = json.load(file_path)
+
+# Path to the dataset, which is already split to train, val, test
+data_folder = os.path.join(config["dataset_folder"], config['derivatives_folder'], config["preprocessed_stimuli_folder"])
+
+train_files = [x for x in glob.glob(os.path.join(data_folder, "*.npy"))]
+
+# resampling wav2vec embeddings
+print("Resampling Wav2Vec embeddings")
+path_to_embedds = os.path.join(file, "code/embeddings/wav2vec_resampled")
+os.makedirs(path_to_embedds, exist_ok=True)
+
+# pair each preprocessed stimulus with the wav2vec embedding of the same audio
+processed = set()
+pairs = []
+for x in tqdm(train_files):
+    audio_name = "_".join(os.path.basename(x).split("_")[:-1])
+    if audio_name in processed:
+        continue
+    for emb_f in glob.glob(os.path.join(file, "code/embeddings/wav2vec/*")):
+        f_name = os.path.basename(emb_f).split(".")[0]
+        if audio_name == f_name:
+            pairs.append([x, emb_f])
+            processed.add(audio_name)
+
+# resample each embedding to the length of the corresponding stimulus
+for p in tqdm(pairs):
+    audio_name = "_".join(os.path.basename(p[0]).split("_")[:-1])
+    stimul = np.load(p[0])
+    emb = np.load(p[1]).astype(np.float64).squeeze(axis=0)
+    resampled = resample(emb, stimul.shape[0]/emb.shape[0], axis=0)
+    assert resampled.shape[0] == stimul.shape[0]
+    print(stimul.shape, resampled.shape)
+    np.save(os.path.join(path_to_embedds, f"{audio_name}_resampled"), resampled)
+
+# resampling whisper embeddings
+print("Resampling Whisper embeddings")
+
+path_to_embedds = os.path.join(file, "code/embeddings/whisper_resampled")
+os.makedirs(path_to_embedds, exist_ok=True)
+
+processed = set()
+pairs = []
+for x in tqdm(train_files):
+    audio_name = "_".join(os.path.basename(x).split("_")[:-1])
+    if audio_name in processed:
+        continue
+    for emb_f in glob.glob(os.path.join(file,
"code/embeddings/whisper/*")): + f_name = os.path.basename(emb_f).split(".")[0] + if audio_name == f_name: + print(emb_f) + pairs.append([x, emb_f]) + processed.add(audio_name) + +for p in tqdm(pairs): + audio_name = "_".join(os.path.basename(p[0]).split("_")[:-1]) + stimul = np.load(p[0]) + emb = np.load(p[1]).astype(np.float64).squeeze(axis=0) + resampled = resample(emb, stimul.shape[0]/emb.shape[0], axis=0) + assert resampled.shape[0] == stimul.shape[0] + print(stimul.shape, resampled.shape) + np.save(os.path.join(path_to_embedds, f"{audio_name}_resampled"), resampled) \ No newline at end of file diff --git a/proposed_model/config.json b/proposed_model/config.json new file mode 100644 index 0000000..4c75e83 --- /dev/null +++ b/proposed_model/config.json @@ -0,0 +1,8 @@ +{ + "dataset_folder": "/home/bukkacha/Desktop/EEGDataset", + "derivatives_folder": "derivatives", + "preprocessed_eeg_folder": "preprocessed_eeg", + "preprocessed_stimuli_folder": "preprocessed_stimuli", + "split_folder": "split_data", + "stimuli": "stimuli/eeg" +} diff --git a/proposed_model/data.py b/proposed_model/data.py new file mode 100644 index 0000000..263be51 --- /dev/null +++ b/proposed_model/data.py @@ -0,0 +1,82 @@ +import torch +import numpy as np +import itertools +import os +import glob +from torch.utils.data import Dataset + +project_path = os.path.abspath("mylib/utils") + +class TaskDataset(Dataset): + """Generate data for the Match/Mismatch task.""" + + def __init__(self, files, window_length, hop_length, number_of_mismatch, use_embeddings=False, embedding_type=None, max_files=100): + self.labels = dict() + assert number_of_mismatch != 0 + self.window_length = window_length + self.hop_length = hop_length + self.number_of_mismatch = number_of_mismatch + self.files = files + self.max_files = max_files + self.group_recordings(use_embeddings, embedding_type) + self.frame_recordings() + self.create_imposter_segments() + self.create_labels_randomize_positions() + + def group_recordings(self, use_embeddings, embedding_type): + new_files = [] + grouped = itertools.groupby(sorted(self.files), lambda x: "_-_".join(os.path.basename(x).split("_-_")[:3])) + + for recording_name, feature_paths in grouped: + eeg_path, envelope_path = sorted(feature_paths, key=lambda x: "0" if x == "eeg" else x) + + if use_embeddings: + # найдем соответствующий эмбеддинг + envelope_name = os.path.basename(envelope_path).split("_-_")[2] + for emb_path in glob.glob(os.path.join(f"{project_path}/embeddings", f"{embedding_type}_resampled/*.npy")): + audio_name = "_".join(os.path.basename(emb_path).split("_")[:-1]) + if audio_name == envelope_name: + envelope_path = emb_path + break + eeg, envelope = np.load(eeg_path), np.load(envelope_path) # eeg [L, C], env [L, 1] + new_files += [[torch.tensor(eeg.T).float(), torch.tensor(envelope.T).float()]] + + if self.max_files is not None and len(new_files) == self.max_files: + break + self.files = new_files + + def frame_recordings(self): + new_files = [] + for i in range(len(self.files)): + self.files[i][0] = self.files[i][0].unfold( + 1, self.window_length, self.hop_length).transpose(0, 1) # [num_of_frames, C, window_length] + self.files[i][1] = self.files[i][1].unfold( + 1, self.window_length, self.hop_length).transpose(0, 1) # [num_of_frames, C, window_length] + eegs = list(torch.tensor_split(self.files[i][0], self.files[i][0].shape[0], dim=0)) + envs = list(torch.tensor_split(self.files[i][1], self.files[i][1].shape[0], dim=0)) + for eeg, env in zip(eegs, envs): + 
new_files.append([eeg.squeeze(), env.squeeze(dim=0)]) + self.files = new_files + + def create_imposter_segments(self): + for i in range(len(self.files)): + for _ in range(self.number_of_mismatch): + t = self.files[i][-1].reshape(-1) + t = t[torch.randperm(t.shape[-1])].reshape(self.files[i][-1].shape) + self.files[i].append(t) + + def create_labels_randomize_positions(self): + roll = lambda x, n: x[-n % len(x):] + x[: -n % len(x)] + for i in range(len(self.files)): + self.labels[i] = torch.tensor(0) + for j in range(1, self.number_of_mismatch + 1): + envs = self.files[i][1:] + rolled_envs = roll(envs, j) + self.files.append([self.files[i][0], *rolled_envs]) + self.labels[len(self.files) - 1] = torch.tensor(j) + + def __len__(self): + return len(self.files) + + def __getitem__(self, idx): + return self.files[idx], self.labels[idx] \ No newline at end of file diff --git a/proposed_model/eeg_encoders.py b/proposed_model/eeg_encoders.py new file mode 100644 index 0000000..3741007 --- /dev/null +++ b/proposed_model/eeg_encoders.py @@ -0,0 +1,47 @@ +import torch.nn as nn + + +class BaselineEEGEncoder(nn.Module): + """Encoder for EEG""" + + def __init__(self, in_channels=8, dilation_filters=16, kernel_size=3, layers=3): + super(BaselineEEGEncoder, self).__init__() + + self.eeg_convos = nn.Sequential() + + for layer_index in range(layers): + self.eeg_convos.add_module(f"conv1d_lay{layer_index}", + nn.Conv1d( + in_channels=dilation_filters * (layer_index != 0) + ( + layer_index == 0) * in_channels, + out_channels=dilation_filters, + kernel_size=kernel_size, + dilation=kernel_size ** layer_index, + bias=True)) + self.eeg_convos.add_module(f"relu_lay{layer_index}", nn.ReLU()) + + def forward(self, eeg): + return self.eeg_convos(eeg) + + +class MultiheadAttentionEEGEncoder(nn.Module): + """EEG Encoder using transformer""" + + def __init__(self, embed_dim, ff_dim): + super(MultiheadAttentionEEGEncoder, self).__init__() + + self.mha_attention = nn.MultiheadAttention(embed_dim=embed_dim, num_heads=2) + self.ffn = nn.Sequential(nn.Linear(embed_dim, ff_dim), nn.ReLU(), nn.Linear(ff_dim, embed_dim)) + self.layer_norm1 = nn.LayerNorm(embed_dim, eps=1e-6) + self.layer_norm2 = nn.LayerNorm(embed_dim, eps=1e-6) + self.dropout1 = nn.Dropout(p=0.5) + self.dropout2 = nn.Dropout(p=0.5) + + def forward(self, x): + attn_output, _ = self.mha_attention(x, x, x) + attn_output = self.dropout1(attn_output) + out1 = self.layer_norm1(attn_output + x) + ffn_output = self.ffn(out1) + ffn_output = self.dropout2(ffn_output) + out = self.layer_norm2(out1 + ffn_output) + return out diff --git a/proposed_model/get_embeddings.py b/proposed_model/get_embeddings.py new file mode 100644 index 0000000..a1c6d77 --- /dev/null +++ b/proposed_model/get_embeddings.py @@ -0,0 +1,77 @@ +import gzip +import os +import json +import glob +import librosa +import torch +from tqdm import tqdm +import numpy as np +from scipy.io.wavfile import write + +from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2Model, WhisperFeatureExtractor, WhisperModel + +experiment_folder = os.path.dirname(os.path.abspath('')) +with open(os.path.join(experiment_folder, "utils/config.json")) as file_path: + config = json.load(file_path) + +path_to_stimuli = os.path.join(config["dataset_folder"], config["stimuli"]) +path_to_audio = os.path.join(experiment_folder, "../../code/audio") +os.makedirs(path_to_audio, exist_ok=True) + +stimuli = [x for x in glob.glob(os.path.join(path_to_stimuli, "*.npz.gz"))] + +print("Generating Audio") +for stimulus in tqdm(stimuli): + 
path_to_save = os.path.join(path_to_audio, f"{os.path.basename(stimulus)[:-7]}.wav") + if os.path.exists(path_to_save): + continue + with gzip.open(stimulus, "rb") as f: + data = dict(np.load(f)) + data = { + 'data': data['audio'], + 'sr': data['fs'] + } + rate = data['sr'] + scaled = np.int16(data['data'] / np.max(np.abs(data['data'])) * 32767) + write(path_to_save, rate, scaled) + +print("Getting embeddings") +path_to_embedds = os.path.join(experiment_folder, "../../code/embeddings") +os.makedirs(path_to_embedds, exist_ok=True) +device = "cuda" if torch.cuda.is_available() else "cpu" +for asr_model_name in ["Clementapa/wav2vec2-base-960h-phoneme-reco-dutch", "openai/whisper-small"]: + if "wav2vec" in asr_model_name: + path_asr_embedds = os.path.join(path_to_embedds, "wav2vec") + os.makedirs(path_asr_embedds, exist_ok=True) + print("Wav2Vec Embeddings") + feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(asr_model_name) + asr_model = Wav2Vec2Model.from_pretrained(asr_model_name).to(device) + elif "whisper" in asr_model_name: + path_asr_embedds = os.path.join(path_to_embedds, "whisper") + os.makedirs(path_asr_embedds, exist_ok=True) + print("Whisper Embeddings") + feature_extractor = WhisperFeatureExtractor.from_pretrained(asr_model_name) + asr_model = WhisperModel.from_pretrained(asr_model_name).to(device) + + for audio in tqdm(glob.glob(os.path.join(path_to_audio, "*.wav"))): + + if os.path.exists(os.path.join(path_asr_embedds, f"{os.path.basename(audio)}.npy")): + continue + input_audio, sr = librosa.load(audio, sr=16000) + sr = int(sr) + embed = [] + for j in range(0, len(input_audio), sr): + part = input_audio[j: j + sr] + i = feature_extractor(part, return_tensors='pt', sampling_rate=sr).to(device) + with torch.no_grad(): + if "wav2vec" in asr_model_name: + if i.input_values.shape[1] < sr: + break + output = asr_model(i.input_values) + elif "whisper" in asr_model_name: + output = asr_model(i.input_features, + decoder_input_ids=torch.tensor( + [[1] * 10], device=device) * asr_model.config.decoder_start_token_id) + embed.append(output.last_hidden_state.cpu().numpy()) + embed = np.concatenate(embed, axis=1) + np.save(os.path.join(path_asr_embedds, os.path.basename(audio)), embed) diff --git a/proposed_model/models.py b/proposed_model/models.py new file mode 100644 index 0000000..adbb373 --- /dev/null +++ b/proposed_model/models.py @@ -0,0 +1,66 @@ +import torch +import torch.nn as nn +from src.mylib.models.eeg_encoders import BaselineEEGEncoder, MultiheadAttentionEEGEncoder +from src.mylib.models.stimulus_encoders import BaselineStimulusEncoder, PhysicsInformedStimulusEncoder + + +class Model(nn.Module): + """Модель основанная на базовом решении""" + + def __init__(self, + layers=3, + kernel_size=3, + spatial_filters=8, + dilation_filters=16, + use_transformer=False, + use_embeddings=False): + super(Model, self).__init__() + + args = {"dilation_filters": dilation_filters, "kernel_size": kernel_size, "layers": layers} + self.use_transformer = use_transformer + self.use_embeddings = use_embeddings + + # Пространственное преобразование ЭЭГ + Энкодер ЭЭГ + if use_transformer: + self.spatial_transformation = MultiheadAttentionEEGEncoder(embed_dim=64, ff_dim=32) + self.eeg_encoder = BaselineEEGEncoder(in_channels=64, **args) + else: + self.spatial_transformation = nn.Conv1d( + in_channels=64, + out_channels=spatial_filters, + kernel_size=1, + bias=True + ) + self.eeg_encoder = BaselineEEGEncoder(in_channels=spatial_filters, **args) + + # Энкодер стимула + if use_embeddings: + 
self.stimulus_encoder = PhysicsInformedStimulusEncoder(**args) + else: + self.stimulus_encoder = BaselineStimulusEncoder(**args) + + self.fc = nn.Linear(in_features=dilation_filters * dilation_filters, + out_features=1, + bias=True) + + def forward(self, eeg_stimuli): + eeg = eeg_stimuli[0] + stimuli = eeg_stimuli[1:] + if self.use_transformer: + eeg = eeg.transpose(1, 2) + eeg = self.spatial_transformation(eeg) + if self.use_transformer: + eeg = eeg.transpose(1, 2) + eeg = self.eeg_encoder(eeg) + + # Общие веса для всех стимулов + for i in range(len(stimuli)): + stimuli[i] = self.stimulus_encoder(stimuli[i]) + stimuli[i] = stimuli[i][:, :, :eeg.shape[-1]] + + cosine_sim = [] + for stimulus in stimuli: + cosine_sim.append(eeg @ stimulus.transpose(-1, -2)) + + sim_projections = [self.fc(torch.flatten(sim, start_dim=1)) for sim in cosine_sim] + return torch.cat(sim_projections, dim=1) diff --git a/proposed_model/reduce_emb_dim.py b/proposed_model/reduce_emb_dim.py new file mode 100644 index 0000000..9751186 --- /dev/null +++ b/proposed_model/reduce_emb_dim.py @@ -0,0 +1,17 @@ +import os +import glob +import numpy as np +from sklearn.decomposition import PCA +from tqdm import tqdm + +# Уменьшаем размерность до 1, как в базовом случае +pca = PCA(n_components=1) +for emb_type in ["wav2vec", "whisper"]: + print(f"Reducing dimensions for {emb_type} embeddings") + for emb_path in tqdm(glob.glob(os.path.join(os.path.abspath("mylib/utils"), f"code/embeddings/{emb_type}_resampled/*.npy"))): + embedding = np.load(emb_path) + + # Уменьшаем размерность + embedding = pca.fit_transform(embedding) + + np.save(emb_path, embedding) \ No newline at end of file diff --git a/proposed_model/stimulus_encoders.py b/proposed_model/stimulus_encoders.py new file mode 100644 index 0000000..3931fd6 --- /dev/null +++ b/proposed_model/stimulus_encoders.py @@ -0,0 +1,35 @@ +import torch.nn as nn + +class BaselineStimulusEncoder(nn.Module): + """Энкодер стимула взятый из базового решения""" + + def __init__(self, dilation_filters=16, kernel_size=3, layers=3): + super(BaselineStimulusEncoder, self).__init__() + + self.env_convos = nn.Sequential() + for layer_index in range(layers): + self.env_convos.add_module(f"conv1d_lay{layer_index}", + nn.Conv1d( + in_channels=dilation_filters * (layer_index != 0) + (layer_index == 0), + out_channels=dilation_filters, + kernel_size=kernel_size, + dilation=kernel_size ** layer_index, + bias=True)) + self.env_convos.add_module(f"relu_lay{layer_index}", nn.ReLU()) + + def forward(self, stimulus): + return self.env_convos(stimulus) + +class PhysicsInformedStimulusEncoder(nn.Module): + """Физико-информированный энкодер для стимула""" + + def __init__(self, *args, **kwargs): + super(PhysicsInformedStimulusEncoder, self).__init__() + self.encoder = nn.Sequential( + nn.Conv1d(in_channels=1, out_channels=16, kernel_size=3, bias=True), + nn.ReLU() + ) + + + def forward(self, embedding): + return self.encoder(embedding) \ No newline at end of file diff --git a/proposed_model/train.py b/proposed_model/train.py new file mode 100755 index 0000000..c02ae82 --- /dev/null +++ b/proposed_model/train.py @@ -0,0 +1,128 @@ +import os +import numpy as np + +import torch +from torch.utils.tensorboard import SummaryWriter +import torch.nn as nn +import torch.nn.functional as F + +from src.mylib.utils.data import TaskDataset + +class Trainer(object): + r"""Base class for all trainer.""" + + def __init__(self, model, train_files, val_files, test_files, args, optimizer, use_embeddings=False, embedding_type=None): 
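+        r"""Store the model, optimizer and data files, and build the dataloaders.
+
+        ``args`` is assumed to be a dict with the keys used below:
+        ``window_length``, ``hop_length``, ``number_of_mismatch``,
+        ``max_files`` and ``batch_size``.
+        """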
+ self.model = model + self.args = args + self.optimizer = optimizer + self.loss_fn = nn.CrossEntropyLoss() + self.test_files = test_files + self.use_embeddings = use_embeddings + self.embedding_type = embedding_type + self.initialize_dataloaders(train_files, val_files) + + def initialize_dataloaders(self, train_files, val_files): + r"""Initialize dataloaders""" + + conf = {"window_length": self.args["window_length"], "hop_length": self.args["hop_length"], + "number_of_mismatch": self.args["number_of_mismatch"], "max_files": self.args["max_files"], + "use_embeddings": self.use_embeddings, "embedding_type": self.embedding_type} + self.train_dataloader = torch.utils.data.DataLoader(TaskDataset(train_files, **conf), + batch_size=self.args["batch_size"]) + self.val_dataloader = torch.utils.data.DataLoader(TaskDataset(val_files, **conf), + batch_size=self.args["batch_size"]) + + def train_one_epoch(self, epoch_index, writer, eps): + r"""Train one epoch""" + + running_loss = 0 + last_loss = 0 + for i, data in enumerate(self.train_dataloader): + inputs, labels = data + + self.optimizer.zero_grad() + outputs = self.model(inputs) + loss = self.loss_fn(outputs, labels) + loss.backward() + + self.optimizer.step() + + running_loss += loss.item() + + if i % 100 == 99: + last_loss = running_loss / 100 + print(' batch {} loss: {}'.format(i + 1, last_loss)) + x = epoch_index * len(self.train_dataloader) + i + 1 + writer.add_scalar('Loss/train', last_loss, x) + running_loss = 0 + + return last_loss + + def train_model(self, epochs, run_name, eps): + r""" Train models""" + + writer = SummaryWriter(f"runs/{run_name}") + + best_vloss = 1_000_000 + if not os.path.isdir("saved_models"): + os.makedirs("saved_models") + + for epoch in range(epochs): + print(f"EPOCH {epoch + 1}:") + self.model.train() + avg_loss = self.train_one_epoch(epoch + 1, writer, eps) + + running_vloss = 0.0 + self.model.eval() + with torch.no_grad(): + for i, vdata in enumerate(self.val_dataloader): + vinputs, vlabels = vdata + voutputs = self.model(vinputs) + vloss = self.loss_fn(voutputs, vlabels) + running_vloss += vloss.item() + + avg_vloss = running_vloss / (i + 1) + print("LOSS train {} valid {}".format(avg_loss, avg_vloss)) + + writer.add_scalars("Training vs. 
Validation Loss",
+                               {"Training": avg_loss, "Validation": avg_vloss},
+                               epoch + 1)
+            writer.flush()
+
+            if avg_vloss < best_vloss:
+                best_vloss = avg_vloss
+                model_path = f"saved_models/{run_name}_{epoch}"
+                torch.save(self.model.state_dict(), model_path)
+
+            # if avg_vloss < eps:
+            #     break
+
+    def eval_model(self, dataset_type):
+        r"""Evaluate model for initial validation dataset."""
+
+        pass
+
+    def test(self, window_length, hop_length, number_of_mismatch, max_files):
+        r"""Evaluate model for given dataset"""
+
+        subjects = list(set([os.path.basename(x).split("_-_")[1] for x in self.test_files]))
+        accuracy_per_sub = []
+        self.model.eval()
+        with torch.no_grad():
+            for sub in subjects:
+                print(sub)
+                sub_test_files = [f for f in self.test_files if sub in os.path.basename(f)]
+                test_dataloader = torch.utils.data.DataLoader(
+                    TaskDataset(sub_test_files, window_length, hop_length,
+                                number_of_mismatch, max_files=max_files), batch_size=1)
+                loss = 0
+                correct = 0
+                for inputs, label in test_dataloader:
+                    outputs = self.model(inputs)
+                    loss += F.cross_entropy(outputs, label).item()
+                    _, predicted = torch.max(outputs.data, 1)
+                    correct += (predicted == label).long().item()
+
+                print(f" Accuracy per subject: {100 * correct / len(test_dataloader)}")
+                accuracy_per_sub.append(100 * correct / len(test_dataloader))
+        print("Score: ", np.mean(accuracy_per_sub))
diff --git a/slides/Nabiev2024MiddleTalk.pdf b/slides/Nabiev2024MiddleTalk.pdf
index d7ed66b583ca0818871285a4db5c67ddfcc755e4..bf492764e41d6b0a6b6ffe25c8ca81f54522ee1b 100644
GIT binary patch
z+rC!7&LX83cA}T?`w}+6q4u;Ink5zns*~g$Z|Xh$T8k>zd1FhV$wX_l@n zC=`Sb_YF}zNY&_0S$WmqrzP3*!#X|xO%Drt-BiD0_kkTy(u8c&0}By)aAIa!gY?U= zh^;yKpj>F$?+oFZx$?j3DxkNK)m?s(ZDS}A9R7|C98BOcU6?V7eF(Pi{>b2NP8G2& zPj-g2eZ1|s=UkMrGcfL2IKS4H0DQ2A6PpjP?k0wQRN`V^tc;ZfYCb^%V!WZ3(`PGO z@^B2A4&7S6(iav8&i~TS1*FK!ZRFt$&=Mga3Bx`QW9CAf78K%Cy zelePVX{HUXk!^b!3FXd_ye5XC$m?=fQ`?5_1`IH?E$0&!yNy zsiuW2XaUa={(4bmXl+hT&@xNaM16$I8IA}>0KShHnbdYK&PuoX^g(Iy&S`ANvlUx9 zXv6o|1RqUJ-oo$0I)Qt=i?-p231`_CzW_M%c%|qDe7L_`vY3#N<{MZw{8hFP%`8Xg9dA%V(+O0V@1hP`sAKQ+7#k}n2t1z~c z(D2Uw0X!%0^s->) zN2hm!{-5xKi!EbM9vuzJXMcfRkBP5PH-3=J+i2y{7S{|{E2eLvB`ys|lXda~D)r0w z9klzhC=Lg|9ht+}>ij^^a2K=K*e4=%o0A~UZq@*x0#dW~IvY7ALw?e{L{S&KLt08$OGVqc zWGfy(!v~#jExdsL1 zyTE1M(bEh=jWSqHX+IN#30R>$zohkw>~IBya!q1!bRTFt9O9lq78f#P$ZwOH7l+aA z)l5V0E3|is$0TH@kG8DMSAW}| zZpTaBJsE+63{g=jOzwshuL9D(s=v~zCw{7XRrCCyX17s8OIwQRX^mEEJdW=!e!N;q z0E;uTyUL(j=wyp+^dq_pN?Ib$-i_2S8oVdnwVAD#Wfx27{=2~NYx;eaholzNr2K^) z5H%3fevnWVQ=|}%0U3DUluM!;uwg6MV!o8&la*+=<6)~YL4se$LcCzyr6)SXrD>(K z1ebdcPs&}iJ6BQElNF{rx@69t{mDD@$68+USMFKd9CpSrlp&SSPwp_*JLBa+{xwBU zmd^KlX7{RDM@H^)K^2MeSB00WhfrlTV5?}Z`V=2IkiYp z%S-a`_=s~&yS*D4v#x+L?JEDj)rdT8;JgBe5c8p1Dg_~1gpU5JzZ_|$4{M}K#7p~ z_d*gsdsg@PpT^7@mJWyBZBb-O&BWU??Hr?kDvgliKRLL`&gu^E^ybvyANp{FWyze) zhS~bLmxiSk4bM|uc?HmX4Zw^VWFYbE2m3tOzt;oRS4?I?2`Bbs0md;5<~$h%1umc1 z*4u=A64fSqZ6mR-m+m(MTi8`ASAt@fZ8)VM6I1W$%G@y&-8HzLL*`$YvPgd3>Dh_> zG;JHK{bnIG66Y4fhs)E?y!&6|I-f(MBTK?@24%@`Hs1pBJUFMFxj+E=;&XB~pLvf! zjB#lxb3u;d>)emMS?U(wf&CrzcXF}ztx{D=CltKXqV>qSUU4tzD2Y?WW-^8A=&*LI zqNb?^5S$)diu%J-CV~XZ_)+#HX+`YthCu$1c4FPyZtOPY$6uD(_O&SZN?hho@MZkH z1POed|6~Zy)_R7}ErF@(MR_2Y=h{Z6r2T?$oy0>(khiF5cnHZ-GNa zPDIZ2;Z=+Y5(%{a?yQdlv! zjebIN=&at~J|3qPW7v{658~$cxA@N8oAF;r+kS;o3XA0C@-IWLqdFjnCWqubU2QuZ z9XxKZgv|@ZzkolJy744pV>N=T?fxlM`kY&`yoR`!va%x=3VdGr0FL#*FulhHST^fE zV?dH4aW&qU2|);2!yRX`{?Y{@lvctrQ^5pr4emfuQO!8_JjS zvz$+!kR1Z?X4qpK#4-FsAk?n-}A7S&)S4fY#vd9Onnm z9R`pyujM1gsxqRl6509GO{cql9)|I`@5oAEmuy z;esR@z7Idwg#6t~EE`+0N-t&MjawF&uT7v>S|+xEUd$Sd75p>7Re2=Zy_(F4wADHB zs^+aV)M*M{yZEKC+}QdNb>4MZLOaSFAccm1%%9~ZLZ^a^uk>Oex!g{>1Op}`D+5El z33vzX4{}b98*~`Z6CW%M|J|67LOtb}zKxfAWR5a%_U_jc>-Ts&2nlY56f@J-*oN=J zBw9#!EsEpCT&*+=USzqoDmFwF@@RFHbO=}7Ekbd3demT@;`N)Hq}l0tB3nmTd=!)^ zW@0dZQ?Io(*a#{%9kWn8BhA_yQ@^Wh0Mr{Di=XTbc6(z2uZx+wDS2<(!ezp)!U|K) zy;e*$688~9J0uorIO@TRt=O(7+@`|y+G&|Pe|@=mq<8-*kEmd>kq(<#Rct42j?$rtLW{UO z8_m^HyiHwrM+-gnaP_l`a@Eg6Ks}?t$>Ce-1s&Ly5j#KQo9*6iVz+4;Qr*K!n zjmkNrmFYv;4$mk6{er)$Qxz_4CFQHthMm*iss#&AAyV3QTG9aCAF0HcjEL3e@S_K# zig%N*(!_Y;)+>ykmR-hhJvsy1!504~f8&Hx$(*03q&gM~`tMTGm}7}{iP0pLmqAK6 zDcow}K4&R#ar^Ra#S8Q%U=7&P$v0_FE?Yw9!YXPvJziUr*|Qs_z};QmVk{GC?^bJh z+9i9mWGPF!NlFx(5uU*wU6@_RrDY!S*yI1K%%6gq@cHQ$kTFC!A)Ya>1^Q{|usLm0 z;x{5x2L^ZhN;1BDA9?gtwymeZ7V&b&{f{7PqOlHX)8f7=`@!lbfF4E1;h3K4C+7u< z^|IK}I93#8diW0E{4C$(V1S_3u@`OWLbN=r;L4$;ndco zr*%Oqfjg3gVTT3E5n7f+1=38RC#wUM!#?olc)BA%7v=zqek{~VrORs&70C;)iXK|R zJUn4Agc_=>oeNZN5Cq zi*Y~8O`LKDs9X51L#aRJ?d*#AtwlEXl)u!@$VMPF%)5#ud*_gk@!r;Fnlc_; z^+GjGfAY2P$l1h0Vr(5k$dAlUaGf~rYFzrBBwEhPf2lip4=#Cw5w$s;)oB|!|KQ>L zZDq>*%m8X*#M+byWSteAd@_aDu%Hq&WBW=_Wr7_7^{^7}U&fNCwzTaW_9$Qy;PjhgMH&orv2xEveZ;k4}^AQ=2E9EjnVMMTu@|l8! 
zQoC0u_9Q1BCXWUoqG%2O!4tx_a$4^|L67EK))+-PN@w2N$Dnso35CboyhfhLm0SNc zNv2|umQYzrP~&;CrK;w$V;gZp71U>qdj`V>9G%$I7&;f)IM$T|cj!qBTM^V>K)EdR z?bfWY8Kd4Fq|Fk~wk)3TA%5)^yF8LKubIVV`nBEPXAY-!}_sV(B941C7 zyYzUl`XMwrZ+o$3Z+3Is52Q3sw-(Ro+Rh6${y^|uA||>gPaDvw9uB)`qNCIkp!myo zrn2>QRn+S81y0^$oqM!!IH7qWy4$i2$fgpJrA5>XVgGIo|FK9iyfytcVU2bv>B6=1 zR}NAfE#z;Pl7`Z=9?LHZALUyaV>6+&z<0|h`4P{rkSrAmruZ*E@cdSn5%3NF)T8Myfp1XQbnP+=f3Q5G7so zcCONzaEw578M5=ILngoxsRYhackABL=TM;p#E5WH1xZ|bgm`-scH=BkKL(IsJkapQ zJR;Bz3i%D%&%g^S@EWB?6N9|>^d1m|7wb?IowpxK|X@^s{X;N zcnz=B`+guD5ZjjN;?AjLO6f)7q^$)PHH~rwshC5xncD?c^BoPD>eLMYxk@)|qx=IV zmHgtxF=_DU?D2Ae6RfaeICMjSQcO;33?2p}=S?=b34uWd@*I=VqM8r1 zaLElF1UhFD!P^c0P0JKtyKL@T8MuyZZaf`nag%P!Y%nIA0xX`V**7ICD)wXAe*pgv5dQ)4KR~r8x zGy;U;&VHbA^iW1gmVFnDwXohIls8=d5`n^ni)OVi{I#-L%-)o-W@`&DSZN?X(tLLJ zB5PLqya@aDlzQz!GA0EH`z#os;?~lQk}gS9ISC83ERgW09KoV Jr3HieKLGMUp0)r0 diff --git a/slides/Nabiev2024MiddleTalk.tex b/slides/Nabiev2024MiddleTalk.tex new file mode 100644 index 0000000..f784938 --- /dev/null +++ b/slides/Nabiev2024MiddleTalk.tex @@ -0,0 +1,172 @@ +\documentclass{beamer} +\beamertemplatenavigationsymbolsempty +\usecolortheme{beaver} +\setbeamertemplate{blocks}[rounded=true, shadow=true] +\setbeamertemplate{footline}[page number] +% +\usepackage[utf8]{inputenc} +\usepackage[english,russian]{babel} +\usepackage{amssymb,amsfonts,amsmath,mathtext} +\usepackage{subfig} +\usepackage[all]{xy} % xy package for diagrams +\usepackage{array} +\usepackage{multicol}% many columns in slide +\usepackage{hyperref}% urls +\usepackage{hhline}%tables +\usepackage{natbib} +\usepackage{natbib} +\usepackage{doi} +\usepackage{mathtools} +% Your figures are here: +\graphicspath{ {fig/} {../fig/} } + +%---------------------------------------------------------------------------------------------------------- +\title[\hbox to 56mm{Декодирование мозговых сигналов}]{Декодирование мозговых сигналов в аудиоданные} +\author[М.\,Ф. Набиев]{Набиев Мухаммадшариф Фуркатович} +\institute{Московский физико-технический институт} +\date{\footnotesize +\par\smallskip\emph{Курс:} Моя первая научная статья\par (практика, В.\,В.~Стрижов) +\par\smallskip\emph{Эксперт:} аспирант П.\,А.~Северилов +\par\bigskip\small 2024} +%---------------------------------------------------------------------------------------------------------- +\begin{document} +%---------------------------------------------------------------------------------------------------------- +\begin{frame} +\thispagestyle{empty} +\maketitle +\end{frame} +%----------------------------------------------------------------------------------------------------- +\begin{frame}{Цель исследования} +%\begin{block}{Решается задача} +%\end{block} +\textbf{Цель:} Исследовать влияние физико-информированных энкодеров на качество декодирвование мозговых сигналов в аудиоданные. \\ +\textbf{Задача:} Решить задачу декодирования в постановке классификации, а именно определить, какой сегмент аудио вызвал конкретную мозговую активность. 
+\begin{figure}
+    \centering
+    \includegraphics[width=0.75\textwidth]{task_matchmismatch.png}
+\end{figure}
+\end{frame}
+%-----------------------------------------------------------------------------------------------------
+
+%----------------------------------------------------------------------------------------------------------
+\begin{frame}{Problem statement}
+\textbf{Data:} a tuple $(\mathbf{X}^i, \mathbf{s}_1^i, \dots, \mathbf{s}_K^i)$, where $\mathbf{X}^i \in \mathbb{R}^{64 \times T}$ is a 64-channel EEG signal, $\mathbf{s}_1^i, \dots, \mathbf{s}_K^i \in \mathbb{R}^{1 \times T}$ are the stimuli, and $K$ is the number of stimuli. The label of an object is the vector $\mathbf{y}^i \in \{0, 1\}^K$; exactly one stimulus is the true (matched) one. \bigskip
+
+Given $\mathbf{X}^i, \mathbf{s}_1^i, \dots, \mathbf{s}_K^i$, the model must produce a probability distribution over the stimuli $\mathbf{p}^i = [p_1^i, \dots , p_K^i]^T$. Let the model be a mapping $\mathbf{f} : \mathbb{R}^{64 \times T} \times \left( \mathbb{R}^{1\times T} \right)^K \rightarrow [0, 1]^K$. The task reduces to minimizing the cross-entropy
+    $$CE = - \frac{1}{N}\sum_{i=1}^N\sum_{k=1}^K y_k^i \log \left( \left[ \mathbf{f}(\mathbf{X}^i, \mathbf{S}^i) \right]_k \right),$$
+    where $\mathbf{S}^i = (\mathbf{s}^i_1, \dots, \mathbf{s}^i_K)$. That is, we solve a multiclass classification problem.
+
+\end{frame}
+%----------------------------------------------------------------------------------------------------------
+\begin{frame}{Solution architecture}
+% \includegraphics[width=0.90\textwidth]{model_architecture.png}
+\begin{figure}[t]
+    \centering
+    \includegraphics[width=1\textwidth]{model_architecture.png}
+\end{figure}
+\begin{columns}[c]
+\column{0.5\textwidth}
+    \textbf{Baseline:} a dilated CNN encoder that maps the EEG and the stimuli into latent spaces, where their similarity is computed (see~\citep{Accou2021ModelingTR}).
+\column{0.5\textwidth}
+    \textbf{Proposed improvements:}
+    replace the CNN for the EEG with a transformer and use a physics-informed encoder for the stimulus (see~\citep{multihead-gru},~\citep{Wang2024SelfsupervisedSR}).
+\end{columns}
+\end{frame}
+%----------------------------------------------------------------------------------------------------------
+%----------------------------------------------------------------------------------------------------------
+\begin{frame}{Experimental data}
+The experiments are run on the SparrKULee dataset (see~\citep{K3VSND_2023}).
+\begin{itemize}
+    \item \textbf{Participants:} 85 participants.
+    \item \textbf{Stimuli:} 6--10 audio fragments of different kinds, such as audiobooks and podcasts, each $\approx15$ minutes long.
+\end{itemize}
+\bigskip
+After preprocessing, all data were downsampled to 64~Hz.
+% \begin{figure}
+%     \centering
+%     \includegraphics[width=0.5\textwidth]{envelope_example.png}
+%     \caption{Example of a stimulus envelope}
+% \end{figure}
+\end{frame}
+
+\begin{frame}{Data preparation}
+\begin{figure}[t]
+    \centering
+    \includegraphics[width=0.8\textwidth]{alignment.png}
+\end{figure}
+Let a stimulus denote a segment of an audio fragment. The EEG and the corresponding audio fragment are split into fixed-length segments, and for each (EEG, stimulus) pair mismatched stimuli are generated; a sketch of the windowing is given below.
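+As a sketch of the windowing assumed here (the exact slicing is implemented in \texttt{dataset\_generator.py}): with a window of $W$ samples and a hop of $H$ samples, the $j$-th segment of a recording $\mathbf{X} \in \mathbb{R}^{64 \times T}$ is
+$$\mathbf{X}^{(j)} = \mathbf{X}[\,:\,,\; jH : jH + W\,], \qquad j = 0, \dots, \big\lfloor (T - W)/H \big\rfloor,$$
+with $W = 5\,\text{s} \cdot 64\,\text{Hz} = 320$ and $H = 64$ in the experiments below. The matched stimulus is the same slice of the audio feature, and the mismatched stimuli are other segments of the audio (the exact sampling is an implementation detail of the generator).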
+\end{frame}
+
+\begin{frame}{Computational experiment}
+\begin{columns}[c]
+\column{0.5\textwidth}
+\begin{figure}
+    \includegraphics[width=1\textwidth]{sample.png}
+    \caption{The subsample used in the experiment}
+\end{figure}
+\column{0.5\textwidth}
+The experiment was run on a subsample of the data: 22 participants were selected, together with the audio fragments they listened to and the corresponding EEG recordings. \\
+The following parameters were used:
+\begin{itemize}
+    \item window length: 5 seconds;
+    \item window hop: 1 second;
+    \item number of mismatched stimuli: 4.
+\end{itemize}
+After windowing and generating the mismatched stimuli, this yields 612500 tuples.
+\end{columns}
+
+\end{frame}
+
+
+\begin{frame}{Experimental results}
+Denote the set of classes by $\{0, \dots, K-1\}$. The quality metric is then computed as
+$$
+    \mathrm{Score} = \frac{1}{22} \sum_{i=1}^{22} \frac{1}{l_i} \sum_{j=1}^{l_i} \left[ y^i_j = \mathrm{pred}^i_j \right],
+$$
+where $y^i_j \in \{0, \dots, K-1\}$ is the label of an object, $l_i$ is the number of EEG--stimulus pairs for the $i$-th participant, and $\mathrm{pred}^i_j$ is the model prediction for object $j$.
+\begin{table}[h]
+    \centering
+    \begin{tabular}{|c|c|} \hline
+         Model &  Score (\%) \\ \hline
+         Baseline &  99.08 $\pm$ 0.27 \\
+         Transformer Encoder & \textbf{99.95} $\pm$ 0.04 \\
+         Wav2Vec2 & 99.43 $\pm$ 0.39 \\
+         Whisper-small & 83.31 $\pm$ 4.37 \\
+         Transformer Encoder + Wav2Vec2 & 99.78 $\pm$ 0.16 \\
+         Transformer Encoder + Whisper-small & 95.44 $\pm$ 2.50 \\ \hline
+    \end{tabular}
+    \label{results}
+\end{table}
+
+\end{frame}
+%----------------------------------------------------------------------------------------------------------
+% \begin{frame}{Further plans}
+% \begin{itemize}
+%     \item Add stimulus encoders: conformer, whisper, wav2vec
+%     \item
+% \end{itemize}
+% \end{frame}
+%----------------------------------------------------------------------------------------------------------
+\begin{frame}{References}
+    \bibliographystyle{plain}
+    \bibliography{Nabiev2024SignalToAudio}
+\end{frame}
+\end{document}
\ No newline at end of file
diff --git a/src/README.md b/src/README.md
new file mode 100755
index 0000000..bf1d665
--- /dev/null
+++ b/src/README.md
@@ -0,0 +1,4 @@
+## Description
+- `src/mylib/train.py` --- the class responsible for training the model
+- `src/mylib/utils` --- helper scripts for building the `torch.Dataset` and for working with embeddings
+- `src/mylib/models` --- the model used in the experiments
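+
+## Usage (sketch)
+
+A minimal sketch of how these pieces are meant to be combined. The `Trainer`
+constructor arguments and the numeric values below are assumptions for
+illustration; the method names and their parameters are taken from
+`src/mylib/train.py`, but check the class itself for the exact interface.
+
+```python
+from src.mylib.train import Trainer
+
+# Hypothetical setup: build the model and datasets with the helpers from
+# src/mylib/models and src/mylib/utils, then hand them to the Trainer.
+trainer = Trainer(model=model, train_dataset=train_dataset, val_dataset=val_dataset)
+
+# Train; run_name controls the TensorBoard log directory runs/<run_name>,
+# and checkpoints are written to saved_models/.
+trainer.train_model(epochs=10, run_name="baseline", eps=1e-6)
+
+# Evaluate with the same windowing as in the experiments: 5 s windows,
+# 1 s hop, 4 mismatched stimuli per pair (units assumed to be samples at 64 Hz;
+# max_files=None assumed to mean "use all test files").
+trainer.test(window_length=5 * 64, hop_length=64, number_of_mismatch=4, max_files=None)
+```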
diff --git a/src/README.rst b/src/README.rst
deleted file mode 100755
index 8f32660..0000000
--- a/src/README.rst
+++ /dev/null
@@ -1,25 +0,0 @@
-************
-Installation
-************
-
-Requirements
-============
-
-- Python 3.*
-- pip 20.0.2
-
-Installing by using PyPi
-========================
-
-Install
--------
-.. code-block:: bash
-
-	git clone https://github.com/Intelligent-Systems-Phystech/ProjectTemplate.git /tmp/ProjectTemplate
-	python3 -m pip install /tmp/ProjectTemplate/src/
-
-Uninstall
----------
-.. code-block:: bash
-
-	python3 -m pip uninstall mylib
diff --git a/src/mylib/train.py b/src/mylib/train.py
index 560a75d..6096ccb 100755
--- a/src/mylib/train.py
+++ b/src/mylib/train.py
@@ -7,8 +7,6 @@ import torch.nn.functional as F
 
 from src.mylib.utils.data import TaskDataset
 
-from sklearn.metrics import classification_report
-
 
 class Trainer(object):
     r"""Base class for all trainers."""
@@ -74,7 +72,7 @@ def train_one_epoch(self, epoch_index, writer, eps):
 
     def train_model(self, epochs, run_name, eps):
         r"""Train models"""
-        writer = SummaryWriter(f"runs/{run_name}_{self.model.__class__.__name__}")
+        writer = SummaryWriter(f"runs/{run_name}")
 
         best_vloss = 1_000_000
 
         if not os.path.isdir("saved_models"):
@@ -136,6 +134,6 @@ def test(self, window_length, hop_length, number_of_mismatch, max_files):
                 _, predicted = torch.max(outputs.data, 1)
                 correct += (predicted == label).long().item()
 
-            print(f"    Mean accuracy per subject: {100 * correct / len(test_dataloader)}")
+            print(f"    Accuracy per subject: {100 * correct / len(test_dataloader)}")
             accuracy_per_sub.append(100 * correct / len(test_dataloader))
         print("Score: ", np.mean(accuracy_per_sub))
diff --git a/src/setup.py b/src/setup.py
index f9c5472..0c7abde 100755
--- a/src/setup.py
+++ b/src/setup.py
@@ -9,7 +9,7 @@ def read(file_path):
         return f.read()
 
 
-readme = read('README.rst')
+readme = read('README.md')
 
 # strip local version identifiers from the requirements file (per PEP 440)
 requirements = '\n'.join(
     re.findall(r'^([^\s^+]+).*$',