From 16d7bc4a97c735499fbdb921d33983d94d5f5ab9 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Wed, 31 Jul 2024 15:23:56 -0700 Subject: [PATCH 01/63] create notebook for dev --- rope.ipynb | 619 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 619 insertions(+) create mode 100644 rope.ipynb diff --git a/rope.ipynb b/rope.ipynb new file mode 100644 index 0000000..2652e38 --- /dev/null +++ b/rope.ipynb @@ -0,0 +1,619 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "1bd666a7-0ad1-4ae7-a56e-43429a1228d8", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/opt/conda/envs/dreem/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" + ] + } + ], + "source": [ + "import numpy as np\n", + "import dreem\n", + "import os\n", + "import matplotlib.pyplot as plt\n", + "\n", + "import math\n", + "import torch\n", + "import logging\n", + "from dreem.models.mlp import MLP\n", + "from dreem.models.model_utils import *\n", + "from dreem.datasets import SleapDataset" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "a8736593-71f7-4ab6-a594-eb52d2fd94ac", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "\"\"\"Module containing different position and temporal embeddings.\"\"\"\n", + "\n", + "logger = logging.getLogger(\"dreem.models\")\n", + "# todo: add named tensors, clean variable names\n", + "\n", + "\n", + "class Embedding(torch.nn.Module):\n", + " \"\"\"Class that wraps around different embedding types.\n", + "\n", + " Used for both learned and fixed embeddings.\n", + " \"\"\"\n", + "\n", + " EMB_TYPES = {\n", + " \"temp\": {},\n", + " \"pos\": {\"over_boxes\"},\n", + " \"off\": {},\n", + " None: {},\n", + " } # dict of valid args:keyword params\n", + " EMB_MODES = {\n", + " \"fixed\": {\"temperature\", \"scale\", \"normalize\"},\n", + " \"learned\": {\"emb_num\"},\n", + " \"off\": {},\n", + " } # dict of valid args:keyword params\n", + "\n", + " def __init__(\n", + " self,\n", + " emb_type: str,\n", + " mode: str,\n", + " features: int,\n", + " n_points: int = 1,\n", + " emb_num: int = 16,\n", + " over_boxes: bool = True,\n", + " temperature: int = 10000,\n", + " normalize: bool = False,\n", + " scale: float | None = None,\n", + " mlp_cfg: dict | None = None,\n", + " ):\n", + " \"\"\"Initialize embeddings.\n", + "\n", + " Args:\n", + " emb_type: The type of embedding to compute. Must be one of `{\"temp\", \"pos\", \"off\"}`\n", + " mode: The mode or function used to map positions to vector embeddings.\n", + " Must be one of `{\"fixed\", \"learned\", \"off\"}`\n", + " features: The embedding dimensions. 
Must match the dimension of the\n", + " input vectors for the transformer model.\n", + " n_points: the number of points that will be embedded.\n", + " emb_num: the number of embeddings in the `self.lookup` table (Only used in learned embeddings).\n", + " over_boxes: Whether to compute the position embedding for each bbox coordinate (y1x1y2x2) or the centroid + bbox size (yxwh).\n", + " temperature: the temperature constant to be used when computing the sinusoidal position embedding\n", + " normalize: whether or not to normalize the positions (Only used in fixed embeddings).\n", + " scale: factor by which to scale the positions after normalizing (Only used in fixed embeddings).\n", + " mlp_cfg: A dictionary of mlp hyperparameters for projecting embedding to correct space.\n", + " Example: {\"hidden_dims\": 256, \"num_layers\":3, \"dropout\": 0.3}\n", + " \"\"\"\n", + " self._check_init_args(emb_type, mode)\n", + "\n", + " super().__init__()\n", + "\n", + " self.emb_type = emb_type\n", + " self.mode = mode\n", + " self.features = features\n", + " self.emb_num = emb_num\n", + " self.over_boxes = over_boxes\n", + " self.temperature = temperature\n", + " self.normalize = normalize\n", + " self.scale = scale\n", + " self.n_points = n_points\n", + "\n", + " if self.normalize and self.scale is None:\n", + " self.scale = 2 * math.pi\n", + "\n", + " if self.emb_type == \"pos\" and mlp_cfg is not None and mlp_cfg[\"num_layers\"] > 0:\n", + " if self.mode == \"fixed\":\n", + " self.mlp = MLP(\n", + " input_dim=n_points * self.features,\n", + " output_dim=self.features,\n", + " **mlp_cfg,\n", + " )\n", + " else:\n", + " in_dim = (self.features // (4 * n_points)) * (4 * n_points)\n", + " self.mlp = MLP(\n", + " input_dim=in_dim,\n", + " output_dim=self.features,\n", + " **mlp_cfg,\n", + " )\n", + " else:\n", + " self.mlp = torch.nn.Identity()\n", + "\n", + " self._emb_func = lambda tensor: torch.zeros(\n", + " (tensor.shape[0], self.features), dtype=tensor.dtype, device=tensor.device\n", + " ) # turn off embedding by returning zeros\n", + "\n", + " self.lookup = None\n", + "\n", + " if self.mode == \"learned\":\n", + " if self.emb_type == \"pos\":\n", + " self.lookup = torch.nn.Embedding(\n", + " self.emb_num * 4 * self.n_points, self.features // (4 * n_points)\n", + " )\n", + " self._emb_func = self._learned_pos_embedding\n", + " elif self.emb_type == \"temp\":\n", + " self.lookup = torch.nn.Embedding(self.emb_num, self.features)\n", + " self._emb_func = self._learned_temp_embedding\n", + "\n", + " elif self.mode == \"fixed\":\n", + " if self.emb_type == \"pos\":\n", + " self._emb_func = self._sine_box_embedding\n", + " elif self.emb_type == \"temp\":\n", + " self._emb_func = self._sine_temp_embedding\n", + "\n", + " def _check_init_args(self, emb_type: str, mode: str):\n", + " \"\"\"Check whether the correct arguments were passed to initialization.\n", + "\n", + " Args:\n", + " emb_type: The type of embedding to compute. 
Must be one of `{\"temp\", \"pos\", \"\"}`\n", + " mode: The mode or function used to map positions to vector embeddings.\n", + " Must be one of `{\"fixed\", \"learned\"}`\n", + "\n", + " Raises:\n", + " ValueError:\n", + " * if the incorrect `emb_type` or `mode` string are passed\n", + " NotImplementedError: if `emb_type` is `temp` and `mode` is `fixed`.\n", + " \"\"\"\n", + " if emb_type.lower() not in self.EMB_TYPES:\n", + " raise ValueError(\n", + " f\"Embedding `emb_type` must be one of {self.EMB_TYPES} not {emb_type}\"\n", + " )\n", + "\n", + " if mode.lower() not in self.EMB_MODES:\n", + " raise ValueError(\n", + " f\"Embedding `mode` must be one of {self.EMB_MODES} not {mode}\"\n", + " )\n", + "\n", + " def forward(self, seq_positions: torch.Tensor) -> torch.Tensor:\n", + " \"\"\"Get the sequence positional embeddings.\n", + "\n", + " Args:\n", + " seq_positions:\n", + " * An (`N`, 1) tensor where seq_positions[i] represents the temporal position of instance_i in the sequence.\n", + " * An (`N`, n_anchors x 4) tensor where seq_positions[i, j, :] represents the [y1, x1, y2, x2] spatial locations of jth point of instance_i in the sequence.\n", + "\n", + " Returns:\n", + " An `N` x `self.features` tensor representing the corresponding spatial or temporal embedding.\n", + " \"\"\"\n", + " emb = self._emb_func(seq_positions)\n", + "\n", + " if emb.shape[-1] != self.features:\n", + " raise RuntimeError(\n", + " (\n", + " f\"Output embedding dimension is {emb.shape[-1]} but requested {self.features} dimensions! \\n\"\n", + " f\"hint: Try turning the MLP on by passing `mlp_cfg` to the constructor to project to the correct embedding dimensions.\"\n", + " )\n", + " )\n", + " return emb\n", + "\n", + " def _torch_int_div(\n", + " self, tensor1: torch.Tensor, tensor2: torch.Tensor\n", + " ) -> torch.Tensor:\n", + " \"\"\"Perform integer division of two tensors.\n", + "\n", + " Args:\n", + " tensor1: dividend tensor.\n", + " tensor2: divisor tensor.\n", + "\n", + " Returns:\n", + " torch.Tensor, resulting tensor.\n", + " \"\"\"\n", + " return torch.div(tensor1, tensor2, rounding_mode=\"floor\")\n", + "\n", + " def _sine_box_embedding(self, boxes: torch.Tensor) -> torch.Tensor:\n", + " \"\"\"Compute sine positional embeddings for boxes using given parameters.\n", + "\n", + " Args:\n", + " boxes: the input boxes of shape N, n_anchors, 4 or B, N, n_anchors, 4\n", + " where the last dimension is the bbox coords in [y1, x1, y2, x2].\n", + " (Note currently `B=batch_size=1`).\n", + "\n", + " Returns:\n", + " torch.Tensor, the sine positional embeddings\n", + " (embedding[:, 4i] = sin(x)\n", + " embedding[:, 4i+1] = cos(x)\n", + " embedding[:, 4i+2] = sin(y)\n", + " embedding[:, 4i+3] = cos(y)\n", + " )\n", + " \"\"\"\n", + " if self.scale is not None and self.normalize is False:\n", + " raise ValueError(\"normalize should be True if scale is passed\")\n", + "\n", + " if len(boxes.size()) == 3:\n", + " boxes = boxes.unsqueeze(0)\n", + "\n", + " if self.normalize:\n", + " boxes = boxes / (boxes[:, :, -1:] + 1e-6) * self.scale\n", + "\n", + " dim_t = torch.arange(self.features // 4, dtype=torch.float32)\n", + "\n", + " dim_t = self.temperature ** (\n", + " 2 * self._torch_int_div(dim_t, 2) / (self.features // 4)\n", + " )\n", + "\n", + " # (b, n_t, n_anchors, 4, D//4)\n", + " pos_emb = boxes[:, :, :, :, None] / dim_t.to(boxes.device)\n", + "\n", + " pos_emb = torch.stack(\n", + " (pos_emb[:, :, :, :, 0::2].sin(), pos_emb[:, :, :, :, 1::2].cos()), dim=4\n", + " )\n", + " pos_emb = 
pos_emb.flatten(2).squeeze(0) # (N_t, n_anchors * D)\n", + "\n", + " pos_emb = self.mlp(pos_emb)\n", + "\n", + " pos_emb = pos_emb.view(boxes.shape[1], self.features)\n", + "\n", + " return pos_emb\n", + "\n", + " def _sine_temp_embedding(self, times: torch.Tensor) -> torch.Tensor:\n", + " \"\"\"Compute fixed sine temporal embeddings.\n", + "\n", + " Args:\n", + " times: the input times of shape (N,) or (N,1) where N = (sum(instances_per_frame))\n", + " which is the frame index of the instance relative\n", + " to the batch size\n", + " (e.g. `torch.tensor([0, 0, ..., 0, 1, 1, ..., 1, 2, 2, ..., 2,..., B, B, ...B])`).\n", + "\n", + " Returns:\n", + " an n_instances x D embedding representing the temporal embedding.\n", + " \"\"\"\n", + " T = times.int().max().item() + 1\n", + " d = self.features\n", + " n = self.temperature\n", + "\n", + " positions = torch.arange(0, T).unsqueeze(1)\n", + " temp_lookup = torch.zeros(T, d, device=times.device)\n", + "\n", + " denominators = torch.pow(\n", + " n, 2 * torch.arange(0, d // 2) / d\n", + " ) # 10000^(2i/d_model), i is the index of embedding\n", + " temp_lookup[:, 0::2] = torch.sin(\n", + " positions / denominators\n", + " ) # sin(pos/10000^(2i/d_model))\n", + " temp_lookup[:, 1::2] = torch.cos(\n", + " positions / denominators\n", + " ) # cos(pos/10000^(2i/d_model))\n", + "\n", + " temp_emb = temp_lookup[times.int()]\n", + " return temp_emb # .view(len(times), self.features)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "525188c5-1317-4003-90d1-bb1c4b9e9112", + "metadata": { + "jupyter": { + "source_hidden": true + }, + "tags": [] + }, + "outputs": [], + "source": [ + "def _learned_pos_embedding(self, boxes: torch.Tensor) -> torch.Tensor:\n", + " \"\"\"Compute learned positional embeddings for boxes using given parameters.\n", + "\n", + " Args:\n", + " boxes: the input boxes of shape N x 4 or B x N x 4\n", + " where the last dimension is the bbox coords in [y1, x1, y2, x2].\n", + " (Note currently `B=batch_size=1`).\n", + "\n", + " Returns:\n", + " torch.Tensor, the learned positional embeddings.\n", + " \"\"\"\n", + " pos_lookup = self.lookup\n", + "\n", + " N, n_anchors, _ = boxes.shape\n", + " boxes = boxes.view(N, n_anchors, 4)\n", + "\n", + " if self.over_boxes:\n", + " xywh = boxes\n", + " else:\n", + " xywh = torch.cat(\n", + " [\n", + " (boxes[:, :, 2:] + boxes[:, :, :2]) / 2,\n", + " (boxes[:, :, 2:] - boxes[:, :, :2]),\n", + " ],\n", + " dim=1,\n", + " )\n", + "\n", + " left_ind, right_ind, left_weight, right_weight = self._compute_weights(xywh)\n", + " f = pos_lookup.weight.shape[1] # self.features // 4\n", + "\n", + " try:\n", + " pos_emb_table = pos_lookup.weight.view(\n", + " self.emb_num, n_anchors, 4, f\n", + " ) # T x 4 x (D * 4)\n", + " except RuntimeError as e:\n", + " logger.exception(\n", + " f\"Hint: `n_points` ({self.n_points}) may be set incorrectly!\"\n", + " )\n", + " logger.exception(e)\n", + " raise (e)\n", + "\n", + " left_emb = pos_emb_table.gather(\n", + " 0,\n", + " left_ind[:, :, :, None].to(pos_emb_table.device).expand(N, n_anchors, 4, f),\n", + " ) # N x 4 x d\n", + " right_emb = pos_emb_table.gather(\n", + " 0,\n", + " right_ind[:, :, :, None]\n", + " .to(pos_emb_table.device)\n", + " .expand(N, n_anchors, 4, f),\n", + " ) # N x 4 x d\n", + " pos_emb = left_weight[:, :, :, None] * right_emb.to(\n", + " left_weight.device\n", + " ) + right_weight[:, :, :, None] * left_emb.to(right_weight.device)\n", + "\n", + " pos_emb = pos_emb.flatten(1)\n", + " pos_emb = self.mlp(pos_emb)\n", + "\n", 
+ " return pos_emb.view(N, self.features)\n", + "\n", + "\n", + "def _learned_temp_embedding(self, times: torch.Tensor) -> torch.Tensor:\n", + " \"\"\"Compute learned temporal embeddings for times using given parameters.\n", + "\n", + " Args:\n", + " times: the input times of shape (N,) or (N,1) where N = (sum(instances_per_frame))\n", + " which is the frame index of the instance relative\n", + " to the batch size\n", + " (e.g. `torch.tensor([0, 0, ..., 0, 1, 1, ..., 1, 2, 2, ..., 2,..., B, B, ...B])`).\n", + "\n", + " Returns:\n", + " torch.Tensor, the learned temporal embeddings.\n", + " \"\"\"\n", + " temp_lookup = self.lookup\n", + " N = times.shape[0]\n", + "\n", + " left_ind, right_ind, left_weight, right_weight = self._compute_weights(times)\n", + "\n", + " left_emb = temp_lookup.weight[\n", + " left_ind.to(temp_lookup.weight.device)\n", + " ] # T x D --> N x D\n", + " right_emb = temp_lookup.weight[right_ind.to(temp_lookup.weight.device)]\n", + "\n", + " temp_emb = left_weight[:, None] * right_emb.to(\n", + " left_weight.device\n", + " ) + right_weight[:, None] * left_emb.to(right_weight.device)\n", + "\n", + " return temp_emb.view(N, self.features)\n", + "\n", + " def _compute_weights(self, data: torch.Tensor) -> tuple[torch.Tensor, ...]:\n", + " \"\"\"Compute left and right learned embedding weights.\n", + "\n", + " Args:\n", + " data: the input data (e.g boxes or times).\n", + "\n", + " Returns:\n", + " A torch.Tensor for each of the left/right indices and weights, respectively\n", + " \"\"\"\n", + " data = data * self.emb_num\n", + "\n", + " left_ind = data.clamp(min=0, max=self.emb_num - 1).long() # N x 4\n", + " right_ind = (left_ind + 1).clamp(min=0, max=self.emb_num - 1).long() # N x 4\n", + "\n", + " left_weight = data - left_ind.float() # N x 4\n", + "\n", + " right_weight = 1.0 - left_weight\n", + "\n", + " return left_ind, right_ind, left_weight, right_weight" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "fc8aa9bf-7e83-4fa6-892e-8a7703777f95", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# create Embedding object\n", + "emb = Embedding(emb_type=\"temp\",mode=\"fixed\",features=1024,emb_num=16,n_points=1,temperature=10000)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "4903f6c3-1cc8-412b-b988-ebeb2757c3b7", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# get sample crops from training data to pass through the network\n", + "train_path = \"/home/jovyan/talmolab-smb/datasets/mot/microscopy/airyscan_proofread/Final/dreem-train\"\n", + " \n", + "data = SleapDataset([os.path.join(train_path,\"10-1.slp\")], [os.path.join(train_path,\"10-1.mp4\")], crop_size=64,\n", + " mode=\"train\", clip_length=32, anchors=\"centroid\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "27bfdb50-2eee-4207-8a16-6481a9905e90", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# get a list of all instances; this is the format that the model pipeline uses as input data\n", + "ref_instances = []\n", + "for frame in data[0]:\n", + " for instance in frame.instances:\n", + " ref_instances.append(instance)" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "8ea441b2-b12a-4f10-8821-aef889a063ba", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# get the vector of times using the list of crops+labels\n", + "# query_instance is the instances in last frame (set to None)\n", + "ref_times, query_times = get_times(ref_instances, None)" + ] + 
}, + { + "cell_type": "code", + "execution_count": 7, + "id": "b863dbfe-d9fc-4ed1-bf97-3f304d3d03a6", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "[base64 PNG data omitted: matplotlib heatmap of the fixed sinusoidal temporal embedding produced by plt.imshow]", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# call fixed temporal embedding with the vector of 'times'\n", + "plt.imshow(emb(ref_times).numpy(), aspect='auto')" + ] + }, + { + "cell_type": "markdown", + "id": "a972707a-51a7-45ff-987e-80ee0dea4752", + "metadata": {}, + "source": [ + "### Rotary Positional Embeddings" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5d01ca30-c642-4a50-bd5b-802711c4bb16", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import torchtune.modules as tune\n", + "from dreem.models.transformer import TransformerEncoderLayer\n", + "from dreem.models import VisualEncoder\n", + "from dreem.models import GlobalTrackingTransformer" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "id": "87bebe90-d8e7-40bf-8783-ee5c57944632", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "feat_dim = 1024\n", + "xfmr_encoder = TransformerEncoderLayer(d_model=feat_dim, nhead=8)\n", + "visual_encoder = VisualEncoder(d_model=feat_dim, model_name=\"resnet18\")\n", + "rope = tune.RotaryPositionalEmbeddings(16,32,10000)" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "7999fcef-953b-42cf-927c-f3b617f68157", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "def extract_features(\n", + " instances: list[\"Instance\"], \n", + " visual_encoder: \"dreem.models.VisualEncoder\",\n", + " force_recompute: bool = False\n", + " ) -> None:\n", + " \"\"\"Extract features from instances using visual encoder backbone.\n", + "\n", + " Args:\n", + " instances: A list of instances to compute features for\n", + " VisualEncoder : pass an instance of a visual encoder\n", + " force_recompute: indicate whether to compute features for all instances regardless of if they have instances\n", + " \"\"\"\n", + " if not force_recompute:\n", + " instances_to_compute = [\n", + " instance\n", + " for instance in instances\n", + " if instance.has_crop() and not instance.has_features()\n", + " ]\n", + " else:\n", + " instances_to_compute = instances\n", + "\n", + " if len(instances_to_compute) == 0:\n", + " return\n", + " elif len(instances_to_compute) == 1: # handle batch norm error when B=1\n", + " instances_to_compute = instances\n", + "\n", + " crops = torch.concatenate([instance.crop for instance in instances_to_compute])\n", + "\n", + " features = visual_encoder(crops)\n", + "\n", + " for i, z_i in enumerate(features):\n", + " instances_to_compute[i].features = z_i" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "75ec8cab-25b9-4e9e-a64a-b5dbe00cc81a", + "metadata": {}, + "outputs": [], + "source": [ + "# pass instances through visual encoder to get the feature vector (q,k,v)\n", + "x = extract_features()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "dreem", + "language": "python", + "name": "dreem" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 6f2c7adea523335e8ef3f4c1a0085d01bb101214 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Wed, 31 Jul 2024 15:51:21 -0700 Subject: [PATCH 02/63] test update of notebook --- rope.ipynb | 180 +++++++++++------------------------------------------ 1 file changed, 36 insertions(+), 144 deletions(-) diff --git a/rope.ipynb 
b/rope.ipynb index 2652e38..310fa2c 100644 --- a/rope.ipynb +++ b/rope.ipynb @@ -12,7 +12,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "/opt/conda/envs/dreem/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + "/opt/miniconda3/envs/dreem/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", " from .autonotebook import tqdm as notebook_tqdm\n" ] } @@ -33,7 +33,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 4, "id": "a8736593-71f7-4ab6-a594-eb52d2fd94ac", "metadata": { "tags": [] @@ -289,129 +289,7 @@ }, { "cell_type": "code", - "execution_count": null, - "id": "525188c5-1317-4003-90d1-bb1c4b9e9112", - "metadata": { - "jupyter": { - "source_hidden": true - }, - "tags": [] - }, - "outputs": [], - "source": [ - "def _learned_pos_embedding(self, boxes: torch.Tensor) -> torch.Tensor:\n", - " \"\"\"Compute learned positional embeddings for boxes using given parameters.\n", - "\n", - " Args:\n", - " boxes: the input boxes of shape N x 4 or B x N x 4\n", - " where the last dimension is the bbox coords in [y1, x1, y2, x2].\n", - " (Note currently `B=batch_size=1`).\n", - "\n", - " Returns:\n", - " torch.Tensor, the learned positional embeddings.\n", - " \"\"\"\n", - " pos_lookup = self.lookup\n", - "\n", - " N, n_anchors, _ = boxes.shape\n", - " boxes = boxes.view(N, n_anchors, 4)\n", - "\n", - " if self.over_boxes:\n", - " xywh = boxes\n", - " else:\n", - " xywh = torch.cat(\n", - " [\n", - " (boxes[:, :, 2:] + boxes[:, :, :2]) / 2,\n", - " (boxes[:, :, 2:] - boxes[:, :, :2]),\n", - " ],\n", - " dim=1,\n", - " )\n", - "\n", - " left_ind, right_ind, left_weight, right_weight = self._compute_weights(xywh)\n", - " f = pos_lookup.weight.shape[1] # self.features // 4\n", - "\n", - " try:\n", - " pos_emb_table = pos_lookup.weight.view(\n", - " self.emb_num, n_anchors, 4, f\n", - " ) # T x 4 x (D * 4)\n", - " except RuntimeError as e:\n", - " logger.exception(\n", - " f\"Hint: `n_points` ({self.n_points}) may be set incorrectly!\"\n", - " )\n", - " logger.exception(e)\n", - " raise (e)\n", - "\n", - " left_emb = pos_emb_table.gather(\n", - " 0,\n", - " left_ind[:, :, :, None].to(pos_emb_table.device).expand(N, n_anchors, 4, f),\n", - " ) # N x 4 x d\n", - " right_emb = pos_emb_table.gather(\n", - " 0,\n", - " right_ind[:, :, :, None]\n", - " .to(pos_emb_table.device)\n", - " .expand(N, n_anchors, 4, f),\n", - " ) # N x 4 x d\n", - " pos_emb = left_weight[:, :, :, None] * right_emb.to(\n", - " left_weight.device\n", - " ) + right_weight[:, :, :, None] * left_emb.to(right_weight.device)\n", - "\n", - " pos_emb = pos_emb.flatten(1)\n", - " pos_emb = self.mlp(pos_emb)\n", - "\n", - " return pos_emb.view(N, self.features)\n", - "\n", - "\n", - "def _learned_temp_embedding(self, times: torch.Tensor) -> torch.Tensor:\n", - " \"\"\"Compute learned temporal embeddings for times using given parameters.\n", - "\n", - " Args:\n", - " times: the input times of shape (N,) or (N,1) where N = (sum(instances_per_frame))\n", - " which is the frame index of the instance relative\n", - " to the batch size\n", - " (e.g. 
`torch.tensor([0, 0, ..., 0, 1, 1, ..., 1, 2, 2, ..., 2,..., B, B, ...B])`).\n", - "\n", - " Returns:\n", - " torch.Tensor, the learned temporal embeddings.\n", - " \"\"\"\n", - " temp_lookup = self.lookup\n", - " N = times.shape[0]\n", - "\n", - " left_ind, right_ind, left_weight, right_weight = self._compute_weights(times)\n", - "\n", - " left_emb = temp_lookup.weight[\n", - " left_ind.to(temp_lookup.weight.device)\n", - " ] # T x D --> N x D\n", - " right_emb = temp_lookup.weight[right_ind.to(temp_lookup.weight.device)]\n", - "\n", - " temp_emb = left_weight[:, None] * right_emb.to(\n", - " left_weight.device\n", - " ) + right_weight[:, None] * left_emb.to(right_weight.device)\n", - "\n", - " return temp_emb.view(N, self.features)\n", - "\n", - " def _compute_weights(self, data: torch.Tensor) -> tuple[torch.Tensor, ...]:\n", - " \"\"\"Compute left and right learned embedding weights.\n", - "\n", - " Args:\n", - " data: the input data (e.g boxes or times).\n", - "\n", - " Returns:\n", - " A torch.Tensor for each of the left/right indices and weights, respectively\n", - " \"\"\"\n", - " data = data * self.emb_num\n", - "\n", - " left_ind = data.clamp(min=0, max=self.emb_num - 1).long() # N x 4\n", - " right_ind = (left_ind + 1).clamp(min=0, max=self.emb_num - 1).long() # N x 4\n", - "\n", - " left_weight = data - left_ind.float() # N x 4\n", - "\n", - " right_weight = 1.0 - left_weight\n", - "\n", - " return left_ind, right_ind, left_weight, right_weight" - ] - }, - { - "cell_type": "code", - "execution_count": 3, + "execution_count": 5, "id": "fc8aa9bf-7e83-4fa6-892e-8a7703777f95", "metadata": { "tags": [] @@ -424,7 +302,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 2, "id": "4903f6c3-1cc8-412b-b988-ebeb2757c3b7", "metadata": { "tags": [] @@ -432,15 +310,15 @@ "outputs": [], "source": [ "# get sample crops from training data to pass through the network\n", - "train_path = \"/home/jovyan/talmolab-smb/datasets/mot/microscopy/airyscan_proofread/Final/dreem-train\"\n", - " \n", + "# train_path = \"/home/jovyan/talmolab-smb/datasets/mot/microscopy/airyscan_proofread/Final/dreem-train\"\n", + "train_path = \"/Users/mustafashaikh/dreem-data/dreem-train\"\n", "data = SleapDataset([os.path.join(train_path,\"10-1.slp\")], [os.path.join(train_path,\"10-1.mp4\")], crop_size=64,\n", " mode=\"train\", clip_length=32, anchors=\"centroid\")" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 6, "id": "27bfdb50-2eee-4207-8a16-6481a9905e90", "metadata": { "tags": [] @@ -456,7 +334,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 7, "id": "8ea441b2-b12a-4f10-8821-aef889a063ba", "metadata": { "tags": [] @@ -470,7 +348,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 8, "id": "b863dbfe-d9fc-4ed1-bf97-3f304d3d03a6", "metadata": { "tags": [] @@ -479,10 +357,10 @@ { "data": { "text/plain": [ - "" + "" ] }, - "execution_count": 7, + "execution_count": 8, "metadata": {}, "output_type": "execute_result" }, @@ -512,7 +390,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 10, "id": "5d01ca30-c642-4a50-bd5b-802711c4bb16", "metadata": { "tags": [] @@ -527,11 +405,9 @@ }, { "cell_type": "code", - "execution_count": 27, - "id": "87bebe90-d8e7-40bf-8783-ee5c57944632", - "metadata": { - "tags": [] - }, + "execution_count": 11, + "id": "8b17fdb7", + "metadata": {}, "outputs": [], "source": [ "feat_dim = 1024\n", @@ -542,7 +418,7 @@ }, { "cell_type": "code", - "execution_count": 28, + 
"execution_count": 12, "id": "7999fcef-953b-42cf-927c-f3b617f68157", "metadata": { "tags": [] @@ -585,13 +461,29 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 14, "id": "75ec8cab-25b9-4e9e-a64a-b5dbe00cc81a", "metadata": {}, - "outputs": [], + "outputs": [ + { + "ename": "AttributeError", + "evalue": "'Tensor' object has no attribute 'has_crop'", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[14], line 2\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;66;03m# pass instances through visual encoder to get the feature vector (q,k,v)\u001b[39;00m\n\u001b[0;32m----> 2\u001b[0m x \u001b[38;5;241m=\u001b[39m \u001b[43mextract_features\u001b[49m\u001b[43m(\u001b[49m\u001b[43mref_times\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvisual_encoder\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 4\u001b[0m \u001b[38;5;66;03m# pass through fixed positional embedding (just to test output)\u001b[39;00m\n", + "Cell \u001b[0;32mIn[12], line 14\u001b[0m, in \u001b[0;36mextract_features\u001b[0;34m(instances, visual_encoder, force_recompute)\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Extract features from instances using visual encoder backbone.\u001b[39;00m\n\u001b[1;32m 7\u001b[0m \n\u001b[1;32m 8\u001b[0m \u001b[38;5;124;03mArgs:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[38;5;124;03m force_recompute: indicate whether to compute features for all instances regardless of if they have instances\u001b[39;00m\n\u001b[1;32m 12\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 13\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m force_recompute:\n\u001b[0;32m---> 14\u001b[0m instances_to_compute \u001b[38;5;241m=\u001b[39m \u001b[43m[\u001b[49m\n\u001b[1;32m 15\u001b[0m \u001b[43m \u001b[49m\u001b[43minstance\u001b[49m\n\u001b[1;32m 16\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43minstance\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43minstances\u001b[49m\n\u001b[1;32m 17\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43minstance\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mhas_crop\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01mand\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;129;43;01mnot\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43minstance\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mhas_features\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 18\u001b[0m \u001b[43m \u001b[49m\u001b[43m]\u001b[49m\n\u001b[1;32m 19\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 20\u001b[0m instances_to_compute \u001b[38;5;241m=\u001b[39m instances\n", + "Cell \u001b[0;32mIn[12], line 17\u001b[0m, in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Extract features from instances using visual encoder backbone.\u001b[39;00m\n\u001b[1;32m 7\u001b[0m \n\u001b[1;32m 8\u001b[0m \u001b[38;5;124;03mArgs:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[38;5;124;03m force_recompute: indicate whether to compute features for all instances regardless of 
if they have instances\u001b[39;00m\n\u001b[1;32m 12\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 13\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m force_recompute:\n\u001b[1;32m 14\u001b[0m instances_to_compute \u001b[38;5;241m=\u001b[39m [\n\u001b[1;32m 15\u001b[0m instance\n\u001b[1;32m 16\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m instance \u001b[38;5;129;01min\u001b[39;00m instances\n\u001b[0;32m---> 17\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[43minstance\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mhas_crop\u001b[49m() \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m instance\u001b[38;5;241m.\u001b[39mhas_features()\n\u001b[1;32m 18\u001b[0m ]\n\u001b[1;32m 19\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 20\u001b[0m instances_to_compute \u001b[38;5;241m=\u001b[39m instances\n", + "\u001b[0;31mAttributeError\u001b[0m: 'Tensor' object has no attribute 'has_crop'" + ] + } + ], "source": [ "# pass instances through visual encoder to get the feature vector (q,k,v)\n", - "x = extract_features()" + "x = extract_features(ref_instances, visual_encoder)\n", + "\n", + "# pass through fixed positional embedding (just to test output)" ] } ], @@ -599,7 +491,7 @@ "kernelspec": { "display_name": "dreem", "language": "python", - "name": "dreem" + "name": "python3" }, "language_info": { "codemirror_mode": { From b82c4d445f3755f0c5af31e55c2b98bceacd28a0 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Thu, 1 Aug 2024 20:07:24 -0700 Subject: [PATCH 03/63] implement rope embedding - changes to embedding class - add apply() function to Embedding class - remove references to embedding from encoderlayer fwd pass --- dreem/models/embedding.py | 173 +++++++++++++++++++++++++++++- dreem/models/transformer.py | 14 ++- rope.ipynb | 207 +++++++++++++++++++++++++----------- 3 files changed, 320 insertions(+), 74 deletions(-) diff --git a/dreem/models/embedding.py b/dreem/models/embedding.py index 8a959c9..c5b9ce3 100644 --- a/dreem/models/embedding.py +++ b/dreem/models/embedding.py @@ -3,15 +3,127 @@ import math import torch import logging +from torch import nn, Tensor +from typing import Optional from dreem.models.mlp import MLP logger = logging.getLogger("dreem.models") # todo: add named tensors, clean variable names -class Embedding(torch.nn.Module): - """Class that wraps around different embedding types. +class RotaryPositionalEmbeddings(nn.Module): + """ + This class implements Rotary Positional Embeddings (RoPE) + proposed in https://arxiv.org/abs/2104.09864. + + Reference implementation (used for correctness verfication) + can be found here: + https://github.com/meta-llama/llama/blob/main/llama/model.py#L80 + + In this implementation we cache the embeddings for each position upto + ``max_seq_len`` by computing this during init. + + Args: + dim (int): Embedding dimension. 
This is usually set to the dim of each + head in the attention module computed as ````embed_dim`` // ``num_heads```` + max_seq_len (int): Maximum expected sequence length for the + model, if exceeded the cached freqs will be recomputed + base (int): The base for the geometric progression used to compute + the rotation angles + """ + + def __init__( + self, + dim: int, + max_seq_len: int = 4096, + base: int = 10_000, + ) -> None: + super().__init__() + self.dim = dim + self.base = base + self.max_seq_len = max_seq_len + self._rope_init() + + # We need to explicitly define reset_parameters for FSDP initialization, see + # https://github.com/pytorch/pytorch/blob/797d4fbdf423dd9320ebe383fb57ffb1135c4a99/torch/distributed/fsdp/_init_utils.py#L885 + def reset_parameters(self): + self._rope_init() + + def _rope_init(self): + theta = 1.0 / ( + self.base + ** (torch.arange(0, self.dim, 2)[: (self.dim // 2)].float() / self.dim) + ) + self.register_buffer("theta", theta, persistent=False) + self.build_rope_cache(self.max_seq_len) + + def build_rope_cache(self, max_seq_len: int = 4096) -> None: + # Create position indexes `[0, 1, ..., max_seq_len - 1]` + seq_idx = torch.arange( + max_seq_len, dtype=self.theta.dtype, device=self.theta.device + ) + + # Outer product of theta and position index; output tensor has + # a shape of [max_seq_len, dim // 2] + idx_theta = torch.einsum("i, j -> ij", seq_idx, self.theta).float() + + # cache includes both the cos and sin components and so the output shape is + # [max_seq_len, dim // 2, 2] + cache = torch.stack([torch.cos(idx_theta), torch.sin(idx_theta)], dim=-1) + self.register_buffer("cache", cache, persistent=False) + + def forward(self, x: Tensor, *, input_pos: Optional[Tensor] = None) -> Tensor: + """ + Args: + x (Tensor): input tensor with shape + [b, s, n_h, h_d] + input_pos (Optional[Tensor]): Optional tensor which contains the position ids + of each token. During training, this is used to indicate the positions + of each token relative to its sample when packed, shape [b, s]. + During inference, this indicates the position of the current token. + If none, assume the index of the token is its position id. Default is None. + Returns: + Tensor: output tensor with RoPE applied + + Notation used for tensor shapes: + - b: batch size + - s: sequence length + - n_h: num heads + - h_d: head dim + + TODO: The implementation below can be made more efficient + for inference. + """ + # input tensor has shape [b, s, n_h, h_d] + seq_len = x.size(1) + + # extract the values based on whether input_pos is set or not + rope_cache = ( + self.cache[:seq_len] if input_pos is None else self.cache[input_pos] + ) + + # reshape input; the last dimension is used for computing the output. + # Cast to float to match the reference implementation + # tensor has shape [b, s, n_h, h_d // 2, 2] + xshaped = x.float().reshape(*x.shape[:-1], -1, 2) + + # reshape the cache for broadcasting + # tensor has shape [b, s, 1, h_d // 2, 2] if packed samples, + # otherwise has shape [1, s, 1, h_d // 2, 2] + rope_cache = rope_cache.view(-1, xshaped.size(1), 1, xshaped.size(3), 2) + + return rope_cache + + + + + + + +class Embedding(torch.nn.Module): + """Class that wraps around different embedding types. + Creates embedding array and transforms the input data Used for both learned and fixed embeddings. 
""" @@ -112,6 +224,10 @@ def __init__( self._emb_func = self._sine_box_embedding elif self.emb_type == "temp": self._emb_func = self._sine_temp_embedding + + elif self.mode == "rope": + self._emb_func = self._rope_embedding + def _check_init_args(self, emb_type: str, mode: str): """Check whether the correct arguments were passed to initialization. @@ -136,7 +252,40 @@ def _check_init_args(self, emb_type: str, mode: str): f"Embedding `mode` must be one of {self.EMB_MODES} not {mode}" ) - def forward(self, seq_positions: torch.Tensor) -> torch.Tensor: + + def _transform(self, x, emb): + + if emb==self._rope_embedding: + return self._apply_rope(x, emb) + else: + return self._apply_additive_embeddings(x, emb) + + + def _apply_rope(self, x, emb): + + + # tensor has shape [b, s, n_h, h_d // 2, 2] + x_out = torch.stack( + [ + x[..., 0] * emb[..., 0] + - x[..., 1] * emb[..., 1], + x[..., 1] * emb[..., 0] + + x[..., 0] * emb[..., 1], + ], + -1, + ) + # tensor has shape [b, s, n_h, h_d] + x_out = x_out.flatten(3) + + return x_out + + + def _apply_additive_embeddings(self, x, emb): + + return x + emb + + + def forward(self, x, seq_positions: torch.Tensor) -> torch.Tensor: """Get the sequence positional embeddings. Args: @@ -147,7 +296,11 @@ def forward(self, seq_positions: torch.Tensor) -> torch.Tensor: Returns: An `N` x `self.features` tensor representing the corresponding spatial or temporal embedding. """ + # create embedding array (_emb_func selects appropriate callback based on config input) emb = self._emb_func(seq_positions) + + # transform the input data with the embedding + x = self._transform(emb, x) if emb.shape[-1] != self.features: raise RuntimeError( @@ -156,7 +309,7 @@ def forward(self, seq_positions: torch.Tensor) -> torch.Tensor: f"hint: Try turning the MLP on by passing `mlp_cfg` to the constructor to project to the correct embedding dimensions." ) ) - return emb + return x, emb def _torch_int_div( self, tensor1: torch.Tensor, tensor2: torch.Tensor @@ -172,6 +325,18 @@ def _torch_int_div( """ return torch.div(tensor1, tensor2, rounding_mode="floor") + + def _rope_embedding(self, x: torch.Tensor, emb_ids: torch.Tensor) -> torch.Tensor: + + # input must be of shape (num_batches, num_instances, num_attn_heads, d_model) + # use num_heads=1 for compatibility with torch ROPE + x_rope = torch.unsqueeze(x, 2) + rope = RotaryPositionalEmbeddings(self.features) + rot_mat = rope(x_rope) + + return rot_mat + + def _sine_box_embedding(self, boxes: torch.Tensor) -> torch.Tensor: """Compute sine positional embeddings for boxes using given parameters. diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index 4be6db6..13e4529 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -19,6 +19,7 @@ import copy import torch import torch.nn.functional as F +from typing import List # todo: add named tensors # todo: add flash attention @@ -298,21 +299,22 @@ def __init__( self.activation = _get_activation_fn(activation) def forward( - self, queries: torch.Tensor, pos_emb: torch.Tensor = None + self, queries: torch.Tensor, embeddings : List[Embedding] ) -> torch.Tensor: """Execute a forward pass of the encoder layer. Args: - queries: Input sequence for encoder (n_query, batch_size, embed_dim). + queries: Input sequence for encoder (n_query, batch_size, embed_dim); transformed with embedding pos_emb: Position embedding, if provided is added to src Returns: The output tensor of shape (n_query, batch_size, embed_dim). 
""" - if pos_emb is None: - pos_emb = torch.zeros_like(queries) + # TODO: delete this section; keep to check that pos_emb None is taken care of automatically by config +# if pos_emb is None: +# pos_emb = torch.zeros_like(queries) - queries = queries + pos_emb +# queries = queries + pos_emb # q = k = src @@ -471,6 +473,8 @@ def forward( The output tensor of shape (n_query, batch_size, embed_dim). """ for layer in self.layers: + # TODO: add embedding object call + # TODO: add the embedding object into the argument list to the forward() call queries = layer(queries, pos_emb=pos_emb) encoder_features = self.norm(queries) diff --git a/rope.ipynb b/rope.ipynb index 310fa2c..ee920e4 100644 --- a/rope.ipynb +++ b/rope.ipynb @@ -2,38 +2,32 @@ "cells": [ { "cell_type": "code", - "execution_count": 1, + "execution_count": 35, "id": "1bd666a7-0ad1-4ae7-a56e-43429a1228d8", "metadata": { "tags": [] }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/opt/miniconda3/envs/dreem/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n" - ] - } - ], + "outputs": [], "source": [ "import numpy as np\n", "import dreem\n", "import os\n", "import matplotlib.pyplot as plt\n", - "\n", "import math\n", "import torch\n", "import logging\n", "from dreem.models.mlp import MLP\n", "from dreem.models.model_utils import *\n", - "from dreem.datasets import SleapDataset" + "from dreem.datasets import SleapDataset\n", + "import torchtune.modules as tune\n", + "from dreem.models.transformer import TransformerEncoderLayer\n", + "from dreem.models import VisualEncoder\n", + "from dreem.models import GlobalTrackingTransformer" ] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 36, "id": "a8736593-71f7-4ab6-a594-eb52d2fd94ac", "metadata": { "tags": [] @@ -289,7 +283,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 62, "id": "fc8aa9bf-7e83-4fa6-892e-8a7703777f95", "metadata": { "tags": [] @@ -297,12 +291,13 @@ "outputs": [], "source": [ "# create Embedding object\n", - "emb = Embedding(emb_type=\"temp\",mode=\"fixed\",features=1024,emb_num=16,n_points=1,temperature=10000)" + "emb_t = Embedding(emb_type=\"temp\",mode=\"fixed\",features=1024,emb_num=16,n_points=1,temperature=10000)\n", + "emb_p = Embedding(emb_type=\"pos\",mode=\"fixed\",features=1024,emb_num=16,n_points=1,temperature=10000)" ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 39, "id": "4903f6c3-1cc8-412b-b988-ebeb2757c3b7", "metadata": { "tags": [] @@ -310,15 +305,15 @@ "outputs": [], "source": [ "# get sample crops from training data to pass through the network\n", - "# train_path = \"/home/jovyan/talmolab-smb/datasets/mot/microscopy/airyscan_proofread/Final/dreem-train\"\n", - "train_path = \"/Users/mustafashaikh/dreem-data/dreem-train\"\n", + "train_path = \"/home/jovyan/talmolab-smb/datasets/mot/microscopy/airyscan_proofread/Final/dreem-train\"\n", + "# train_path = \"/Users/mustafashaikh/dreem-data/dreem-train\"\n", "data = SleapDataset([os.path.join(train_path,\"10-1.slp\")], [os.path.join(train_path,\"10-1.mp4\")], crop_size=64,\n", " mode=\"train\", clip_length=32, anchors=\"centroid\")" ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 40, "id": "27bfdb50-2eee-4207-8a16-6481a9905e90", "metadata": { "tags": [] @@ -334,7 +329,7 @@ }, { "cell_type": 
"code", - "execution_count": 7, + "execution_count": 41, "id": "8ea441b2-b12a-4f10-8821-aef889a063ba", "metadata": { "tags": [] @@ -351,6 +346,10 @@ "execution_count": 8, "id": "b863dbfe-d9fc-4ed1-bf97-3f304d3d03a6", "metadata": { + "collapsed": true, + "jupyter": { + "outputs_hidden": true + }, "tags": [] }, "outputs": [ @@ -380,45 +379,23 @@ "plt.imshow(emb(ref_times).numpy(), aspect='auto')" ] }, - { - "cell_type": "markdown", - "id": "a972707a-51a7-45ff-987e-80ee0dea4752", - "metadata": {}, - "source": [ - "### Rotary Positional Embeddings" - ] - }, { "cell_type": "code", - "execution_count": 10, - "id": "5d01ca30-c642-4a50-bd5b-802711c4bb16", + "execution_count": 43, + "id": "8b17fdb7", "metadata": { "tags": [] }, "outputs": [], - "source": [ - "import torchtune.modules as tune\n", - "from dreem.models.transformer import TransformerEncoderLayer\n", - "from dreem.models import VisualEncoder\n", - "from dreem.models import GlobalTrackingTransformer" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "8b17fdb7", - "metadata": {}, - "outputs": [], "source": [ "feat_dim = 1024\n", "xfmr_encoder = TransformerEncoderLayer(d_model=feat_dim, nhead=8)\n", - "visual_encoder = VisualEncoder(d_model=feat_dim, model_name=\"resnet18\")\n", - "rope = tune.RotaryPositionalEmbeddings(16,32,10000)" + "visual_encoder = VisualEncoder(d_model=feat_dim, model_name=\"resnet18\")" ] }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 44, "id": "7999fcef-953b-42cf-927c-f3b617f68157", "metadata": { "tags": [] @@ -461,29 +438,129 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 92, + "id": "e299e8a0-61eb-4eee-901c-49aa7e678b3b", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# partial forward pass of the transformer - up until the encoder\n", + "\n", + "def prepare_for_xfmr(ref_instances):\n", + " # extract visual encoder features from instance object; shape=(1,n_instances,d=1024)\n", + " ref_features = torch.cat(\n", + " [instance.features for instance in ref_instances], dim=0\n", + " ).unsqueeze(0)\n", + "\n", + " # window_length = len(frames)\n", + " # instances_per_frame = [frame.num_detected for frame in frames]\n", + " total_instances = len(ref_instances)\n", + " embed_dim = ref_features.shape[-1]\n", + " # print(f'T: {window_length}; N: {total_instances}; N_t: {instances_per_frame} n_reid: {reid_features.shape}')\n", + " ref_boxes = get_boxes(ref_instances) # (n_instances,1,4)\n", + " ref_boxes = torch.nan_to_num(ref_boxes, -1.0)\n", + " ref_times, query_times = get_times(ref_instances, query_instances=None)\n", + "\n", + " # clip length \n", + " window_length = len(ref_times.unique())\n", + "\n", + " # computes the temporal embedding vector for each instance\n", + " ref_temp_emb = emb_t(ref_times)\n", + " # computes the positional embedding vector for each instance\n", + " ref_pos_emb = emb_p(ref_boxes)\n", + "\n", + " return_embedding=False\n", + " if return_embedding:\n", + " for i, instance in enumerate(ref_instances):\n", + " instance.add_embedding(\"pos\", ref_pos_emb[i])\n", + " instance.add_embedding(\"temp\", ref_temp_emb[i])\n", + "\n", + " # we need a single vector so average the temporal and spatial embeddings\n", + " ref_emb = (ref_pos_emb + ref_temp_emb) / 2.0\n", + "\n", + " # add a new dim at the beginning to represent the batch size (in our case 1)\n", + " ref_emb = ref_emb.view(1, total_instances, embed_dim)\n", + "\n", + " ref_emb = ref_emb.permute(1, 0, 2) # (total_instances, batch_size, embed_dim)\n", 
+ "\n", + " batch_size, total_instances, embed_dim = ref_features.shape\n", + "\n", + " ref_features = ref_features.permute(\n", + " 1, 0, 2\n", + " ) # (total_instances, batch_size, embed_dim); note batch_size = 1\n", + "\n", + " return ref_features" + ] + }, + { + "cell_type": "code", + "execution_count": 45, "id": "75ec8cab-25b9-4e9e-a64a-b5dbe00cc81a", - "metadata": {}, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# pass instances through visual encoder to get the feature vector (q,k,v)\n", + "extract_features(ref_instances, visual_encoder)" + ] + }, + { + "cell_type": "code", + "execution_count": 123, + "id": "f0823cf1-2a35-4920-a62e-896bd9dbb078", + "metadata": { + "tags": [] + }, "outputs": [ { - "ename": "AttributeError", - "evalue": "'Tensor' object has no attribute 'has_crop'", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[14], line 2\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;66;03m# pass instances through visual encoder to get the feature vector (q,k,v)\u001b[39;00m\n\u001b[0;32m----> 2\u001b[0m x \u001b[38;5;241m=\u001b[39m \u001b[43mextract_features\u001b[49m\u001b[43m(\u001b[49m\u001b[43mref_times\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvisual_encoder\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 4\u001b[0m \u001b[38;5;66;03m# pass through fixed positional embedding (just to test output)\u001b[39;00m\n", - "Cell \u001b[0;32mIn[12], line 14\u001b[0m, in \u001b[0;36mextract_features\u001b[0;34m(instances, visual_encoder, force_recompute)\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Extract features from instances using visual encoder backbone.\u001b[39;00m\n\u001b[1;32m 7\u001b[0m \n\u001b[1;32m 8\u001b[0m \u001b[38;5;124;03mArgs:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[38;5;124;03m force_recompute: indicate whether to compute features for all instances regardless of if they have instances\u001b[39;00m\n\u001b[1;32m 12\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 13\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m force_recompute:\n\u001b[0;32m---> 14\u001b[0m instances_to_compute \u001b[38;5;241m=\u001b[39m \u001b[43m[\u001b[49m\n\u001b[1;32m 15\u001b[0m \u001b[43m \u001b[49m\u001b[43minstance\u001b[49m\n\u001b[1;32m 16\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43minstance\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43minstances\u001b[49m\n\u001b[1;32m 17\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43minstance\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mhas_crop\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01mand\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;129;43;01mnot\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43minstance\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mhas_features\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 18\u001b[0m \u001b[43m \u001b[49m\u001b[43m]\u001b[49m\n\u001b[1;32m 19\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 20\u001b[0m instances_to_compute \u001b[38;5;241m=\u001b[39m instances\n", - "Cell \u001b[0;32mIn[12], line 17\u001b[0m, 
in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Extract features from instances using visual encoder backbone.\u001b[39;00m\n\u001b[1;32m 7\u001b[0m \n\u001b[1;32m 8\u001b[0m \u001b[38;5;124;03mArgs:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[38;5;124;03m force_recompute: indicate whether to compute features for all instances regardless of if they have instances\u001b[39;00m\n\u001b[1;32m 12\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 13\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m force_recompute:\n\u001b[1;32m 14\u001b[0m instances_to_compute \u001b[38;5;241m=\u001b[39m [\n\u001b[1;32m 15\u001b[0m instance\n\u001b[1;32m 16\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m instance \u001b[38;5;129;01min\u001b[39;00m instances\n\u001b[0;32m---> 17\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[43minstance\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mhas_crop\u001b[49m() \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m instance\u001b[38;5;241m.\u001b[39mhas_features()\n\u001b[1;32m 18\u001b[0m ]\n\u001b[1;32m 19\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 20\u001b[0m instances_to_compute \u001b[38;5;241m=\u001b[39m instances\n", - "\u001b[0;31mAttributeError\u001b[0m: 'Tensor' object has no attribute 'has_crop'" - ] + "data": { + "text/plain": [ + "torch.Size([1, 491, 1, 1024])" + ] + }, + "execution_count": 123, + "metadata": {}, + "output_type": "execute_result" } ], "source": [ - "# pass instances through visual encoder to get the feature vector (q,k,v)\n", - "x = extract_features(ref_instances, visual_encoder)\n", - "\n", - "# pass through fixed positional embedding (just to test output)" + "# prepare data and apply rope\n", + "rope = tune.RotaryPositionalEmbeddings(feat_dim)\n", + "\n", + "ref_features = torch.cat(\n", + " [instance.features for instance in ref_instances], dim=0\n", + " ).unsqueeze(0)\n", + "\n", + "# input must be of shape (num_batches, num_instances, num_attn_heads, d_model)\n", + "# use num_heads=1 to use torch ROPE; we pass this into torch multiheadattn later which doesn't \n", + "# use num_heads in the input data\n", + "ref_features = torch.unsqueeze(ref_features, 2)\n", + "rope_ref_feat = rope(ref_features)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 93, + "id": "48894fba-2ffc-4f5a-aceb-26b711b7b51f", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "encoder_queries = prepare_for_xfmr(ref_instances)\n", + "encoder_features = xfmr_encoder(\n", + " encoder_queries, pos_emb=ref_emb\n", + ") # (total_instances, batch_size, embed_dim)" + ] + }, + { + "cell_type": "markdown", + "id": "a972707a-51a7-45ff-987e-80ee0dea4752", + "metadata": {}, + "source": [ + "### Rotary Positional Embeddings" ] } ], @@ -491,7 +568,7 @@ "kernelspec": { "display_name": "dreem", "language": "python", - "name": "python3" + "name": "dreem" }, "language_info": { "codemirror_mode": { From a07ea57d8e395a30532f43857296c512e0adc619 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Sun, 4 Aug 2024 22:22:11 -0700 Subject: [PATCH 04/63] minor changes - add batch job file to repo --- dreem/models/embedding.py | 7 +------ rope.ipynb | 18 ++++++++-------- run_batch_job.py | 44 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 54 insertions(+), 15 deletions(-) create mode 100644 run_batch_job.py diff --git a/dreem/models/embedding.py b/dreem/models/embedding.py index 
c5b9ce3..23190ad 100644 --- a/dreem/models/embedding.py +++ b/dreem/models/embedding.py @@ -92,8 +92,6 @@ def forward(self, x: Tensor, *, input_pos: Optional[Tensor] = None) -> Tensor: - n_h: num heads - h_d: head dim - TODO: The implementation below can be made more efficient - for inference. """ # input tensor has shape [b, s, n_h, h_d] seq_len = x.size(1) @@ -116,10 +114,7 @@ def forward(self, x: Tensor, *, input_pos: Optional[Tensor] = None) -> Tensor: return rope_cache - - - - + class Embedding(torch.nn.Module): """Class that wraps around different embedding types. diff --git a/rope.ipynb b/rope.ipynb index ee920e4..d5a3120 100644 --- a/rope.ipynb +++ b/rope.ipynb @@ -502,10 +502,18 @@ }, "outputs": [], "source": [ - "# pass instances through visual encoder to get the feature vector (q,k,v)\n", + "# pass instances through visual encoder to get the feature vector (q,k,v); modifies the feature attribute of each Instance in ref_instances\n", "extract_features(ref_instances, visual_encoder)" ] }, + { + "cell_type": "markdown", + "id": "a972707a-51a7-45ff-987e-80ee0dea4752", + "metadata": {}, + "source": [ + "### Rotary Positional Embeddings" + ] + }, { "cell_type": "code", "execution_count": 123, @@ -554,14 +562,6 @@ " encoder_queries, pos_emb=ref_emb\n", ") # (total_instances, batch_size, embed_dim)" ] - }, - { - "cell_type": "markdown", - "id": "a972707a-51a7-45ff-987e-80ee0dea4752", - "metadata": {}, - "source": [ - "### Rotary Positional Embeddings" - ] } ], "metadata": { diff --git a/run_batch_job.py b/run_batch_job.py new file mode 100644 index 0000000..52345da --- /dev/null +++ b/run_batch_job.py @@ -0,0 +1,44 @@ +import os +import subprocess as sp + +gpu = "0.1" +job_name = "mustafa-test-batch-job" + +base = "/home/runner/talmodata-smb/aadi/biogtr_expts/run/animal/eight_flies" #where to run the job from +dreem_repo = base.replace("biogtr_expts/run/animal/eight_flies", "dreem") #where the dreem repo is stored + +config_dir=os.path.join(base, "configs") #where to find the configs +config_name= "base" #base config name +params_cfg = os.path.join(config_dir, "sample_efficiency.yaml") #override config +# if running just 1 job, comment this line out and delete the ++batch_config command in the command below +task_csv = os.path.join(config_dir, "sample_efficiency.csv") # csv for tasks - each pod is a task + +pods = 1 # total number of tasks for job to run; should be number of rows in csv file +par = 1 # number of tasks that can be run in parallel - max. 
= # of pods + +cmd = [ + "runai", + "submit", + "--gpu", + gpu, + "--name", + job_name, + "--preemptible", + "-i", + "asheridan/biogtr", + "-v", + "/data/talmolab-smb:/home/runner/talmodata-smb", + "-e", + f"RUNNER_CMD=cp -r {dreem_repo} ~ && mamba env create -n dreem -f ~/dreem/environment.yml && export WANDB_API_KEY=6cc5012a6ecfb9cd970bd07686dbfcefd3190a04 && cd {base} && conda run -n dreem dreem-train --config-dir={config_dir} --config-name={config_name} ++params_config={params_cfg} ++batch_config={task_csv}", + "--parallelism", + str(par), + "--completions", + str(pods), +] + +print(f"base directory: {base}") +print(f"running with {pods} pods") +print(f"max pods that can run concurrently: {par}") +print(f"runner command: {cmd}") + +sp.run(cmd) \ No newline at end of file From 6d135fbce990a0b7e8eeeed3f5bb47b39e84e148 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Mon, 5 Aug 2024 14:34:35 -0700 Subject: [PATCH 05/63] add local train run script, minor changes --- dreem/training/configs/base.yaml | 16 +++--- rope.ipynb | 97 ++++++++++++++++---------------- run_trainer.py | 10 ++++ 3 files changed, 68 insertions(+), 55 deletions(-) create mode 100644 run_trainer.py diff --git a/dreem/training/configs/base.yaml b/dreem/training/configs/base.yaml index 7779cd1..f9ed413 100644 --- a/dreem/training/configs/base.yaml +++ b/dreem/training/configs/base.yaml @@ -66,24 +66,24 @@ runner: dataset: train_dataset: - slp_files: ["../../tests/data/sleap/two_flies.slp"] - video_files: ["../../tests/data/sleap/two_flies.mp4"] + slp_files: ["tests/data/sleap/two_flies.slp"] + video_files: ["tests/data/sleap/two_flies.mp4"] padding: 5 crop_size: 128 chunk: true clip_length: 32 val_dataset: - slp_files: ["../../tests/data/sleap/two_flies.slp"] - video_files: ["../../tests/data/sleap/two_flies.mp4"] + slp_files: ["tests/data/sleap/two_flies.slp"] + video_files: ["tests/data/sleap/two_flies.mp4"] padding: 5 crop_size: 128 chunk: True clip_length: 32 test_dataset: - slp_files: ["../../tests/data/sleap/two_flies.slp"] - video_files: ["../../tests/data/sleap/two_flies.mp4"] + slp_files: ["tests/data/sleap/two_flies.slp"] + video_files: ["tests/data/sleap/two_flies.mp4"] padding: 5 crop_size: 128 chunk: True @@ -137,8 +137,8 @@ trainer: limit_test_batches: 1.0 limit_val_batches: 1.0 log_every_n_steps: 1 - max_epochs: 100 - min_epochs: 10 + max_epochs: 1 + min_epochs: 1 view_batch: enable: False diff --git a/rope.ipynb b/rope.ipynb index d5a3120..8213213 100644 --- a/rope.ipynb +++ b/rope.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 35, + "execution_count": 4, "id": "1bd666a7-0ad1-4ae7-a56e-43429a1228d8", "metadata": { "tags": [] @@ -19,15 +19,14 @@ "from dreem.models.mlp import MLP\n", "from dreem.models.model_utils import *\n", "from dreem.datasets import SleapDataset\n", - "import torchtune.modules as tune\n", - "from dreem.models.transformer import TransformerEncoderLayer\n", + "from dreem.models.transformer import *\n", "from dreem.models import VisualEncoder\n", "from dreem.models import GlobalTrackingTransformer" ] }, { "cell_type": "code", - "execution_count": 36, + "execution_count": 5, "id": "a8736593-71f7-4ab6-a594-eb52d2fd94ac", "metadata": { "tags": [] @@ -283,7 +282,7 @@ }, { "cell_type": "code", - "execution_count": 62, + "execution_count": 6, "id": "fc8aa9bf-7e83-4fa6-892e-8a7703777f95", "metadata": { "tags": [] @@ -297,12 +296,35 @@ }, { "cell_type": "code", - "execution_count": 39, + "execution_count": 7, "id": "4903f6c3-1cc8-412b-b988-ebeb2757c3b7", "metadata": { 
"tags": [] }, - "outputs": [], + "outputs": [ + { + "ename": "FileNotFoundError", + "evalue": "[Errno 2] Unable to open file (unable to open file: name = '/home/jovyan/talmolab-smb/datasets/mot/microscopy/airyscan_proofread/Final/dreem-train/10-1.slp', errno = 2, error message = 'No such file or directory', flags = 0, o_flags = 0)", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[7], line 4\u001b[0m\n\u001b[1;32m 2\u001b[0m train_path \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m/home/jovyan/talmolab-smb/datasets/mot/microscopy/airyscan_proofread/Final/dreem-train\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 3\u001b[0m \u001b[38;5;66;03m# train_path = \"/Users/mustafashaikh/dreem-data/dreem-train\"\u001b[39;00m\n\u001b[0;32m----> 4\u001b[0m data \u001b[38;5;241m=\u001b[39m \u001b[43mSleapDataset\u001b[49m\u001b[43m(\u001b[49m\u001b[43m[\u001b[49m\u001b[43mos\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpath\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mjoin\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtrain_path\u001b[49m\u001b[43m,\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m10-1.slp\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m[\u001b[49m\u001b[43mos\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpath\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mjoin\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtrain_path\u001b[49m\u001b[43m,\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m10-1.mp4\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcrop_size\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m64\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 5\u001b[0m \u001b[43m \u001b[49m\u001b[43mmode\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtrain\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mclip_length\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m32\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43manchors\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcentroid\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/Documents/GitHub/dreem/dreem/datasets/sleap_dataset.py:108\u001b[0m, in \u001b[0;36mSleapDataset.__init__\u001b[0;34m(self, slp_files, video_files, padding, crop_size, anchors, chunk, clip_length, mode, handle_missing, augmentations, n_chunks, seed, verbose)\u001b[0m\n\u001b[1;32m 104\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mverbose \u001b[38;5;241m=\u001b[39m verbose\n\u001b[1;32m 106\u001b[0m \u001b[38;5;66;03m# if self.seed is not None:\u001b[39;00m\n\u001b[1;32m 107\u001b[0m \u001b[38;5;66;03m# np.random.seed(self.seed)\u001b[39;00m\n\u001b[0;32m--> 108\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlabels \u001b[38;5;241m=\u001b[39m \u001b[43m[\u001b[49m\u001b[43msio\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload_slp\u001b[49m\u001b[43m(\u001b[49m\u001b[43mslp_file\u001b[49m\u001b[43m)\u001b[49m\u001b[43m 
\u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mslp_file\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mslp_files\u001b[49m\u001b[43m]\u001b[49m\n\u001b[1;32m 109\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mvideos \u001b[38;5;241m=\u001b[39m [imageio\u001b[38;5;241m.\u001b[39mget_reader(vid_file) \u001b[38;5;28;01mfor\u001b[39;00m vid_file \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mvid_files]\n\u001b[1;32m 110\u001b[0m \u001b[38;5;66;03m# do we need this? would need to update with sleap-io\u001b[39;00m\n\u001b[1;32m 111\u001b[0m \n\u001b[1;32m 112\u001b[0m \u001b[38;5;66;03m# for label in self.labels:\u001b[39;00m\n\u001b[1;32m 113\u001b[0m \u001b[38;5;66;03m# label.remove_empty_instances(keep_empty_frames=False)\u001b[39;00m\n", + "File \u001b[0;32m~/Documents/GitHub/dreem/dreem/datasets/sleap_dataset.py:108\u001b[0m, in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 104\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mverbose \u001b[38;5;241m=\u001b[39m verbose\n\u001b[1;32m 106\u001b[0m \u001b[38;5;66;03m# if self.seed is not None:\u001b[39;00m\n\u001b[1;32m 107\u001b[0m \u001b[38;5;66;03m# np.random.seed(self.seed)\u001b[39;00m\n\u001b[0;32m--> 108\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlabels \u001b[38;5;241m=\u001b[39m [\u001b[43msio\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload_slp\u001b[49m\u001b[43m(\u001b[49m\u001b[43mslp_file\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mfor\u001b[39;00m slp_file \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mslp_files]\n\u001b[1;32m 109\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mvideos \u001b[38;5;241m=\u001b[39m [imageio\u001b[38;5;241m.\u001b[39mget_reader(vid_file) \u001b[38;5;28;01mfor\u001b[39;00m vid_file \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mvid_files]\n\u001b[1;32m 110\u001b[0m \u001b[38;5;66;03m# do we need this? 
would need to update with sleap-io\u001b[39;00m\n\u001b[1;32m 111\u001b[0m \n\u001b[1;32m 112\u001b[0m \u001b[38;5;66;03m# for label in self.labels:\u001b[39;00m\n\u001b[1;32m 113\u001b[0m \u001b[38;5;66;03m# label.remove_empty_instances(keep_empty_frames=False)\u001b[39;00m\n", + "File \u001b[0;32m~/miniforge3/envs/dreem/lib/python3.11/site-packages/sleap_io/io/main.py:19\u001b[0m, in \u001b[0;36mload_slp\u001b[0;34m(filename)\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mload_slp\u001b[39m(filename: \u001b[38;5;28mstr\u001b[39m) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Labels:\n\u001b[1;32m 11\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Load a SLEAP dataset.\u001b[39;00m\n\u001b[1;32m 12\u001b[0m \n\u001b[1;32m 13\u001b[0m \u001b[38;5;124;03m Args:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 17\u001b[0m \u001b[38;5;124;03m The dataset as a `Labels` object.\u001b[39;00m\n\u001b[1;32m 18\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m---> 19\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mslp\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mread_labels\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfilename\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/miniforge3/envs/dreem/lib/python3.11/site-packages/sleap_io/io/slp.py:1011\u001b[0m, in \u001b[0;36mread_labels\u001b[0;34m(labels_path)\u001b[0m\n\u001b[1;32m 1002\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mread_labels\u001b[39m(labels_path: \u001b[38;5;28mstr\u001b[39m) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Labels:\n\u001b[1;32m 1003\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Read a SLEAP labels file.\u001b[39;00m\n\u001b[1;32m 1004\u001b[0m \n\u001b[1;32m 1005\u001b[0m \u001b[38;5;124;03m Args:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 1009\u001b[0m \u001b[38;5;124;03m The processed `Labels` object.\u001b[39;00m\n\u001b[1;32m 1010\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m-> 1011\u001b[0m tracks \u001b[38;5;241m=\u001b[39m \u001b[43mread_tracks\u001b[49m\u001b[43m(\u001b[49m\u001b[43mlabels_path\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1012\u001b[0m videos \u001b[38;5;241m=\u001b[39m read_videos(labels_path)\n\u001b[1;32m 1013\u001b[0m skeletons \u001b[38;5;241m=\u001b[39m read_skeletons(labels_path)\n", + "File \u001b[0;32m~/miniforge3/envs/dreem/lib/python3.11/site-packages/sleap_io/io/slp.py:448\u001b[0m, in \u001b[0;36mread_tracks\u001b[0;34m(labels_path)\u001b[0m\n\u001b[1;32m 439\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mread_tracks\u001b[39m(labels_path: \u001b[38;5;28mstr\u001b[39m) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m \u001b[38;5;28mlist\u001b[39m[Track]:\n\u001b[1;32m 440\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Read `Track` dataset in a SLEAP labels file.\u001b[39;00m\n\u001b[1;32m 441\u001b[0m \n\u001b[1;32m 442\u001b[0m \u001b[38;5;124;03m Args:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 446\u001b[0m \u001b[38;5;124;03m A list of `Track` objects.\u001b[39;00m\n\u001b[1;32m 447\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m--> 448\u001b[0m tracks \u001b[38;5;241m=\u001b[39m [json\u001b[38;5;241m.\u001b[39mloads(x) \u001b[38;5;28;01mfor\u001b[39;00m x \u001b[38;5;129;01min\u001b[39;00m \u001b[43mread_hdf5_dataset\u001b[49m\u001b[43m(\u001b[49m\u001b[43mlabels_path\u001b[49m\u001b[43m,\u001b[49m\u001b[43m 
\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtracks_json\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m]\n\u001b[1;32m 449\u001b[0m track_objects \u001b[38;5;241m=\u001b[39m []\n\u001b[1;32m 450\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m track \u001b[38;5;129;01min\u001b[39;00m tracks:\n", + "File \u001b[0;32m~/miniforge3/envs/dreem/lib/python3.11/site-packages/sleap_io/io/utils.py:21\u001b[0m, in \u001b[0;36mread_hdf5_dataset\u001b[0;34m(filename, dataset)\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mread_hdf5_dataset\u001b[39m(filename: \u001b[38;5;28mstr\u001b[39m, dataset: \u001b[38;5;28mstr\u001b[39m) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m np\u001b[38;5;241m.\u001b[39mndarray:\n\u001b[1;32m 12\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Read data from an HDF5 file.\u001b[39;00m\n\u001b[1;32m 13\u001b[0m \n\u001b[1;32m 14\u001b[0m \u001b[38;5;124;03m Args:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 19\u001b[0m \u001b[38;5;124;03m The data as an array.\u001b[39;00m\n\u001b[1;32m 20\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m---> 21\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[43mh5py\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mFile\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfilename\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mr\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mas\u001b[39;00m f:\n\u001b[1;32m 22\u001b[0m data \u001b[38;5;241m=\u001b[39m f[dataset][()]\n\u001b[1;32m 23\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m data\n", + "File \u001b[0;32m~/miniforge3/envs/dreem/lib/python3.11/site-packages/h5py/_hl/files.py:562\u001b[0m, in \u001b[0;36mFile.__init__\u001b[0;34m(self, name, mode, driver, libver, userblock_size, swmr, rdcc_nslots, rdcc_nbytes, rdcc_w0, track_order, fs_strategy, fs_persist, fs_threshold, fs_page_size, page_buf_size, min_meta_keep, min_raw_keep, locking, alignment_threshold, alignment_interval, meta_block_size, **kwds)\u001b[0m\n\u001b[1;32m 553\u001b[0m fapl \u001b[38;5;241m=\u001b[39m make_fapl(driver, libver, rdcc_nslots, rdcc_nbytes, rdcc_w0,\n\u001b[1;32m 554\u001b[0m locking, page_buf_size, min_meta_keep, min_raw_keep,\n\u001b[1;32m 555\u001b[0m alignment_threshold\u001b[38;5;241m=\u001b[39malignment_threshold,\n\u001b[1;32m 556\u001b[0m alignment_interval\u001b[38;5;241m=\u001b[39malignment_interval,\n\u001b[1;32m 557\u001b[0m meta_block_size\u001b[38;5;241m=\u001b[39mmeta_block_size,\n\u001b[1;32m 558\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwds)\n\u001b[1;32m 559\u001b[0m fcpl \u001b[38;5;241m=\u001b[39m make_fcpl(track_order\u001b[38;5;241m=\u001b[39mtrack_order, fs_strategy\u001b[38;5;241m=\u001b[39mfs_strategy,\n\u001b[1;32m 560\u001b[0m fs_persist\u001b[38;5;241m=\u001b[39mfs_persist, fs_threshold\u001b[38;5;241m=\u001b[39mfs_threshold,\n\u001b[1;32m 561\u001b[0m fs_page_size\u001b[38;5;241m=\u001b[39mfs_page_size)\n\u001b[0;32m--> 562\u001b[0m fid \u001b[38;5;241m=\u001b[39m \u001b[43mmake_fid\u001b[49m\u001b[43m(\u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmode\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43muserblock_size\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfapl\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfcpl\u001b[49m\u001b[43m,\u001b[49m\u001b[43m 
\u001b[49m\u001b[43mswmr\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mswmr\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 564\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(libver, \u001b[38;5;28mtuple\u001b[39m):\n\u001b[1;32m 565\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_libver \u001b[38;5;241m=\u001b[39m libver\n", + "File \u001b[0;32m~/miniforge3/envs/dreem/lib/python3.11/site-packages/h5py/_hl/files.py:235\u001b[0m, in \u001b[0;36mmake_fid\u001b[0;34m(name, mode, userblock_size, fapl, fcpl, swmr)\u001b[0m\n\u001b[1;32m 233\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m swmr \u001b[38;5;129;01mand\u001b[39;00m swmr_support:\n\u001b[1;32m 234\u001b[0m flags \u001b[38;5;241m|\u001b[39m\u001b[38;5;241m=\u001b[39m h5f\u001b[38;5;241m.\u001b[39mACC_SWMR_READ\n\u001b[0;32m--> 235\u001b[0m fid \u001b[38;5;241m=\u001b[39m \u001b[43mh5f\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mopen\u001b[49m\u001b[43m(\u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mflags\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfapl\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mfapl\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 236\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m mode \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mr+\u001b[39m\u001b[38;5;124m'\u001b[39m:\n\u001b[1;32m 237\u001b[0m fid \u001b[38;5;241m=\u001b[39m h5f\u001b[38;5;241m.\u001b[39mopen(name, h5f\u001b[38;5;241m.\u001b[39mACC_RDWR, fapl\u001b[38;5;241m=\u001b[39mfapl)\n", + "File \u001b[0;32mh5py/_objects.pyx:54\u001b[0m, in \u001b[0;36mh5py._objects.with_phil.wrapper\u001b[0;34m()\u001b[0m\n", + "File \u001b[0;32mh5py/_objects.pyx:55\u001b[0m, in \u001b[0;36mh5py._objects.with_phil.wrapper\u001b[0;34m()\u001b[0m\n", + "File \u001b[0;32mh5py/h5f.pyx:102\u001b[0m, in \u001b[0;36mh5py.h5f.open\u001b[0;34m()\u001b[0m\n", + "\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] Unable to open file (unable to open file: name = '/home/jovyan/talmolab-smb/datasets/mot/microscopy/airyscan_proofread/Final/dreem-train/10-1.slp', errno = 2, error message = 'No such file or directory', flags = 0, o_flags = 0)" + ] + } + ], "source": [ "# get sample crops from training data to pass through the network\n", "train_path = \"/home/jovyan/talmolab-smb/datasets/mot/microscopy/airyscan_proofread/Final/dreem-train\"\n", @@ -313,7 +335,7 @@ }, { "cell_type": "code", - "execution_count": 40, + "execution_count": null, "id": "27bfdb50-2eee-4207-8a16-6481a9905e90", "metadata": { "tags": [] @@ -329,7 +351,7 @@ }, { "cell_type": "code", - "execution_count": 41, + "execution_count": null, "id": "8ea441b2-b12a-4f10-8821-aef889a063ba", "metadata": { "tags": [] @@ -343,7 +365,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "id": "b863dbfe-d9fc-4ed1-bf97-3f304d3d03a6", "metadata": { "collapsed": true, @@ -381,7 +403,7 @@ }, { "cell_type": "code", - "execution_count": 43, + "execution_count": null, "id": "8b17fdb7", "metadata": { "tags": [] @@ -395,7 +417,7 @@ }, { "cell_type": "code", - "execution_count": 44, + "execution_count": null, "id": "7999fcef-953b-42cf-927c-f3b617f68157", "metadata": { "tags": [] @@ -438,7 +460,7 @@ }, { "cell_type": "code", - "execution_count": 92, + "execution_count": null, "id": "e299e8a0-61eb-4eee-901c-49aa7e678b3b", "metadata": { "tags": [] @@ -495,7 +517,7 @@ }, { "cell_type": "code", - "execution_count": 45, + "execution_count": null, "id": 
"75ec8cab-25b9-4e9e-a64a-b5dbe00cc81a", "metadata": { "tags": [] @@ -516,51 +538,32 @@ }, { "cell_type": "code", - "execution_count": 123, + "execution_count": null, "id": "f0823cf1-2a35-4920-a62e-896bd9dbb078", "metadata": { "tags": [] }, "outputs": [ { - "data": { - "text/plain": [ - "torch.Size([1, 491, 1, 1024])" - ] - }, - "execution_count": 123, - "metadata": {}, - "output_type": "execute_result" + "ename": "NameError", + "evalue": "name 'ref_instances' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[3], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;66;03m# input data for transformer\u001b[39;00m\n\u001b[1;32m 2\u001b[0m ref_features \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mcat(\n\u001b[0;32m----> 3\u001b[0m [instance\u001b[38;5;241m.\u001b[39mfeatures \u001b[38;5;28;01mfor\u001b[39;00m instance \u001b[38;5;129;01min\u001b[39;00m \u001b[43mref_instances\u001b[49m], dim\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0\u001b[39m\n\u001b[1;32m 4\u001b[0m )\u001b[38;5;241m.\u001b[39munsqueeze(\u001b[38;5;241m0\u001b[39m)\n\u001b[1;32m 6\u001b[0m \u001b[38;5;66;03m# create transformer instance to test embeddings \u001b[39;00m\n\u001b[1;32m 7\u001b[0m tfmr \u001b[38;5;241m=\u001b[39m Transformer()\n", + "\u001b[0;31mNameError\u001b[0m: name 'ref_instances' is not defined" + ] } ], "source": [ - "# prepare data and apply rope\n", - "rope = tune.RotaryPositionalEmbeddings(feat_dim)\n", - "\n", + "# input data for transformer\n", "ref_features = torch.cat(\n", " [instance.features for instance in ref_instances], dim=0\n", " ).unsqueeze(0)\n", "\n", - "# input must be of shape (num_batches, num_instances, num_attn_heads, d_model)\n", - "# use num_heads=1 to use torch ROPE; we pass this into torch multiheadattn later which doesn't \n", - "# use num_heads in the input data\n", - "ref_features = torch.unsqueeze(ref_features, 2)\n", - "rope_ref_feat = rope(ref_features)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 93, - "id": "48894fba-2ffc-4f5a-aceb-26b711b7b51f", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "encoder_queries = prepare_for_xfmr(ref_instances)\n", - "encoder_features = xfmr_encoder(\n", - " encoder_queries, pos_emb=ref_emb\n", - ") # (total_instances, batch_size, embed_dim)" + "# create transformer instance to test embeddings \n", + "tfmr = Transformer()\n" ] } ], @@ -568,7 +571,7 @@ "kernelspec": { "display_name": "dreem", "language": "python", - "name": "dreem" + "name": "python3" }, "language_info": { "codemirror_mode": { diff --git a/run_trainer.py b/run_trainer.py new file mode 100644 index 0000000..c69d93f --- /dev/null +++ b/run_trainer.py @@ -0,0 +1,10 @@ +from dreem.training import train +from omegaconf import OmegaConf + +base_config = "dreem/training/configs/base.yaml" +# params_config = "/path/to/override.yaml" + +cfg = OmegaConf.load(base_config) +# cfg["params_config"] = params_config + +train.run(cfg) \ No newline at end of file From 4714aea753a920964b0fae5919fa2e5e03760371 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Mon, 5 Aug 2024 15:28:39 -0700 Subject: [PATCH 06/63] Update rope.ipynb --- rope.ipynb | 127 ++++++++++++++++++++++++++++------------------------- 1 file changed, 67 insertions(+), 60 deletions(-) diff --git a/rope.ipynb b/rope.ipynb index 8213213..593439b 100644 --- a/rope.ipynb +++ 
b/rope.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 4, + "execution_count": 156, "id": "1bd666a7-0ad1-4ae7-a56e-43429a1228d8", "metadata": { "tags": [] @@ -21,12 +21,13 @@ "from dreem.datasets import SleapDataset\n", "from dreem.models.transformer import *\n", "from dreem.models import VisualEncoder\n", - "from dreem.models import GlobalTrackingTransformer" + "from dreem.models import GlobalTrackingTransformer\n", + "from dreem.models.gtr_runner import GTRRunner" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 130, "id": "a8736593-71f7-4ab6-a594-eb52d2fd94ac", "metadata": { "tags": [] @@ -282,7 +283,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 131, "id": "fc8aa9bf-7e83-4fa6-892e-8a7703777f95", "metadata": { "tags": [] @@ -296,35 +297,12 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 132, "id": "4903f6c3-1cc8-412b-b988-ebeb2757c3b7", "metadata": { "tags": [] }, - "outputs": [ - { - "ename": "FileNotFoundError", - "evalue": "[Errno 2] Unable to open file (unable to open file: name = '/home/jovyan/talmolab-smb/datasets/mot/microscopy/airyscan_proofread/Final/dreem-train/10-1.slp', errno = 2, error message = 'No such file or directory', flags = 0, o_flags = 0)", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[7], line 4\u001b[0m\n\u001b[1;32m 2\u001b[0m train_path \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m/home/jovyan/talmolab-smb/datasets/mot/microscopy/airyscan_proofread/Final/dreem-train\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 3\u001b[0m \u001b[38;5;66;03m# train_path = \"/Users/mustafashaikh/dreem-data/dreem-train\"\u001b[39;00m\n\u001b[0;32m----> 4\u001b[0m data \u001b[38;5;241m=\u001b[39m \u001b[43mSleapDataset\u001b[49m\u001b[43m(\u001b[49m\u001b[43m[\u001b[49m\u001b[43mos\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpath\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mjoin\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtrain_path\u001b[49m\u001b[43m,\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m10-1.slp\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m[\u001b[49m\u001b[43mos\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpath\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mjoin\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtrain_path\u001b[49m\u001b[43m,\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m10-1.mp4\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcrop_size\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m64\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 5\u001b[0m \u001b[43m \u001b[49m\u001b[43mmode\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtrain\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mclip_length\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m32\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m 
\u001b[49m\u001b[43manchors\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcentroid\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/Documents/GitHub/dreem/dreem/datasets/sleap_dataset.py:108\u001b[0m, in \u001b[0;36mSleapDataset.__init__\u001b[0;34m(self, slp_files, video_files, padding, crop_size, anchors, chunk, clip_length, mode, handle_missing, augmentations, n_chunks, seed, verbose)\u001b[0m\n\u001b[1;32m 104\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mverbose \u001b[38;5;241m=\u001b[39m verbose\n\u001b[1;32m 106\u001b[0m \u001b[38;5;66;03m# if self.seed is not None:\u001b[39;00m\n\u001b[1;32m 107\u001b[0m \u001b[38;5;66;03m# np.random.seed(self.seed)\u001b[39;00m\n\u001b[0;32m--> 108\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlabels \u001b[38;5;241m=\u001b[39m \u001b[43m[\u001b[49m\u001b[43msio\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload_slp\u001b[49m\u001b[43m(\u001b[49m\u001b[43mslp_file\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mslp_file\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mslp_files\u001b[49m\u001b[43m]\u001b[49m\n\u001b[1;32m 109\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mvideos \u001b[38;5;241m=\u001b[39m [imageio\u001b[38;5;241m.\u001b[39mget_reader(vid_file) \u001b[38;5;28;01mfor\u001b[39;00m vid_file \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mvid_files]\n\u001b[1;32m 110\u001b[0m \u001b[38;5;66;03m# do we need this? would need to update with sleap-io\u001b[39;00m\n\u001b[1;32m 111\u001b[0m \n\u001b[1;32m 112\u001b[0m \u001b[38;5;66;03m# for label in self.labels:\u001b[39;00m\n\u001b[1;32m 113\u001b[0m \u001b[38;5;66;03m# label.remove_empty_instances(keep_empty_frames=False)\u001b[39;00m\n", - "File \u001b[0;32m~/Documents/GitHub/dreem/dreem/datasets/sleap_dataset.py:108\u001b[0m, in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 104\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mverbose \u001b[38;5;241m=\u001b[39m verbose\n\u001b[1;32m 106\u001b[0m \u001b[38;5;66;03m# if self.seed is not None:\u001b[39;00m\n\u001b[1;32m 107\u001b[0m \u001b[38;5;66;03m# np.random.seed(self.seed)\u001b[39;00m\n\u001b[0;32m--> 108\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlabels \u001b[38;5;241m=\u001b[39m [\u001b[43msio\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload_slp\u001b[49m\u001b[43m(\u001b[49m\u001b[43mslp_file\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mfor\u001b[39;00m slp_file \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mslp_files]\n\u001b[1;32m 109\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mvideos \u001b[38;5;241m=\u001b[39m [imageio\u001b[38;5;241m.\u001b[39mget_reader(vid_file) \u001b[38;5;28;01mfor\u001b[39;00m vid_file \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mvid_files]\n\u001b[1;32m 110\u001b[0m \u001b[38;5;66;03m# do we need this? 
would need to update with sleap-io\u001b[39;00m\n\u001b[1;32m 111\u001b[0m \n\u001b[1;32m 112\u001b[0m \u001b[38;5;66;03m# for label in self.labels:\u001b[39;00m\n\u001b[1;32m 113\u001b[0m \u001b[38;5;66;03m# label.remove_empty_instances(keep_empty_frames=False)\u001b[39;00m\n", - "File \u001b[0;32m~/miniforge3/envs/dreem/lib/python3.11/site-packages/sleap_io/io/main.py:19\u001b[0m, in \u001b[0;36mload_slp\u001b[0;34m(filename)\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mload_slp\u001b[39m(filename: \u001b[38;5;28mstr\u001b[39m) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Labels:\n\u001b[1;32m 11\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Load a SLEAP dataset.\u001b[39;00m\n\u001b[1;32m 12\u001b[0m \n\u001b[1;32m 13\u001b[0m \u001b[38;5;124;03m Args:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 17\u001b[0m \u001b[38;5;124;03m The dataset as a `Labels` object.\u001b[39;00m\n\u001b[1;32m 18\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m---> 19\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mslp\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mread_labels\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfilename\u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/miniforge3/envs/dreem/lib/python3.11/site-packages/sleap_io/io/slp.py:1011\u001b[0m, in \u001b[0;36mread_labels\u001b[0;34m(labels_path)\u001b[0m\n\u001b[1;32m 1002\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mread_labels\u001b[39m(labels_path: \u001b[38;5;28mstr\u001b[39m) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Labels:\n\u001b[1;32m 1003\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Read a SLEAP labels file.\u001b[39;00m\n\u001b[1;32m 1004\u001b[0m \n\u001b[1;32m 1005\u001b[0m \u001b[38;5;124;03m Args:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 1009\u001b[0m \u001b[38;5;124;03m The processed `Labels` object.\u001b[39;00m\n\u001b[1;32m 1010\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m-> 1011\u001b[0m tracks \u001b[38;5;241m=\u001b[39m \u001b[43mread_tracks\u001b[49m\u001b[43m(\u001b[49m\u001b[43mlabels_path\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1012\u001b[0m videos \u001b[38;5;241m=\u001b[39m read_videos(labels_path)\n\u001b[1;32m 1013\u001b[0m skeletons \u001b[38;5;241m=\u001b[39m read_skeletons(labels_path)\n", - "File \u001b[0;32m~/miniforge3/envs/dreem/lib/python3.11/site-packages/sleap_io/io/slp.py:448\u001b[0m, in \u001b[0;36mread_tracks\u001b[0;34m(labels_path)\u001b[0m\n\u001b[1;32m 439\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mread_tracks\u001b[39m(labels_path: \u001b[38;5;28mstr\u001b[39m) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m \u001b[38;5;28mlist\u001b[39m[Track]:\n\u001b[1;32m 440\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Read `Track` dataset in a SLEAP labels file.\u001b[39;00m\n\u001b[1;32m 441\u001b[0m \n\u001b[1;32m 442\u001b[0m \u001b[38;5;124;03m Args:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 446\u001b[0m \u001b[38;5;124;03m A list of `Track` objects.\u001b[39;00m\n\u001b[1;32m 447\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m--> 448\u001b[0m tracks \u001b[38;5;241m=\u001b[39m [json\u001b[38;5;241m.\u001b[39mloads(x) \u001b[38;5;28;01mfor\u001b[39;00m x \u001b[38;5;129;01min\u001b[39;00m \u001b[43mread_hdf5_dataset\u001b[49m\u001b[43m(\u001b[49m\u001b[43mlabels_path\u001b[49m\u001b[43m,\u001b[49m\u001b[43m 
\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtracks_json\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m]\n\u001b[1;32m 449\u001b[0m track_objects \u001b[38;5;241m=\u001b[39m []\n\u001b[1;32m 450\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m track \u001b[38;5;129;01min\u001b[39;00m tracks:\n", - "File \u001b[0;32m~/miniforge3/envs/dreem/lib/python3.11/site-packages/sleap_io/io/utils.py:21\u001b[0m, in \u001b[0;36mread_hdf5_dataset\u001b[0;34m(filename, dataset)\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mread_hdf5_dataset\u001b[39m(filename: \u001b[38;5;28mstr\u001b[39m, dataset: \u001b[38;5;28mstr\u001b[39m) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m np\u001b[38;5;241m.\u001b[39mndarray:\n\u001b[1;32m 12\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Read data from an HDF5 file.\u001b[39;00m\n\u001b[1;32m 13\u001b[0m \n\u001b[1;32m 14\u001b[0m \u001b[38;5;124;03m Args:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 19\u001b[0m \u001b[38;5;124;03m The data as an array.\u001b[39;00m\n\u001b[1;32m 20\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m---> 21\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[43mh5py\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mFile\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfilename\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mr\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mas\u001b[39;00m f:\n\u001b[1;32m 22\u001b[0m data \u001b[38;5;241m=\u001b[39m f[dataset][()]\n\u001b[1;32m 23\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m data\n", - "File \u001b[0;32m~/miniforge3/envs/dreem/lib/python3.11/site-packages/h5py/_hl/files.py:562\u001b[0m, in \u001b[0;36mFile.__init__\u001b[0;34m(self, name, mode, driver, libver, userblock_size, swmr, rdcc_nslots, rdcc_nbytes, rdcc_w0, track_order, fs_strategy, fs_persist, fs_threshold, fs_page_size, page_buf_size, min_meta_keep, min_raw_keep, locking, alignment_threshold, alignment_interval, meta_block_size, **kwds)\u001b[0m\n\u001b[1;32m 553\u001b[0m fapl \u001b[38;5;241m=\u001b[39m make_fapl(driver, libver, rdcc_nslots, rdcc_nbytes, rdcc_w0,\n\u001b[1;32m 554\u001b[0m locking, page_buf_size, min_meta_keep, min_raw_keep,\n\u001b[1;32m 555\u001b[0m alignment_threshold\u001b[38;5;241m=\u001b[39malignment_threshold,\n\u001b[1;32m 556\u001b[0m alignment_interval\u001b[38;5;241m=\u001b[39malignment_interval,\n\u001b[1;32m 557\u001b[0m meta_block_size\u001b[38;5;241m=\u001b[39mmeta_block_size,\n\u001b[1;32m 558\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwds)\n\u001b[1;32m 559\u001b[0m fcpl \u001b[38;5;241m=\u001b[39m make_fcpl(track_order\u001b[38;5;241m=\u001b[39mtrack_order, fs_strategy\u001b[38;5;241m=\u001b[39mfs_strategy,\n\u001b[1;32m 560\u001b[0m fs_persist\u001b[38;5;241m=\u001b[39mfs_persist, fs_threshold\u001b[38;5;241m=\u001b[39mfs_threshold,\n\u001b[1;32m 561\u001b[0m fs_page_size\u001b[38;5;241m=\u001b[39mfs_page_size)\n\u001b[0;32m--> 562\u001b[0m fid \u001b[38;5;241m=\u001b[39m \u001b[43mmake_fid\u001b[49m\u001b[43m(\u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmode\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43muserblock_size\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfapl\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfcpl\u001b[49m\u001b[43m,\u001b[49m\u001b[43m 
\u001b[49m\u001b[43mswmr\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mswmr\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 564\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(libver, \u001b[38;5;28mtuple\u001b[39m):\n\u001b[1;32m 565\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_libver \u001b[38;5;241m=\u001b[39m libver\n", - "File \u001b[0;32m~/miniforge3/envs/dreem/lib/python3.11/site-packages/h5py/_hl/files.py:235\u001b[0m, in \u001b[0;36mmake_fid\u001b[0;34m(name, mode, userblock_size, fapl, fcpl, swmr)\u001b[0m\n\u001b[1;32m 233\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m swmr \u001b[38;5;129;01mand\u001b[39;00m swmr_support:\n\u001b[1;32m 234\u001b[0m flags \u001b[38;5;241m|\u001b[39m\u001b[38;5;241m=\u001b[39m h5f\u001b[38;5;241m.\u001b[39mACC_SWMR_READ\n\u001b[0;32m--> 235\u001b[0m fid \u001b[38;5;241m=\u001b[39m \u001b[43mh5f\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mopen\u001b[49m\u001b[43m(\u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mflags\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfapl\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mfapl\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 236\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m mode \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mr+\u001b[39m\u001b[38;5;124m'\u001b[39m:\n\u001b[1;32m 237\u001b[0m fid \u001b[38;5;241m=\u001b[39m h5f\u001b[38;5;241m.\u001b[39mopen(name, h5f\u001b[38;5;241m.\u001b[39mACC_RDWR, fapl\u001b[38;5;241m=\u001b[39mfapl)\n", - "File \u001b[0;32mh5py/_objects.pyx:54\u001b[0m, in \u001b[0;36mh5py._objects.with_phil.wrapper\u001b[0;34m()\u001b[0m\n", - "File \u001b[0;32mh5py/_objects.pyx:55\u001b[0m, in \u001b[0;36mh5py._objects.with_phil.wrapper\u001b[0;34m()\u001b[0m\n", - "File \u001b[0;32mh5py/h5f.pyx:102\u001b[0m, in \u001b[0;36mh5py.h5f.open\u001b[0;34m()\u001b[0m\n", - "\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] Unable to open file (unable to open file: name = '/home/jovyan/talmolab-smb/datasets/mot/microscopy/airyscan_proofread/Final/dreem-train/10-1.slp', errno = 2, error message = 'No such file or directory', flags = 0, o_flags = 0)" - ] - } - ], + "outputs": [], "source": [ "# get sample crops from training data to pass through the network\n", "train_path = \"/home/jovyan/talmolab-smb/datasets/mot/microscopy/airyscan_proofread/Final/dreem-train\"\n", @@ -335,14 +313,14 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 133, "id": "27bfdb50-2eee-4207-8a16-6481a9905e90", "metadata": { "tags": [] }, "outputs": [], "source": [ - "# get a list of all instances; this is the format that the model pipeline uses as input data\n", + "# get a list of all instances in the first clip; this is the format that the model pipeline uses as input data\n", "ref_instances = []\n", "for frame in data[0]:\n", " for instance in frame.instances:\n", @@ -351,7 +329,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 134, "id": "8ea441b2-b12a-4f10-8821-aef889a063ba", "metadata": { "tags": [] @@ -365,7 +343,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 135, "id": "b863dbfe-d9fc-4ed1-bf97-3f304d3d03a6", "metadata": { "collapsed": true, @@ -378,10 +356,10 @@ { "data": { "text/plain": [ - "" + "" ] }, - "execution_count": 8, + "execution_count": 135, "metadata": {}, "output_type": "execute_result" }, @@ -403,7 +381,7 @@ }, { "cell_type": "code", - "execution_count": null, + 
"execution_count": 136, "id": "8b17fdb7", "metadata": { "tags": [] @@ -417,7 +395,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 137, "id": "7999fcef-953b-42cf-927c-f3b617f68157", "metadata": { "tags": [] @@ -460,7 +438,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 138, "id": "e299e8a0-61eb-4eee-901c-49aa7e678b3b", "metadata": { "tags": [] @@ -517,7 +495,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 139, "id": "75ec8cab-25b9-4e9e-a64a-b5dbe00cc81a", "metadata": { "tags": [] @@ -538,32 +516,61 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 140, "id": "f0823cf1-2a35-4920-a62e-896bd9dbb078", "metadata": { "tags": [] }, - "outputs": [ - { - "ename": "NameError", - "evalue": "name 'ref_instances' is not defined", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[3], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;66;03m# input data for transformer\u001b[39;00m\n\u001b[1;32m 2\u001b[0m ref_features \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mcat(\n\u001b[0;32m----> 3\u001b[0m [instance\u001b[38;5;241m.\u001b[39mfeatures \u001b[38;5;28;01mfor\u001b[39;00m instance \u001b[38;5;129;01min\u001b[39;00m \u001b[43mref_instances\u001b[49m], dim\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0\u001b[39m\n\u001b[1;32m 4\u001b[0m )\u001b[38;5;241m.\u001b[39munsqueeze(\u001b[38;5;241m0\u001b[39m)\n\u001b[1;32m 6\u001b[0m \u001b[38;5;66;03m# create transformer instance to test embeddings \u001b[39;00m\n\u001b[1;32m 7\u001b[0m tfmr \u001b[38;5;241m=\u001b[39m Transformer()\n", - "\u001b[0;31mNameError\u001b[0m: name 'ref_instances' is not defined" - ] - } - ], + "outputs": [], "source": [ - "# input data for transformer\n", - "ref_features = torch.cat(\n", - " [instance.features for instance in ref_instances], dim=0\n", - " ).unsqueeze(0)\n", - "\n", "# create transformer instance to test embeddings \n", - "tfmr = Transformer()\n" + "tfmr = Transformer()" + ] + }, + { + "cell_type": "code", + "execution_count": 143, + "id": "5e0b9d31-34be-40f8-91dc-b91d59aee170", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "assoc = tfmr(ref_instances)" + ] + }, + { + "cell_type": "code", + "execution_count": 157, + "id": "9f29ca35-9ff2-4e9a-bba0-37a3a14ad522", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "gtr = GTRRunner()" + ] + }, + { + "cell_type": "code", + "execution_count": 160, + "id": "0aa3876a-6246-4d02-80a5-013d382f6d38", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "metrics = gtr._shared_eval_step(data[0],\"train\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "aee0d129-83f2-4f76-b452-132391554b4c", + "metadata": {}, + "outputs": [], + "source": [ + "metrics" ] } ], @@ -571,7 +578,7 @@ "kernelspec": { "display_name": "dreem", "language": "python", - "name": "python3" + "name": "dreem" }, "language_info": { "codemirror_mode": { From 9c6478968c008d04a5898cf1cee53d07dc5d2369 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Tue, 6 Aug 2024 12:32:46 -0700 Subject: [PATCH 07/63] refactor transformer encoder - add support for stack/avg/concatenate - move embedding processing out of transformer and into encoder --- dreem/io/config.py | 2 + dreem/models/embedding.py | 1 + dreem/models/transformer.py | 102 
+++++++++++++++++++++---------- dreem/training/configs/base.yaml | 19 +++--- dreem/training/train.py | 2 + run_trainer.py | 4 +- 6 files changed, 90 insertions(+), 40 deletions(-) diff --git a/dreem/io/config.py b/dreem/io/config.py index b018790..53a1db2 100644 --- a/dreem/io/config.py +++ b/dreem/io/config.py @@ -40,6 +40,8 @@ def __init__(self, cfg: DictConfig, params_cfg: DictConfig | None = None): else: self.cfg = cfg + OmegaConf.set_struct(self.cfg, False) + def __repr__(self): """Object representation of config class.""" return f"Config({self.cfg})" diff --git a/dreem/models/embedding.py b/dreem/models/embedding.py index 23190ad..44046b0 100644 --- a/dreem/models/embedding.py +++ b/dreem/models/embedding.py @@ -221,6 +221,7 @@ def __init__( self._emb_func = self._sine_temp_embedding elif self.mode == "rope": + # TODO: pos/temp uses the same processing but takes the input differently self._emb_func = self._rope_embedding diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index 13e4529..6139b43 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -19,7 +19,7 @@ import copy import torch import torch.nn.functional as F -from typing import List +from typing import Dict, Tuple # todo: add named tensors # todo: add flash attention @@ -161,7 +161,6 @@ def forward( [instance.features for instance in ref_instances], dim=0 ).unsqueeze(0) - # window_length = len(frames) # instances_per_frame = [frame.num_detected for frame in frames] total_instances = len(ref_instances) embed_dim = ref_features.shape[-1] @@ -170,23 +169,6 @@ def forward( ref_boxes = torch.nan_to_num(ref_boxes, -1.0) ref_times, query_times = get_times(ref_instances, query_instances) - window_length = len(ref_times.unique()) - - ref_temp_emb = self.temp_emb(ref_times) - - ref_pos_emb = self.pos_emb(ref_boxes) - - if self.return_embedding: - for i, instance in enumerate(ref_instances): - instance.add_embedding("pos", ref_pos_emb[i]) - instance.add_embedding("temp", ref_temp_emb[i]) - - ref_emb = (ref_pos_emb + ref_temp_emb) / 2.0 - - ref_emb = ref_emb.view(1, total_instances, embed_dim) - - ref_emb = ref_emb.permute(1, 0, 2) # (total_instances, batch_size, embed_dim) - batch_size, total_instances, embed_dim = ref_features.shape ref_features = ref_features.permute( @@ -195,10 +177,20 @@ def forward( encoder_queries = ref_features - encoder_features = self.encoder( - encoder_queries, pos_emb=ref_emb + encoder_features, ref_pos_emb, ref_temp_emb = self.encoder( + encoder_queries, + embedding_map={"pos": self.pos_emb, "temp": self.temp_emb}, + ref_boxes=ref_boxes, + ref_times=ref_times, + embedding_agg_method=self.embedding_meta["embedding_agg_method"] ) # (total_instances, batch_size, embed_dim) + # TODO: check if instance.add_embedding() supports rotation matrices + if self.return_embedding: + for i, instance in enumerate(ref_instances): + instance.add_embedding("pos", ref_pos_emb[i]) + instance.add_embedding("temp", ref_temp_emb[i]) + n_query = total_instances query_features = ref_features @@ -299,13 +291,13 @@ def __init__( self.activation = _get_activation_fn(activation) def forward( - self, queries: torch.Tensor, embeddings : List[Embedding] + self, queries: torch.Tensor ) -> torch.Tensor: """Execute a forward pass of the encoder layer. 
Args: - queries: Input sequence for encoder (n_query, batch_size, embed_dim); transformed with embedding - pos_emb: Position embedding, if provided is added to src + queries: Input sequence for encoder (n_query, batch_size, embed_dim); + data is already transformed with embedding Returns: The output tensor of shape (n_query, batch_size, embed_dim). @@ -461,24 +453,71 @@ def __init__( self.norm = norm if norm is not None else nn.Identity() def forward( - self, queries: torch.Tensor, pos_emb: torch.Tensor = None - ) -> torch.Tensor: + self, queries: torch.Tensor, embedding_map: Dict[str, Embedding], + ref_boxes: torch.Tensor, ref_times: torch.Tensor, + embedding_agg_method: str + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """Execute a forward pass of encoder layer. Args: queries: The input tensor of shape (n_query, batch_size, embed_dim). - pos_emb: The positional embedding tensor of shape (n_query, embed_dim). + embedding_map: Dict of Embedding objects defining the pos/temp embeddings to be applied to + the input data before it passes to the EncoderLayer + ref_boxes: + ref_times: + embedding_agg_method: Returns: The output tensor of shape (n_query, batch_size, embed_dim). """ + for layer in self.layers: - # TODO: add embedding object call - # TODO: add the embedding object into the argument list to the forward() call - queries = layer(queries, pos_emb=pos_emb) + pos_emb, temp_emb = embedding_map["pos"], embedding_map["temp"] + # queries is of shape (n_query, batch_size, embed_dim); transpose for embeddings + queries = queries.permute(1,0,2) + # queries is now of shape (batch_size, n_query, embed_dim) + # calculate temporal embeddings and transform queries + queries_t, ref_temp_emb = temp_emb(queries, ref_times) + # if avg temp and pos, need bounding boxes + if embedding_agg_method == "average": + _, ref_pos_emb = pos_emb(queries, ref_boxes) + ref_emb = (ref_pos_emb + ref_temp_emb) / 2 + queries = queries + ref_emb + queries = queries.permute(1, 0, 2) + else: + # todo: input for x,y should be different (not ref_boxes) + # just extract the x,y coordinates from ref_boxes? 
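# A minimal standalone sketch of the todo above: the x/y positions can be taken
# straight from the boxes by averaging opposite corners, which is what a later commit
# in this series does in `_spatial_emb_from_bb`. Shapes assume boxes of shape
# (n_query, batch_size, 4) in [ymin, xmin, ymax, xmax] order; this is an illustration,
# not the repo's implementation.
import torch

def centroids_from_boxes(boxes: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
    """Return per-instance x and y centroids, each of shape (n_query,)."""
    cx = boxes[:, :, [1, 3]].mean(dim=2).squeeze()  # average of xmin and xmax
    cy = boxes[:, :, [0, 2]].mean(dim=2).squeeze()  # average of ymin and ymax
    return cx, cy

# example: two instances, batch of one
boxes = torch.tensor([[[0.0, 0.0, 2.0, 2.0]], [[1.0, 3.0, 3.0, 5.0]]])
cx, cy = centroids_from_boxes(boxes)  # cx -> tensor([1., 4.]), cy -> tensor([1., 2.])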
+ # calculate spatial embedding for x, y separately + queries_x, ref_pos_emb = pos_emb(queries, ref_x) + queries_y, ref_pos_emb = pos_emb(queries, ref_y) + + # concatenate, stack, or average the queries + queries = self.collate_queries( + (queries, queries_t, queries_x, queries_y), + embedding_agg_method) + + # todo: encoderLayer needs to be made compatible with stack/concatenate; + # need to pass in embedding_agg_method + queries = layer(queries) encoder_features = self.norm(queries) - return encoder_features + + return encoder_features, ref_pos_emb, ref_temp_emb + + def collate_queries(self, _queries: Tuple[torch.Tensor], embedding_agg_method + ) -> torch.Tensor: + queries, queries_t, queries_x, queries_y = _queries + + if embedding_agg_method == "average": + return queries + elif embedding_agg_method == "stack": + # stacked of shape (3, batch_size, n_query, embed_dim) + stacked = torch.stack((queries_t, queries_x, queries_y), dim=-1) + # transpose for input to EncoderLayer + return stacked.permute(0, 2, 1, 3) + elif embedding_agg_method == "concatenate": + # todo: complete this, and transpose output + return class TransformerDecoder(nn.Module): @@ -577,3 +616,4 @@ def _get_activation_fn(activation: str) -> callable: if activation == "glu": return F.glu raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.") + diff --git a/dreem/training/configs/base.yaml b/dreem/training/configs/base.yaml index f9ed413..6a507cc 100644 --- a/dreem/training/configs/base.yaml +++ b/dreem/training/configs/base.yaml @@ -16,10 +16,11 @@ model: dropout_attn_head: 0.1 embedding_meta: pos: - mode: "fixed" + mode: "fixed" # supports fixed, learned, rope normalize: true temp: - mode: "fixed" + mode: "fixed" # supports fixed, learned, rope + embedding_agg_method: "stack" # supports stack, average, concatenate return_embedding: False decoder_self_attn: False @@ -66,24 +67,24 @@ runner: dataset: train_dataset: - slp_files: ["tests/data/sleap/two_flies.slp"] - video_files: ["tests/data/sleap/two_flies.mp4"] + slp_files: ["../../tests/data/sleap/two_flies.slp"] + video_files: ["../../tests/data/sleap/two_flies.mp4"] padding: 5 crop_size: 128 chunk: true clip_length: 32 val_dataset: - slp_files: ["tests/data/sleap/two_flies.slp"] - video_files: ["tests/data/sleap/two_flies.mp4"] + slp_files: ["../../tests/data/sleap/two_flies.slp"] + video_files: ["../../tests/data/sleap/two_flies.mp4"] padding: 5 crop_size: 128 chunk: True clip_length: 32 test_dataset: - slp_files: ["tests/data/sleap/two_flies.slp"] - video_files: ["tests/data/sleap/two_flies.mp4"] + slp_files: ["../../tests/data/sleap/two_flies.slp"] + video_files: ["../../tests/data/sleap/two_flies.mp4"] padding: 5 crop_size: 128 chunk: True @@ -130,6 +131,8 @@ checkpointing: every_n_epochs: 10 trainer: + accelerator: "mps" + devices: 1 check_val_every_n_epoch: 1 enable_checkpointing: true gradient_clip_val: null diff --git a/dreem/training/train.py b/dreem/training/train.py index 372bfa6..c34b499 100644 --- a/dreem/training/train.py +++ b/dreem/training/train.py @@ -53,6 +53,7 @@ def run(cfg: DictConfig): logger.info(f"Final train config: {train_cfg}") model = train_cfg.get_model() + train_dataset = train_cfg.get_dataset(mode="train") train_dataloader = train_cfg.get_dataloader(train_dataset, mode="train") @@ -83,6 +84,7 @@ def run(cfg: DictConfig): _ = callbacks.append(train_cfg.get_early_stopping()) accelerator = "gpu" if torch.cuda.is_available() else "cpu" + devices = torch.cuda.device_count() if torch.cuda.is_available() else cpu_count() 
trainer = train_cfg.get_trainer( diff --git a/run_trainer.py b/run_trainer.py index c69d93f..5b129ab 100644 --- a/run_trainer.py +++ b/run_trainer.py @@ -1,7 +1,9 @@ from dreem.training import train from omegaconf import OmegaConf +import os -base_config = "dreem/training/configs/base.yaml" +os.chdir("./dreem/training") +base_config = "./configs/base.yaml" # params_config = "/path/to/override.yaml" cfg = OmegaConf.load(base_config) From 67bf6e4c5d9adf53afb88f91cfe1370f7ce3f80b Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Tue, 6 Aug 2024 13:51:21 -0700 Subject: [PATCH 08/63] further changes for rope - get centroid from x,y for spatial embedding - complete stack agg method - add docstrings --- dreem/models/transformer.py | 60 +++++++++++++++++++++++++++---------- 1 file changed, 45 insertions(+), 15 deletions(-) diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index 6139b43..9e9d18d 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -191,6 +191,8 @@ def forward( instance.add_embedding("pos", ref_pos_emb[i]) instance.add_embedding("temp", ref_temp_emb[i]) + # -------------- Begin decoder pre-processing --------------- # + n_query = total_instances query_features = ref_features @@ -243,6 +245,7 @@ def forward( asso_output = [] for frame_features in decoder_features: + # todo: this needs to handle the 3x queries that come out of the encoder/decoder asso_matrix = self.attn_head(frame_features, encoder_features).view( n_query, total_instances ) @@ -474,22 +477,20 @@ def forward( for layer in self.layers: pos_emb, temp_emb = embedding_map["pos"], embedding_map["temp"] # queries is of shape (n_query, batch_size, embed_dim); transpose for embeddings - queries = queries.permute(1,0,2) - # queries is now of shape (batch_size, n_query, embed_dim) + queries = queries.permute(1,0,2) # queries is shape (batch_size, n_query, embed_dim) # calculate temporal embeddings and transform queries queries_t, ref_temp_emb = temp_emb(queries, ref_times) - # if avg temp and pos, need bounding boxes + # if avg. of temp and pos, need bounding boxes if embedding_agg_method == "average": _, ref_pos_emb = pos_emb(queries, ref_boxes) ref_emb = (ref_pos_emb + ref_temp_emb) / 2 queries = queries + ref_emb - queries = queries.permute(1, 0, 2) + queries = queries.permute(1, 0, 2) # transpose back before input to EncoderLayer else: - # todo: input for x,y should be different (not ref_boxes) - # just extract the x,y coordinates from ref_boxes? 
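# A minimal standalone sketch of the two non-average `embedding_agg_method` options
# worked out over these commits: "stack" concatenates along the token axis (3 * n_query
# tokens of width embed_dim), while "concatenate" concatenates along the feature axis
# and is projected back to embed_dim (a plain Linear stands in here for the repo's MLP,
# which a later commit in this series wires in). "average" is handled separately by
# adding (pos_emb + temp_emb) / 2 to the queries, as in the encoder code above.
# Shapes and values are illustrative only, not the repo's implementation.
import torch

batch_size, n_query, embed_dim = 1, 5, 8
q_t = torch.randn(batch_size, n_query, embed_dim)  # temporally embedded queries
q_x = torch.randn(batch_size, n_query, embed_dim)  # x-embedded queries
q_y = torch.randn(batch_size, n_query, embed_dim)  # y-embedded queries

stacked = torch.cat((q_t, q_x, q_y), dim=1)        # (1, 15, 8): more tokens
concatenated = torch.cat((q_t, q_x, q_y), dim=2)   # (1, 5, 24): longer tokens
project = torch.nn.Linear(3 * embed_dim, embed_dim)
concatenated = project(concatenated)               # projected back to (1, 5, 8)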
- # calculate spatial embedding for x, y separately - queries_x, ref_pos_emb = pos_emb(queries, ref_x) - queries_y, ref_pos_emb = pos_emb(queries, ref_y) + # calculate embedding array for x,y from bounding box centroids + ref_x, ref_y = self._spatial_emb_from_bb(ref_boxes) + queries_x, ref_pos_emb_x = pos_emb(queries, ref_x) + queries_y, ref_pos_emb_y = pos_emb(queries, ref_y) # concatenate, stack, or average the queries queries = self.collate_queries( @@ -504,20 +505,49 @@ def forward( return encoder_features, ref_pos_emb, ref_temp_emb - def collate_queries(self, _queries: Tuple[torch.Tensor], embedding_agg_method + def collate_queries(self, _queries: Tuple[torch.Tensor], embedding_agg_method: str ) -> torch.Tensor: + """ + + Args: + _queries: 3-tuple of queries (already transformed by embeddings) for x, y, t + each of shape (batch_size, n_query, embed_dim) + embedding_agg_method: String representing the aggregation method for embeddings + + Returns: Tensor of aggregated queries; can be concatenated (increased length of tokens), + stacked (increased number of tokens), or averaged (original token number and length) + """ + queries, queries_t, queries_x, queries_y = _queries if embedding_agg_method == "average": return queries elif embedding_agg_method == "stack": - # stacked of shape (3, batch_size, n_query, embed_dim) - stacked = torch.stack((queries_t, queries_x, queries_y), dim=-1) + # stacked is of shape (batch_size, 3*n_query, embed_dim) + stacked = torch.cat((queries_t, queries_x, queries_y), dim=1) # transpose for input to EncoderLayer - return stacked.permute(0, 2, 1, 3) + return stacked.permute(1, 0, 2) elif embedding_agg_method == "concatenate": - # todo: complete this, and transpose output - return + # todo: complete this, pass it through an MLP and transpose output + + return concatenated.permute(1, 0, 2) + + + def _spatial_emb_from_bb(self, bb: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Computes embedding arrays for x,y spatial dimensions using centroids from bounding boxes + Args: + bb: Bounding boxes of shape (n_query, batch_size, 4) from which to compute x,y centroids; + each bounding box is [ymin, xmin, ymax, xmax] + + Returns: + A tuple of tensors containing the emebdding array for x,y dimensions + """ + + centroid_x, centroid_y = bb[:,:,[1,3]].mean(axis=2), bb[:,:,[0,2]].mean(axis=2) + + return + class TransformerDecoder(nn.Module): From fa61af061fd911a166b8afcfd84de4bbc7a9713f Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Tue, 6 Aug 2024 16:49:48 -0700 Subject: [PATCH 09/63] complete encoder section of rope - concatenation method with mlp - complete pre-processing for input to EncoderLayer - fix shape issues in rope/additive_embedding/forward modules in embedding.py --- dreem/models/embedding.py | 75 ++++++++++++++++++++++++++----------- dreem/models/mlp.py | 2 + dreem/models/transformer.py | 56 ++++++++++++++------------- 3 files changed, 84 insertions(+), 49 deletions(-) diff --git a/dreem/models/embedding.py b/dreem/models/embedding.py index 44046b0..186ecab 100644 --- a/dreem/models/embedding.py +++ b/dreem/models/embedding.py @@ -36,7 +36,7 @@ def __init__( self, dim: int, max_seq_len: int = 4096, - base: int = 10_000, + base: int = 10000, ) -> None: super().__init__() self.dim = dim @@ -258,9 +258,20 @@ def _transform(self, x, emb): def _apply_rope(self, x, emb): + """ + Applies Rotary Positional Embedding to input queries + + Args: + x: Input queries of shape (batch_size, n_query, embed_dim) + emb: Rotation matrix of shape (batch_size, n_query, 
num_heads, embed_dim // 2, 2) - - # tensor has shape [b, s, n_h, h_d // 2, 2] + Returns: + Tensor of input queries transformed by RoPE + """ + x_out = torch.unsqueeze(x, 2) + # input needs shape [batch_size, n_query, num_heads, embed_dim // 2, 2] + x_out = x_out.float().reshape(*x_out.shape[:-1], -1, 2) + # apply RoPE to each query token x_out = torch.stack( [ x[..., 0] * emb[..., 0] @@ -270,15 +281,24 @@ def _apply_rope(self, x, emb): ], -1, ) - # tensor has shape [b, s, n_h, h_d] + # output has shape [batch_size, n_query, num_heads, embed_dim] x_out = x_out.flatten(3) return x_out def _apply_additive_embeddings(self, x, emb): + """ + Applies additive embeddings to input queries + + Args: + x: Input tensor of shape (batch_size, N, embed_dim) + emb: Embedding array of shape (N, embed_dim) - return x + emb + Returns: + Tensor: Input queries with embeddings added - shape (batch_size, N, embed_dim) + """ + return x + emb.unsqueeze(0) def forward(self, x, seq_positions: torch.Tensor) -> torch.Tensor: @@ -286,25 +306,29 @@ def forward(self, x, seq_positions: torch.Tensor) -> torch.Tensor: Args: seq_positions: - * An (`N`, 1) tensor where seq_positions[i] represents the temporal position of instance_i in the sequence. - * An (`N`, n_anchors x 4) tensor where seq_positions[i, j, :] represents the [y1, x1, y2, x2] spatial locations of jth point of instance_i in the sequence. + * An (N,) tensor where seq_positions[i] represents the temporal position of instance_i in the sequence. + * An (N, n_anchors x 4) tensor where seq_positions[i, j, :] represents the [y1, x1, y2, x2] spatial locations of jth point of instance_i in the sequence. + x: Input data of shape ((batch_size, N, embed_dim)) Returns: - An `N` x `self.features` tensor representing the corresponding spatial or temporal embedding. + - Tensor: input queries transformed by embedding + - An `N` x `self.features` tensor representing the corresponding spatial or temporal embedding. """ - # create embedding array (_emb_func selects appropriate callback based on config input) + # create embedding array; either rotation matrix of shape + # (batch_size, n_query, num_heads, embed_dim // 2, 2), + # or (N, embed_dim) array emb = self._emb_func(seq_positions) # transform the input data with the embedding - x = self._transform(emb, x) - - if emb.shape[-1] != self.features: - raise RuntimeError( - ( - f"Output embedding dimension is {emb.shape[-1]} but requested {self.features} dimensions! \n" - f"hint: Try turning the MLP on by passing `mlp_cfg` to the constructor to project to the correct embedding dimensions." - ) - ) + x = self._transform(x, emb) + + # if emb.shape[-1] != self.features: + # raise RuntimeError( + # ( + # f"Output embedding dimension is {emb.shape[-1]} but requested {self.features} dimensions! \n" + # f"hint: Try turning the MLP on by passing `mlp_cfg` to the constructor to project to the correct embedding dimensions." 
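# A small numeric check of the pairwise rotation used in `_apply_rope` above: each
# (even, odd) feature pair (x0, x1) is mapped to (x0*c - x1*s, x1*c + x0*s), where the
# cached emb[..., 0] / emb[..., 1] terms play the role of cos(theta) / sin(theta) for
# that position and frequency. Being a plain 2-D rotation, it changes only the phase of
# the pair, not its magnitude. Values below are made up for illustration.
import torch

x_pair = torch.tensor([3.0, 4.0])           # one (even, odd) feature pair
theta = torch.tensor(0.5)                   # rotation angle for this position/frequency
c, s = torch.cos(theta), torch.sin(theta)
rotated = torch.stack([x_pair[0] * c - x_pair[1] * s,
                       x_pair[1] * c + x_pair[0] * s])
assert torch.allclose(rotated.norm(), x_pair.norm())  # rotation preserves the norm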
+ # ) + # ) return x, emb def _torch_int_div( @@ -322,12 +346,19 @@ def _torch_int_div( return torch.div(tensor1, tensor2, rounding_mode="floor") - def _rope_embedding(self, x: torch.Tensor, emb_ids: torch.Tensor) -> torch.Tensor: - - # input must be of shape (num_batches, num_instances, num_attn_heads, d_model) + def _rope_embedding(self, x: torch.Tensor) -> torch.Tensor: + """ + Computes the rotation matrix to apply RoPE to input queries + Args: + x: Input queries of shape (num_batches, n_queries, embed_dim) + Returns: + Tensor: (N, embed_dim) rotation matrix + """ + # input must be of shape (num_batches, num_instances, num_attn_heads, embed_dim) # use num_heads=1 for compatibility with torch ROPE x_rope = torch.unsqueeze(x, 2) - rope = RotaryPositionalEmbeddings(self.features) + # RoPE module takes in dimension, num_queries as input to calculate rotation matrix + rope = RotaryPositionalEmbeddings(self.features, x.shape[1]) rot_mat = rope(x_rope) return rot_mat diff --git a/dreem/models/mlp.py b/dreem/models/mlp.py index 872d715..4f09551 100644 --- a/dreem/models/mlp.py +++ b/dreem/models/mlp.py @@ -34,8 +34,10 @@ def __init__( self.layers = torch.nn.ModuleList( [ torch.nn.Linear(n, k) + # list concatenations to ensure layer shape compability for n, k in zip([input_dim] + h, h + [output_dim]) ] + ) if self.dropout > 0.0: self.dropouts = torch.nn.ModuleList( diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index 9e9d18d..c40cc33 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -14,6 +14,7 @@ from dreem.io import AssociationMatrix from dreem.models.attention_head import ATTWeightHead from dreem.models import Embedding +from dreem.models.mlp import MLP from dreem.models.model_utils import get_boxes, get_times from torch import nn import copy @@ -245,7 +246,7 @@ def forward( asso_output = [] for frame_features in decoder_features: - # todo: this needs to handle the 3x queries that come out of the encoder/decoder + # TODO: this needs to handle the 3x queries that come out of the encoder/decoder asso_matrix = self.attn_head(frame_features, encoder_features).view( n_query, total_instances ) @@ -460,13 +461,13 @@ def forward( ref_boxes: torch.Tensor, ref_times: torch.Tensor, embedding_agg_method: str ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """Execute a forward pass of encoder layer. + """Execute a forward pass of encoder layer. Computes and applies embeddings before input to EncoderLayer Args: queries: The input tensor of shape (n_query, batch_size, embed_dim). embedding_map: Dict of Embedding objects defining the pos/temp embeddings to be applied to the input data before it passes to the EncoderLayer - ref_boxes: + ref_boxes: Bounding box based embedding ids of shape (n_query, batch_size, 4) ref_times: embedding_agg_method: @@ -485,52 +486,55 @@ def forward( _, ref_pos_emb = pos_emb(queries, ref_boxes) ref_emb = (ref_pos_emb + ref_temp_emb) / 2 queries = queries + ref_emb - queries = queries.permute(1, 0, 2) # transpose back before input to EncoderLayer else: - # calculate embedding array for x,y from bounding box centroids + # calculate embedding array for x,y from bb centroids; ref_x, ref_y of shape (n_query,) ref_x, ref_y = self._spatial_emb_from_bb(ref_boxes) + # forward pass of Embedding object transforms input queries with embeddings queries_x, ref_pos_emb_x = pos_emb(queries, ref_x) queries_y, ref_pos_emb_y = pos_emb(queries, ref_y) - # concatenate, stack, or average the queries + # concatenate or stack the queries (avg. 
method done above since it applies differently) queries = self.collate_queries( (queries, queries_t, queries_x, queries_y), embedding_agg_method) - - # todo: encoderLayer needs to be made compatible with stack/concatenate; - # need to pass in embedding_agg_method + # transpose for input to EncoderLayer to (n_queries, batch_size, embed_dim) + queries = queries.permute(1, 0, 2) + # pass through EncoderLayer queries = layer(queries) encoder_features = self.norm(queries) return encoder_features, ref_pos_emb, ref_temp_emb - def collate_queries(self, _queries: Tuple[torch.Tensor], embedding_agg_method: str + + def collate_queries(self, queries: Tuple[torch.Tensor], embedding_agg_method: str ) -> torch.Tensor: """ Args: - _queries: 3-tuple of queries (already transformed by embeddings) for x, y, t + _queries: 4-tuple of queries (already transformed by embeddings) for _, x, y, t each of shape (batch_size, n_query, embed_dim) embedding_agg_method: String representing the aggregation method for embeddings - Returns: Tensor of aggregated queries; can be concatenated (increased length of tokens), + Returns: Tensor of aggregated queries of shape; can be concatenated (increased length of tokens), stacked (increased number of tokens), or averaged (original token number and length) """ - queries, queries_t, queries_x, queries_y = _queries + queries_t, queries_x, queries_y = queries + + mlp = MLP(input_dim=queries_t.shape[-1]*3, hidden_dim=queries_t.shape[-1]*2, + output_dim=queries_t.shape[-1], num_layers=1, dropout=0.) - if embedding_agg_method == "average": - return queries - elif embedding_agg_method == "stack": + if embedding_agg_method == "stack": # stacked is of shape (batch_size, 3*n_query, embed_dim) - stacked = torch.cat((queries_t, queries_x, queries_y), dim=1) - # transpose for input to EncoderLayer - return stacked.permute(1, 0, 2) + collated_queries = torch.cat((queries_t, queries_x, queries_y), dim=1) elif embedding_agg_method == "concatenate": - # todo: complete this, pass it through an MLP and transpose output - - return concatenated.permute(1, 0, 2) + # concatenated is of shape (batch_size, n_query, 3*embed_dim) + collated_queries = torch.cat((queries_t, queries_x, queries_y), dim=2) + # pass through MLP to project into space of (batch_size, n_query, embed_dim) + collated_queries = mlp(collated_queries) + + return collated_queries def _spatial_emb_from_bb(self, bb: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: @@ -541,12 +545,10 @@ def _spatial_emb_from_bb(self, bb: torch.Tensor) -> Tuple[torch.Tensor, torch.Te each bounding box is [ymin, xmin, ymax, xmax] Returns: - A tuple of tensors containing the emebdding array for x,y dimensions + A tuple of tensors containing the emebdding array for x,y dimensions, each of shape (n_query,) """ - - centroid_x, centroid_y = bb[:,:,[1,3]].mean(axis=2), bb[:,:,[0,2]].mean(axis=2) - - return + # compute avg of xmin,xmax and ymin,ymax + return bb[:,:,[1,3]].mean(axis=2).squeeze(), bb[:,:,[0,2]].mean(axis=2).squeeze() From 55f5f257861f75263c3fa74f20317553be36ab64 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Wed, 7 Aug 2024 11:35:04 -0700 Subject: [PATCH 10/63] setup batch training --- dreem/training/configs/override.yaml | 142 ++++++++++++++++++++ dreem/training/configs/test_batch_train.csv | 4 - dreem/training/demo_batch.csv | 3 + run_batch_job.py | 28 ++-- 4 files changed, 165 insertions(+), 12 deletions(-) create mode 100644 dreem/training/configs/override.yaml delete mode 100644 dreem/training/configs/test_batch_train.csv create mode 100644 
dreem/training/demo_batch.csv diff --git a/dreem/training/configs/override.yaml b/dreem/training/configs/override.yaml new file mode 100644 index 0000000..6d1ccb4 --- /dev/null +++ b/dreem/training/configs/override.yaml @@ -0,0 +1,142 @@ +model: + ckpt_path: null + encoder_cfg: + model_name: "resnet18" + in_chans: 3 + backend: "torchvision" + pretrained: false + d_model: 128 + nhead: 1 + num_encoder_layers: 1 + num_decoder_layers: 1 + dropout: 0.1 + activation: "relu" + return_intermediate_dec: True + norm: False + num_layers_attn_head: 1 + dropout_attn_head: 0.1 + embedding_meta: + pos: + mode: "fixed" + normalize: true + n_points: 1 + temp: + mode: "fixed" + return_embedding: False + decoder_self_attn: True + +loss: + epsilon: 0.0001 + asso_weight: 10.0 + +optimizer: + lr: 0.0001 + weight_decay: 0 + +scheduler: + factor: 0.5 + patience: 5 + threshold: 0.001 + +dataset: + train_dataset: + dir: + # note: if using batch runner, use format: /home/runner/talmodata-smb/... + # if using interactive, use format: "/home/jovyan/talmolab-smb/datasets/..." + path: "/home/runner/talmolab-smb/datasets/mot/animal/sleap/btc/large_run/als/train" + labels_suffix: ".slp" + vid_suffix: ".mp4" + clip_length: 32 + crop_size: 64 + padding: 0 + anchors: "centroid" + augmentations: + Rotate: + limit: 45 + p: 0.3 + GaussianBlur: + blur_limit: [3,7] + sigma_limit: 0 + p: 0.3 + RandomBrightnessContrast: + brightness_limit: 0.1 + contrast_limit: 0.3 + p: 0.3 + MotionBlur: + blur_limit: [3,7] + p: 0.3 + NodeDropout: + p: 0.3 + n: 5 + InstanceDropout: + p: 0.3 + n: 1 + n_chunks: 1000 + handle_missing: "centroid" + + val_dataset: + dir: + # note: if using batch runner, use format: /home/runner/talmodata-smb/... + path: "/home/runner/talmolab-smb/datasets/mot/animal/sleap/btc/large_run/als/val" + labels_suffix: ".slp" + vid_suffix: ".mp4" + crop_size: 64 + padding: 0 + anchors: "centroid" + n_chunks: 300 + handle_missing: "centroid" + + # to not run test, just use empty lists to override the paths in the base.yaml + test_dataset: + slp_files: [] + video_files: [] + +dataloader: + train_dataloader: + num_workers: 0 + val_dataloader: + num_workers: 0 + test_dataloader: + num_workers: 0 + +checkpointing: + save_top_k: -1 + +trainer: + max_epochs: 50 + min_epochs: -1 + # limit_train_batches: 0.001 + # limit_test_batches: 1.0 + # limit_val_batches: 0.004 + # profiler: "advanced" + + +logging: + project: "dreem" + group: "test-batch-job" # experiment/test + entity: "mushaikh" + name: "sample-efficiency" # name of the run (within a group) + notes: "test `dreem-train" + logger_type: "WandbLogger" + +tracker: + window_size: 8 + use_vis_feats: true + overlap_thresh: 0.1 + mult_thresh: true + decay_time: null + iou: null + max_center_dist: null + +runner: + persistent_tracking: + train: false + val: false + test: false + metrics: + train: [] + +# view_batch: +# enable: True +# num_frames: 5 +# no_train: True \ No newline at end of file diff --git a/dreem/training/configs/test_batch_train.csv b/dreem/training/configs/test_batch_train.csv deleted file mode 100644 index a0303c7..0000000 --- a/dreem/training/configs/test_batch_train.csv +++ /dev/null @@ -1,4 +0,0 @@ -model.d_model,model.dim_feedforward,model.feature_dim_attn_head,model.num_encoder_layers,model.num_decoder_layers -256,256,256,1,1 -512,512,512,2,2 -1024,1024,1024,4,4 diff --git a/dreem/training/demo_batch.csv b/dreem/training/demo_batch.csv new file mode 100644 index 0000000..67f08f5 --- /dev/null +++ b/dreem/training/demo_batch.csv @@ -0,0 +1,3 @@ 
+logging.name,dataset.train_dataset.n_chunks +n_chunks:1.1,1.1 +n_chunks:10,10 diff --git a/run_batch_job.py b/run_batch_job.py index 52345da..f5910c9 100644 --- a/run_batch_job.py +++ b/run_batch_job.py @@ -1,21 +1,31 @@ import os import subprocess as sp +import pandas as pd -gpu = "0.1" +# to use this, just run python run_batch_job.py in cmd + +gpu = "0.1" # amount of GPU to use per task job_name = "mustafa-test-batch-job" -base = "/home/runner/talmodata-smb/aadi/biogtr_expts/run/animal/eight_flies" #where to run the job from -dreem_repo = base.replace("biogtr_expts/run/animal/eight_flies", "dreem") #where the dreem repo is stored +base = "/home/runner/talmodata-smb/mustafa/dreem-experiments/run/mice-btc" #where to run the job from +dreem_repo = "/home/runner/talmodata-smb/mustafa/dreem-experiments/src/dreem" #where the dreem repo is stored config_dir=os.path.join(base, "configs") #where to find the configs config_name= "base" #base config name -params_cfg = os.path.join(config_dir, "sample_efficiency.yaml") #override config +params_cfg = os.path.join(config_dir, "override.yaml") #override config + # if running just 1 job, comment this line out and delete the ++batch_config command in the command below -task_csv = os.path.join(config_dir, "sample_efficiency.csv") # csv for tasks - each pod is a task +# each row in this file is a separate run with overrides +# naming method: have the first column as logging.name (wandb logging); this creates the directory ./models/logging.name +task_csv = os.path.join(config_dir, "demo_batch.csv") # csv for tasks - each pod is a task -pods = 1 # total number of tasks for job to run; should be number of rows in csv file -par = 1 # number of tasks that can be run in parallel - max. = # of pods +# number of VMs that are spun up (also the number of tasks that you are running) +# note that the server must be mounted locally as a network location to use this if the csv is on the cluster +pods = len(pd.read_csv(task_csv.replace("/home/runner/talmodata-smb", "/Volumes/talmodata"))) +par = min(int(1/float(gpu)), pods) #number of tasks that can be run in parallel (always smaller than pods) +# enter your WANDB API KEY in the cmd section +# mount both smb and vast volumes cmd = [ "runai", "submit", @@ -27,7 +37,9 @@ "-i", "asheridan/biogtr", "-v", - "/data/talmolab-smb:/home/runner/talmodata-smb", + "/data/talmolab-smb:/home/runner/talmodata-smb", + "-v", + "/talmo:/home/runner/vast" "-e", f"RUNNER_CMD=cp -r {dreem_repo} ~ && mamba env create -n dreem -f ~/dreem/environment.yml && export WANDB_API_KEY=6cc5012a6ecfb9cd970bd07686dbfcefd3190a04 && cd {base} && conda run -n dreem dreem-train --config-dir={config_dir} --config-name={config_name} ++params_config={params_cfg} ++batch_config={task_csv}", "--parallelism", From 9cec3a2e32f9cdf7497ee941b9ad916d89d3d077 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Wed, 7 Aug 2024 12:08:34 -0700 Subject: [PATCH 11/63] remove batch run commands from repo --- run_batch_job.py => dreem/training/configs/run_batch_job.py | 0 dreem/training/demo_batch.csv | 3 --- 2 files changed, 3 deletions(-) rename run_batch_job.py => dreem/training/configs/run_batch_job.py (100%) delete mode 100644 dreem/training/demo_batch.csv diff --git a/run_batch_job.py b/dreem/training/configs/run_batch_job.py similarity index 100% rename from run_batch_job.py rename to dreem/training/configs/run_batch_job.py diff --git a/dreem/training/demo_batch.csv b/dreem/training/demo_batch.csv deleted file mode 100644 index 67f08f5..0000000 --- 
a/dreem/training/demo_batch.csv +++ /dev/null @@ -1,3 +0,0 @@ -logging.name,dataset.train_dataset.n_chunks -n_chunks:1.1,1.1 -n_chunks:10,10 From f02a173934ad4f9e3989596a620aeb88a2d87335 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Wed, 7 Aug 2024 12:10:01 -0700 Subject: [PATCH 12/63] remove batch training script --- dreem/training/configs/base.yaml | 9 ++-- dreem/training/configs/run_batch_job.py | 56 ------------------------- 2 files changed, 5 insertions(+), 60 deletions(-) delete mode 100644 dreem/training/configs/run_batch_job.py diff --git a/dreem/training/configs/base.yaml b/dreem/training/configs/base.yaml index 6a507cc..bd79ece 100644 --- a/dreem/training/configs/base.yaml +++ b/dreem/training/configs/base.yaml @@ -111,7 +111,7 @@ logging: group: "example" save_dir: './logs' project: "GTR" - log_model: "all" + log_model: null early_stopping: monitor: "val_loss" @@ -128,11 +128,12 @@ checkpointing: save_last: true dirpath: null auto_insert_metric_name: true - every_n_epochs: 10 + every_n_epochs: 1 trainer: - accelerator: "mps" - devices: 1 + # only use this for local apple silicon runs; change for cluster runs + # accelerator: "mps" + # devices: 1 check_val_every_n_epoch: 1 enable_checkpointing: true gradient_clip_val: null diff --git a/dreem/training/configs/run_batch_job.py b/dreem/training/configs/run_batch_job.py deleted file mode 100644 index f5910c9..0000000 --- a/dreem/training/configs/run_batch_job.py +++ /dev/null @@ -1,56 +0,0 @@ -import os -import subprocess as sp -import pandas as pd - -# to use this, just run python run_batch_job.py in cmd - -gpu = "0.1" # amount of GPU to use per task -job_name = "mustafa-test-batch-job" - -base = "/home/runner/talmodata-smb/mustafa/dreem-experiments/run/mice-btc" #where to run the job from -dreem_repo = "/home/runner/talmodata-smb/mustafa/dreem-experiments/src/dreem" #where the dreem repo is stored - -config_dir=os.path.join(base, "configs") #where to find the configs -config_name= "base" #base config name -params_cfg = os.path.join(config_dir, "override.yaml") #override config - -# if running just 1 job, comment this line out and delete the ++batch_config command in the command below -# each row in this file is a separate run with overrides -# naming method: have the first column as logging.name (wandb logging); this creates the directory ./models/logging.name -task_csv = os.path.join(config_dir, "demo_batch.csv") # csv for tasks - each pod is a task - -# number of VMs that are spun up (also the number of tasks that you are running) -# note that the server must be mounted locally as a network location to use this if the csv is on the cluster -pods = len(pd.read_csv(task_csv.replace("/home/runner/talmodata-smb", "/Volumes/talmodata"))) -par = min(int(1/float(gpu)), pods) #number of tasks that can be run in parallel (always smaller than pods) - -# enter your WANDB API KEY in the cmd section -# mount both smb and vast volumes -cmd = [ - "runai", - "submit", - "--gpu", - gpu, - "--name", - job_name, - "--preemptible", - "-i", - "asheridan/biogtr", - "-v", - "/data/talmolab-smb:/home/runner/talmodata-smb", - "-v", - "/talmo:/home/runner/vast" - "-e", - f"RUNNER_CMD=cp -r {dreem_repo} ~ && mamba env create -n dreem -f ~/dreem/environment.yml && export WANDB_API_KEY=6cc5012a6ecfb9cd970bd07686dbfcefd3190a04 && cd {base} && conda run -n dreem dreem-train --config-dir={config_dir} --config-name={config_name} ++params_config={params_cfg} ++batch_config={task_csv}", - "--parallelism", - str(par), - "--completions", - str(pods), -] - 
-print(f"base directory: {base}") -print(f"running with {pods} pods") -print(f"max pods that can run concurrently: {par}") -print(f"runner command: {cmd}") - -sp.run(cmd) \ No newline at end of file From 287c475dd3579d2597031c9655aadacfae6a39e0 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Wed, 7 Aug 2024 12:11:04 -0700 Subject: [PATCH 13/63] Update base.yaml --- dreem/training/configs/base.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/dreem/training/configs/base.yaml b/dreem/training/configs/base.yaml index 6a507cc..a93c21d 100644 --- a/dreem/training/configs/base.yaml +++ b/dreem/training/configs/base.yaml @@ -73,6 +73,7 @@ dataset: crop_size: 128 chunk: true clip_length: 32 + mode: "train" val_dataset: slp_files: ["../../tests/data/sleap/two_flies.slp"] @@ -81,6 +82,7 @@ dataset: crop_size: 128 chunk: True clip_length: 32 + mode: "val" test_dataset: slp_files: ["../../tests/data/sleap/two_flies.slp"] @@ -89,6 +91,7 @@ dataset: crop_size: 128 chunk: True clip_length: 32 + mode: "test" dataloader: train_dataloader: @@ -131,6 +134,7 @@ checkpointing: every_n_epochs: 10 trainer: + # only use mps and devices params for apple silicon runs accelerator: "mps" devices: 1 check_val_every_n_epoch: 1 From 5d4bf5efa87db088900c5a9a70f07f04c7ed10b0 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Wed, 7 Aug 2024 12:11:44 -0700 Subject: [PATCH 14/63] Update run_trainer.py --- run_trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/run_trainer.py b/run_trainer.py index 5b129ab..c538cc3 100644 --- a/run_trainer.py +++ b/run_trainer.py @@ -4,7 +4,7 @@ os.chdir("./dreem/training") base_config = "./configs/base.yaml" -# params_config = "/path/to/override.yaml" +# params_config = "./configs/override.yaml" cfg = OmegaConf.load(base_config) # cfg["params_config"] = params_config From f23ef5c90e33fc0e84747336dea38e04013297c8 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Wed, 7 Aug 2024 12:11:56 -0700 Subject: [PATCH 15/63] Update .gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index fb6ee36..4e1fa69 100644 --- a/.gitignore +++ b/.gitignore @@ -142,3 +142,4 @@ dreem/training/models/* # docs site/ +*.xml From 785df8f251abbfb355efc66750a7b671a99172f1 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Wed, 7 Aug 2024 14:43:36 -0700 Subject: [PATCH 16/63] comments for tracker.py --- dreem/inference/tracker.py | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/dreem/inference/tracker.py b/dreem/inference/tracker.py index f7c29b4..8426e84 100644 --- a/dreem/inference/tracker.py +++ b/dreem/inference/tracker.py @@ -138,8 +138,10 @@ def track( # asso_preds, pred_boxes, pred_time, embeddings = self.model( # instances, reid_features # ) + # get reference and query instances from TrackQueue and calls _run_global_tracker() instances_pred = self.sliding_inference(model, frames) + # e.g. during train/val, don't track across batches so persistent_tracking is switched off if not self.persistent_tracking: logger.debug(f"Clearing Queue after tracking") self.track_queue.end_tracks() @@ -164,7 +166,9 @@ def sliding_inference( # H: height. # W: width. 
+ # frames is untracked clip for inference for batch_idx, frame_to_track in enumerate(frames): + # tracked_frames is a list of reference frames that have been tracked (associated) tracked_frames = self.track_queue.collate_tracks( device=frame_to_track.frame_id.device ) @@ -188,10 +192,11 @@ def sliding_inference( ) curr_track_id = 0 + # if track ids exist from another tracking program i.e. sleap, init with those for i, instance in enumerate(frames[batch_idx].instances): instance.pred_track_id = instance.gt_track_id curr_track_id = max(curr_track_id, instance.pred_track_id) - + # if no track ids, then assign new ones for i, instance in enumerate(frames[batch_idx].instances): if instance.pred_track_id == -1: curr_track += 1 @@ -201,6 +206,7 @@ def sliding_inference( if ( frame_to_track.has_instances() ): # Check if there are detections. If there are skip and increment gap count + # combine the tracked frames with the latest frame; inference pipeline uses latest frame as pred frames_to_track = tracked_frames + [ frame_to_track ] # better var name? @@ -217,7 +223,7 @@ def sliding_inference( self.track_queue.add_frame(frame_to_track) else: self.track_queue.increment_gaps([]) - + # update the frame object from the input inference untracked clip frames[batch_idx] = frame_to_track return frames @@ -252,7 +258,7 @@ def _run_global_tracker( # E.g.: instances_per_frame: [4, 5, 6, 7]; window of length 4 with 4 detected instances in the first frame of the window. _ = model.eval() - + # get the last frame in the clip to perform inference on query_frame = frames[query_ind] query_instances = query_frame.instances @@ -279,8 +285,10 @@ def _run_global_tracker( # (L=1, n_query, total_instances) with torch.no_grad(): + # GTR knows this is for inference since query_instances is not None asso_matrix = model(all_instances, query_instances) + # GTR output is n_query x n_instances - split this into per-frame to softmax each frame separately asso_output = asso_matrix[-1].matrix.split( instances_per_frame, dim=1 ) # (window_size, n_query, N_i) @@ -296,7 +304,7 @@ def _run_global_tracker( asso_output_df.index.name = "Instances" asso_output_df.columns.name = "Instances" - + # save the association matrix to the Frame object query_frame.add_traj_score("asso_output", asso_output_df) query_frame.asso_output = asso_matrix[-1] @@ -374,7 +382,7 @@ def _run_global_tracker( query_frame.add_traj_score("decay_time", decay_time_traj_score) ################################################################################ - + # reduce association matrix - aggregating reference instance association scores by tracks # (n_query x n_nonquery) x (n_nonquery x n_traj) --> n_query x n_traj traj_score = torch.mm(traj_score, id_inds.cpu()) # (n_query, n_traj) @@ -387,6 +395,7 @@ def _run_global_tracker( query_frame.add_traj_score("traj_score", traj_score_df) ################################################################################ + # IOU-based post-processing; add a weighted IOU across successive frames to association scores # with iou -> combining with location in tracker, they set to True # todo -> should also work without pos_embed @@ -421,6 +430,7 @@ def _run_global_tracker( query_frame.add_traj_score("weight_iou", iou_traj_score) ################################################################################ + # filters association matrix such that instances too far from each other get scores=0 # threshold for continuing a tracking or starting a new track -> they use 1.0 # todo -> should also work without pos_embed @@ -439,6 
+449,7 @@ def _run_global_tracker( query_frame.add_traj_score("max_center_dist", max_center_dist_traj_score) ################################################################################ + # softmax along tracks for each instance, for interpretability scaled_traj_score = torch.softmax(traj_score, dim=1) scaled_traj_score_df = pd.DataFrame( scaled_traj_score.numpy(), columns=unique_ids.cpu().numpy() @@ -449,8 +460,10 @@ def _run_global_tracker( query_frame.add_traj_score("scaled", scaled_traj_score_df) ################################################################################ + # hungarian matching match_i, match_j = linear_sum_assignment((-traj_score)) + track_ids = instance_ids.new_full((n_query,), -1) for i, j in zip(match_i, match_j): # The overlap threshold is multiplied by the number of times the unique track j is matched to an @@ -462,6 +475,7 @@ def _run_global_tracker( thresh = ( overlap_thresh * id_inds[:, j].sum() if mult_thresh else overlap_thresh ) + # if the association score for a query instance is lower than the threshold, create a new track for it if n_traj >= self.max_tracks or traj_score[i, j] > thresh: logger.debug( f"Assigning instance {i} to track {j} with id {unique_ids[j]}" From 3d3f2ca2538de270a4e2bb07beffaa8d66687c91 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Wed, 7 Aug 2024 18:48:03 -0700 Subject: [PATCH 17/63] embedding bug fixes for encoder - bounding box embedding only for method "average" - modify emb_funcs routing - temporarily remove support for adding embeddings into instance objects - need to make compatible with x,y,t embeddings - remove config yamls from updates - current versions serve as templates - runs through to end of encoder forward pass --- .gitignore | 4 ++++ dreem/models/embedding.py | 42 ++++++++++++++++++++++++++++++++++--- dreem/models/transformer.py | 37 +++++++++++++++++++------------- 3 files changed, 65 insertions(+), 18 deletions(-) diff --git a/.gitignore b/.gitignore index 4e1fa69..3af1399 100644 --- a/.gitignore +++ b/.gitignore @@ -143,3 +143,7 @@ dreem/training/models/* # docs site/ *.xml +dreem/training/configs/base.yaml +dreem/training/configs/override.yaml +dreem/training/configs/override.yaml +dreem/training/configs/base.yaml diff --git a/dreem/models/embedding.py b/dreem/models/embedding.py index 186ecab..994258f 100644 --- a/dreem/models/embedding.py +++ b/dreem/models/embedding.py @@ -146,6 +146,7 @@ def __init__( normalize: bool = False, scale: float | None = None, mlp_cfg: dict | None = None, + embedding_agg_method: str = "average" ): """Initialize embeddings. @@ -164,12 +165,14 @@ def __init__( mlp_cfg: A dictionary of mlp hyperparameters for projecting embedding to correct space. 
Example: {"hidden_dims": 256, "num_layers":3, "dropout": 0.3} """ + self._check_init_args(emb_type, mode) super().__init__() self.emb_type = emb_type self.mode = mode + self.embedding_agg_method = embedding_agg_method self.features = features self.emb_num = emb_num self.over_boxes = over_boxes @@ -216,12 +219,15 @@ def __init__( elif self.mode == "fixed": if self.emb_type == "pos": - self._emb_func = self._sine_box_embedding + if self.embedding_agg_method == "average": + self._emb_func = self._sine_box_embedding + else: + self._emb_func = self._sine_pos_embedding elif self.emb_type == "temp": self._emb_func = self._sine_temp_embedding elif self.mode == "rope": - # TODO: pos/temp uses the same processing but takes the input differently + # pos/temp embeddings processed the same way with different embedding array inputs self._emb_func = self._rope_embedding @@ -363,7 +369,37 @@ def _rope_embedding(self, x: torch.Tensor) -> torch.Tensor: return rot_mat - + + def _sine_pos_embedding(self, centroids: torch.Tensor) -> torch.Tensor: + """Compute fixed sine temporal embeddings per dimension (x,y) + + Args: + centroids: the input centroids for either the x,y dimension represented + by fraction of distance of original image that the instance centroid lies at; + of shape (N,) or (N,1) where N = # of query tokens (i.e. instances) + values between [0,1] + + Returns: + an n_instances x D embedding representing the temporal embedding. + """ + d = self.features + n = self.temperature + + positions = centroids.unsqueeze(1) + temp_lookup = torch.zeros(len(centroids), d, device=centroids.device) + + denominators = torch.pow( + n, 2 * torch.arange(0, d // 2, device=centroids.device) / d + ) # 10000^(2i/d_model), i is the index of embedding + temp_lookup[:, 0::2] = torch.sin( + positions / denominators + ) # sin(pos/10000^(2i/d_model)) + temp_lookup[:, 1::2] = torch.cos( + positions / denominators + ) # cos(pos/10000^(2i/d_model)) + + return temp_lookup # .view(len(times), self.features) + def _sine_box_embedding(self, boxes: torch.Tensor) -> torch.Tensor: """Compute sine positional embeddings for boxes using given parameters. 
diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index c40cc33..6ff0eee 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -85,13 +85,17 @@ def __init__( pos_emb_cfg = self.embedding_meta["pos"] if pos_emb_cfg: self.pos_emb = Embedding( - emb_type="pos", features=self.d_model, **pos_emb_cfg - ) + emb_type="pos", features=self.d_model, + embedding_agg_method=self.embedding_meta["embedding_agg_method"], + **pos_emb_cfg + ) # agg method must be the same for pos and temp embeddings if "temp" in self.embedding_meta: temp_emb_cfg = self.embedding_meta["temp"] if temp_emb_cfg: self.temp_emb = Embedding( - emb_type="temp", features=self.d_model, **temp_emb_cfg + emb_type="temp", features=self.d_model, + embedding_agg_method=self.embedding_meta["embedding_agg_method"], + **temp_emb_cfg ) # Transformer Encoder @@ -178,7 +182,8 @@ def forward( encoder_queries = ref_features - encoder_features, ref_pos_emb, ref_temp_emb = self.encoder( + # (encoder_features, ref_pos_emb, ref_temp_emb) \ + encoder_features = self.encoder( encoder_queries, embedding_map={"pos": self.pos_emb, "temp": self.temp_emb}, ref_boxes=ref_boxes, @@ -187,10 +192,11 @@ def forward( ) # (total_instances, batch_size, embed_dim) # TODO: check if instance.add_embedding() supports rotation matrices - if self.return_embedding: - for i, instance in enumerate(ref_instances): - instance.add_embedding("pos", ref_pos_emb[i]) - instance.add_embedding("temp", ref_temp_emb[i]) + # TODO: include support for adding x,y,t embeddings to the instance + # if self.return_embedding: + # for i, instance in enumerate(ref_instances): + # instance.add_embedding("pos", ref_pos_emb[i]) + # instance.add_embedding("temp", ref_temp_emb[i]) # -------------- Begin decoder pre-processing --------------- # @@ -225,10 +231,11 @@ def forward( else: query_instances = ref_instances - if self.return_embedding: - for i, instance in enumerate(query_instances): - instance.add_embedding("pos", query_pos_emb[i]) - instance.add_embedding("temp", query_temp_emb[i]) + # TODO: include support for x,y,t embeddings and uncomment this + # if self.return_embedding: + # for i, instance in enumerate(query_instances): + # instance.add_embedding("pos", query_pos_emb[i]) + # instance.add_embedding("temp", query_temp_emb[i]) decoder_features = self.decoder( query_features, @@ -481,7 +488,7 @@ def forward( queries = queries.permute(1,0,2) # queries is shape (batch_size, n_query, embed_dim) # calculate temporal embeddings and transform queries queries_t, ref_temp_emb = temp_emb(queries, ref_times) - # if avg. of temp and pos, need bounding boxes + # if avg. of temp and pos, need bounding boxes; bb only used for method "average" if embedding_agg_method == "average": _, ref_pos_emb = pos_emb(queries, ref_boxes) ref_emb = (ref_pos_emb + ref_temp_emb) / 2 @@ -495,7 +502,7 @@ def forward( # concatenate or stack the queries (avg. 
method done above since it applies differently) queries = self.collate_queries( - (queries, queries_t, queries_x, queries_y), + (queries_t, queries_x, queries_y), embedding_agg_method) # transpose for input to EncoderLayer to (n_queries, batch_size, embed_dim) queries = queries.permute(1, 0, 2) @@ -504,7 +511,7 @@ def forward( encoder_features = self.norm(queries) - return encoder_features, ref_pos_emb, ref_temp_emb + return encoder_features# , ref_pos_emb, ref_temp_emb def collate_queries(self, queries: Tuple[torch.Tensor], embedding_agg_method: str From 6af9e17d3c2386b07e32ed89e0c2dd2602f4affd Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Thu, 8 Aug 2024 17:58:50 -0700 Subject: [PATCH 18/63] implement rope for decoder - implement embeddings for decoder + refactor - add 1x1 conv to final attn head to deal with stacked embeddings (3x tokens) and create channels for each dim - bug fix in rope rotation matrix product with input data --- dreem/models/attention_head.py | 21 ++- dreem/models/embedding.py | 14 +- dreem/models/transformer.py | 257 ++++++++++++++++----------------- run_trainer.py | 3 +- tests/test_models.py | 3 +- 5 files changed, 155 insertions(+), 143 deletions(-) diff --git a/dreem/models/attention_head.py b/dreem/models/attention_head.py index 2b16055..3dde1f5 100644 --- a/dreem/models/attention_head.py +++ b/dreem/models/attention_head.py @@ -14,6 +14,7 @@ def __init__( feature_dim: int, num_layers: int, dropout: float, + embedding_agg_method: str ): """Initialize an instance of ATTWeightHead. @@ -21,11 +22,20 @@ def __init__( feature_dim: The dimensionality of input features. num_layers: The number of hidden layers in the MLP. dropout: Dropout probability. + embedding_agg_method: how the embeddings are aggregated; average/stack/concatenate """ super().__init__() - - self.q_proj = MLP(feature_dim, feature_dim, feature_dim, num_layers, dropout) - self.k_proj = MLP(feature_dim, feature_dim, feature_dim, num_layers, dropout) + self.embedding_agg_method = embedding_agg_method + + # if using stacked embeddings, use 1x1 conv with x,y,t embeddings as channels + if self.embedding_agg_method == "stack": + self.conv_1x1 = torch.nn.Conv2d(in_channels=3,out_channels=1, + kernel_size=1,stride=1,padding=0) + self.q_proj = self.conv_1x1 + self.k_proj = self.conv_1x1 + else: + self.q_proj = MLP(feature_dim, feature_dim, feature_dim, num_layers, dropout) + self.k_proj = MLP(feature_dim, feature_dim, feature_dim, num_layers, dropout) def forward( self, @@ -41,6 +51,11 @@ def forward( Returns: Output tensor of shape (batch_size, num_frame_instances, num_window_instances). 
""" + # if stacked embeddings, create channels for each x,y,t embedding dimension + if self.embedding_agg_method == "stack": + key = + query = + k = self.k_proj(key) q = self.q_proj(query) attn_weights = torch.bmm(q, k.transpose(1, 2)) diff --git a/dreem/models/embedding.py b/dreem/models/embedding.py index 994258f..c9f4cfc 100644 --- a/dreem/models/embedding.py +++ b/dreem/models/embedding.py @@ -274,16 +274,16 @@ def _apply_rope(self, x, emb): Returns: Tensor of input queries transformed by RoPE """ - x_out = torch.unsqueeze(x, 2) + xout = torch.unsqueeze(x, 2) # input needs shape [batch_size, n_query, num_heads, embed_dim // 2, 2] - x_out = x_out.float().reshape(*x_out.shape[:-1], -1, 2) + xout = xout.float().reshape(*xout.shape[:-1], -1, 2) # apply RoPE to each query token - x_out = torch.stack( + xout = torch.stack( [ - x[..., 0] * emb[..., 0] - - x[..., 1] * emb[..., 1], - x[..., 1] * emb[..., 0] - + x[..., 0] * emb[..., 1], + xout[..., 0] * emb[..., 0] + - xout[..., 1] * emb[..., 1], + xout[..., 1] * emb[..., 0] + + xout[..., 0] * emb[..., 1], ], -1, ) diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index 6ff0eee..33c904b 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -184,12 +184,11 @@ def forward( # (encoder_features, ref_pos_emb, ref_temp_emb) \ encoder_features = self.encoder( - encoder_queries, - embedding_map={"pos": self.pos_emb, "temp": self.temp_emb}, - ref_boxes=ref_boxes, - ref_times=ref_times, + encoder_queries, embedding_map={"pos": self.pos_emb, "temp": self.temp_emb}, + boxes=ref_boxes, times=ref_times, embedding_agg_method=self.embedding_meta["embedding_agg_method"] - ) # (total_instances, batch_size, embed_dim) + ) # (total_instances, batch_size, embed_dim) or + # (3*total_instances,batch_size,embed_dim) if using stacked embeddings # TODO: check if instance.add_embedding() supports rotation matrices # TODO: include support for adding x,y,t embeddings to the instance @@ -198,18 +197,11 @@ def forward( # instance.add_embedding("pos", ref_pos_emb[i]) # instance.add_embedding("temp", ref_temp_emb[i]) - # -------------- Begin decoder pre-processing --------------- # - - n_query = total_instances - - query_features = ref_features - query_pos_emb = ref_pos_emb - query_temp_emb = ref_temp_emb - query_emb = ref_emb + # -------------- Begin decoder --------------- # + # for inference, query_instances is not None if query_instances is not None: n_query = len(query_instances) - query_features = torch.cat( [instance.features for instance in query_instances], dim=0 ).unsqueeze(0) @@ -218,18 +210,15 @@ def forward( 1, 0, 2 ) # (n_query, batch_size, embed_dim) + # just get boxes, we already have query_times from above query_boxes = get_boxes(query_instances) query_boxes = torch.nan_to_num(query_boxes, -1.0) - query_temp_emb = self.temp_emb(query_times) - - query_pos_emb = self.pos_emb(query_boxes) - - query_emb = (query_pos_emb + query_temp_emb) / 2.0 - query_emb = query_emb.view(1, n_query, embed_dim) - query_emb = query_emb.permute(1, 0, 2) # (n_query, batch_size, embed_dim) - - else: + else: # for training, query_instances is None so just pass in the ref data + n_query = total_instances query_instances = ref_instances + query_features = ref_features + query_boxes = ref_boxes + query_times = ref_times # TODO: include support for x,y,t embeddings and uncomment this # if self.return_embedding: @@ -238,25 +227,28 @@ def forward( # instance.add_embedding("temp", query_temp_emb[i]) decoder_features = self.decoder( - query_features, 
- encoder_features, - ref_pos_emb=ref_emb, - query_pos_emb=query_emb, + query_features, encoder_features, + embedding_map={"pos": self.pos_emb, "temp": self.temp_emb}, + boxes=query_boxes, times=query_times, + embedding_agg_method=self.embedding_meta["embedding_agg_method"] ) # (L, n_query, batch_size, embed_dim) + decoder_features = decoder_features.transpose( 1, 2 - ) # # (L, batch_size, n_query, embed_dim) - encoder_features = encoder_features.permute(1, 0, 2).view( - batch_size, total_instances, embed_dim - ) # (batch_size, total_instances, embed_dim) + ) # # (L, batch_size, n_query, embed_dim) or ((L, batch_size, 3*n_query, embed_dim)) if using stacked embeddings + encoder_features = encoder_features.permute(1, 0, 2) + # (batch_size, total_instances, embed_dim) or (batch_size, 3*total_instances, embed_dim) asso_output = [] for frame_features in decoder_features: - # TODO: this needs to handle the 3x queries that come out of the encoder/decoder + # TODO: attn_head handles the 3x queries that can come out of the encoder/decoder if using stacked embeddings; + # does this by altering the MLP dimensions prior to attention outer product + # n_query should be the number of instances in the last frame if running inference, + # or number of ref instances for training. total_instances is always the number of reference instances asso_matrix = self.attn_head(frame_features, encoder_features).view( n_query, total_instances - ) + ) # call to view() just removes the batch dimension; output of attn_head is (1,n_query,total_instances) asso_matrix = AssociationMatrix(asso_matrix, ref_instances, query_instances) asso_output.append(asso_matrix) @@ -313,13 +305,6 @@ def forward( Returns: The output tensor of shape (n_query, batch_size, embed_dim). """ - # TODO: delete this section; keep to check that pos_emb None is taken care of automatically by config -# if pos_emb is None: -# pos_emb = torch.zeros_like(queries) - -# queries = queries + pos_emb - - # q = k = src attn_features = self.self_attn( query=queries, @@ -386,8 +371,6 @@ def forward( self, decoder_queries: torch.Tensor, encoder_features: torch.Tensor, - ref_pos_emb: torch.Tensor | None = None, - query_pos_emb: torch.Tensor | None = None, ) -> torch.Tensor: """Execute forward pass of decoder layer. @@ -395,19 +378,10 @@ def forward( decoder_queries: Target sequence for decoder to generate (n_query, batch_size, embed_dim). encoder_features: Output from encoder, that decoder uses to attend to relevant parts of input sequence (total_instances, batch_size, embed_dim) - ref_pos_emb: The input positional embedding tensor of shape (n_query, embed_dim). - query_pos_emb: The target positional embedding of shape (n_query, embed_dim) Returns: The output tensor of shape (n_query, batch_size, embed_dim). 
""" - if query_pos_emb is None: - query_pos_emb = torch.zeros_like(decoder_queries) - if ref_pos_emb is None: - ref_pos_emb = torch.zeros_like(encoder_features) - - decoder_queries = decoder_queries + query_pos_emb - encoder_features = encoder_features + ref_pos_emb if self.decoder_self_attn: self_attn_features = self.self_attn( @@ -416,6 +390,7 @@ def forward( decoder_queries = decoder_queries + self.dropout1(self_attn_features) decoder_queries = self.norm1(decoder_queries) + # cross attention x_attn_features = self.multihead_attn( query=decoder_queries, # (n_query, batch_size, embed_dim) key=encoder_features, # (total_instances, batch_size, embed_dim) @@ -465,7 +440,7 @@ def __init__( def forward( self, queries: torch.Tensor, embedding_map: Dict[str, Embedding], - ref_boxes: torch.Tensor, ref_times: torch.Tensor, + boxes: torch.Tensor, times: torch.Tensor, embedding_agg_method: str ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """Execute a forward pass of encoder layer. Computes and applies embeddings before input to EncoderLayer @@ -474,8 +449,8 @@ def forward( queries: The input tensor of shape (n_query, batch_size, embed_dim). embedding_map: Dict of Embedding objects defining the pos/temp embeddings to be applied to the input data before it passes to the EncoderLayer - ref_boxes: Bounding box based embedding ids of shape (n_query, batch_size, 4) - ref_times: + boxes: Bounding box based embedding ids of shape (n_query, batch_size, 4) + times: embedding_agg_method: Returns: @@ -483,29 +458,8 @@ def forward( """ for layer in self.layers: - pos_emb, temp_emb = embedding_map["pos"], embedding_map["temp"] - # queries is of shape (n_query, batch_size, embed_dim); transpose for embeddings - queries = queries.permute(1,0,2) # queries is shape (batch_size, n_query, embed_dim) - # calculate temporal embeddings and transform queries - queries_t, ref_temp_emb = temp_emb(queries, ref_times) - # if avg. of temp and pos, need bounding boxes; bb only used for method "average" - if embedding_agg_method == "average": - _, ref_pos_emb = pos_emb(queries, ref_boxes) - ref_emb = (ref_pos_emb + ref_temp_emb) / 2 - queries = queries + ref_emb - else: - # calculate embedding array for x,y from bb centroids; ref_x, ref_y of shape (n_query,) - ref_x, ref_y = self._spatial_emb_from_bb(ref_boxes) - # forward pass of Embedding object transforms input queries with embeddings - queries_x, ref_pos_emb_x = pos_emb(queries, ref_x) - queries_y, ref_pos_emb_y = pos_emb(queries, ref_y) - - # concatenate or stack the queries (avg. 
method done above since it applies differently) - queries = self.collate_queries( - (queries_t, queries_x, queries_y), - embedding_agg_method) - # transpose for input to EncoderLayer to (n_queries, batch_size, embed_dim) - queries = queries.permute(1, 0, 2) + # compute embeddings and apply to the input queries + queries = apply_embeddings(queries, embedding_map, boxes, times, embedding_agg_method) # pass through EncoderLayer queries = layer(queries) @@ -514,51 +468,6 @@ def forward( return encoder_features# , ref_pos_emb, ref_temp_emb - def collate_queries(self, queries: Tuple[torch.Tensor], embedding_agg_method: str - ) -> torch.Tensor: - """ - - Args: - _queries: 4-tuple of queries (already transformed by embeddings) for _, x, y, t - each of shape (batch_size, n_query, embed_dim) - embedding_agg_method: String representing the aggregation method for embeddings - - Returns: Tensor of aggregated queries of shape; can be concatenated (increased length of tokens), - stacked (increased number of tokens), or averaged (original token number and length) - """ - - queries_t, queries_x, queries_y = queries - - mlp = MLP(input_dim=queries_t.shape[-1]*3, hidden_dim=queries_t.shape[-1]*2, - output_dim=queries_t.shape[-1], num_layers=1, dropout=0.) - - if embedding_agg_method == "stack": - # stacked is of shape (batch_size, 3*n_query, embed_dim) - collated_queries = torch.cat((queries_t, queries_x, queries_y), dim=1) - elif embedding_agg_method == "concatenate": - # concatenated is of shape (batch_size, n_query, 3*embed_dim) - collated_queries = torch.cat((queries_t, queries_x, queries_y), dim=2) - # pass through MLP to project into space of (batch_size, n_query, embed_dim) - collated_queries = mlp(collated_queries) - - return collated_queries - - - def _spatial_emb_from_bb(self, bb: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Computes embedding arrays for x,y spatial dimensions using centroids from bounding boxes - Args: - bb: Bounding boxes of shape (n_query, batch_size, 4) from which to compute x,y centroids; - each bounding box is [ymin, xmin, ymax, xmax] - - Returns: - A tuple of tensors containing the emebdding array for x,y dimensions, each of shape (n_query,) - """ - # compute avg of xmin,xmax and ymin,ymax - return bb[:,:,[1,3]].mean(axis=2).squeeze(), bb[:,:,[0,2]].mean(axis=2).squeeze() - - - class TransformerDecoder(nn.Module): """Transformer Decoder Block composed of Transformer Decoder Layers.""" @@ -587,8 +496,9 @@ def forward( self, decoder_queries: torch.Tensor, encoder_features: torch.Tensor, - ref_pos_emb: torch.Tensor | None = None, - query_pos_emb: torch.Tensor | None = None, + embedding_map: Dict[str, Embedding], + boxes: torch.Tensor, times: torch.Tensor, + embedding_agg_method: str ) -> torch.Tensor: """Execute a forward pass of the decoder block. @@ -596,22 +506,28 @@ def forward( decoder_queries: Query sequence for decoder to generate (n_query, batch_size, embed_dim). encoder_features: Output from encoder, that decoder uses to attend to relevant parts of input sequence (total_instances, batch_size, embed_dim) - ref_pos_emb: The input positional embedding tensor of shape (total_instances, batch_size, embed_dim). - query_pos_emb: The query positional embedding of shape (n_query, batch_size, embed_dim) + Returns: The output tensor of shape (L, n_query, batch_size, embed_dim). 
""" decoder_features = decoder_queries - intermediate = [] + # since the encoder output doesn't change for any number of decoder layer inputs, + # we can process its embedding outside the loop + if embedding_agg_method == "average": + encoder_features = apply_embeddings(encoder_features, embedding_map, + boxes, times, embedding_agg_method) + # TODO: ^ should embeddings really be applied to encoder output again before cross attention? + # switched off for stack and concatenate methods as those further split the tokens. Kept for "average" + # for backward compatibility + for layer in self.layers: + decoder_features = apply_embeddings(decoder_features, embedding_map, + boxes, times, embedding_agg_method) decoder_features = layer( - decoder_features, - encoder_features, - ref_pos_emb=ref_pos_emb, - query_pos_emb=query_pos_emb, + decoder_features, encoder_features ) if self.return_intermediate: intermediate.append(self.norm(decoder_features)) @@ -626,6 +542,40 @@ def forward( return decoder_features.unsqueeze(0) +def apply_embeddings(queries: torch.Tensor, embedding_map: Dict[str, Embedding], + boxes: torch.Tensor, times: torch.Tensor, + embedding_agg_method: str): + """ + Enter docstring here + """ + + pos_emb, temp_emb = embedding_map["pos"], embedding_map["temp"] + # queries is of shape (n_query, batch_size, embed_dim); transpose for embeddings + queries = queries.permute(1,0,2) # queries is shape (batch_size, n_query, embed_dim) + # calculate temporal embeddings and transform queries + queries_t, ref_temp_emb = temp_emb(queries, times) + # if avg. of temp and pos, need bounding boxes; bb only used for method "average" + if embedding_agg_method == "average": + _, ref_pos_emb = pos_emb(queries, boxes) + ref_emb = (ref_pos_emb + ref_temp_emb) / 2 + queries = queries + ref_emb + else: + # calculate embedding array for x,y from bb centroids; ref_x, ref_y of shape (n_query,) + ref_x, ref_y = spatial_emb_from_bb(boxes) + # forward pass of Embedding object transforms input queries with embeddings + queries_x, ref_pos_emb_x = pos_emb(queries, ref_x) + queries_y, ref_pos_emb_y = pos_emb(queries, ref_y) + + # concatenate or stack the queries (avg. method done above since it applies differently) + queries = collate_queries( + (queries_t, queries_x, queries_y), + embedding_agg_method) + # transpose for input to EncoderLayer to (n_queries, batch_size, embed_dim) + queries = queries.permute(1, 0, 2) + + return queries + + def _get_clones(module: nn.Module, N: int) -> nn.ModuleList: """Generate repeated clones of same layer type. @@ -656,3 +606,48 @@ def _get_activation_fn(activation: str) -> callable: return F.glu raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.") + +def collate_queries(queries: Tuple[torch.Tensor], embedding_agg_method: str + ) -> torch.Tensor: + """ + + Args: + _queries: 4-tuple of queries (already transformed by embeddings) for _, x, y, t + each of shape (batch_size, n_query, embed_dim) + embedding_agg_method: String representing the aggregation method for embeddings + + Returns: Tensor of aggregated queries of shape; can be concatenated (increased length of tokens), + stacked (increased number of tokens), or averaged (original token number and length) + """ + + queries_t, queries_x, queries_y = queries + + mlp = MLP(input_dim=queries_t.shape[-1]*3, hidden_dim=queries_t.shape[-1]*2, + output_dim=queries_t.shape[-1], num_layers=1, dropout=0.) 
+ + if embedding_agg_method == "stack": + # TODO: try changing order of stacking so that order is by query token (x1,y1,t1),(x2,y2,t2) rather than + # (t1,t2,t3...),(x1,x2,x3...),(y1,y2,y3...) + # stacked is of shape (batch_size, 3*n_query, embed_dim) + collated_queries = torch.cat((queries_t, queries_x, queries_y), dim=1) + elif embedding_agg_method == "concatenate": + # concatenated is of shape (batch_size, n_query, 3*embed_dim) + collated_queries = torch.cat((queries_t, queries_x, queries_y), dim=2) + # pass through MLP to project into space of (batch_size, n_query, embed_dim) + collated_queries = mlp(collated_queries) + + return collated_queries + + +def spatial_emb_from_bb(bb: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Computes embedding arrays for x,y spatial dimensions using centroids from bounding boxes + Args: + bb: Bounding boxes of shape (n_query, batch_size, 4) from which to compute x,y centroids; + each bounding box is [ymin, xmin, ymax, xmax] + + Returns: + A tuple of tensors containing the emebdding array for x,y dimensions, each of shape (n_query,) + """ + # compute avg of xmin,xmax and ymin,ymax + return bb[:,:,[1,3]].mean(axis=2).squeeze(), bb[:,:,[0,2]].mean(axis=2).squeeze() \ No newline at end of file diff --git a/run_trainer.py b/run_trainer.py index c538cc3..684a727 100644 --- a/run_trainer.py +++ b/run_trainer.py @@ -2,7 +2,8 @@ from omegaconf import OmegaConf import os -os.chdir("./dreem/training") +os.chdir("/Users/main/Documents/GitHub/dreem/dreem/training") + base_config = "./configs/base.yaml" # params_config = "./configs/override.yaml" diff --git a/tests/test_models.py b/tests/test_models.py index 3eaf9c2..187ea21 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -166,7 +166,8 @@ def test_embedding_validity(): _ = Embedding(emb_type="pos", mode="learned", features=128) - +# TODO: create test_embedding_rope and test the xshaped vs xout in the apply_rope function; +# how did the shapes match if i was using x vs xshaped? 
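The `collate_queries` helper introduced in this patch combines the temporally and spatially embedded copies of the queries either by stacking along the token axis ("stack") or by concatenating along the feature axis and projecting back down ("concatenate"). A small shape-only sketch of the two cases follows, using a plain `torch.nn.Linear` as a stand-in for the repo's MLP projection.

```python
import torch

batch_size, n_query, embed_dim = 1, 64, 1024
queries_t = torch.rand(batch_size, n_query, embed_dim)
queries_x = torch.rand(batch_size, n_query, embed_dim)
queries_y = torch.rand(batch_size, n_query, embed_dim)

# "stack": three times as many tokens, feature size unchanged
stacked = torch.cat((queries_t, queries_x, queries_y), dim=1)
print(stacked.shape)  # torch.Size([1, 192, 1024])

# "concatenate": token count unchanged, three times the feature size,
# then a projection back to embed_dim (Linear used here instead of the MLP)
concatenated = torch.cat((queries_t, queries_x, queries_y), dim=2)
proj = torch.nn.Linear(3 * embed_dim, embed_dim)
print(proj(concatenated).shape)  # torch.Size([1, 64, 1024])
```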
def test_embedding_basic(): """Test embedding logic.""" From 69280785f6703183760a372e723e802ffaeaa6bd Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Thu, 8 Aug 2024 20:01:04 -0700 Subject: [PATCH 19/63] final attn head supports stack embeddings - 1x1 conv for stack embedding - stack into 3 channels for x,y,t --- dreem/models/attention_head.py | 34 +++++++++++++++++++++++++--------- dreem/models/transformer.py | 4 ++-- run_trainer.py | 4 +++- 3 files changed, 30 insertions(+), 12 deletions(-) diff --git a/dreem/models/attention_head.py b/dreem/models/attention_head.py index 3dde1f5..559292c 100644 --- a/dreem/models/attention_head.py +++ b/dreem/models/attention_head.py @@ -27,12 +27,15 @@ def __init__( super().__init__() self.embedding_agg_method = embedding_agg_method - # if using stacked embeddings, use 1x1 conv with x,y,t embeddings as channels + # if using stacked embeddings, use 1x1 conv with x,y,t embeddings as channels + # ensures output represents ref instances by query instances if self.embedding_agg_method == "stack": - self.conv_1x1 = torch.nn.Conv2d(in_channels=3,out_channels=1, - kernel_size=1,stride=1,padding=0) - self.q_proj = self.conv_1x1 - self.k_proj = self.conv_1x1 + self.q_proj = torch.nn.Conv1d(in_channels=3, out_channels=1, + kernel_size=1, stride=1, padding=0 + ) + self.k_proj = torch.nn.Conv1d(in_channels=3, out_channels=1, + kernel_size=1, stride=1, padding=0 + ) else: self.q_proj = MLP(feature_dim, feature_dim, feature_dim, num_layers, dropout) self.k_proj = MLP(feature_dim, feature_dim, feature_dim, num_layers, dropout) @@ -51,13 +54,26 @@ def forward( Returns: Output tensor of shape (batch_size, num_frame_instances, num_window_instances). """ + batch_size, num_query_instances, feature_dim = query.size() + num_window_instances = key.shape[1] + # if stacked embeddings, create channels for each x,y,t embedding dimension + # maps shape (1,192,1024) -> (1,64,3,1024) if self.embedding_agg_method == "stack": - key = - query = + key = key.view( + batch_size, 3, num_window_instances//3, feature_dim + ).permute(0, 2, 1, 3).squeeze(0) + query = query.view( + batch_size, 3, num_query_instances//3, feature_dim + ).permute(0, 2, 1, 3).squeeze(0) + # key, query of shape (batch_size, num_instances, 3, feature_dim) + k = self.k_proj(key).transpose(1, 0) + q = self.q_proj(query).transpose(1, 0) + # k,q of shape (batch_size, num_instances, feature_dim) + else: + k = self.k_proj(key) + q = self.q_proj(query) - k = self.k_proj(key) - q = self.q_proj(query) attn_weights = torch.bmm(q, k.transpose(1, 2)) return attn_weights # (B, N_t, N) diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index 33c904b..adfb371 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -131,6 +131,7 @@ def __init__( feature_dim=feature_dim_attn_head, num_layers=num_layers_attn_head, dropout=dropout_attn_head, + embedding_agg_method=self.embedding_meta["embedding_agg_method"] ) self._reset_parameters() @@ -242,8 +243,7 @@ def forward( asso_output = [] for frame_features in decoder_features: - # TODO: attn_head handles the 3x queries that can come out of the encoder/decoder if using stacked embeddings; - # does this by altering the MLP dimensions prior to attention outer product + # attn_head handles the 3x queries that can come out of the encoder/decoder if using stacked embeddings # n_query should be the number of instances in the last frame if running inference, # or number of ref instances for training. 
total_instances is always the number of reference instances asso_matrix = self.attn_head(frame_features, encoder_features).view( diff --git a/run_trainer.py b/run_trainer.py index 684a727..fcf38ff 100644 --- a/run_trainer.py +++ b/run_trainer.py @@ -2,7 +2,9 @@ from omegaconf import OmegaConf import os -os.chdir("/Users/main/Documents/GitHub/dreem/dreem/training") +# /Users/mustafashaikh/dreem/dreem/training +# /Users/main/Documents/GitHub/dreem/dreem/training +os.chdir("/Users/mustafashaikh/dreem/dreem/training") base_config = "./configs/base.yaml" # params_config = "./configs/override.yaml" From c4b11241a771af13c826cfb06e814ea6e160802a Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Fri, 9 Aug 2024 18:29:13 -0700 Subject: [PATCH 20/63] Update tests, add new unit tests for rope - add unit tests for rope - Update existing tests to use new args/return params related to tfmr - Modify test to remove return_embedding=True support - need to address this --- dreem/models/attention_head.py | 2 +- dreem/models/embedding.py | 70 +++++------ dreem/models/transformer.py | 44 +++---- tests/test_models.py | 204 ++++++++++++++++++++++----------- 4 files changed, 201 insertions(+), 119 deletions(-) diff --git a/dreem/models/attention_head.py b/dreem/models/attention_head.py index 559292c..537cee1 100644 --- a/dreem/models/attention_head.py +++ b/dreem/models/attention_head.py @@ -14,7 +14,7 @@ def __init__( feature_dim: int, num_layers: int, dropout: float, - embedding_agg_method: str + embedding_agg_method: str = "average" ): """Initialize an instance of ATTWeightHead. diff --git a/dreem/models/embedding.py b/dreem/models/embedding.py index c9f4cfc..b94a159 100644 --- a/dreem/models/embedding.py +++ b/dreem/models/embedding.py @@ -72,7 +72,7 @@ def build_rope_cache(self, max_seq_len: int = 4096) -> None: cache = torch.stack([torch.cos(idx_theta), torch.sin(idx_theta)], dim=-1) self.register_buffer("cache", cache, persistent=False) - def forward(self, x: Tensor, *, input_pos: Optional[Tensor] = None) -> Tensor: + def forward(self, x: Tensor, input_pos: Optional[Tensor] = None) -> Tensor: """ Args: x (Tensor): input tensor with shape @@ -131,6 +131,7 @@ class Embedding(torch.nn.Module): EMB_MODES = { "fixed": {"temperature", "scale", "normalize"}, "learned": {"emb_num"}, + "rope": {"embedding_agg_method"}, "off": {}, } # dict of valid args:keyword params @@ -166,8 +167,6 @@ def __init__( Example: {"hidden_dims": 256, "num_layers":3, "dropout": 0.3} """ - self._check_init_args(emb_type, mode) - super().__init__() self.emb_type = emb_type @@ -181,6 +180,8 @@ def __init__( self.scale = scale self.n_points = n_points + self._check_init_args(emb_type, mode) + if self.normalize and self.scale is None: self.scale = 2 * math.pi @@ -201,8 +202,8 @@ def __init__( else: self.mlp = torch.nn.Identity() - self._emb_func = lambda tensor: torch.zeros( - (tensor.shape[0], self.features), dtype=tensor.dtype, device=tensor.device + self._emb_func = lambda seq, x: torch.zeros( + (seq.shape[0], self.features), dtype=seq.dtype, device=seq.device ) # turn off embedding by returning zeros self.lookup = None @@ -254,10 +255,15 @@ def _check_init_args(self, emb_type: str, mode: str): f"Embedding `mode` must be one of {self.EMB_MODES} not {mode}" ) + if mode.lower() == "rope" and self.embedding_agg_method == "average": + raise ValueError( + f"Cannot use aggregation method 'average' for rope embedding; must use 'stack' or 'concatenate'" + ) + def _transform(self, x, emb): - if emb==self._rope_embedding: + if self._emb_func == 
self._rope_embedding: return self._apply_rope(x, emb) else: return self._apply_additive_embeddings(x, emb) @@ -274,6 +280,7 @@ def _apply_rope(self, x, emb): Returns: Tensor of input queries transformed by RoPE """ + xout = torch.unsqueeze(x, 2) # input needs shape [batch_size, n_query, num_heads, embed_dim // 2, 2] xout = xout.float().reshape(*xout.shape[:-1], -1, 2) @@ -288,9 +295,9 @@ def _apply_rope(self, x, emb): -1, ) # output has shape [batch_size, n_query, num_heads, embed_dim] - x_out = x_out.flatten(3) - - return x_out + xout = xout.flatten(3).squeeze(2) + + return xout def _apply_additive_embeddings(self, x, emb): @@ -320,22 +327,15 @@ def forward(self, x, seq_positions: torch.Tensor) -> torch.Tensor: - Tensor: input queries transformed by embedding - An `N` x `self.features` tensor representing the corresponding spatial or temporal embedding. """ + # create embedding array; either rotation matrix of shape # (batch_size, n_query, num_heads, embed_dim // 2, 2), # or (N, embed_dim) array - emb = self._emb_func(seq_positions) - + emb = self._emb_func(seq_positions, x.size()) # transform the input data with the embedding - x = self._transform(x, emb) + xout = self._transform(x, emb) - # if emb.shape[-1] != self.features: - # raise RuntimeError( - # ( - # f"Output embedding dimension is {emb.shape[-1]} but requested {self.features} dimensions! \n" - # f"hint: Try turning the MLP on by passing `mlp_cfg` to the constructor to project to the correct embedding dimensions." - # ) - # ) - return x, emb + return xout, emb def _torch_int_div( self, tensor1: torch.Tensor, tensor2: torch.Tensor @@ -352,25 +352,29 @@ def _torch_int_div( return torch.div(tensor1, tensor2, rounding_mode="floor") - def _rope_embedding(self, x: torch.Tensor) -> torch.Tensor: + def _rope_embedding(self, seq_positions: torch.Tensor, input_shape: torch.Size) -> torch.Tensor: """ Computes the rotation matrix to apply RoPE to input queries Args: - x: Input queries of shape (num_batches, n_queries, embed_dim) + seq_positions: Pos array of shape (embed_dim,) used to compute rotational embedding + input_shape: Shape of the input queries; needed for rope Returns: Tensor: (N, embed_dim) rotation matrix """ - # input must be of shape (num_batches, num_instances, num_attn_heads, embed_dim) + # create dummy input of shape (num_batches, num_instances, num_attn_heads, embed_dim) # use num_heads=1 for compatibility with torch ROPE - x_rope = torch.unsqueeze(x, 2) + x_rope = torch.rand(input_shape).unsqueeze(2) # RoPE module takes in dimension, num_queries as input to calculate rotation matrix - rope = RotaryPositionalEmbeddings(self.features, x.shape[1]) - rot_mat = rope(x_rope) - + rope = RotaryPositionalEmbeddings(self.features, input_shape[1]) + # convert seq_positions (indicates relative position in frame) to int + # to index into the theta array for rope + seq_pos = 100*seq_positions.unsqueeze(0) + rot_mat = rope(x_rope, seq_pos.int()) + return rot_mat - def _sine_pos_embedding(self, centroids: torch.Tensor) -> torch.Tensor: + def _sine_pos_embedding(self, centroids: torch.Tensor, *args) -> torch.Tensor: """Compute fixed sine temporal embeddings per dimension (x,y) Args: @@ -400,7 +404,7 @@ def _sine_pos_embedding(self, centroids: torch.Tensor) -> torch.Tensor: return temp_lookup # .view(len(times), self.features) - def _sine_box_embedding(self, boxes: torch.Tensor) -> torch.Tensor: + def _sine_box_embedding(self, boxes: torch.Tensor, *args) -> torch.Tensor: """Compute sine positional embeddings for boxes using given 
parameters. Args: @@ -445,7 +449,7 @@ def _sine_box_embedding(self, boxes: torch.Tensor) -> torch.Tensor: return pos_emb - def _sine_temp_embedding(self, times: torch.Tensor) -> torch.Tensor: + def _sine_temp_embedding(self, times: torch.Tensor, *args) -> torch.Tensor: """Compute fixed sine temporal embeddings. Args: @@ -477,7 +481,7 @@ def _sine_temp_embedding(self, times: torch.Tensor) -> torch.Tensor: temp_emb = temp_lookup[times.int()] return temp_emb # .view(len(times), self.features) - def _learned_pos_embedding(self, boxes: torch.Tensor) -> torch.Tensor: + def _learned_pos_embedding(self, boxes: torch.Tensor, *args) -> torch.Tensor: """Compute learned positional embeddings for boxes using given parameters. Args: @@ -537,7 +541,7 @@ def _learned_pos_embedding(self, boxes: torch.Tensor) -> torch.Tensor: return pos_emb.view(N, self.features) - def _learned_temp_embedding(self, times: torch.Tensor) -> torch.Tensor: + def _learned_temp_embedding(self, times: torch.Tensor, *args) -> torch.Tensor: """Compute learned temporal embeddings for times using given parameters. Args: @@ -565,7 +569,7 @@ def _learned_temp_embedding(self, times: torch.Tensor) -> torch.Tensor: return temp_emb.view(N, self.features) - def _compute_weights(self, data: torch.Tensor) -> tuple[torch.Tensor, ...]: + def _compute_weights(self, data: torch.Tensor, *args) -> tuple[torch.Tensor, ...]: """Compute left and right learned embedding weights. Args: diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index adfb371..fb8f424 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -79,6 +79,7 @@ def __init__( self.pos_emb = Embedding(emb_type="off", mode="off", features=self.d_model) self.temp_emb = Embedding(emb_type="off", mode="off", features=self.d_model) + self.embedding_agg_method = "average" # default arg in case it's not passed into configs if self.embedding_meta: if "pos" in self.embedding_meta: @@ -86,7 +87,7 @@ def __init__( if pos_emb_cfg: self.pos_emb = Embedding( emb_type="pos", features=self.d_model, - embedding_agg_method=self.embedding_meta["embedding_agg_method"], + embedding_agg_method=self.embedding_agg_method, **pos_emb_cfg ) # agg method must be the same for pos and temp embeddings if "temp" in self.embedding_meta: @@ -94,9 +95,11 @@ def __init__( if temp_emb_cfg: self.temp_emb = Embedding( emb_type="temp", features=self.d_model, - embedding_agg_method=self.embedding_meta["embedding_agg_method"], + embedding_agg_method=self.embedding_agg_method, **temp_emb_cfg ) + self.embedding_agg_method = embedding_meta["embedding_agg_method"] \ + if "embedding_agg_method" in embedding_meta else "average" # Transformer Encoder encoder_layer = TransformerEncoderLayer( @@ -131,7 +134,7 @@ def __init__( feature_dim=feature_dim_attn_head, num_layers=num_layers_attn_head, dropout=dropout_attn_head, - embedding_agg_method=self.embedding_meta["embedding_agg_method"] + embedding_agg_method=self.embedding_agg_method ) self._reset_parameters() @@ -187,11 +190,10 @@ def forward( encoder_features = self.encoder( encoder_queries, embedding_map={"pos": self.pos_emb, "temp": self.temp_emb}, boxes=ref_boxes, times=ref_times, - embedding_agg_method=self.embedding_meta["embedding_agg_method"] + embedding_agg_method=self.embedding_agg_method ) # (total_instances, batch_size, embed_dim) or # (3*total_instances,batch_size,embed_dim) if using stacked embeddings - # TODO: check if instance.add_embedding() supports rotation matrices # TODO: include support for adding x,y,t embeddings to the 
instance # if self.return_embedding: # for i, instance in enumerate(ref_instances): @@ -221,19 +223,19 @@ def forward( query_boxes = ref_boxes query_times = ref_times - # TODO: include support for x,y,t embeddings and uncomment this - # if self.return_embedding: - # for i, instance in enumerate(query_instances): - # instance.add_embedding("pos", query_pos_emb[i]) - # instance.add_embedding("temp", query_temp_emb[i]) decoder_features = self.decoder( query_features, encoder_features, embedding_map={"pos": self.pos_emb, "temp": self.temp_emb}, boxes=query_boxes, times=query_times, - embedding_agg_method=self.embedding_meta["embedding_agg_method"] + embedding_agg_method=self.embedding_agg_method ) # (L, n_query, batch_size, embed_dim) + # TODO: include support for x,y,t embeddings and uncomment this + # if self.return_embedding: + # for i, instance in enumerate(query_instances): + # instance.add_embedding("pos", query_pos_emb[i]) + # instance.add_embedding("temp", query_temp_emb[i]) decoder_features = decoder_features.transpose( 1, 2 @@ -558,7 +560,8 @@ def apply_embeddings(queries: torch.Tensor, embedding_map: Dict[str, Embedding], if embedding_agg_method == "average": _, ref_pos_emb = pos_emb(queries, boxes) ref_emb = (ref_pos_emb + ref_temp_emb) / 2 - queries = queries + ref_emb + queries_avg = queries + ref_emb + queries_t = queries_x = queries_y = None else: # calculate embedding array for x,y from bb centroids; ref_x, ref_y of shape (n_query,) ref_x, ref_y = spatial_emb_from_bb(boxes) @@ -568,7 +571,7 @@ def apply_embeddings(queries: torch.Tensor, embedding_map: Dict[str, Embedding], # concatenate or stack the queries (avg. method done above since it applies differently) queries = collate_queries( - (queries_t, queries_x, queries_y), + (queries_avg, queries_t, queries_x, queries_y), embedding_agg_method) # transpose for input to EncoderLayer to (n_queries, batch_size, embed_dim) queries = queries.permute(1, 0, 2) @@ -612,7 +615,7 @@ def collate_queries(queries: Tuple[torch.Tensor], embedding_agg_method: str """ Args: - _queries: 4-tuple of queries (already transformed by embeddings) for _, x, y, t + _queries: 4-tuple of queries (already transformed by embeddings) for _, x, y, t each of shape (batch_size, n_query, embed_dim) embedding_agg_method: String representing the aggregation method for embeddings @@ -620,17 +623,18 @@ def collate_queries(queries: Tuple[torch.Tensor], embedding_agg_method: str stacked (increased number of tokens), or averaged (original token number and length) """ - queries_t, queries_x, queries_y = queries - - mlp = MLP(input_dim=queries_t.shape[-1]*3, hidden_dim=queries_t.shape[-1]*2, - output_dim=queries_t.shape[-1], num_layers=1, dropout=0.) + queries_avg, queries_t, queries_x, queries_y = queries - if embedding_agg_method == "stack": + if embedding_agg_method == "average": + collated_queries = queries_avg + elif embedding_agg_method == "stack": # TODO: try changing order of stacking so that order is by query token (x1,y1,t1),(x2,y2,t2) rather than # (t1,t2,t3...),(x1,x2,x3...),(y1,y2,y3...) # stacked is of shape (batch_size, 3*n_query, embed_dim) collated_queries = torch.cat((queries_t, queries_x, queries_y), dim=1) elif embedding_agg_method == "concatenate": + mlp = MLP(input_dim=queries_t.shape[-1] * 3, hidden_dim=queries_t.shape[-1] * 2, + output_dim=queries_t.shape[-1], num_layers=1, dropout=0.) 
# concatenated is of shape (batch_size, n_query, 3*embed_dim) collated_queries = torch.cat((queries_t, queries_x, queries_y), dim=2) # pass through MLP to project into space of (batch_size, n_query, embed_dim) @@ -643,7 +647,7 @@ def spatial_emb_from_bb(bb: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """ Computes embedding arrays for x,y spatial dimensions using centroids from bounding boxes Args: - bb: Bounding boxes of shape (n_query, batch_size, 4) from which to compute x,y centroids; + bb: Bounding boxes of shape (n_query, n_anchors, 4) from which to compute x,y centroids; each bounding box is [ymin, xmin, ymax, xmax] Returns: diff --git a/tests/test_models.py b/tests/test_models.py index 187ea21..51d3a72 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -14,6 +14,8 @@ from dreem.models.transformer import ( TransformerEncoderLayer, TransformerDecoderLayer, + spatial_emb_from_bb, + apply_embeddings ) @@ -33,7 +35,7 @@ def test_att_weight_head(): """Test self-attention head logic.""" b, n, f = 1, 10, 1024 # batch size, num instances, features - att_weight_head = ATTWeightHead(feature_dim=f, num_layers=2, dropout=0.1) + att_weight_head = ATTWeightHead(feature_dim=f, num_layers=2, dropout=0.1, embedding_agg_method="average") q = k = torch.rand(size=(b, n, f)) @@ -161,10 +163,77 @@ def test_embedding_validity(): with pytest.raises(Exception): _ = Embedding(emb_type="temporal", mode="learn", features=128) + with pytest.raises(Exception): + # embedding_agg_method cannot be average for rope + _ = Embedding(emb_type="pos", mode="rope", features=128, embedding_agg_method="average") + _ = Embedding(emb_type="pos", mode="rope", features=128, embedding_agg_method="stacked") + + _ = Embedding(emb_type="pos", mode="rope", features=128, embedding_agg_method="stack") + _ = Embedding(emb_type="pos", mode="rope", features=128, embedding_agg_method="concatenate") + + _ = Embedding(emb_type="pos", mode="fixed", features=128, embedding_agg_method="average") + _ = Embedding(emb_type="pos", mode="fixed", features=128, embedding_agg_method="stack") + _ = Embedding(emb_type="pos", mode="fixed", features=128, embedding_agg_method="concatenate") + + _ = Embedding(emb_type="pos", mode="learned", features=128, embedding_agg_method="average") + _ = Embedding(emb_type="pos", mode="learned", features=128, embedding_agg_method="stack") + _ = Embedding(emb_type="pos", mode="learned", features=128, embedding_agg_method="concatenate") + _ = Embedding(emb_type="temp", mode="learned", features=128) _ = Embedding(emb_type="pos", mode="learned", features=128) - _ = Embedding(emb_type="pos", mode="learned", features=128) + +def test_rope_embedding(): + "Test RoPE embedding" + frames = 32 + objects = 10 + d_model = 256 + n_anchors = 1 + + N = frames * objects + + boxes = torch.rand(size=(N, n_anchors, 4)) + times = torch.rand(size=(N,)) + # input data of shape (batch_size, N, num_heads, embed_dim) + x = torch.rand(size=(1, N, d_model)) + + pos_emb = Embedding( + emb_type="pos", + mode="rope", + features=d_model, + embedding_agg_method="stack" + ) + temp_emb = Embedding( + emb_type="temp", + mode="rope", + features=d_model, + embedding_agg_method="stack" + ) + + ref_x, ref_y = spatial_emb_from_bb(boxes) + x_rope, rot_mat_x = pos_emb(x, ref_x) + y_rope, rot_mat_y = pos_emb(x, ref_y) + t_rope, ref_temp_emb = temp_emb(x, times) + + assert x_rope.size() == (1, N, d_model) + assert y_rope.size() == (1, N, d_model) + assert t_rope.size() == (1, N, d_model) + + assert not torch.equal(x, x_rope) + assert not 
torch.equal(x, y_rope) + assert not torch.equal(x, t_rope) + + assert not torch.equal(x_rope, y_rope) + assert not torch.equal(x_rope, t_rope) + assert not torch.equal(y_rope, t_rope) + + assert ref_x.size() == ref_y.size() + assert x_rope.size() == x.size() + assert y_rope.size() == x.size() + +def test_embedding_aggregation(): + """Test stack, concatenate agg methods""" + # TODO: create test_embedding_rope and test the xshaped vs xout in the apply_rope function; # how did the shapes match if i was using x vs xshaped? @@ -180,6 +249,8 @@ def test_embedding_basic(): boxes = torch.rand(size=(N, n_anchors, 4)) times = torch.rand(size=(N,)) + # input data of shape (batch_size, N, embed_dim) + x = torch.rand(size=(1, N, d_model)) pos_emb = Embedding( emb_type="pos", @@ -190,31 +261,31 @@ def test_embedding_basic(): scale=10, ) - sine_pos_emb = pos_emb(boxes) + _, sine_pos_emb = pos_emb(x, boxes) pos_emb = Embedding(emb_type="pos", mode="learned", features=d_model, emb_num=100) - learned_pos_emb = pos_emb(boxes) + _, learned_pos_emb = pos_emb(x, boxes) temp_emb = Embedding(emb_type="temp", mode="learned", features=d_model, emb_num=16) - learned_temp_emb = temp_emb(times) + _, learned_temp_emb = temp_emb(x, times) pos_emb_off = Embedding(emb_type="pos", mode="off", features=d_model) - off_pos_emb = pos_emb_off(boxes) + _, off_pos_emb = pos_emb_off(x, boxes) temp_emb_off = Embedding(emb_type="temp", mode="off", features=d_model) - off_temp_emb = temp_emb_off(times) + _, off_temp_emb = temp_emb_off(x, times) learned_emb_off = Embedding(emb_type="off", mode="learned", features=d_model) - off_learned_emb_boxes = learned_emb_off(boxes) - off_learned_emb_times = learned_emb_off(times) + _, off_learned_emb_boxes = learned_emb_off(x, boxes) + _, off_learned_emb_times = learned_emb_off(x, times) fixed_emb_off = Embedding(emb_type="off", mode="fixed", features=d_model) - off_fixed_emb_boxes = fixed_emb_off(boxes) - off_fixed_emb_times = fixed_emb_off(times) + _, off_fixed_emb_boxes = fixed_emb_off(x, boxes) + _, off_fixed_emb_times = fixed_emb_off(x, times) off_emb = Embedding(emb_type="off", mode="off", features=d_model) - off_emb_boxes = off_emb(boxes) - off_emb_times = off_emb(times) + _, off_emb_boxes = off_emb(x, boxes) + _, off_emb_times = off_emb(x, times) assert sine_pos_emb.size() == (N, d_model) assert learned_pos_emb.size() == (N, d_model) @@ -248,12 +319,14 @@ def test_embedding_kwargs(): frames = 32 objects = 10 + d_model = 128 N = frames * objects n_anchors = 1 boxes = torch.rand(N, n_anchors, 4) - + # input data of shape (batch_size, N, embed_dim) + x = torch.rand(size=(1, N, d_model)) # sine embedding sine_args = { @@ -261,32 +334,32 @@ def test_embedding_kwargs(): "scale": frames, "normalize": True, } - sine_no_args = Embedding("pos", "fixed", 128) - sine_with_args = Embedding("pos", "fixed", 128, **sine_args) + sine_no_args = Embedding("pos", "fixed", d_model) + sine_with_args = Embedding("pos", "fixed", d_model, **sine_args) assert sine_no_args.temperature != sine_with_args.temperature - sine_no_args = sine_no_args(boxes) - sine_with_args = sine_with_args(boxes) + _, sine_no_args = sine_no_args(x, boxes) + _, sine_with_args = sine_with_args(x, boxes) assert not torch.equal(sine_no_args, sine_with_args) # learned pos embedding - lp_no_args = Embedding("pos", "learned", 128) + lp_no_args = Embedding("pos", "learned", d_model) lp_args = {"emb_num": 100, "over_boxes": False} - lp_with_args = Embedding("pos", "learned", 128, **lp_args) + lp_with_args = Embedding("pos", "learned", d_model, 
**lp_args) assert lp_no_args.lookup.weight.shape != lp_with_args.lookup.weight.shape # learned temp embedding - lt_no_args = Embedding("temp", "learned", 128) + lt_no_args = Embedding("temp", "learned", d_model) lt_args = {"emb_num": 100} - lt_with_args = Embedding("temp", "learned", 128, **lt_args) + lt_with_args = Embedding("temp", "learned", d_model, **lt_args) assert lt_no_args.lookup.weight.shape != lt_with_args.lookup.weight.shape @@ -300,6 +373,8 @@ def test_multianchor_embedding(): N = frames * objects boxes = torch.rand(size=(N, n_anchors, 4)) + # input data of shape (batch_size, N, embed_dim) + x = torch.rand(size=(1, N, d_model)) fixed_emb = Embedding( "pos", @@ -318,18 +393,18 @@ def test_multianchor_embedding(): assert not isinstance(fixed_emb.mlp, torch.nn.Identity) assert not isinstance(learned_emb.mlp, torch.nn.Identity) - emb = fixed_emb(boxes) + _, emb = fixed_emb(x, boxes) assert emb.size() == (N, features) - emb = learned_emb(boxes) + _, emb = learned_emb(x, boxes) assert emb.size() == (N, features) fixed_emb = Embedding("pos", "fixed", features=features) learned_emb = Embedding("pos", "learned", features=features) with pytest.raises(RuntimeError): - _ = fixed_emb(boxes) + _, _ = fixed_emb(x, boxes) with pytest.raises(RuntimeError): - _ = learned_emb(boxes) + _, _ = learned_emb(x, boxes) def test_transformer_encoder(): @@ -352,7 +427,7 @@ def test_transformer_encoder(): # with position pos_emb = torch.ones_like(queries) - encoder_features = transformer_encoder(queries, pos_emb=pos_emb) + encoder_features = transformer_encoder(queries) assert encoder_features.size() == encoder_features.size() @@ -384,9 +459,7 @@ def test_transformer_decoder(): decoder_features = transformer_decoder( decoder_queries, - encoder_features, - ref_pos_emb=pos_emb, - query_pos_emb=query_pos_emb, + encoder_features ) assert decoder_features.size() == decoder_queries.size() @@ -445,14 +518,15 @@ def test_transformer_embedding(): embedding_meta = { "pos": {"mode": "learned", "emb_num": 16, "normalize": True}, "temp": {"mode": "learned", "emb_num": 16, "normalize": True}, + "embedding_agg_method": "average" } - + # TODO: add support for return_embedding=True transformer = Transformer( d_model=feats, num_encoder_layers=1, num_decoder_layers=1, embedding_meta=embedding_meta, - return_embedding=True, + return_embedding=False, ) assert transformer.pos_emb.mode == "learned" @@ -462,22 +536,22 @@ def test_transformer_embedding(): assert asso_preds[0].matrix.size() == (num_detected * num_frames,) * 2 - pos_emb = torch.concat( - [instance.get_embedding("pos") for instance in instances], axis=0 - ) - temp_emb = torch.concat( - [instance.get_embedding("pos") for instance in instances], axis=0 - ) - - assert pos_emb.size() == ( - len(instances), - feats, - ), pos_emb.shape - - assert temp_emb.size() == ( - len(instances), - feats, - ), temp_emb.shape + # pos_emb = torch.concat( + # [instance.get_embedding("pos") for instance in instances], axis=0 + # ) + # temp_emb = torch.concat( + # [instance.get_embedding("pos") for instance in instances], axis=0 + # ) + # + # assert pos_emb.size() == ( + # len(instances), + # feats, + # ), pos_emb.shape + # + # assert temp_emb.size() == ( + # len(instances), + # feats, + # ), temp_emb.shape def test_tracking_transformer(): @@ -512,7 +586,7 @@ def test_tracking_transformer(): } encoder_cfg = {"model_name": "resnet18", "pretrained": False, "in_chans": 3} - + # TODO: add support for return_embedding=True and uncomment lines below tracking_transformer = 
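Drawn from the tests above, a compact example of where the new embedding_agg_method key sits inside embedding_meta when constructing the Transformer; the tests exercise "average", "stack", and "concatenate", and the per-embedding configs are copied from test_transformer_embedding.

from dreem.models.transformer import Transformer

embedding_meta = {
    "pos": {"mode": "learned", "emb_num": 16, "normalize": True},
    "temp": {"mode": "learned", "emb_num": 16, "normalize": True},
    "embedding_agg_method": "average",  # or "stack" / "concatenate"
}
transformer = Transformer(
    d_model=256,
    num_encoder_layers=1,
    num_decoder_layers=1,
    embedding_meta=embedding_meta,
)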
GlobalTrackingTransformer( encoder_cfg=encoder_cfg, d_model=feats, @@ -526,19 +600,19 @@ def test_tracking_transformer(): assert asso_preds[0].matrix.size() == (num_detected * num_frames,) * 2 - pos_emb = torch.concat( - [instance.get_embedding("pos") for instance in instances], axis=0 - ) - temp_emb = torch.concat( - [instance.get_embedding("pos") for instance in instances], axis=0 - ) - - assert pos_emb.size() == ( - len(instances), - feats, - ), pos_emb.shape - - assert temp_emb.size() == ( - len(instances), - feats, - ), temp_emb.shape + # pos_emb = torch.concat( + # [instance.get_embedding("pos") for instance in instances], axis=0 + # ) + # temp_emb = torch.concat( + # [instance.get_embedding("pos") for instance in instances], axis=0 + # ) + # + # assert pos_emb.size() == ( + # len(instances), + # feats, + # ), pos_emb.shape + # + # assert temp_emb.size() == ( + # len(instances), + # feats, + # ), temp_emb.shape From 62f2c03cff5cec0d60e795d7babebee875358a51 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Mon, 12 Aug 2024 15:07:58 -0700 Subject: [PATCH 21/63] rope bug fixes - create rope isntance once rather than each fwd pass - construct embedding lookup array each fwd pass based on num instances passed in to embedding - scale only pos embs * 100 rather than also temp embs --- dreem/models/embedding.py | 30 +++++++++++++++++++----------- dreem/models/transformer.py | 8 ++++---- tests/test_models.py | 3 +-- 3 files changed, 24 insertions(+), 17 deletions(-) diff --git a/dreem/models/embedding.py b/dreem/models/embedding.py index b94a159..b4970a1 100644 --- a/dreem/models/embedding.py +++ b/dreem/models/embedding.py @@ -35,13 +35,13 @@ class RotaryPositionalEmbeddings(nn.Module): def __init__( self, dim: int, - max_seq_len: int = 4096, + # max_seq_len: int, base: int = 10000, ) -> None: super().__init__() self.dim = dim self.base = base - self.max_seq_len = max_seq_len + # self.max_seq_len = max_seq_len self._rope_init() # We need to explicitly define reset_parameters for FSDP initialization, see @@ -55,10 +55,10 @@ def _rope_init(self): ** (torch.arange(0, self.dim, 2)[: (self.dim // 2)].float() / self.dim) ) self.register_buffer("theta", theta, persistent=False) - self.build_rope_cache(self.max_seq_len) - def build_rope_cache(self, max_seq_len: int = 4096) -> None: + def build_rope_cache(self, max_seq_len: int) -> None: # Create position indexes `[0, 1, ..., max_seq_len - 1]` + seq_idx = torch.arange( max_seq_len, dtype=self.theta.dtype, device=self.theta.device ) @@ -96,6 +96,12 @@ def forward(self, x: Tensor, input_pos: Optional[Tensor] = None) -> Tensor: # input tensor has shape [b, s, n_h, h_d] seq_len = x.size(1) + # create the lookup array based on how many instances there are + # max(101, seq_len) is for positional vs temporal; pos can only have idx up to + # 100 since it's a fraction of [0,1]*100. 
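For reference, the cache that build_rope_cache registers can be reproduced in isolation as below; this is a sketch mirroring the torchtune-style recipe used in this hunk, and the helper name make_rope_cache is made up for illustration.

import torch

def make_rope_cache(dim: int, max_seq_len: int, base: int = 10000) -> torch.Tensor:
    # theta_i = 1 / base^(2i / dim), one rotation frequency per feature pair
    theta = 1.0 / (base ** (torch.arange(0, dim, 2)[: dim // 2].float() / dim))
    seq_idx = torch.arange(max_seq_len, dtype=theta.dtype)
    idx_theta = torch.einsum("i,j->ij", seq_idx, theta)   # (max_seq_len, dim // 2)
    # row p holds the (cos(p * theta_i), sin(p * theta_i)) pairs used to rotate position p
    return torch.stack([torch.cos(idx_theta), torch.sin(idx_theta)], dim=-1)

cache = make_rope_cache(dim=256, max_seq_len=101)
assert cache.shape == (101, 128, 2)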
temp is from [0, clip_len]; since clip_len + # not available, we use # of instances from input x; this is always >= clip_len + self.build_rope_cache(max(101, seq_len)) # registers cache + self.cache = self.cache.to(input_pos.device) # extract the values based on whether input_pos is set or not rope_cache = ( self.cache[:seq_len] if input_pos is None else self.cache[input_pos] @@ -113,7 +119,6 @@ def forward(self, x: Tensor, input_pos: Optional[Tensor] = None) -> Tensor: return rope_cache - class Embedding(torch.nn.Module): @@ -222,7 +227,7 @@ def __init__( if self.emb_type == "pos": if self.embedding_agg_method == "average": self._emb_func = self._sine_box_embedding - else: + else: # if using stacked/concatenated agg method self._emb_func = self._sine_pos_embedding elif self.emb_type == "temp": self._emb_func = self._sine_temp_embedding @@ -230,6 +235,8 @@ def __init__( elif self.mode == "rope": # pos/temp embeddings processed the same way with different embedding array inputs self._emb_func = self._rope_embedding + # create instance so embedding lookup array is created only once + self.rope_instance = RotaryPositionalEmbeddings(self.features) def _check_init_args(self, emb_type: str, mode: str): @@ -364,12 +371,13 @@ def _rope_embedding(self, seq_positions: torch.Tensor, input_shape: torch.Size) # create dummy input of shape (num_batches, num_instances, num_attn_heads, embed_dim) # use num_heads=1 for compatibility with torch ROPE x_rope = torch.rand(input_shape).unsqueeze(2) + # infer whether it is a positional or temporal embedding + is_pos_emb = 1 if seq_positions.max() <= 1 else 0 + # if it is positional, scale seq_positions since these are fractions + # in [0,1] and we need int indexes for embedding lookup + seq_positions = seq_positions*100 if is_pos_emb else seq_positions # RoPE module takes in dimension, num_queries as input to calculate rotation matrix - rope = RotaryPositionalEmbeddings(self.features, input_shape[1]) - # convert seq_positions (indicates relative position in frame) to int - # to index into the theta array for rope - seq_pos = 100*seq_positions.unsqueeze(0) - rot_mat = rope(x_rope, seq_pos.int()) + rot_mat = self.rope_instance(x_rope, seq_positions.unsqueeze(0).int()) return rot_mat diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index fb8f424..e88bfe0 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -79,9 +79,10 @@ def __init__( self.pos_emb = Embedding(emb_type="off", mode="off", features=self.d_model) self.temp_emb = Embedding(emb_type="off", mode="off", features=self.d_model) - self.embedding_agg_method = "average" # default arg in case it's not passed into configs if self.embedding_meta: + self.embedding_agg_method = embedding_meta["embedding_agg_method"] \ + if "embedding_agg_method" in embedding_meta else "average" if "pos" in self.embedding_meta: pos_emb_cfg = self.embedding_meta["pos"] if pos_emb_cfg: @@ -98,8 +99,6 @@ def __init__( embedding_agg_method=self.embedding_agg_method, **temp_emb_cfg ) - self.embedding_agg_method = embedding_meta["embedding_agg_method"] \ - if "embedding_agg_method" in embedding_meta else "average" # Transformer Encoder encoder_layer = TransformerEncoderLayer( @@ -568,6 +567,7 @@ def apply_embeddings(queries: torch.Tensor, embedding_map: Dict[str, Embedding], # forward pass of Embedding object transforms input queries with embeddings queries_x, ref_pos_emb_x = pos_emb(queries, ref_x) queries_y, ref_pos_emb_y = pos_emb(queries, ref_y) + queries_avg = None # pass dummy var in to 
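For a concrete sense of the scaling in _rope_embedding: positional inputs are fractional centroids in [0, 1], so they are multiplied by 100 and truncated to integers before indexing the RoPE cache, while temporal inputs are already integer frame indices and pass through unchanged. A small self-contained illustration (the values are made up):

import torch

seq_positions = torch.tensor([0.0, 0.257, 0.5, 0.999])    # normalized x- or y-centroids
is_pos_emb = seq_positions.max() <= 1                      # heuristic: fractions => positional
idx = (seq_positions * 100 if is_pos_emb else seq_positions).unsqueeze(0).int()
# idx is tensor([[0, 25, 50, 99]]) and indexes rows of the (>= 101)-row RoPE cache
assert int(idx.max()) <= 100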
collate_queries # concatenate or stack the queries (avg. method done above since it applies differently) queries = collate_queries( @@ -613,7 +613,7 @@ def _get_activation_fn(activation: str) -> callable: def collate_queries(queries: Tuple[torch.Tensor], embedding_agg_method: str ) -> torch.Tensor: """ - + Aggregates queries transformed by embeddings Args: _queries: 4-tuple of queries (already transformed by embeddings) for _, x, y, t each of shape (batch_size, n_query, embed_dim) diff --git a/tests/test_models.py b/tests/test_models.py index 51d3a72..80d3277 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -235,8 +235,7 @@ def test_embedding_aggregation(): """Test stack, concatenate agg methods""" -# TODO: create test_embedding_rope and test the xshaped vs xout in the apply_rope function; -# how did the shapes match if i was using x vs xshaped? + def test_embedding_basic(): """Test embedding logic.""" From 9292bbcd9b566c3bed0c65d1294084baf7bcf746 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Mon, 12 Aug 2024 15:52:36 -0700 Subject: [PATCH 22/63] minor update to previous commit --- dreem/models/transformer.py | 1 - tests/test_models.py | 9 +++------ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index e88bfe0..2235c04 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -628,7 +628,6 @@ def collate_queries(queries: Tuple[torch.Tensor], embedding_agg_method: str if embedding_agg_method == "average": collated_queries = queries_avg elif embedding_agg_method == "stack": - # TODO: try changing order of stacking so that order is by query token (x1,y1,t1),(x2,y2,t2) rather than # (t1,t2,t3...),(x1,x2,x3...),(y1,y2,y3...) # stacked is of shape (batch_size, 3*n_query, embed_dim) collated_queries = torch.cat((queries_t, queries_x, queries_y), dim=1) diff --git a/tests/test_models.py b/tests/test_models.py index 80d3277..8193ee6 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -231,10 +231,6 @@ def test_rope_embedding(): assert x_rope.size() == x.size() assert y_rope.size() == x.size() -def test_embedding_aggregation(): - """Test stack, concatenate agg methods""" - - def test_embedding_basic(): """Test embedding logic.""" @@ -470,8 +466,9 @@ def test_transformer_basic(): num_frames = 32 num_detected = 10 img_shape = (1, 100, 100) - - transformer = Transformer(d_model=feats, num_encoder_layers=1, num_decoder_layers=1) + embedding_meta = {"embedding_agg_method": "stack"} + transformer = Transformer(d_model=feats, num_encoder_layers=1, num_decoder_layers=1, + embedding_meta=embedding_meta) frames = [] From 3751de006dc4efd194ec708731031d706db8a79b Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Wed, 14 Aug 2024 17:16:01 -0700 Subject: [PATCH 23/63] fix device mismatch in mlp module --- dreem/models/mlp.py | 1 + 1 file changed, 1 insertion(+) diff --git a/dreem/models/mlp.py b/dreem/models/mlp.py index 4f09551..a6c5ab3 100644 --- a/dreem/models/mlp.py +++ b/dreem/models/mlp.py @@ -56,6 +56,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: Output tensor of shape (batch_size, num_instances, output_dim). 
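To make the two non-average aggregation paths easier to picture, here is a standalone sketch assuming three per-axis query tensors of shape (batch_size, n_query, embed_dim); the Linear layer stands in for the repo's MLP projection and is not the actual module.

import torch

b, n, d = 1, 10, 256
q_t, q_x, q_y = (torch.rand(b, n, d) for _ in range(3))

# "stack": keep embed_dim, triple the number of tokens
stacked = torch.cat((q_t, q_x, q_y), dim=1)           # (b, 3*n, d)

# "concatenate": keep the token count, triple the feature dim, then project back down
concatenated = torch.cat((q_t, q_x, q_y), dim=2)      # (b, n, 3*d)
project = torch.nn.Linear(3 * d, d)                   # stand-in for the repo's MLP
concatenated = project(concatenated)                  # (b, n, d)

assert stacked.shape == (b, 3 * n, d)
assert concatenated.shape == (b, n, d)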
""" for i, layer in enumerate(self.layers): + layer.to(x.device) x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) if i < self.num_layers - 1 and self.dropout > 0.0: x = self.dropouts[i](x) From 3d1a35e60f4c4b8a5834955ebdb0e5bf50093c78 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Wed, 14 Aug 2024 22:26:06 -0700 Subject: [PATCH 24/63] support for adding embedding to instance --- dreem/io/instance.py | 4 ++- dreem/models/transformer.py | 52 ++++++++++++++++----------- tests/test_models.py | 70 ++++++++++++++++++------------------- 3 files changed, 70 insertions(+), 56 deletions(-) diff --git a/dreem/io/instance.py b/dreem/io/instance.py index 65be3c0..c3aa568 100644 --- a/dreem/io/instance.py +++ b/dreem/io/instance.py @@ -565,7 +565,9 @@ def add_embedding(self, emb_type: str, embedding: torch.Tensor) -> None: emb_type: Key/embedding type to be saved to dictionary embedding: The actual torch tensor embedding. """ - embedding = _expand_to_rank(embedding, 2) + if type(embedding) != dict: # for embedding agg method "average", input is array + # for method stack and concatenate, input is dict + embedding = _expand_to_rank(embedding, 2) self._embeddings[emb_type] = embedding @property diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index 2235c04..421d70c 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -186,18 +186,22 @@ def forward( encoder_queries = ref_features # (encoder_features, ref_pos_emb, ref_temp_emb) \ - encoder_features = self.encoder( + encoder_features, pos_emb_traceback, temp_emb_traceback = self.encoder( encoder_queries, embedding_map={"pos": self.pos_emb, "temp": self.temp_emb}, boxes=ref_boxes, times=ref_times, embedding_agg_method=self.embedding_agg_method ) # (total_instances, batch_size, embed_dim) or # (3*total_instances,batch_size,embed_dim) if using stacked embeddings - # TODO: include support for adding x,y,t embeddings to the instance - # if self.return_embedding: - # for i, instance in enumerate(ref_instances): - # instance.add_embedding("pos", ref_pos_emb[i]) - # instance.add_embedding("temp", ref_temp_emb[i]) + if self.return_embedding: + for i, instance in enumerate(ref_instances): + if self.embedding_agg_method == "average": + ref_pos_emb = pos_emb_traceback[0][i] # array + else: + ref_pos_emb = {"x": pos_emb_traceback[0][0][i], "y": pos_emb_traceback[1][0][i]} # dict + + instance.add_embedding("pos", ref_pos_emb) # can be an array or a dict + instance.add_embedding("temp", temp_emb_traceback) # -------------- Begin decoder --------------- # @@ -223,18 +227,22 @@ def forward( query_times = ref_times - decoder_features = self.decoder( + decoder_features, pos_emb_traceback, temp_emb_traceback = self.decoder( query_features, encoder_features, embedding_map={"pos": self.pos_emb, "temp": self.temp_emb}, boxes=query_boxes, times=query_times, embedding_agg_method=self.embedding_agg_method ) # (L, n_query, batch_size, embed_dim) - # TODO: include support for x,y,t embeddings and uncomment this - # if self.return_embedding: - # for i, instance in enumerate(query_instances): - # instance.add_embedding("pos", query_pos_emb[i]) - # instance.add_embedding("temp", query_temp_emb[i]) + if self.return_embedding: + for i, instance in enumerate(ref_instances): + if self.embedding_agg_method == "average": + ref_pos_emb = pos_emb_traceback[0][i] # array + else: + ref_pos_emb = {"x": pos_emb_traceback[0][0][i], "y": pos_emb_traceback[1][0][i]} # dict + + instance.add_embedding("pos", ref_pos_emb) # can be an array or a 
dict + instance.add_embedding("temp", temp_emb_traceback) decoder_features = decoder_features.transpose( 1, 2 @@ -460,13 +468,15 @@ def forward( for layer in self.layers: # compute embeddings and apply to the input queries - queries = apply_embeddings(queries, embedding_map, boxes, times, embedding_agg_method) + queries, pos_emb_traceback, temp_emb_traceback = apply_embeddings( + queries, embedding_map, boxes, times, embedding_agg_method + ) # pass through EncoderLayer queries = layer(queries) encoder_features = self.norm(queries) - return encoder_features# , ref_pos_emb, ref_temp_emb + return encoder_features, pos_emb_traceback, temp_emb_traceback class TransformerDecoder(nn.Module): @@ -518,15 +528,16 @@ def forward( # since the encoder output doesn't change for any number of decoder layer inputs, # we can process its embedding outside the loop if embedding_agg_method == "average": - encoder_features = apply_embeddings(encoder_features, embedding_map, + encoder_features, *_ = apply_embeddings(encoder_features, embedding_map, boxes, times, embedding_agg_method) # TODO: ^ should embeddings really be applied to encoder output again before cross attention? # switched off for stack and concatenate methods as those further split the tokens. Kept for "average" # for backward compatibility for layer in self.layers: - decoder_features = apply_embeddings(decoder_features, embedding_map, - boxes, times, embedding_agg_method) + decoder_features, pos_emb_traceback, temp_emb_traceback = apply_embeddings( + decoder_features, embedding_map, boxes, times, embedding_agg_method + ) decoder_features = layer( decoder_features, encoder_features ) @@ -537,10 +548,9 @@ def forward( if self.return_intermediate: intermediate.pop() intermediate.append(decoder_features) - return torch.stack(intermediate) - return decoder_features.unsqueeze(0) + return decoder_features.unsqueeze(0), pos_emb_traceback, temp_emb_traceback def apply_embeddings(queries: torch.Tensor, embedding_map: Dict[str, Embedding], @@ -561,6 +571,7 @@ def apply_embeddings(queries: torch.Tensor, embedding_map: Dict[str, Embedding], ref_emb = (ref_pos_emb + ref_temp_emb) / 2 queries_avg = queries + ref_emb queries_t = queries_x = queries_y = None + pos_emb_traceback = (ref_pos_emb,) else: # calculate embedding array for x,y from bb centroids; ref_x, ref_y of shape (n_query,) ref_x, ref_y = spatial_emb_from_bb(boxes) @@ -568,6 +579,7 @@ def apply_embeddings(queries: torch.Tensor, embedding_map: Dict[str, Embedding], queries_x, ref_pos_emb_x = pos_emb(queries, ref_x) queries_y, ref_pos_emb_y = pos_emb(queries, ref_y) queries_avg = None # pass dummy var in to collate_queries + pos_emb_traceback = (ref_pos_emb_x, ref_pos_emb_y) # concatenate or stack the queries (avg. 
method done above since it applies differently) queries = collate_queries( @@ -576,7 +588,7 @@ def apply_embeddings(queries: torch.Tensor, embedding_map: Dict[str, Embedding], # transpose for input to EncoderLayer to (n_queries, batch_size, embed_dim) queries = queries.permute(1, 0, 2) - return queries + return queries, pos_emb_traceback, ref_temp_emb def _get_clones(module: nn.Module, N: int) -> nn.ModuleList: diff --git a/tests/test_models.py b/tests/test_models.py index 8193ee6..76ef074 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -516,13 +516,13 @@ def test_transformer_embedding(): "temp": {"mode": "learned", "emb_num": 16, "normalize": True}, "embedding_agg_method": "average" } - # TODO: add support for return_embedding=True + transformer = Transformer( d_model=feats, num_encoder_layers=1, num_decoder_layers=1, embedding_meta=embedding_meta, - return_embedding=False, + return_embedding=True, ) assert transformer.pos_emb.mode == "learned" @@ -532,22 +532,22 @@ def test_transformer_embedding(): assert asso_preds[0].matrix.size() == (num_detected * num_frames,) * 2 - # pos_emb = torch.concat( - # [instance.get_embedding("pos") for instance in instances], axis=0 - # ) - # temp_emb = torch.concat( - # [instance.get_embedding("pos") for instance in instances], axis=0 - # ) - # - # assert pos_emb.size() == ( - # len(instances), - # feats, - # ), pos_emb.shape - # - # assert temp_emb.size() == ( - # len(instances), - # feats, - # ), temp_emb.shape + pos_emb = torch.concat( + [instance.get_embedding("pos") for instance in instances], axis=0 + ) + temp_emb = torch.concat( + [instance.get_embedding("pos") for instance in instances], axis=0 + ) + + assert pos_emb.size() == ( + len(instances), + feats, + ), pos_emb.shape + + assert temp_emb.size() == ( + len(instances), + feats, + ), temp_emb.shape def test_tracking_transformer(): @@ -582,7 +582,7 @@ def test_tracking_transformer(): } encoder_cfg = {"model_name": "resnet18", "pretrained": False, "in_chans": 3} - # TODO: add support for return_embedding=True and uncomment lines below + tracking_transformer = GlobalTrackingTransformer( encoder_cfg=encoder_cfg, d_model=feats, @@ -596,19 +596,19 @@ def test_tracking_transformer(): assert asso_preds[0].matrix.size() == (num_detected * num_frames,) * 2 - # pos_emb = torch.concat( - # [instance.get_embedding("pos") for instance in instances], axis=0 - # ) - # temp_emb = torch.concat( - # [instance.get_embedding("pos") for instance in instances], axis=0 - # ) - # - # assert pos_emb.size() == ( - # len(instances), - # feats, - # ), pos_emb.shape - # - # assert temp_emb.size() == ( - # len(instances), - # feats, - # ), temp_emb.shape + pos_emb = torch.concat( + [instance.get_embedding("pos") for instance in instances], axis=0 + ) + temp_emb = torch.concat( + [instance.get_embedding("pos") for instance in instances], axis=0 + ) + + assert pos_emb.size() == ( + len(instances), + feats, + ), pos_emb.shape + + assert temp_emb.size() == ( + len(instances), + feats, + ), temp_emb.shape From c4abac20a76b62d708e6e890a84ec6c6d3717740 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Thu, 15 Aug 2024 17:14:51 -0700 Subject: [PATCH 25/63] bug fixes to pass unit tests - times array for embedding for encoder queries inside decoder was of query size rather than ref size --- dreem/models/attention_head.py | 7 +++-- dreem/models/embedding.py | 3 +- dreem/models/transformer.py | 55 +++++++++++++++++++++------------- tests/test_training.py | 6 ++-- 4 files changed, 44 insertions(+), 27 deletions(-) diff --git 
a/dreem/models/attention_head.py b/dreem/models/attention_head.py index 537cee1..8ea04b2 100644 --- a/dreem/models/attention_head.py +++ b/dreem/models/attention_head.py @@ -14,7 +14,7 @@ def __init__( feature_dim: int, num_layers: int, dropout: float, - embedding_agg_method: str = "average" + **kwargs ): """Initialize an instance of ATTWeightHead. @@ -25,7 +25,10 @@ def __init__( embedding_agg_method: how the embeddings are aggregated; average/stack/concatenate """ super().__init__() - self.embedding_agg_method = embedding_agg_method + if 'embedding_agg_method' in kwargs: + self.embedding_agg_method = kwargs['embedding_agg_method'] + else: + self.embedding_agg_method = None # if using stacked embeddings, use 1x1 conv with x,y,t embeddings as channels # ensures output represents ref instances by query instances diff --git a/dreem/models/embedding.py b/dreem/models/embedding.py index b4970a1..39468ea 100644 --- a/dreem/models/embedding.py +++ b/dreem/models/embedding.py @@ -318,7 +318,8 @@ def _apply_additive_embeddings(self, x, emb): Returns: Tensor: Input queries with embeddings added - shape (batch_size, N, embed_dim) """ - return x + emb.unsqueeze(0) + _emb = emb.unsqueeze(0) + return x + _emb def forward(self, x, seq_positions: torch.Tensor) -> torch.Tensor: diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index 421d70c..f64d6b2 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -99,6 +99,9 @@ def __init__( embedding_agg_method=self.embedding_agg_method, **temp_emb_cfg ) + else: + self.embedding_meta = {} + self.embedding_agg_method = None # Transformer Encoder encoder_layer = TransformerEncoderLayer( @@ -133,7 +136,7 @@ def __init__( feature_dim=feature_dim_attn_head, num_layers=num_layers_attn_head, dropout=dropout_attn_head, - embedding_agg_method=self.embedding_agg_method + **self.embedding_meta ) self._reset_parameters() @@ -230,6 +233,7 @@ def forward( decoder_features, pos_emb_traceback, temp_emb_traceback = self.decoder( query_features, encoder_features, embedding_map={"pos": self.pos_emb, "temp": self.temp_emb}, + enc_boxes=ref_boxes, enc_times=ref_times, boxes=query_boxes, times=query_times, embedding_agg_method=self.embedding_agg_method ) # (L, n_query, batch_size, embed_dim) @@ -450,7 +454,7 @@ def __init__( def forward( self, queries: torch.Tensor, embedding_map: Dict[str, Embedding], boxes: torch.Tensor, times: torch.Tensor, - embedding_agg_method: str + embedding_agg_method: str = None ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """Execute a forward pass of encoder layer. Computes and applies embeddings before input to EncoderLayer @@ -508,8 +512,9 @@ def forward( decoder_queries: torch.Tensor, encoder_features: torch.Tensor, embedding_map: Dict[str, Embedding], + enc_boxes: torch.Tensor, enc_times: torch.Tensor, boxes: torch.Tensor, times: torch.Tensor, - embedding_agg_method: str + embedding_agg_method: str = None ) -> torch.Tensor: """Execute a forward pass of the decoder block. @@ -529,7 +534,7 @@ def forward( # we can process its embedding outside the loop if embedding_agg_method == "average": encoder_features, *_ = apply_embeddings(encoder_features, embedding_map, - boxes, times, embedding_agg_method) + enc_boxes, enc_times, embedding_agg_method) # TODO: ^ should embeddings really be applied to encoder output again before cross attention? # switched off for stack and concatenate methods as those further split the tokens. 
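A quick shape check of the broadcast that _apply_additive_embeddings relies on, assuming x is (batch_size, N, embed_dim) and the embedding is (N, embed_dim):

import torch

x = torch.rand(1, 320, 256)    # queries: (batch_size, N, embed_dim)
emb = torch.rand(320, 256)     # additive embedding: (N, embed_dim)
out = x + emb.unsqueeze(0)     # unsqueeze adds the batch dim, then broadcasting applies
assert out.shape == x.shape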
Kept for "average" # for backward compatibility @@ -565,25 +570,31 @@ def apply_embeddings(queries: torch.Tensor, embedding_map: Dict[str, Embedding], queries = queries.permute(1,0,2) # queries is shape (batch_size, n_query, embed_dim) # calculate temporal embeddings and transform queries queries_t, ref_temp_emb = temp_emb(queries, times) - # if avg. of temp and pos, need bounding boxes; bb only used for method "average" - if embedding_agg_method == "average": - _, ref_pos_emb = pos_emb(queries, boxes) - ref_emb = (ref_pos_emb + ref_temp_emb) / 2 - queries_avg = queries + ref_emb - queries_t = queries_x = queries_y = None - pos_emb_traceback = (ref_pos_emb,) + + if embedding_agg_method is None: + pos_emb_traceback = (torch.zeros_like(queries),) + queries_avg = queries_t = queries_x = queries_y = None else: - # calculate embedding array for x,y from bb centroids; ref_x, ref_y of shape (n_query,) - ref_x, ref_y = spatial_emb_from_bb(boxes) - # forward pass of Embedding object transforms input queries with embeddings - queries_x, ref_pos_emb_x = pos_emb(queries, ref_x) - queries_y, ref_pos_emb_y = pos_emb(queries, ref_y) - queries_avg = None # pass dummy var in to collate_queries - pos_emb_traceback = (ref_pos_emb_x, ref_pos_emb_y) + # if avg. of temp and pos, need bounding boxes; bb only used for method "average" + if embedding_agg_method == "average": + _, ref_pos_emb = pos_emb(queries, boxes) + ref_emb = (ref_pos_emb + ref_temp_emb) / 2 + queries_avg = queries + ref_emb + queries_t = queries_x = queries_y = None + pos_emb_traceback = (ref_pos_emb,) + else: + # calculate embedding array for x,y from bb centroids; ref_x, ref_y of shape (n_query,) + ref_x, ref_y = spatial_emb_from_bb(boxes) + # forward pass of Embedding object transforms input queries with embeddings + queries_x, ref_pos_emb_x = pos_emb(queries, ref_x) + queries_y, ref_pos_emb_y = pos_emb(queries, ref_y) + queries_avg = None # pass dummy var in to collate_queries + pos_emb_traceback = (ref_pos_emb_x, ref_pos_emb_y) + # concatenate or stack the queries (avg. 
method done above since it applies differently) queries = collate_queries( - (queries_avg, queries_t, queries_x, queries_y), + (queries_avg, queries_t, queries_x, queries_y, queries), embedding_agg_method) # transpose for input to EncoderLayer to (n_queries, batch_size, embed_dim) queries = queries.permute(1, 0, 2) @@ -627,7 +638,7 @@ def collate_queries(queries: Tuple[torch.Tensor], embedding_agg_method: str """ Aggregates queries transformed by embeddings Args: - _queries: 4-tuple of queries (already transformed by embeddings) for _, x, y, t + _queries: 5-tuple of queries (already transformed by embeddings) for _, x, y, t, original input each of shape (batch_size, n_query, embed_dim) embedding_agg_method: String representing the aggregation method for embeddings @@ -635,7 +646,7 @@ def collate_queries(queries: Tuple[torch.Tensor], embedding_agg_method: str stacked (increased number of tokens), or averaged (original token number and length) """ - queries_avg, queries_t, queries_x, queries_y = queries + queries_avg, queries_t, queries_x, queries_y, orig_queries = queries if embedding_agg_method == "average": collated_queries = queries_avg @@ -650,6 +661,8 @@ def collate_queries(queries: Tuple[torch.Tensor], embedding_agg_method: str collated_queries = torch.cat((queries_t, queries_x, queries_y), dim=2) # pass through MLP to project into space of (batch_size, n_query, embed_dim) collated_queries = mlp(collated_queries) + else: + collated_queries = orig_queries return collated_queries diff --git a/tests/test_training.py b/tests/test_training.py index bd8bbe7..8c5206e 100644 --- a/tests/test_training.py +++ b/tests/test_training.py @@ -138,9 +138,9 @@ def test_config_gtr_runner(tmp_path, base_config, params_config, two_flies): "dataset.clip_length": 8, "trainer.min_epochs": 1, "checkpointing.dirpath": model_dir, - "logging.save_dir": logs_dir, + "logging.save_dir": logs_dir } cfg.set_hparams(hparams) - with torch.autograd.set_detect_anomaly(True): - run(cfg.cfg) + # with torch.autograd.set_detect_anomaly(True): + run(cfg.cfg) From 5a7e86b8e392026c2c6009e28d3d4041840c2481 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Fri, 16 Aug 2024 12:24:04 -0700 Subject: [PATCH 26/63] minor updates from PR review --- dreem/models/embedding.py | 22 +- dreem/models/transformer.py | 23 +- rope.ipynb | 598 ----------------------- run_trainer.py => scripts/run_trainer.py | 2 +- 4 files changed, 30 insertions(+), 615 deletions(-) delete mode 100644 rope.ipynb rename run_trainer.py => scripts/run_trainer.py (86%) diff --git a/dreem/models/embedding.py b/dreem/models/embedding.py index 39468ea..17c988f 100644 --- a/dreem/models/embedding.py +++ b/dreem/models/embedding.py @@ -99,8 +99,9 @@ def forward(self, x: Tensor, input_pos: Optional[Tensor] = None) -> Tensor: # create the lookup array based on how many instances there are # max(101, seq_len) is for positional vs temporal; pos can only have idx up to # 100 since it's a fraction of [0,1]*100. 
temp is from [0, clip_len]; since clip_len - # not available, we use # of instances from input x; this is always >= clip_len - self.build_rope_cache(max(101, seq_len)) # registers cache + # not available, we use the last value in the indexing array since this will be the + # last possible frame that we would need to index since no instances in a frame after that + self.build_rope_cache(max(101, input_pos[:, -1].max() + 1)) # registers cache self.cache = self.cache.to(input_pos.device) # extract the values based on whether input_pos is set or not rope_cache = ( @@ -269,7 +270,13 @@ def _check_init_args(self, emb_type: str, mode: str): def _transform(self, x, emb): - + """Routes to the relevant embedding function to transform the input queries + + Args: + x: Input queries of shape (batch_size, N, embed_dim) + emb: Embedding array to apply to data; can be (N, embed_dim) or + (batch_size, n_query, num_heads, embed_dim // 2, 2) if using RoPE + """ if self._emb_func == self._rope_embedding: return self._apply_rope(x, emb) else: @@ -277,8 +284,7 @@ def _transform(self, x, emb): def _apply_rope(self, x, emb): - """ - Applies Rotary Positional Embedding to input queries + """Applies Rotary Positional Embedding to input queries Args: x: Input queries of shape (batch_size, n_query, embed_dim) @@ -308,8 +314,7 @@ def _apply_rope(self, x, emb): def _apply_additive_embeddings(self, x, emb): - """ - Applies additive embeddings to input queries + """Applies additive embeddings to input queries Args: x: Input tensor of shape (batch_size, N, embed_dim) @@ -361,8 +366,7 @@ def _torch_int_div( def _rope_embedding(self, seq_positions: torch.Tensor, input_shape: torch.Size) -> torch.Tensor: - """ - Computes the rotation matrix to apply RoPE to input queries + """Computes the rotation matrix to apply RoPE to input queries Args: seq_positions: Pos array of shape (embed_dim,) used to compute rotational embedding input_shape: Shape of the input queries; needed for rope diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index f64d6b2..272d688 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -229,7 +229,6 @@ def forward( query_boxes = ref_boxes query_times = ref_times - decoder_features, pos_emb_traceback, temp_emb_traceback = self.decoder( query_features, encoder_features, embedding_map={"pos": self.pos_emb, "temp": self.temp_emb}, @@ -553,7 +552,7 @@ def forward( if self.return_intermediate: intermediate.pop() intermediate.append(decoder_features) - return torch.stack(intermediate) + return torch.stack(intermediate), pos_emb_traceback, temp_emb_traceback return decoder_features.unsqueeze(0), pos_emb_traceback, temp_emb_traceback @@ -561,8 +560,16 @@ def forward( def apply_embeddings(queries: torch.Tensor, embedding_map: Dict[str, Embedding], boxes: torch.Tensor, times: torch.Tensor, embedding_agg_method: str): - """ - Enter docstring here + """ Applies embeddings to input queries for various aggregation methods. This function + is called from the transformer encoder and decoder + + Args: + queries: The input tensor of shape (n_query, batch_size, embed_dim). + embedding_map: Dict of Embedding objects defining the pos/temp embeddings to be applied + to the input data + boxes: Bounding box based embedding ids of shape (n_query, n_anchors, 4) + times: Times based embedding ids of shape (n_query,) + embedding_agg_method: method of aggregation of embeddings e.g. 
stack/concatenate/average """ pos_emb, temp_emb = embedding_map["pos"], embedding_map["temp"] @@ -635,14 +642,15 @@ def _get_activation_fn(activation: str) -> callable: def collate_queries(queries: Tuple[torch.Tensor], embedding_agg_method: str ) -> torch.Tensor: - """ - Aggregates queries transformed by embeddings + """Aggregates queries transformed by embeddings + Args: _queries: 5-tuple of queries (already transformed by embeddings) for _, x, y, t, original input each of shape (batch_size, n_query, embed_dim) embedding_agg_method: String representing the aggregation method for embeddings - Returns: Tensor of aggregated queries of shape; can be concatenated (increased length of tokens), + Returns: + Tensor of aggregated queries of shape; can be concatenated (increased length of tokens), stacked (increased number of tokens), or averaged (original token number and length) """ @@ -670,6 +678,7 @@ def collate_queries(queries: Tuple[torch.Tensor], embedding_agg_method: str def spatial_emb_from_bb(bb: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """ Computes embedding arrays for x,y spatial dimensions using centroids from bounding boxes + Args: bb: Bounding boxes of shape (n_query, n_anchors, 4) from which to compute x,y centroids; each bounding box is [ymin, xmin, ymax, xmax] diff --git a/rope.ipynb b/rope.ipynb deleted file mode 100644 index 593439b..0000000 --- a/rope.ipynb +++ /dev/null @@ -1,598 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 156, - "id": "1bd666a7-0ad1-4ae7-a56e-43429a1228d8", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "import numpy as np\n", - "import dreem\n", - "import os\n", - "import matplotlib.pyplot as plt\n", - "import math\n", - "import torch\n", - "import logging\n", - "from dreem.models.mlp import MLP\n", - "from dreem.models.model_utils import *\n", - "from dreem.datasets import SleapDataset\n", - "from dreem.models.transformer import *\n", - "from dreem.models import VisualEncoder\n", - "from dreem.models import GlobalTrackingTransformer\n", - "from dreem.models.gtr_runner import GTRRunner" - ] - }, - { - "cell_type": "code", - "execution_count": 130, - "id": "a8736593-71f7-4ab6-a594-eb52d2fd94ac", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "\"\"\"Module containing different position and temporal embeddings.\"\"\"\n", - "\n", - "logger = logging.getLogger(\"dreem.models\")\n", - "# todo: add named tensors, clean variable names\n", - "\n", - "\n", - "class Embedding(torch.nn.Module):\n", - " \"\"\"Class that wraps around different embedding types.\n", - "\n", - " Used for both learned and fixed embeddings.\n", - " \"\"\"\n", - "\n", - " EMB_TYPES = {\n", - " \"temp\": {},\n", - " \"pos\": {\"over_boxes\"},\n", - " \"off\": {},\n", - " None: {},\n", - " } # dict of valid args:keyword params\n", - " EMB_MODES = {\n", - " \"fixed\": {\"temperature\", \"scale\", \"normalize\"},\n", - " \"learned\": {\"emb_num\"},\n", - " \"off\": {},\n", - " } # dict of valid args:keyword params\n", - "\n", - " def __init__(\n", - " self,\n", - " emb_type: str,\n", - " mode: str,\n", - " features: int,\n", - " n_points: int = 1,\n", - " emb_num: int = 16,\n", - " over_boxes: bool = True,\n", - " temperature: int = 10000,\n", - " normalize: bool = False,\n", - " scale: float | None = None,\n", - " mlp_cfg: dict | None = None,\n", - " ):\n", - " \"\"\"Initialize embeddings.\n", - "\n", - " Args:\n", - " emb_type: The type of embedding to compute. 
Must be one of `{\"temp\", \"pos\", \"off\"}`\n", - " mode: The mode or function used to map positions to vector embeddings.\n", - " Must be one of `{\"fixed\", \"learned\", \"off\"}`\n", - " features: The embedding dimensions. Must match the dimension of the\n", - " input vectors for the transformer model.\n", - " n_points: the number of points that will be embedded.\n", - " emb_num: the number of embeddings in the `self.lookup` table (Only used in learned embeddings).\n", - " over_boxes: Whether to compute the position embedding for each bbox coordinate (y1x1y2x2) or the centroid + bbox size (yxwh).\n", - " temperature: the temperature constant to be used when computing the sinusoidal position embedding\n", - " normalize: whether or not to normalize the positions (Only used in fixed embeddings).\n", - " scale: factor by which to scale the positions after normalizing (Only used in fixed embeddings).\n", - " mlp_cfg: A dictionary of mlp hyperparameters for projecting embedding to correct space.\n", - " Example: {\"hidden_dims\": 256, \"num_layers\":3, \"dropout\": 0.3}\n", - " \"\"\"\n", - " self._check_init_args(emb_type, mode)\n", - "\n", - " super().__init__()\n", - "\n", - " self.emb_type = emb_type\n", - " self.mode = mode\n", - " self.features = features\n", - " self.emb_num = emb_num\n", - " self.over_boxes = over_boxes\n", - " self.temperature = temperature\n", - " self.normalize = normalize\n", - " self.scale = scale\n", - " self.n_points = n_points\n", - "\n", - " if self.normalize and self.scale is None:\n", - " self.scale = 2 * math.pi\n", - "\n", - " if self.emb_type == \"pos\" and mlp_cfg is not None and mlp_cfg[\"num_layers\"] > 0:\n", - " if self.mode == \"fixed\":\n", - " self.mlp = MLP(\n", - " input_dim=n_points * self.features,\n", - " output_dim=self.features,\n", - " **mlp_cfg,\n", - " )\n", - " else:\n", - " in_dim = (self.features // (4 * n_points)) * (4 * n_points)\n", - " self.mlp = MLP(\n", - " input_dim=in_dim,\n", - " output_dim=self.features,\n", - " **mlp_cfg,\n", - " )\n", - " else:\n", - " self.mlp = torch.nn.Identity()\n", - "\n", - " self._emb_func = lambda tensor: torch.zeros(\n", - " (tensor.shape[0], self.features), dtype=tensor.dtype, device=tensor.device\n", - " ) # turn off embedding by returning zeros\n", - "\n", - " self.lookup = None\n", - "\n", - " if self.mode == \"learned\":\n", - " if self.emb_type == \"pos\":\n", - " self.lookup = torch.nn.Embedding(\n", - " self.emb_num * 4 * self.n_points, self.features // (4 * n_points)\n", - " )\n", - " self._emb_func = self._learned_pos_embedding\n", - " elif self.emb_type == \"temp\":\n", - " self.lookup = torch.nn.Embedding(self.emb_num, self.features)\n", - " self._emb_func = self._learned_temp_embedding\n", - "\n", - " elif self.mode == \"fixed\":\n", - " if self.emb_type == \"pos\":\n", - " self._emb_func = self._sine_box_embedding\n", - " elif self.emb_type == \"temp\":\n", - " self._emb_func = self._sine_temp_embedding\n", - "\n", - " def _check_init_args(self, emb_type: str, mode: str):\n", - " \"\"\"Check whether the correct arguments were passed to initialization.\n", - "\n", - " Args:\n", - " emb_type: The type of embedding to compute. 
Must be one of `{\"temp\", \"pos\", \"\"}`\n", - " mode: The mode or function used to map positions to vector embeddings.\n", - " Must be one of `{\"fixed\", \"learned\"}`\n", - "\n", - " Raises:\n", - " ValueError:\n", - " * if the incorrect `emb_type` or `mode` string are passed\n", - " NotImplementedError: if `emb_type` is `temp` and `mode` is `fixed`.\n", - " \"\"\"\n", - " if emb_type.lower() not in self.EMB_TYPES:\n", - " raise ValueError(\n", - " f\"Embedding `emb_type` must be one of {self.EMB_TYPES} not {emb_type}\"\n", - " )\n", - "\n", - " if mode.lower() not in self.EMB_MODES:\n", - " raise ValueError(\n", - " f\"Embedding `mode` must be one of {self.EMB_MODES} not {mode}\"\n", - " )\n", - "\n", - " def forward(self, seq_positions: torch.Tensor) -> torch.Tensor:\n", - " \"\"\"Get the sequence positional embeddings.\n", - "\n", - " Args:\n", - " seq_positions:\n", - " * An (`N`, 1) tensor where seq_positions[i] represents the temporal position of instance_i in the sequence.\n", - " * An (`N`, n_anchors x 4) tensor where seq_positions[i, j, :] represents the [y1, x1, y2, x2] spatial locations of jth point of instance_i in the sequence.\n", - "\n", - " Returns:\n", - " An `N` x `self.features` tensor representing the corresponding spatial or temporal embedding.\n", - " \"\"\"\n", - " emb = self._emb_func(seq_positions)\n", - "\n", - " if emb.shape[-1] != self.features:\n", - " raise RuntimeError(\n", - " (\n", - " f\"Output embedding dimension is {emb.shape[-1]} but requested {self.features} dimensions! \\n\"\n", - " f\"hint: Try turning the MLP on by passing `mlp_cfg` to the constructor to project to the correct embedding dimensions.\"\n", - " )\n", - " )\n", - " return emb\n", - "\n", - " def _torch_int_div(\n", - " self, tensor1: torch.Tensor, tensor2: torch.Tensor\n", - " ) -> torch.Tensor:\n", - " \"\"\"Perform integer division of two tensors.\n", - "\n", - " Args:\n", - " tensor1: dividend tensor.\n", - " tensor2: divisor tensor.\n", - "\n", - " Returns:\n", - " torch.Tensor, resulting tensor.\n", - " \"\"\"\n", - " return torch.div(tensor1, tensor2, rounding_mode=\"floor\")\n", - "\n", - " def _sine_box_embedding(self, boxes: torch.Tensor) -> torch.Tensor:\n", - " \"\"\"Compute sine positional embeddings for boxes using given parameters.\n", - "\n", - " Args:\n", - " boxes: the input boxes of shape N, n_anchors, 4 or B, N, n_anchors, 4\n", - " where the last dimension is the bbox coords in [y1, x1, y2, x2].\n", - " (Note currently `B=batch_size=1`).\n", - "\n", - " Returns:\n", - " torch.Tensor, the sine positional embeddings\n", - " (embedding[:, 4i] = sin(x)\n", - " embedding[:, 4i+1] = cos(x)\n", - " embedding[:, 4i+2] = sin(y)\n", - " embedding[:, 4i+3] = cos(y)\n", - " )\n", - " \"\"\"\n", - " if self.scale is not None and self.normalize is False:\n", - " raise ValueError(\"normalize should be True if scale is passed\")\n", - "\n", - " if len(boxes.size()) == 3:\n", - " boxes = boxes.unsqueeze(0)\n", - "\n", - " if self.normalize:\n", - " boxes = boxes / (boxes[:, :, -1:] + 1e-6) * self.scale\n", - "\n", - " dim_t = torch.arange(self.features // 4, dtype=torch.float32)\n", - "\n", - " dim_t = self.temperature ** (\n", - " 2 * self._torch_int_div(dim_t, 2) / (self.features // 4)\n", - " )\n", - "\n", - " # (b, n_t, n_anchors, 4, D//4)\n", - " pos_emb = boxes[:, :, :, :, None] / dim_t.to(boxes.device)\n", - "\n", - " pos_emb = torch.stack(\n", - " (pos_emb[:, :, :, :, 0::2].sin(), pos_emb[:, :, :, :, 1::2].cos()), dim=4\n", - " )\n", - " pos_emb = 
pos_emb.flatten(2).squeeze(0) # (N_t, n_anchors * D)\n", - "\n", - " pos_emb = self.mlp(pos_emb)\n", - "\n", - " pos_emb = pos_emb.view(boxes.shape[1], self.features)\n", - "\n", - " return pos_emb\n", - "\n", - " def _sine_temp_embedding(self, times: torch.Tensor) -> torch.Tensor:\n", - " \"\"\"Compute fixed sine temporal embeddings.\n", - "\n", - " Args:\n", - " times: the input times of shape (N,) or (N,1) where N = (sum(instances_per_frame))\n", - " which is the frame index of the instance relative\n", - " to the batch size\n", - " (e.g. `torch.tensor([0, 0, ..., 0, 1, 1, ..., 1, 2, 2, ..., 2,..., B, B, ...B])`).\n", - "\n", - " Returns:\n", - " an n_instances x D embedding representing the temporal embedding.\n", - " \"\"\"\n", - " T = times.int().max().item() + 1\n", - " d = self.features\n", - " n = self.temperature\n", - "\n", - " positions = torch.arange(0, T).unsqueeze(1)\n", - " temp_lookup = torch.zeros(T, d, device=times.device)\n", - "\n", - " denominators = torch.pow(\n", - " n, 2 * torch.arange(0, d // 2) / d\n", - " ) # 10000^(2i/d_model), i is the index of embedding\n", - " temp_lookup[:, 0::2] = torch.sin(\n", - " positions / denominators\n", - " ) # sin(pos/10000^(2i/d_model))\n", - " temp_lookup[:, 1::2] = torch.cos(\n", - " positions / denominators\n", - " ) # cos(pos/10000^(2i/d_model))\n", - "\n", - " temp_emb = temp_lookup[times.int()]\n", - " return temp_emb # .view(len(times), self.features)" - ] - }, - { - "cell_type": "code", - "execution_count": 131, - "id": "fc8aa9bf-7e83-4fa6-892e-8a7703777f95", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# create Embedding object\n", - "emb_t = Embedding(emb_type=\"temp\",mode=\"fixed\",features=1024,emb_num=16,n_points=1,temperature=10000)\n", - "emb_p = Embedding(emb_type=\"pos\",mode=\"fixed\",features=1024,emb_num=16,n_points=1,temperature=10000)" - ] - }, - { - "cell_type": "code", - "execution_count": 132, - "id": "4903f6c3-1cc8-412b-b988-ebeb2757c3b7", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# get sample crops from training data to pass through the network\n", - "train_path = \"/home/jovyan/talmolab-smb/datasets/mot/microscopy/airyscan_proofread/Final/dreem-train\"\n", - "# train_path = \"/Users/mustafashaikh/dreem-data/dreem-train\"\n", - "data = SleapDataset([os.path.join(train_path,\"10-1.slp\")], [os.path.join(train_path,\"10-1.mp4\")], crop_size=64,\n", - " mode=\"train\", clip_length=32, anchors=\"centroid\")" - ] - }, - { - "cell_type": "code", - "execution_count": 133, - "id": "27bfdb50-2eee-4207-8a16-6481a9905e90", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# get a list of all instances in the first clip; this is the format that the model pipeline uses as input data\n", - "ref_instances = []\n", - "for frame in data[0]:\n", - " for instance in frame.instances:\n", - " ref_instances.append(instance)" - ] - }, - { - "cell_type": "code", - "execution_count": 134, - "id": "8ea441b2-b12a-4f10-8821-aef889a063ba", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# get the vector of times using the list of crops+labels\n", - "# query_instance is the instances in last frame (set to None)\n", - "ref_times, query_times = get_times(ref_instances, None)" - ] - }, - { - "cell_type": "code", - "execution_count": 135, - "id": "b863dbfe-d9fc-4ed1-bf97-3f304d3d03a6", - "metadata": { - "collapsed": true, - "jupyter": { - "outputs_hidden": true - }, - "tags": [] - }, - "outputs": [ - { - "data": { - "text/plain": [ - "" - ] - }, - 
"execution_count": 135, - "metadata": {}, - "output_type": "execute_result" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAi4AAAGiCAYAAADA0E3hAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/TGe4hAAAACXBIWXMAAA9hAAAPYQGoP6dpAAB5bElEQVR4nO39e5Bkd33f/z8/n885fZnLzt6kXS2SQNgiGC84WDgEQgwOIBuDKf+oCo7BGCf8YcwlKECwMamy7DKSTcpAImJSdlFAmRC5UgbHztehELEtTAkMERALcLAdy+hirVbSzs61L+d8Pu/fH59zek73dM90z2Vnevf9qOo63adPn+tMn/ec7nm/jIgISimllFJTwB70CiillFJKjUsLF6WUUkpNDS1clFJKKTU1tHBRSiml1NTQwkUppZRSU0MLF6WUUkpNDS1clFJKKTU1tHBRSiml1NTQwkUppZRSU0MLF6WUUkpNjQMtXH7zN3+TG264gUajwU033cSf/dmfHeTqKKWUUuqQO7DC5Xd/93e55ZZbeM973sPXvvY1/uk//ae87GUv44EHHjioVVJKKaXUIWcOKmTxuc99Lt///d/Phz/84d647/me7+HHf/zHuf322w9ilZRSSil1yCUHsdBut8u9997LL/zCL/SNv/nmm7nnnns2Td/pdOh0Or3HIQQuXLjAiRMnMMbs+/oqpZRSavdEhJWVFc6cOYO1O/vQ50AKl8cffxzvPadOneobf+rUKc6dO7dp+ttvv51f/uVfvlSrp5RSSql99OCDD3Lttdfu6LUHUriUBq+WiMjQKyjvfve7efvb3957vLS0xPXXX8//7w9eTTpb640Psrl6syZsHsfwT8eGjQ9sXp+h43TZumxdti5bl63L1mVvuZz57CK//SP/H/Pz80PnMY4DKVxOnjyJc27T1ZXz589vugoDUK/Xqdfrm8ans7UdFS7O7O6Ae9n5Addl67J12bpsXbYu+0pddq2bApsvXEziQP6rqFarcdNNN3HXXXf1jb/rrrt4/vOffxCrpJRSSqkpcGAfFb397W/nda97Hc95znN43vOex2/91m/xwAMP8MY3vvGgVkkppZRSh9yBFS4/8RM/wRNPPMGv/Mqv8Mgjj3D27Fn+6I/+iCc/+ckHtUpKKaWUOuQO9Mu5b3rTm3jTm950kKuglFJKqSmiWUVKKaWUmhpauCillFJqamjhopRSSqmpcaDfcdmtp8+eozGX9o0b9j/ne2Hw/9v9wOPB/1UffH6iZW0xr1D533vP8PG7NTivrZYz7P/+xzHqOA3Or7ovBvsObLfs7X4WBnsRjJp/YsrHledN2HScRjZlGtHzYNN0xeuryylfW12WM75vfSfpqTBsfYKYTdMGzND9s9U+HbYeo9Zh2DT92yi9ZW6s9/a/25Oug0U2HUdnZNPPgjWh7+dt3N4W+7HsSYyz//eLLvvKWvalNNWFy9FknWaS4Bj2JjJeATNugeEHLk6NLiA2z88Paf6z1fSDrymn6Stg+k7ow8ePY9T2j1MgBTFQnkTHvHg37FhVl2OL+ZXzt737tu+EG0+snp0Y+cu9adcNruvANg6ZzVZFyjgn3k3bWezvrU7u/Ws4efHUK46qRcKYJ9XdrMtgETzpMnezDjtZ7m4KiL20F3+cDdvmUcs6DNusVJX+RCql1AT266quUmo8WrgopZRSampo4aKUGupK+bxcKbV7l/L9QgsXpdRQk35fSil15bqU7xdauCillFJqamjhopRSSqmpoYWLUmoo/Y6LUuow0sJFKTWUfsdlOO1rotTB0t9ApZSawJXUx0WLNHUY6U+lUkqpoa6kIk1Nj6lu+f//PfpMGuspNZuT2EBiAtYEUhOwRkitxyLY8rHxWCO9tvPWCM4ELBtDiHEB1gRc8bj85R18HMeFvjb2w6IG0k1jKsa4Gr85bmAgN2lIPMDw+Wy/sOrrh0UJjBM7sBPj5iONiiLYK9X5J6Z/f4Rtso2qRuUHVQ1v4+8H8nn618EiW+ZS7TRyYFjUwDj5RZuWMUHkwKTRBpPEDYxal93GDYzah3sZNzB0um1a7+90+YPz2C5e4VLGDajpcim/EzfVhctDS0dphITEBVLnSW3A2UBqfa+QSSr3hxU1ifU44riyqCmf6ytqTCAURRDFG8jgL3H1jXirTJ7eNGO8CXixpAxk8pgtspOKwaiT+raFTZH/E8SS9nKIts9NSgdygyYtZIIZOEHIxgnGVbapeuKx9OcaTWrYScxt2rcbx8gysP+2WOQkv8TVaQeLg8Fgw8ECY9t5j3miKacLvZ/t8fKLqs8Nn++YAZNGtl3WsBPrToqWYcsbtcyDsFUBcSVdAdGcJDWK/lQopZRSampo4aKUUkqpqaGFi1JKKaWmhhYuSimllJoaWrgopZRSampo4aKUUkqpqaGFi1JKqQM1Tt8jpUpauCillDpQ2zW+U6pKCxellFJKTQ0tXJRSSik1Naa65X/2jQX8bB1JIDiQROLNCVRuxgnWCdYGrAtYKzgX2/wnzuOskDqPM4KzgZr1OBuKqABPzXkSU0QJVKIDyriAxIZebMBg9lHZ1r+adzSYfdT/fKXF/MDnvn2ZSAORAm6g1b8djAkomdFt8quZSt5Us4Iq+UVmc/v/uNzhl3rHadk9GA9QjRAYmZdkBrZhF1eaq9sKG3ED1WVCGTlQRiKYXVX9ozJzGNxfm7Zr8pbv43x/oLxUv1W8wLD8omEmyUvqy2Ea0vYfhucWjZz/hFlJ27X9Hye/Z7c5QZPkFY27/EvpUuUkKVWa6sLlyN8JZhZCAiE1SGIIaVnExPGSSHzswCdCPqqwSQLWBpyLRY2zgcR5UhcLk9TFzKPUemrW92UglQVNHG4EOW5V0MBAmKO4Ig9pIwfJC32BkB47NAOpmnk0KiNpWHaQG/E+4bH9hY/xvTf4lEqWj/G9E08cP2SGlWlG2fJkVjl5uYFxsDnjCCbPSbL4kXlF1ZNbdbrNmUY7s/mEOlA4bJNjVDXuCWxwntXX9gc8hi0Li03z3UHA47BlwPgZSX3z2kFe0nbbBFufVCcpGkYWqxMuc6xl7TDgcVSw5GHIDNKAR1U6+J9GpZRSSqkxaeGilFJKqamhhYu6rEz6MZFSSqnpou/y6rLi9uBzcKWUUoeXFi5KKaWUmhpauCillFJqamjhoi4r+h0XpZS6vOm7vLqs6HdclFLq8qaFi7qs6BUXpZS6vE1159x0PWAldsYNSeyOGxIIidnonOtMHFpiZ91EkKKTrjhBkjj0xS0ruunaJMSOujaQJAFXdNBNXNFRt+ymazaGg910LRJjAYxs3C866ZbddUfFBJQddXtRABLvZ7hNsQDlyXpU19xJVedT7SALsetn33KqTShHFQ3bXQXZ1OZ+yPTVaarPj/PasVQjDja2Mbb539j2kfthl4bt5/LYl11Lh7
WjH8dWXVS3a3E/VmfbHXTMHZxm2+7K23Q83UnX3J0uqzRq/0/a7n5a7EXn2mlxWLoFq+GmunCZ+84aicvBGMQasHEoziLOFEVLMUw3HsdCx2wUOK6IC+jFBFSHQpZANymKnCIPCSeQhBgV4ATrAkniewWOs0UOkpFeXIAzoVfYVIucMvvIGumLDUgrw7Ko6d0vowJg030YP/Oob/yQN6bBaQfb3Kf4vgiAqmpMwKC++fTlJ9ne9B7Te658E6nGCgSxQzONhtnuRDQqRmBkbtFAvtE4bdy3Xv5WsQO+EjswXv7TpvlvddLZIh/JVwooKsseVRBsd3IbPNkPZiQNzn+r7KItC7IJW/GPk1lUXd9Ji5bq+oyzvHIZ+52TNK7DWrRoTtKVSUtKpZRSSk0NLVyUUkopNTW0cFFKKaXU1NDCRSml1GVjnC9Wq+mmhYtSSqnLxuX6X11qgxYuSimllJoaWrgopZRSampo4aKUUkqpqaGFi1JKKaWmxnQXLgYwJt4AqdzfNGnZ+LHohGpEMAJ93+MSNsZVbibOfNN4xCBi4l0h3i9uoXwMxWPTGwaKYXGD/m/CD3ZGrXbZDBi8WILYvi6ykxjsftsbv8MW13vZ+n77ZR3OL97tpLX8fsxjr40bKzDtDuO+V0oNN9Ut/y8+bZ4adVxXcN2AzQSbBUweh3SERARCUaQUxNCLByjjAiSJwzIioIwJCL0ogOJ+OS4tcpHSIvcoAZ8KmStjAcosJAErkAjGCcZtZCA5JzgXcDbeEhdzj8qYgLSIBajZvC8qIB0YOoqYgS2yj2BzNMCwmIBMXDFOeuMGbRUNUC1+3BZ1VUrRqn+wiOq1+DeV1v+2r/V/b7wZXeyVtivuytcNiw4IGFLjN8aZjQIzxW8u9CqLmjQCYGhWERv7otyXMTtpY/wok52IB47nwD7oL162L1InKXYGW+lP0vp/0KSZSYM/M4Ot+Ee14R86/wnXaS/a/k9aVO5l1MBW6zDOf/WMmwd1WDKD9iLyYNxtVts7+J+I3TCVG8VFkcErLmXREi+JQHmlJQgmVMYVj42XeHVG2BgXqAwH79ObnhCv2Jhg4vt7eV8MBNO7KlOs6MYqQeVKzcZt47EdeZLqXbHZ4o1m1JWU8qS+kQO0+x+HYUXNltNPeLVm1BvIqPHbXaEZ5w1p0m2K85WJioedXLUaNv9Jl7vtMg7JFS69IqKUKk134aKUUkqpK4oWLkoppZSaGlq4KKWUUmpqaOGilFJKqamhhYtSSimlpoYWLkoppZSaGlq4KKWUUmpqaOGilFJKqakx1Z1zl24w1LC4LriOw3UE2yV20s0Emws2E4wHkwesL5rOBTYa0VG0+c9jv3+XGyQXrDWIpa+LrrhKJ13HRvfcxCAJRVfd2Ek3duCNXXRD2U3XOSQRvBO8BZIALnbUtUU3XesCzsUuuonzpC52xU1d7KI7rJtuan1vaBGsCaTG95qRjdVRV+LrMhwOwRYN0cpOj30NwPa4F1i1+dpgB9m4vP7ny6ZxloHup6MaxW3XeXOc15XTjLM8Npr+jds4LYjZ6D6M7e+gW7lfnWaS+W9lsDvtpu61lY6fg51uq+O2Xc7AD061i+iwDrrbdbYdXP+hy9ziGI2zjP3sdjpqe3Y1zx10FT4sJunYq65sU124JN+7TMcIvuUwLYdrWVybOOxQ3CQOy2KmWxY0AZvHeACCYHzAlh12q0wRAVBGBDhDcHajkOnFAlSjAWIEQDm+vB8LmqKYSUCs68UDBCf4ajxAErBJEQ2QhF40QC3xsaApYgJq1seipmj/Xy1mLBIjAYyQFIVMWcSUhc2wYqYsZGAjGqAsZByBgOt7I9yu6+u4nWe92OHzMhsnF4vve7N3JmycxCsvCX3RA35jGUNOQtUCyBnfm6Z6YnPF+lnjN07ijO5K3GvLP/ZJrzKfSqFW3fY+Mnmn3a0MFkvQX8CUJ8TBAmYr23XdHZxnOd9R7f/jOKms3+67Io/TUn674mUvi4VxW/DvNENqP4qlS2EvWu5Pi8MSc3CY6d5RSiml1NTQwkUppZRSU2PiwuXzn/88P/ZjP8aZM2cwxvD7v//7fc+LCLfeeitnzpyh2Wzyohe9iG9+85t903Q6Hd761rdy8uRJZmdneeUrX8lDDz20qw1RSiml1OVv4sJlbW2N7/u+7+NDH/rQ0Off97738f73v58PfehDfOUrX+H06dO89KUvZWVlpTfNLbfcwqc//WnuvPNOvvCFL7C6usorXvEKvPdD56mUUkopBTv4cu7LXvYyXvaylw19TkT44Ac/yHve8x5e9apXAfDxj3+cU6dO8clPfpKf/dmfZWlpiY985CP8zu/8Di95yUsA+MQnPsF1113H5z73OX74h394F5ujlFJKqcvZnn7H5f777+fcuXPcfPPNvXH1ep0XvvCF3HPPPQDce++9ZFnWN82ZM2c4e/Zsb5pBnU6H5eXlvptSSimlrjx7WricO3cOgFOnTvWNP3XqVO+5c+fOUavVOHbs2MhpBt1+++0sLCz0btddd91errZSSimlpsS+/FeRMf19AkRk07hBW03z7ne/m6Wlpd7twQcf3LN1VUoppdT02NPC5fTp0wCbrpycP3++dxXm9OnTdLtdFhcXR04zqF6vc+TIkb6bUkoppa48e1q43HDDDZw+fZq77rqrN67b7XL33Xfz/Oc/H4CbbrqJNE37pnnkkUf4xje+0ZtmXPU0I63l2LpH6gHfCPg6+KbgGxQ3g29A3jDkdRMf1w2+ZvF1R6hZJLFI6ghJcd8aMPHWazIpRTRAACOC8YIJxC68vrzRGxoPpryfg83jOJuDyU0chnjf5OXQQDDgDZJbQm4IweJzi/cWHyzd3JF7RxaKx8GReUculjw48mDJxZIFR1aMy4IjF0ceHB5LEEMmLt6C27gvDi+WILbXPba8X3ZyLLvUVrtv+m1+jLzY3m0nBrvpDnYkHdZtd1SnTYfghnQ6rU5ffX7YfKqdUrfrCmyR3m1cB92efU9iBPY6F6I67yLKYutpdt5pdZLt38/t3CsH/fOk1F6b+L+KVldX+Zu/+Zve4/vvv5+vf/3rHD9+nOuvv55bbrmF2267jRtvvJEbb7yR2267jZmZGV7zmtcAsLCwwBve8Abe8Y53cOLECY4fP8473/lOnvnMZ/b+y2hcP3XDl7EzTdZDjaW8ycVshqWswVKnyXK3zmq7zmqrRt5OoGOxLYtrW1zbxBiA9kAkQHGz3TISQDB5iEWKD5hM+iMByuKmEgkgie1lGlVzjoKrRAC4MuMIpBIXIEUGUowIECSNj30i5IkgSYwDwJWRAAFrA0kRCZDYjYwjZ6SXceRM6EUBlLEAZRxAaooIgEpEQBkHkBo/OhKgyEQqT/LliaJaRAwrMEYVL86ErYuAgfdeV9RNG+3+N/6Vviyqhs1vWDxAfI3tRQN4TO/56vjqybBXyBUxAMNsat0+dKr+eWy01C/GVyIOnNmcUbRn7ds35UFVd3hlP+6w+BxVSFTjBOKSzNDconLanSyjVG2TX22hv120AAxvxb8Xrf6Ht
[... base64-encoded image/png data for the plt.imshow() output below omitted ...]",
-      "text/plain": [
-       "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "# call fixed temporal embedding with the vector of 'times'\n", - "plt.imshow(emb(ref_times).numpy(), aspect='auto')" - ] - }, - { - "cell_type": "code", - "execution_count": 136, - "id": "8b17fdb7", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "feat_dim = 1024\n", - "xfmr_encoder = TransformerEncoderLayer(d_model=feat_dim, nhead=8)\n", - "visual_encoder = VisualEncoder(d_model=feat_dim, model_name=\"resnet18\")" - ] - }, - { - "cell_type": "code", - "execution_count": 137, - "id": "7999fcef-953b-42cf-927c-f3b617f68157", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "def extract_features(\n", - " instances: list[\"Instance\"], \n", - " visual_encoder: \"dreem.models.VisualEncoder\",\n", - " force_recompute: bool = False\n", - " ) -> None:\n", - " \"\"\"Extract features from instances using visual encoder backbone.\n", - "\n", - " Args:\n", - " instances: A list of instances to compute features for\n", - " VisualEncoder : pass an instance of a visual encoder\n", - " force_recompute: indicate whether to compute features for all instances regardless of if they have instances\n", - " \"\"\"\n", - " if not force_recompute:\n", - " instances_to_compute = [\n", - " instance\n", - " for instance in instances\n", - " if instance.has_crop() and not instance.has_features()\n", - " ]\n", - " else:\n", - " instances_to_compute = instances\n", - "\n", - " if len(instances_to_compute) == 0:\n", - " return\n", - " elif len(instances_to_compute) == 1: # handle batch norm error when B=1\n", - " instances_to_compute = instances\n", - "\n", - " crops = torch.concatenate([instance.crop for instance in instances_to_compute])\n", - "\n", - " features = visual_encoder(crops)\n", - "\n", - " for i, z_i in enumerate(features):\n", - " instances_to_compute[i].features = z_i" - ] - }, - { - "cell_type": "code", - "execution_count": 138, - "id": "e299e8a0-61eb-4eee-901c-49aa7e678b3b", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# partial forward pass of the transformer - up until the encoder\n", - "\n", - "def prepare_for_xfmr(ref_instances):\n", - " # extract visual encoder features from instance object; shape=(1,n_instances,d=1024)\n", - " ref_features = torch.cat(\n", - " [instance.features for instance in ref_instances], dim=0\n", - " ).unsqueeze(0)\n", - "\n", - " # window_length = len(frames)\n", - " # instances_per_frame = [frame.num_detected for frame in frames]\n", - " total_instances = len(ref_instances)\n", - " embed_dim = ref_features.shape[-1]\n", - " # print(f'T: {window_length}; N: {total_instances}; N_t: {instances_per_frame} n_reid: {reid_features.shape}')\n", - " ref_boxes = get_boxes(ref_instances) # (n_instances,1,4)\n", - " ref_boxes = torch.nan_to_num(ref_boxes, -1.0)\n", - " ref_times, query_times = get_times(ref_instances, query_instances=None)\n", - "\n", - " # clip length \n", - " window_length = len(ref_times.unique())\n", - "\n", - " # computes the temporal embedding vector for each instance\n", - " ref_temp_emb = emb_t(ref_times)\n", - " # computes the positional embedding vector for each instance\n", - " ref_pos_emb = emb_p(ref_boxes)\n", - "\n", - " return_embedding=False\n", - " if return_embedding:\n", - " for i, instance in enumerate(ref_instances):\n", - " instance.add_embedding(\"pos\", ref_pos_emb[i])\n", - " instance.add_embedding(\"temp\", ref_temp_emb[i])\n", - "\n", - " # we need a single vector so average the temporal and 
spatial embeddings\n", - " ref_emb = (ref_pos_emb + ref_temp_emb) / 2.0\n", - "\n", - " # add a new dim at the beginning to represent the batch size (in our case 1)\n", - " ref_emb = ref_emb.view(1, total_instances, embed_dim)\n", - "\n", - " ref_emb = ref_emb.permute(1, 0, 2) # (total_instances, batch_size, embed_dim)\n", - "\n", - " batch_size, total_instances, embed_dim = ref_features.shape\n", - "\n", - " ref_features = ref_features.permute(\n", - " 1, 0, 2\n", - " ) # (total_instances, batch_size, embed_dim); note batch_size = 1\n", - "\n", - " return ref_features" - ] - }, - { - "cell_type": "code", - "execution_count": 139, - "id": "75ec8cab-25b9-4e9e-a64a-b5dbe00cc81a", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# pass instances through visual encoder to get the feature vector (q,k,v); modifies the feature attribute of each Instance in ref_instances\n", - "extract_features(ref_instances, visual_encoder)" - ] - }, - { - "cell_type": "markdown", - "id": "a972707a-51a7-45ff-987e-80ee0dea4752", - "metadata": {}, - "source": [ - "### Rotary Positional Embeddings" - ] - }, - { - "cell_type": "code", - "execution_count": 140, - "id": "f0823cf1-2a35-4920-a62e-896bd9dbb078", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# create transformer instance to test embeddings \n", - "tfmr = Transformer()" - ] - }, - { - "cell_type": "code", - "execution_count": 143, - "id": "5e0b9d31-34be-40f8-91dc-b91d59aee170", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "assoc = tfmr(ref_instances)" - ] - }, - { - "cell_type": "code", - "execution_count": 157, - "id": "9f29ca35-9ff2-4e9a-bba0-37a3a14ad522", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "gtr = GTRRunner()" - ] - }, - { - "cell_type": "code", - "execution_count": 160, - "id": "0aa3876a-6246-4d02-80a5-013d382f6d38", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "metrics = gtr._shared_eval_step(data[0],\"train\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "aee0d129-83f2-4f76-b452-132391554b4c", - "metadata": {}, - "outputs": [], - "source": [ - "metrics" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "dreem", - "language": "python", - "name": "dreem" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/run_trainer.py b/scripts/run_trainer.py similarity index 86% rename from run_trainer.py rename to scripts/run_trainer.py index fcf38ff..5046222 100644 --- a/run_trainer.py +++ b/scripts/run_trainer.py @@ -4,7 +4,7 @@ # /Users/mustafashaikh/dreem/dreem/training # /Users/main/Documents/GitHub/dreem/dreem/training -os.chdir("/Users/mustafashaikh/dreem/dreem/training") +os.chdir("./dreem/training") base_config = "./configs/base.yaml" # params_config = "./configs/override.yaml" From 9eddead1612de140fd30b5412c3b5b2ac37b9c3e Mon Sep 17 00:00:00 2001 From: aaprasad Date: Fri, 16 Aug 2024 14:56:40 -0700 Subject: [PATCH 27/63] allow batch eval/inference flexibility rather than just different model checkpoints --- dreem/inference/eval.py | 31 +++++++++++++++++++---------- dreem/inference/track.py | 34 +++++++++++++++++++++----------- dreem/io/config.py | 40 ++++++++++++++++++++++++++++---------- dreem/models/embedding.py | 1 + 
dreem/models/gtr_runner.py | 13 ++++++++++--- 5 files changed, 85 insertions(+), 34 deletions(-) diff --git a/dreem/inference/eval.py b/dreem/inference/eval.py index eb39c7b..82bda2b 100644 --- a/dreem/inference/eval.py +++ b/dreem/inference/eval.py @@ -26,24 +26,35 @@ def run(cfg: DictConfig) -> dict[int, sio.Labels]: """ eval_cfg = Config(cfg) - if "checkpoints" in cfg.keys(): + # update with parameters for batch train job + if "batch_config" in cfg.keys(): try: index = int(os.environ["POD_INDEX"]) - # For testing without deploying a job on runai - except KeyError: - index = input("Pod Index Not found! Please choose a pod index: ") - - logger.info(f"Pod Index: {index}") - - checkpoints = pd.read_csv(cfg.checkpoints) - checkpoint = checkpoints.iloc[index] + except KeyError as e: + index = int( + input(f"{e}. Assuming single run!\nPlease input task index to run:") + ) + + hparams_df = pd.read_csv(cfg.batch_config) + hparams = hparams_df.iloc[index].to_dict() + _ = hparams.pop("Unnamed: 0", None) + + if eval_cfg.set_hparams(hparams): + logger.info("Updated the following hparams to the following values") + logger.info(hparams) else: - checkpoint = eval_cfg.cfg.ckpt_path + hparams = {} + + checkpoint = eval_cfg.cfg.ckpt_path + logger.info(f"Testing model saved at {checkpoint}") model = GTRRunner.load_from_checkpoint(checkpoint) + model.tracker_cfg = eval_cfg.cfg.tracker model.tracker = Tracker(**model.tracker_cfg) + logger.info(f"Using the following tracker:") + print(model.tracker) model.metrics["test"] = eval_cfg.cfg.runner.metrics.test model.persistent_tracking["test"] = eval_cfg.cfg.tracker.get( diff --git a/dreem/inference/track.py b/dreem/inference/track.py index 2c2eac4..d38e99b 100644 --- a/dreem/inference/track.py +++ b/dreem/inference/track.py @@ -96,25 +96,35 @@ def run(cfg: DictConfig) -> dict[int, sio.Labels]: """ pred_cfg = Config(cfg) - if "checkpoints" in cfg.keys(): + # update with parameters for batch train job + if "batch_config" in cfg.keys(): try: index = int(os.environ["POD_INDEX"]) - # For testing without deploying a job on runai - except KeyError: - index = input("Pod Index Not found! Please choose a pod index: ") - - logger.info(f"Pod Index: {index}") - - checkpoints = pd.read_csv(cfg.checkpoints) - checkpoint = checkpoints.iloc[index] + except KeyError as e: + index = int( + input(f"{e}. 
Assuming single run!\nPlease input task index to run:") + ) + + hparams_df = pd.read_csv(cfg.batch_config) + hparams = hparams_df.iloc[index].to_dict() + _ = hparams.pop("Unnamed: 0", None) + + if pred_cfg.set_hparams(hparams): + logger.info("Updated the following hparams to the following values") + logger.info(hparams) else: - checkpoint = pred_cfg.cfg.ckpt_path + hparams = {} + + checkpoint = pred_cfg.cfg.ckpt_path + logger.info(f"Running inference with model from {checkpoint}") model = GTRRunner.load_from_checkpoint(checkpoint) + tracker_cfg = pred_cfg.get_tracker_cfg() - logger.info("Updating tracker hparams") + model.tracker_cfg = tracker_cfg model.tracker = Tracker(**model.tracker_cfg) + logger.info(f"Using the following tracker:") logger.info(model.tracker) @@ -124,12 +134,14 @@ def run(cfg: DictConfig) -> dict[int, sio.Labels]: os.makedirs(outdir, exist_ok=True) for label_file, vid_file in zip(labels_files, vid_files): + logger.info(f"Tracking {label_file} - {vid_file}...") dataset = pred_cfg.get_dataset( label_files=[label_file], vid_files=[vid_file], mode="test" ) dataloader = pred_cfg.get_dataloader(dataset, mode="test") preds = track(model, trainer, dataloader) outpath = os.path.join(outdir, f"{Path(label_file).stem}.dreem_inference.slp") + logger.info(f"Saving results to {outpath}...") preds.save(outpath) return preds diff --git a/dreem/io/config.py b/dreem/io/config.py index b018790..dcf0b4b 100644 --- a/dreem/io/config.py +++ b/dreem/io/config.py @@ -174,9 +174,9 @@ def get_data_paths(self, data_cfg: dict) -> tuple[list[str], list[str]]: labels_path = f"{dir_cfg.path}/*{labels_suff}" vid_path = f"{dir_cfg.path}/*{vid_suff}" logger.debug(f"Searching for labels matching {labels_path}") - label_files = glob.glob(labels_path) + label_files = sorted(glob.glob(labels_path)) logger.debug(f"Searching for videos matching {vid_path}") - vid_files = glob.glob(vid_path) + vid_files = sorted(glob.glob(vid_path)) logger.debug(f"Found {len(label_files)} labels and {len(vid_files)} videos") else: @@ -197,7 +197,7 @@ def get_dataset( mode: str, label_files: list[str] | None = None, vid_files: list[str | list[str]] = None, - ) -> "SleapDataset" | "MicroscopyDataset" | "CellTrackingDataset": + ) -> "SleapDataset" | "MicroscopyDataset" | "CellTrackingDataset" | None: """Getter for datasets. Args: @@ -230,33 +230,37 @@ def get_dataset( dataset_params.slp_files = label_files if vid_files is not None: dataset_params.video_files = vid_files - return SleapDataset(**dataset_params) + dataset = SleapDataset(**dataset_params) elif "tracks" in dataset_params or "source" in dataset_params: if label_files is not None: dataset_params.tracks = label_files if vid_files is not None: dataset_params.videos = vid_files - return MicroscopyDataset(**dataset_params) + dataset = MicroscopyDataset(**dataset_params) elif "raw_images" in dataset_params: if label_files is not None: dataset_params.gt_images = label_files if vid_files is not None: dataset_params.raw_images = vid_files - return CellTrackingDataset(**dataset_params) + dataset = CellTrackingDataset(**dataset_params) else: raise ValueError( "Could not resolve dataset type from Config! Please include \ either `slp_files` or `tracks`/`source`" ) + if len(dataset) == 0: + logger.warn(f"Length of {mode} dataset is {len(dataset)}! 
Returning None") + return None + return dataset def get_dataloader( self, - dataset: "SleapDataset" | "MicroscopyDataset" | "CellTrackingDataset", + dataset: "SleapDataset" | "MicroscopyDataset" | "CellTrackingDataset" | None, mode: str, - ) -> torch.utils.data.DataLoader: + ) -> torch.utils.data.DataLoader | None: """Getter for dataloader. Args: @@ -267,6 +271,15 @@ def get_dataloader( Returns: A torch dataloader for `dataset` with parameters configured as specified """ + if dataset is None: + logger.warn(f"{mode} dataset passed was `None`! Returning `None`") + return None + + elif len(dataset) == 0: + logger.warn(f"Length of {mode} dataset is {len(dataset)}! Returning `None`") + return None + + if mode.lower() == "train": dataloader_params = self.cfg.dataloader.train_dataloader elif mode.lower() == "val": @@ -284,7 +297,7 @@ def get_dataloader( else: pin_memory = False - return torch.utils.data.DataLoader( + dataloader = torch.utils.data.DataLoader( dataset=dataset, batch_size=1, pin_memory=pin_memory, @@ -292,6 +305,13 @@ def get_dataloader( **dataloader_params, ) + if len(dataloader) == 0: + logger.warn( + f"Length of {mode} dataloader is {len(dataloader)}! Returning `None`" + ) + return None + return dataloader + def get_optimizer(self, params: Iterable) -> torch.optim.Optimizer: """Getter for optimizer. @@ -396,7 +416,7 @@ def get_checkpointing(self) -> pl.callbacks.ModelCheckpoint: filename=f"{{epoch}}-{{{metric}}}", **checkpoint_params, ) - checkpointer.CHECKPOINT_NAME_LAST = f"{{epoch}}-best-{{{metric}}}" + checkpointer.CHECKPOINT_NAME_LAST = f"{{epoch}}-final-{{{metric}}}" checkpointers.append(checkpointer) return checkpointers diff --git a/dreem/models/embedding.py b/dreem/models/embedding.py index 8a959c9..c112d48 100644 --- a/dreem/models/embedding.py +++ b/dreem/models/embedding.py @@ -323,6 +323,7 @@ def _learned_temp_embedding(self, times: torch.Tensor) -> torch.Tensor: """ temp_lookup = self.lookup N = times.shape[0] + times = times / times.max() left_ind, right_ind, left_weight, right_weight = self._compute_weights(times) diff --git a/dreem/models/gtr_runner.py b/dreem/models/gtr_runner.py index 01c2039..b3ad530 100644 --- a/dreem/models/gtr_runner.py +++ b/dreem/models/gtr_runner.py @@ -301,7 +301,7 @@ def on_test_epoch_end(self): avg_result = results_df[key].mean() results_file.attrs.create(key, avg_result) for i, (metrics, frames) in enumerate(zip(metrics_dict, preds)): - vid_name = frames[0].vid_name.split("/")[-1].split(".")[0] + vid_name = frames[0].vid_name.split("/")[-1] vid_group = results_file.require_group(vid_name) clip_group = vid_group.require_group(f"clip_{i}") for key, val in metrics.items(): @@ -310,11 +310,18 @@ def on_test_epoch_end(self): if metrics.get("num_switches", 0) > 0: _ = frame.to_h5( clip_group, - frame.get_gt_track_ids().cpu().numpy(), + [ + instance.gt_track_id.item() + for instance in frame.instances + ], save={"crop": True, "features": True, "embeddings": True}, ) else: _ = frame.to_h5( - clip_group, frame.get_gt_track_ids().cpu().numpy() + clip_group, + [ + instance.gt_track_id.item() + for instance in frame.instances + ], ) self.test_results = {"metrics": [], "preds": [], "save_path": fname} From d5993a929f12b819e4e8c32d6be7d55a4da6bf7b Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Sun, 18 Aug 2024 17:29:57 -0700 Subject: [PATCH 28/63] linting --- dreem/inference/tracker.py | 1 - dreem/io/instance.py | 4 +- dreem/models/attention_head.py | 48 ++++---- dreem/models/embedding.py | 46 ++++---- dreem/models/mlp.py | 1 - 
dreem/models/transformer.py | 200 +++++++++++++++++++-------------- tests/test_models.py | 71 +++++++----- tests/test_training.py | 2 +- 8 files changed, 210 insertions(+), 163 deletions(-) diff --git a/dreem/inference/tracker.py b/dreem/inference/tracker.py index 8426e84..58480f4 100644 --- a/dreem/inference/tracker.py +++ b/dreem/inference/tracker.py @@ -463,7 +463,6 @@ def _run_global_tracker( # hungarian matching match_i, match_j = linear_sum_assignment((-traj_score)) - track_ids = instance_ids.new_full((n_query,), -1) for i, j in zip(match_i, match_j): # The overlap threshold is multiplied by the number of times the unique track j is matched to an diff --git a/dreem/io/instance.py b/dreem/io/instance.py index c3aa568..ba97182 100644 --- a/dreem/io/instance.py +++ b/dreem/io/instance.py @@ -565,7 +565,9 @@ def add_embedding(self, emb_type: str, embedding: torch.Tensor) -> None: emb_type: Key/embedding type to be saved to dictionary embedding: The actual torch tensor embedding. """ - if type(embedding) != dict: # for embedding agg method "average", input is array + if ( + type(embedding) != dict + ): # for embedding agg method "average", input is array # for method stack and concatenate, input is dict embedding = _expand_to_rank(embedding, 2) self._embeddings[emb_type] = embedding diff --git a/dreem/models/attention_head.py b/dreem/models/attention_head.py index 8ea04b2..35e7b59 100644 --- a/dreem/models/attention_head.py +++ b/dreem/models/attention_head.py @@ -9,13 +9,7 @@ class ATTWeightHead(torch.nn.Module): """Single attention head.""" - def __init__( - self, - feature_dim: int, - num_layers: int, - dropout: float, - **kwargs - ): + def __init__(self, feature_dim: int, num_layers: int, dropout: float, **kwargs): """Initialize an instance of ATTWeightHead. 
Args: @@ -25,23 +19,27 @@ def __init__( embedding_agg_method: how the embeddings are aggregated; average/stack/concatenate """ super().__init__() - if 'embedding_agg_method' in kwargs: - self.embedding_agg_method = kwargs['embedding_agg_method'] + if "embedding_agg_method" in kwargs: + self.embedding_agg_method = kwargs["embedding_agg_method"] else: self.embedding_agg_method = None # if using stacked embeddings, use 1x1 conv with x,y,t embeddings as channels # ensures output represents ref instances by query instances if self.embedding_agg_method == "stack": - self.q_proj = torch.nn.Conv1d(in_channels=3, out_channels=1, - kernel_size=1, stride=1, padding=0 - ) - self.k_proj = torch.nn.Conv1d(in_channels=3, out_channels=1, - kernel_size=1, stride=1, padding=0 - ) + self.q_proj = torch.nn.Conv1d( + in_channels=3, out_channels=1, kernel_size=1, stride=1, padding=0 + ) + self.k_proj = torch.nn.Conv1d( + in_channels=3, out_channels=1, kernel_size=1, stride=1, padding=0 + ) else: - self.q_proj = MLP(feature_dim, feature_dim, feature_dim, num_layers, dropout) - self.k_proj = MLP(feature_dim, feature_dim, feature_dim, num_layers, dropout) + self.q_proj = MLP( + feature_dim, feature_dim, feature_dim, num_layers, dropout + ) + self.k_proj = MLP( + feature_dim, feature_dim, feature_dim, num_layers, dropout + ) def forward( self, @@ -63,12 +61,16 @@ def forward( # if stacked embeddings, create channels for each x,y,t embedding dimension # maps shape (1,192,1024) -> (1,64,3,1024) if self.embedding_agg_method == "stack": - key = key.view( - batch_size, 3, num_window_instances//3, feature_dim - ).permute(0, 2, 1, 3).squeeze(0) - query = query.view( - batch_size, 3, num_query_instances//3, feature_dim - ).permute(0, 2, 1, 3).squeeze(0) + key = ( + key.view(batch_size, 3, num_window_instances // 3, feature_dim) + .permute(0, 2, 1, 3) + .squeeze(0) + ) + query = ( + query.view(batch_size, 3, num_query_instances // 3, feature_dim) + .permute(0, 2, 1, 3) + .squeeze(0) + ) # key, query of shape (batch_size, num_instances, 3, feature_dim) k = self.k_proj(key).transpose(1, 0) q = self.q_proj(query).transpose(1, 0) diff --git a/dreem/models/embedding.py b/dreem/models/embedding.py index 17c988f..6ec0ef9 100644 --- a/dreem/models/embedding.py +++ b/dreem/models/embedding.py @@ -101,7 +101,7 @@ def forward(self, x: Tensor, input_pos: Optional[Tensor] = None) -> Tensor: # 100 since it's a fraction of [0,1]*100. temp is from [0, clip_len]; since clip_len # not available, we use the last value in the indexing array since this will be the # last possible frame that we would need to index since no instances in a frame after that - self.build_rope_cache(max(101, input_pos[:, -1].max() + 1)) # registers cache + self.build_rope_cache(max(101, input_pos[:, -1].max() + 1)) # registers cache self.cache = self.cache.to(input_pos.device) # extract the values based on whether input_pos is set or not rope_cache = ( @@ -121,9 +121,8 @@ def forward(self, x: Tensor, input_pos: Optional[Tensor] = None) -> Tensor: return rope_cache - class Embedding(torch.nn.Module): - """Class that wraps around different embedding types. + """Class that wraps around different embedding types. Creates embedding array and transforms the input data Used for both learned and fixed embeddings. """ @@ -153,7 +152,7 @@ def __init__( normalize: bool = False, scale: float | None = None, mlp_cfg: dict | None = None, - embedding_agg_method: str = "average" + embedding_agg_method: str = "average", ): """Initialize embeddings. 
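For reference, a minimal sketch (not part of the patch; tensor names and sizes are illustrative) of how the "stack" aggregation path in ATTWeightHead above regroups the x/y/t-stacked tokens into channels before the 1x1 convolution:

import torch

batch_size, n_instances, feature_dim = 1, 64, 1024
# with "stack" aggregation the encoder emits 3 tokens (x, y, t) per instance
key = torch.randn(batch_size, 3 * n_instances, feature_dim)

# (1, 3*N, d) -> (N, 3, d): one row per instance, one channel per embedding type
key = (
    key.view(batch_size, 3, n_instances, feature_dim)
    .permute(0, 2, 1, 3)
    .squeeze(0)
)

# a 1x1 conv over the 3 channels collapses x/y/t back to one token per instance
k_proj = torch.nn.Conv1d(in_channels=3, out_channels=1, kernel_size=1, stride=1, padding=0)
k = k_proj(key)  # (n_instances, 1, feature_dim)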
@@ -228,18 +227,17 @@ def __init__( if self.emb_type == "pos": if self.embedding_agg_method == "average": self._emb_func = self._sine_box_embedding - else: # if using stacked/concatenated agg method + else: # if using stacked/concatenated agg method self._emb_func = self._sine_pos_embedding elif self.emb_type == "temp": self._emb_func = self._sine_temp_embedding - + elif self.mode == "rope": # pos/temp embeddings processed the same way with different embedding array inputs self._emb_func = self._rope_embedding # create instance so embedding lookup array is created only once self.rope_instance = RotaryPositionalEmbeddings(self.features) - def _check_init_args(self, emb_type: str, mode: str): """Check whether the correct arguments were passed to initialization. @@ -268,7 +266,6 @@ def _check_init_args(self, emb_type: str, mode: str): f"Cannot use aggregation method 'average' for rope embedding; must use 'stack' or 'concatenate'" ) - def _transform(self, x, emb): """Routes to the relevant embedding function to transform the input queries @@ -281,15 +278,14 @@ def _transform(self, x, emb): return self._apply_rope(x, emb) else: return self._apply_additive_embeddings(x, emb) - - - def _apply_rope(self, x, emb): + + def _apply_rope(self, x, emb): """Applies Rotary Positional Embedding to input queries Args: x: Input queries of shape (batch_size, n_query, embed_dim) emb: Rotation matrix of shape (batch_size, n_query, num_heads, embed_dim // 2, 2) - + Returns: Tensor of input queries transformed by RoPE """ @@ -300,10 +296,8 @@ def _apply_rope(self, x, emb): # apply RoPE to each query token xout = torch.stack( [ - xout[..., 0] * emb[..., 0] - - xout[..., 1] * emb[..., 1], - xout[..., 1] * emb[..., 0] - + xout[..., 0] * emb[..., 1], + xout[..., 0] * emb[..., 0] - xout[..., 1] * emb[..., 1], + xout[..., 1] * emb[..., 0] + xout[..., 0] * emb[..., 1], ], -1, ) @@ -311,22 +305,20 @@ def _apply_rope(self, x, emb): xout = xout.flatten(3).squeeze(2) return xout - - + def _apply_additive_embeddings(self, x, emb): """Applies additive embeddings to input queries Args: x: Input tensor of shape (batch_size, N, embed_dim) emb: Embedding array of shape (N, embed_dim) - + Returns: Tensor: Input queries with embeddings added - shape (batch_size, N, embed_dim) """ _emb = emb.unsqueeze(0) return x + _emb - - + def forward(self, x, seq_positions: torch.Tensor) -> torch.Tensor: """Get the sequence positional embeddings. @@ -341,8 +333,8 @@ def forward(self, x, seq_positions: torch.Tensor) -> torch.Tensor: - An `N` x `self.features` tensor representing the corresponding spatial or temporal embedding. 
""" - # create embedding array; either rotation matrix of shape - # (batch_size, n_query, num_heads, embed_dim // 2, 2), + # create embedding array; either rotation matrix of shape + # (batch_size, n_query, num_heads, embed_dim // 2, 2), # or (N, embed_dim) array emb = self._emb_func(seq_positions, x.size()) # transform the input data with the embedding @@ -364,8 +356,9 @@ def _torch_int_div( """ return torch.div(tensor1, tensor2, rounding_mode="floor") - - def _rope_embedding(self, seq_positions: torch.Tensor, input_shape: torch.Size) -> torch.Tensor: + def _rope_embedding( + self, seq_positions: torch.Tensor, input_shape: torch.Size + ) -> torch.Tensor: """Computes the rotation matrix to apply RoPE to input queries Args: seq_positions: Pos array of shape (embed_dim,) used to compute rotational embedding @@ -380,12 +373,11 @@ def _rope_embedding(self, seq_positions: torch.Tensor, input_shape: torch.Size) is_pos_emb = 1 if seq_positions.max() <= 1 else 0 # if it is positional, scale seq_positions since these are fractions # in [0,1] and we need int indexes for embedding lookup - seq_positions = seq_positions*100 if is_pos_emb else seq_positions + seq_positions = seq_positions * 100 if is_pos_emb else seq_positions # RoPE module takes in dimension, num_queries as input to calculate rotation matrix rot_mat = self.rope_instance(x_rope, seq_positions.unsqueeze(0).int()) return rot_mat - def _sine_pos_embedding(self, centroids: torch.Tensor, *args) -> torch.Tensor: """Compute fixed sine temporal embeddings per dimension (x,y) diff --git a/dreem/models/mlp.py b/dreem/models/mlp.py index a6c5ab3..c497ab8 100644 --- a/dreem/models/mlp.py +++ b/dreem/models/mlp.py @@ -37,7 +37,6 @@ def __init__( # list concatenations to ensure layer shape compability for n, k in zip([input_dim] + h, h + [output_dim]) ] - ) if self.dropout > 0.0: self.dropouts = torch.nn.ModuleList( diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index 272d688..460d911 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -81,23 +81,28 @@ def __init__( self.temp_emb = Embedding(emb_type="off", mode="off", features=self.d_model) if self.embedding_meta: - self.embedding_agg_method = embedding_meta["embedding_agg_method"] \ - if "embedding_agg_method" in embedding_meta else "average" + self.embedding_agg_method = ( + embedding_meta["embedding_agg_method"] + if "embedding_agg_method" in embedding_meta + else "average" + ) if "pos" in self.embedding_meta: pos_emb_cfg = self.embedding_meta["pos"] if pos_emb_cfg: self.pos_emb = Embedding( - emb_type="pos", features=self.d_model, + emb_type="pos", + features=self.d_model, embedding_agg_method=self.embedding_agg_method, - **pos_emb_cfg - ) # agg method must be the same for pos and temp embeddings + **pos_emb_cfg, + ) # agg method must be the same for pos and temp embeddings if "temp" in self.embedding_meta: temp_emb_cfg = self.embedding_meta["temp"] if temp_emb_cfg: self.temp_emb = Embedding( - emb_type="temp", features=self.d_model, + emb_type="temp", + features=self.d_model, embedding_agg_method=self.embedding_agg_method, - **temp_emb_cfg + **temp_emb_cfg, ) else: self.embedding_meta = {} @@ -136,7 +141,7 @@ def __init__( feature_dim=feature_dim_attn_head, num_layers=num_layers_attn_head, dropout=dropout_attn_head, - **self.embedding_meta + **self.embedding_meta, ) self._reset_parameters() @@ -190,20 +195,25 @@ def forward( # (encoder_features, ref_pos_emb, ref_temp_emb) \ encoder_features, pos_emb_traceback, temp_emb_traceback = self.encoder( 
- encoder_queries, embedding_map={"pos": self.pos_emb, "temp": self.temp_emb}, - boxes=ref_boxes, times=ref_times, - embedding_agg_method=self.embedding_agg_method - ) # (total_instances, batch_size, embed_dim) or - # (3*total_instances,batch_size,embed_dim) if using stacked embeddings + encoder_queries, + embedding_map={"pos": self.pos_emb, "temp": self.temp_emb}, + boxes=ref_boxes, + times=ref_times, + embedding_agg_method=self.embedding_agg_method, + ) # (total_instances, batch_size, embed_dim) or + # (3*total_instances,batch_size,embed_dim) if using stacked embeddings if self.return_embedding: for i, instance in enumerate(ref_instances): if self.embedding_agg_method == "average": - ref_pos_emb = pos_emb_traceback[0][i] # array + ref_pos_emb = pos_emb_traceback[0][i] # array else: - ref_pos_emb = {"x": pos_emb_traceback[0][0][i], "y": pos_emb_traceback[1][0][i]} # dict + ref_pos_emb = { + "x": pos_emb_traceback[0][0][i], + "y": pos_emb_traceback[1][0][i], + } # dict - instance.add_embedding("pos", ref_pos_emb) # can be an array or a dict + instance.add_embedding("pos", ref_pos_emb) # can be an array or a dict instance.add_embedding("temp", temp_emb_traceback) # -------------- Begin decoder --------------- # @@ -222,7 +232,7 @@ def forward( # just get boxes, we already have query_times from above query_boxes = get_boxes(query_instances) query_boxes = torch.nan_to_num(query_boxes, -1.0) - else: # for training, query_instances is None so just pass in the ref data + else: # for training, query_instances is None so just pass in the ref data n_query = total_instances query_instances = ref_instances query_features = ref_features @@ -230,11 +240,14 @@ def forward( query_times = ref_times decoder_features, pos_emb_traceback, temp_emb_traceback = self.decoder( - query_features, encoder_features, + query_features, + encoder_features, embedding_map={"pos": self.pos_emb, "temp": self.temp_emb}, - enc_boxes=ref_boxes, enc_times=ref_times, - boxes=query_boxes, times=query_times, - embedding_agg_method=self.embedding_agg_method + enc_boxes=ref_boxes, + enc_times=ref_times, + boxes=query_boxes, + times=query_times, + embedding_agg_method=self.embedding_agg_method, ) # (L, n_query, batch_size, embed_dim) if self.return_embedding: @@ -242,7 +255,10 @@ def forward( if self.embedding_agg_method == "average": ref_pos_emb = pos_emb_traceback[0][i] # array else: - ref_pos_emb = {"x": pos_emb_traceback[0][0][i], "y": pos_emb_traceback[1][0][i]} # dict + ref_pos_emb = { + "x": pos_emb_traceback[0][0][i], + "y": pos_emb_traceback[1][0][i], + } # dict instance.add_embedding("pos", ref_pos_emb) # can be an array or a dict instance.add_embedding("temp", temp_emb_traceback) @@ -250,7 +266,7 @@ def forward( decoder_features = decoder_features.transpose( 1, 2 ) # # (L, batch_size, n_query, embed_dim) or ((L, batch_size, 3*n_query, embed_dim)) if using stacked embeddings - encoder_features = encoder_features.permute(1, 0, 2) + encoder_features = encoder_features.permute(1, 0, 2) # (batch_size, total_instances, embed_dim) or (batch_size, 3*total_instances, embed_dim) asso_output = [] @@ -260,7 +276,7 @@ def forward( # or number of ref instances for training. 
total_instances is always the number of reference instances asso_matrix = self.attn_head(frame_features, encoder_features).view( n_query, total_instances - ) # call to view() just removes the batch dimension; output of attn_head is (1,n_query,total_instances) + ) # call to view() just removes the batch dimension; output of attn_head is (1,n_query,total_instances) asso_matrix = AssociationMatrix(asso_matrix, ref_instances, query_instances) asso_output.append(asso_matrix) @@ -305,9 +321,7 @@ def __init__( self.activation = _get_activation_fn(activation) - def forward( - self, queries: torch.Tensor - ) -> torch.Tensor: + def forward(self, queries: torch.Tensor) -> torch.Tensor: """Execute a forward pass of the encoder layer. Args: @@ -451,9 +465,12 @@ def __init__( self.norm = norm if norm is not None else nn.Identity() def forward( - self, queries: torch.Tensor, embedding_map: Dict[str, Embedding], - boxes: torch.Tensor, times: torch.Tensor, - embedding_agg_method: str = None + self, + queries: torch.Tensor, + embedding_map: Dict[str, Embedding], + boxes: torch.Tensor, + times: torch.Tensor, + embedding_agg_method: str = None, ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """Execute a forward pass of encoder layer. Computes and applies embeddings before input to EncoderLayer @@ -511,9 +528,11 @@ def forward( decoder_queries: torch.Tensor, encoder_features: torch.Tensor, embedding_map: Dict[str, Embedding], - enc_boxes: torch.Tensor, enc_times: torch.Tensor, - boxes: torch.Tensor, times: torch.Tensor, - embedding_agg_method: str = None + enc_boxes: torch.Tensor, + enc_times: torch.Tensor, + boxes: torch.Tensor, + times: torch.Tensor, + embedding_agg_method: str = None, ) -> torch.Tensor: """Execute a forward pass of the decoder block. @@ -521,7 +540,7 @@ def forward( decoder_queries: Query sequence for decoder to generate (n_query, batch_size, embed_dim). encoder_features: Output from encoder, that decoder uses to attend to relevant parts of input sequence (total_instances, batch_size, embed_dim) - + Returns: The output tensor of shape (L, n_query, batch_size, embed_dim). @@ -529,11 +548,16 @@ def forward( decoder_features = decoder_queries intermediate = [] - # since the encoder output doesn't change for any number of decoder layer inputs, + # since the encoder output doesn't change for any number of decoder layer inputs, # we can process its embedding outside the loop if embedding_agg_method == "average": - encoder_features, *_ = apply_embeddings(encoder_features, embedding_map, - enc_boxes, enc_times, embedding_agg_method) + encoder_features, *_ = apply_embeddings( + encoder_features, + embedding_map, + enc_boxes, + enc_times, + embedding_agg_method, + ) # TODO: ^ should embeddings really be applied to encoder output again before cross attention? # switched off for stack and concatenate methods as those further split the tokens. 
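# --- editor's sketch (not part of the patch): shape contract of the association head ---
# A minimal, self-contained illustration of the comment above: the attention head scores
# every query instance against every reference instance and returns (1, n_query,
# total_instances); the .view() call only drops the leading batch dimension. The sizes
# below are made up, and the q/k projections of ATTWeightHead are omitted for brevity.
import torch

n_query, total_instances, feature_dim = 5, 5, 1024
frame_features = torch.rand(1, n_query, feature_dim)            # decoder output for one layer
encoder_features = torch.rand(1, total_instances, feature_dim)  # encoder output

# the head ends in a batched matmul: (1, n_query, D) x (1, D, total_instances)
attn_weights = torch.bmm(frame_features, encoder_features.transpose(1, 2))
assert attn_weights.shape == (1, n_query, total_instances)

asso_matrix = attn_weights.view(n_query, total_instances)  # batch dim removed
# --- end editor's sketch ---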
Kept for "average" # for backward compatibility @@ -542,9 +566,7 @@ def forward( decoder_features, pos_emb_traceback, temp_emb_traceback = apply_embeddings( decoder_features, embedding_map, boxes, times, embedding_agg_method ) - decoder_features = layer( - decoder_features, encoder_features - ) + decoder_features = layer(decoder_features, encoder_features) if self.return_intermediate: intermediate.append(self.norm(decoder_features)) @@ -557,10 +579,14 @@ def forward( return decoder_features.unsqueeze(0), pos_emb_traceback, temp_emb_traceback -def apply_embeddings(queries: torch.Tensor, embedding_map: Dict[str, Embedding], - boxes: torch.Tensor, times: torch.Tensor, - embedding_agg_method: str): - """ Applies embeddings to input queries for various aggregation methods. This function +def apply_embeddings( + queries: torch.Tensor, + embedding_map: Dict[str, Embedding], + boxes: torch.Tensor, + times: torch.Tensor, + embedding_agg_method: str, +): + """Applies embeddings to input queries for various aggregation methods. This function is called from the transformer encoder and decoder Args: @@ -574,7 +600,9 @@ def apply_embeddings(queries: torch.Tensor, embedding_map: Dict[str, Embedding], pos_emb, temp_emb = embedding_map["pos"], embedding_map["temp"] # queries is of shape (n_query, batch_size, embed_dim); transpose for embeddings - queries = queries.permute(1,0,2) # queries is shape (batch_size, n_query, embed_dim) + queries = queries.permute( + 1, 0, 2 + ) # queries is shape (batch_size, n_query, embed_dim) # calculate temporal embeddings and transform queries queries_t, ref_temp_emb = temp_emb(queries, times) @@ -595,14 +623,13 @@ def apply_embeddings(queries: torch.Tensor, embedding_map: Dict[str, Embedding], # forward pass of Embedding object transforms input queries with embeddings queries_x, ref_pos_emb_x = pos_emb(queries, ref_x) queries_y, ref_pos_emb_y = pos_emb(queries, ref_y) - queries_avg = None # pass dummy var in to collate_queries + queries_avg = None # pass dummy var in to collate_queries pos_emb_traceback = (ref_pos_emb_x, ref_pos_emb_y) - # concatenate or stack the queries (avg. 
method done above since it applies differently) queries = collate_queries( - (queries_avg, queries_t, queries_x, queries_y, queries), - embedding_agg_method) + (queries_avg, queries_t, queries_x, queries_y, queries), embedding_agg_method + ) # transpose for input to EncoderLayer to (n_queries, batch_size, embed_dim) queries = queries.permute(1, 0, 2) @@ -640,40 +667,46 @@ def _get_activation_fn(activation: str) -> callable: raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.") -def collate_queries(queries: Tuple[torch.Tensor], embedding_agg_method: str - ) -> torch.Tensor: - """Aggregates queries transformed by embeddings +def collate_queries( + queries: Tuple[torch.Tensor], embedding_agg_method: str +) -> torch.Tensor: + """Aggregates queries transformed by embeddings - Args: - _queries: 5-tuple of queries (already transformed by embeddings) for _, x, y, t, original input - each of shape (batch_size, n_query, embed_dim) - embedding_agg_method: String representing the aggregation method for embeddings + Args: + _queries: 5-tuple of queries (already transformed by embeddings) for _, x, y, t, original input + each of shape (batch_size, n_query, embed_dim) + embedding_agg_method: String representing the aggregation method for embeddings - Returns: - Tensor of aggregated queries of shape; can be concatenated (increased length of tokens), - stacked (increased number of tokens), or averaged (original token number and length) - """ + Returns: + Tensor of aggregated queries of shape; can be concatenated (increased length of tokens), + stacked (increased number of tokens), or averaged (original token number and length) + """ + + queries_avg, queries_t, queries_x, queries_y, orig_queries = queries + + if embedding_agg_method == "average": + collated_queries = queries_avg + elif embedding_agg_method == "stack": + # (t1,t2,t3...),(x1,x2,x3...),(y1,y2,y3...) + # stacked is of shape (batch_size, 3*n_query, embed_dim) + collated_queries = torch.cat((queries_t, queries_x, queries_y), dim=1) + elif embedding_agg_method == "concatenate": + mlp = MLP( + input_dim=queries_t.shape[-1] * 3, + hidden_dim=queries_t.shape[-1] * 2, + output_dim=queries_t.shape[-1], + num_layers=1, + dropout=0.0, + ) + # concatenated is of shape (batch_size, n_query, 3*embed_dim) + collated_queries = torch.cat((queries_t, queries_x, queries_y), dim=2) + # pass through MLP to project into space of (batch_size, n_query, embed_dim) + collated_queries = mlp(collated_queries) + else: + collated_queries = orig_queries - queries_avg, queries_t, queries_x, queries_y, orig_queries = queries + return collated_queries - if embedding_agg_method == "average": - collated_queries = queries_avg - elif embedding_agg_method == "stack": - # (t1,t2,t3...),(x1,x2,x3...),(y1,y2,y3...) - # stacked is of shape (batch_size, 3*n_query, embed_dim) - collated_queries = torch.cat((queries_t, queries_x, queries_y), dim=1) - elif embedding_agg_method == "concatenate": - mlp = MLP(input_dim=queries_t.shape[-1] * 3, hidden_dim=queries_t.shape[-1] * 2, - output_dim=queries_t.shape[-1], num_layers=1, dropout=0.) 
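# --- editor's sketch (not part of the patch): token layout per embedding_agg_method ---
# A small runnable illustration of the aggregation options handled by collate_queries
# above; batch/query/embedding sizes are illustrative only. "stack" triples the number
# of tokens, "concatenate" triples the token length and relies on an MLP to project back
# down, and "average" is computed upstream and passed in as queries_avg.
import torch

batch_size, n_query, embed_dim = 1, 8, 64
queries_t = torch.rand(batch_size, n_query, embed_dim)  # temporally embedded queries
queries_x = torch.rand(batch_size, n_query, embed_dim)  # x-position embedded queries
queries_y = torch.rand(batch_size, n_query, embed_dim)  # y-position embedded queries

stacked = torch.cat((queries_t, queries_x, queries_y), dim=1)
assert stacked.shape == (batch_size, 3 * n_query, embed_dim)

concatenated = torch.cat((queries_t, queries_x, queries_y), dim=2)
assert concatenated.shape == (batch_size, n_query, 3 * embed_dim)  # MLP then maps 3*D -> D
# --- end editor's sketch ---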
- # concatenated is of shape (batch_size, n_query, 3*embed_dim) - collated_queries = torch.cat((queries_t, queries_x, queries_y), dim=2) - # pass through MLP to project into space of (batch_size, n_query, embed_dim) - collated_queries = mlp(collated_queries) - else: - collated_queries = orig_queries - - return collated_queries - def spatial_emb_from_bb(bb: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """ @@ -682,9 +715,12 @@ def spatial_emb_from_bb(bb: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: Args: bb: Bounding boxes of shape (n_query, n_anchors, 4) from which to compute x,y centroids; each bounding box is [ymin, xmin, ymax, xmax] - - Returns: + + Returns: A tuple of tensors containing the emebdding array for x,y dimensions, each of shape (n_query,) """ # compute avg of xmin,xmax and ymin,ymax - return bb[:,:,[1,3]].mean(axis=2).squeeze(), bb[:,:,[0,2]].mean(axis=2).squeeze() \ No newline at end of file + return ( + bb[:, :, [1, 3]].mean(axis=2).squeeze(), + bb[:, :, [0, 2]].mean(axis=2).squeeze(), + ) diff --git a/tests/test_models.py b/tests/test_models.py index 76ef074..bdf17f0 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -15,7 +15,7 @@ TransformerEncoderLayer, TransformerDecoderLayer, spatial_emb_from_bb, - apply_embeddings + apply_embeddings, ) @@ -35,7 +35,9 @@ def test_att_weight_head(): """Test self-attention head logic.""" b, n, f = 1, 10, 1024 # batch size, num instances, features - att_weight_head = ATTWeightHead(feature_dim=f, num_layers=2, dropout=0.1, embedding_agg_method="average") + att_weight_head = ATTWeightHead( + feature_dim=f, num_layers=2, dropout=0.1, embedding_agg_method="average" + ) q = k = torch.rand(size=(b, n, f)) @@ -165,19 +167,39 @@ def test_embedding_validity(): with pytest.raises(Exception): # embedding_agg_method cannot be average for rope - _ = Embedding(emb_type="pos", mode="rope", features=128, embedding_agg_method="average") - _ = Embedding(emb_type="pos", mode="rope", features=128, embedding_agg_method="stacked") + _ = Embedding( + emb_type="pos", mode="rope", features=128, embedding_agg_method="average" + ) + _ = Embedding( + emb_type="pos", mode="rope", features=128, embedding_agg_method="stacked" + ) - _ = Embedding(emb_type="pos", mode="rope", features=128, embedding_agg_method="stack") - _ = Embedding(emb_type="pos", mode="rope", features=128, embedding_agg_method="concatenate") + _ = Embedding( + emb_type="pos", mode="rope", features=128, embedding_agg_method="stack" + ) + _ = Embedding( + emb_type="pos", mode="rope", features=128, embedding_agg_method="concatenate" + ) - _ = Embedding(emb_type="pos", mode="fixed", features=128, embedding_agg_method="average") - _ = Embedding(emb_type="pos", mode="fixed", features=128, embedding_agg_method="stack") - _ = Embedding(emb_type="pos", mode="fixed", features=128, embedding_agg_method="concatenate") + _ = Embedding( + emb_type="pos", mode="fixed", features=128, embedding_agg_method="average" + ) + _ = Embedding( + emb_type="pos", mode="fixed", features=128, embedding_agg_method="stack" + ) + _ = Embedding( + emb_type="pos", mode="fixed", features=128, embedding_agg_method="concatenate" + ) - _ = Embedding(emb_type="pos", mode="learned", features=128, embedding_agg_method="average") - _ = Embedding(emb_type="pos", mode="learned", features=128, embedding_agg_method="stack") - _ = Embedding(emb_type="pos", mode="learned", features=128, embedding_agg_method="concatenate") + _ = Embedding( + emb_type="pos", mode="learned", features=128, embedding_agg_method="average" + 
) + _ = Embedding( + emb_type="pos", mode="learned", features=128, embedding_agg_method="stack" + ) + _ = Embedding( + emb_type="pos", mode="learned", features=128, embedding_agg_method="concatenate" + ) _ = Embedding(emb_type="temp", mode="learned", features=128) _ = Embedding(emb_type="pos", mode="learned", features=128) @@ -198,16 +220,10 @@ def test_rope_embedding(): x = torch.rand(size=(1, N, d_model)) pos_emb = Embedding( - emb_type="pos", - mode="rope", - features=d_model, - embedding_agg_method="stack" + emb_type="pos", mode="rope", features=d_model, embedding_agg_method="stack" ) temp_emb = Embedding( - emb_type="temp", - mode="rope", - features=d_model, - embedding_agg_method="stack" + emb_type="temp", mode="rope", features=d_model, embedding_agg_method="stack" ) ref_x, ref_y = spatial_emb_from_bb(boxes) @@ -452,10 +468,7 @@ def test_transformer_decoder(): # with position pos_emb = query_pos_emb = torch.ones_like(encoder_features) - decoder_features = transformer_decoder( - decoder_queries, - encoder_features - ) + decoder_features = transformer_decoder(decoder_queries, encoder_features) assert decoder_features.size() == decoder_queries.size() @@ -467,8 +480,12 @@ def test_transformer_basic(): num_detected = 10 img_shape = (1, 100, 100) embedding_meta = {"embedding_agg_method": "stack"} - transformer = Transformer(d_model=feats, num_encoder_layers=1, num_decoder_layers=1, - embedding_meta=embedding_meta) + transformer = Transformer( + d_model=feats, + num_encoder_layers=1, + num_decoder_layers=1, + embedding_meta=embedding_meta, + ) frames = [] @@ -514,7 +531,7 @@ def test_transformer_embedding(): embedding_meta = { "pos": {"mode": "learned", "emb_num": 16, "normalize": True}, "temp": {"mode": "learned", "emb_num": 16, "normalize": True}, - "embedding_agg_method": "average" + "embedding_agg_method": "average", } transformer = Transformer( diff --git a/tests/test_training.py b/tests/test_training.py index 8c5206e..5729510 100644 --- a/tests/test_training.py +++ b/tests/test_training.py @@ -138,7 +138,7 @@ def test_config_gtr_runner(tmp_path, base_config, params_config, two_flies): "dataset.clip_length": 8, "trainer.min_epochs": 1, "checkpointing.dirpath": model_dir, - "logging.save_dir": logs_dir + "logging.save_dir": logs_dir, } cfg.set_hparams(hparams) From bcb661a036f2d769c0d91863411404f32f176816 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Mon, 26 Aug 2024 14:54:00 -0700 Subject: [PATCH 29/63] add cross attn for rope-stack before final asso matrix output --- dreem/models/attention_head.py | 36 +++++++++++++++++++++++++--------- dreem/models/transformer.py | 2 -- scripts/run_eval.py | 12 ++++++++++++ scripts/run_trainer.py | 3 +-- 4 files changed, 40 insertions(+), 13 deletions(-) create mode 100644 scripts/run_eval.py diff --git a/dreem/models/attention_head.py b/dreem/models/attention_head.py index 35e7b59..701dc6f 100644 --- a/dreem/models/attention_head.py +++ b/dreem/models/attention_head.py @@ -33,6 +33,9 @@ def __init__(self, feature_dim: int, num_layers: int, dropout: float, **kwargs): self.k_proj = torch.nn.Conv1d( in_channels=3, out_channels=1, kernel_size=1, stride=1, padding=0 ) + self.attn_x = torch.nn.MultiheadAttention(feature_dim, 1) + self.attn_y = torch.nn.MultiheadAttention(feature_dim, 1) + self.attn_t = torch.nn.MultiheadAttention(feature_dim, 1) else: self.q_proj = MLP( feature_dim, feature_dim, feature_dim, num_layers, dropout @@ -59,26 +62,41 @@ def forward( num_window_instances = key.shape[1] # if stacked embeddings, create channels for each x,y,t 
embedding dimension - # maps shape (1,192,1024) -> (1,64,3,1024) + # maps shape (1,num_instances*3,feature_dim) -> (num_instances,3,feature_dim) if self.embedding_agg_method == "stack": - key = ( - key.view(batch_size, 3, num_window_instances // 3, feature_dim) + key_stacked = ( + key + .view(batch_size, 3, num_window_instances // 3, feature_dim) .permute(0, 2, 1, 3) - .squeeze(0) + .squeeze(0) # keep as (num_instances*3, feature_dim) ) + key_orig = key.squeeze(0) # keep as (num_instances*3, feature_dim) + query = ( query.view(batch_size, 3, num_query_instances // 3, feature_dim) .permute(0, 2, 1, 3) .squeeze(0) ) - # key, query of shape (batch_size, num_instances, 3, feature_dim) - k = self.k_proj(key).transpose(1, 0) - q = self.q_proj(query).transpose(1, 0) - # k,q of shape (batch_size, num_instances, feature_dim) + # pass t,x,y frame features through cross attention with entire encoder 3*num_window_instances tokens before MLP; + # note order is t,x,y + out_t, _ = self.attn_t(query=query[:,0,:], key=key_orig, value=key_orig) + out_x, _ = self.attn_x(query=query[:,1,:], key=key_orig, value=key_orig) + out_y, _ = self.attn_y(query=query[:,2,:], key=key_orig, value=key_orig) + # combine each attention output to (num_instances, 3, feature_dim) + collated = torch.stack((out_t, out_x, out_y), dim=0).permute(1,0,2) + # mlp_out has shape (1, num_window_instances, feature_dim) + mlp_out = self.q_proj(collated).transpose(1,0) + + # key, query of shape (num_instances, 3, feature_dim) + # TODO: uncomment this if not using modified attention heads for t,x,y + k = self.k_proj(key_stacked).transpose(1, 0) + # q = self.q_proj(query).transpose(1, 0) + # k,q of shape (num_instances, feature_dim) + attn_weights = torch.bmm(mlp_out, k.transpose(1, 2)) else: k = self.k_proj(key) q = self.q_proj(query) + attn_weights = torch.bmm(q, k.transpose(1, 2)) - attn_weights = torch.bmm(q, k.transpose(1, 2)) return attn_weights # (B, N_t, N) diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index 460d911..4cc2041 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -186,11 +186,9 @@ def forward( ref_times, query_times = get_times(ref_instances, query_instances) batch_size, total_instances, embed_dim = ref_features.shape - ref_features = ref_features.permute( 1, 0, 2 ) # (total_instances, batch_size, embed_dim) - encoder_queries = ref_features # (encoder_features, ref_pos_emb, ref_temp_emb) \ diff --git a/scripts/run_eval.py b/scripts/run_eval.py new file mode 100644 index 0000000..a433852 --- /dev/null +++ b/scripts/run_eval.py @@ -0,0 +1,12 @@ +from dreem.training import train +from omegaconf import OmegaConf + +# /Users/mustafashaikh/dreem/dreem/training +# /Users/main/Documents/GitHub/dreem/dreem/training + + +inference_config = "tests/configs/inference.yaml" + +cfg = OmegaConf.load(inference_config) + +eval.run(cfg) \ No newline at end of file diff --git a/scripts/run_trainer.py b/scripts/run_trainer.py index 5046222..397fca9 100644 --- a/scripts/run_trainer.py +++ b/scripts/run_trainer.py @@ -4,8 +4,7 @@ # /Users/mustafashaikh/dreem/dreem/training # /Users/main/Documents/GitHub/dreem/dreem/training -os.chdir("./dreem/training") - +os.chdir("/Users/main/Documents/GitHub/dreem/dreem/training") base_config = "./configs/base.yaml" # params_config = "./configs/override.yaml" From fd77ded3f85f5a88ca1e821216f643837dedbd9d Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Tue, 27 Aug 2024 16:14:41 -0700 Subject: [PATCH 30/63] minor bug fix in rope embedding for single instance 
clips --- dreem/models/embedding.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/dreem/models/embedding.py b/dreem/models/embedding.py index 6ec0ef9..516f551 100644 --- a/dreem/models/embedding.py +++ b/dreem/models/embedding.py @@ -101,6 +101,7 @@ def forward(self, x: Tensor, input_pos: Optional[Tensor] = None) -> Tensor: # 100 since it's a fraction of [0,1]*100. temp is from [0, clip_len]; since clip_len # not available, we use the last value in the indexing array since this will be the # last possible frame that we would need to index since no instances in a frame after that + if input_pos.dim() <= 1: input_pos = input_pos.unsqueeze(0) self.build_rope_cache(max(101, input_pos[:, -1].max() + 1)) # registers cache self.cache = self.cache.to(input_pos.device) # extract the values based on whether input_pos is set or not @@ -370,12 +371,13 @@ def _rope_embedding( # use num_heads=1 for compatibility with torch ROPE x_rope = torch.rand(input_shape).unsqueeze(2) # infer whether it is a positional or temporal embedding - is_pos_emb = 1 if seq_positions.max() <= 1 else 0 + is_pos_emb = 1 if seq_positions.max() < 1 else 0 # if it is positional, scale seq_positions since these are fractions # in [0,1] and we need int indexes for embedding lookup seq_positions = seq_positions * 100 if is_pos_emb else seq_positions + seq_positions = seq_positions.unsqueeze(0).int() # RoPE module takes in dimension, num_queries as input to calculate rotation matrix - rot_mat = self.rope_instance(x_rope, seq_positions.unsqueeze(0).int()) + rot_mat = self.rope_instance(x_rope, seq_positions) return rot_mat From 41454f76f9095f52555e444593d8337d878eb5c9 Mon Sep 17 00:00:00 2001 From: aaprasad Date: Fri, 30 Aug 2024 10:17:31 -0400 Subject: [PATCH 31/63] use `sleap-io` as video backend instead of imageio --- dreem/datasets/sleap_dataset.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/dreem/datasets/sleap_dataset.py b/dreem/datasets/sleap_dataset.py index 8bbf9d8..4de5eb2 100644 --- a/dreem/datasets/sleap_dataset.py +++ b/dreem/datasets/sleap_dataset.py @@ -106,7 +106,7 @@ def __init__( # if self.seed is not None: # np.random.seed(self.seed) self.labels = [sio.load_slp(slp_file) for slp_file in self.slp_files] - self.videos = [imageio.get_reader(vid_file) for vid_file in self.vid_files] + self.vid_readers = {} # do we need this? 
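# --- editor's sketch (not part of the patch): what the RoPE rotation does ---
# _rope_embedding above delegates the actual rotation to self.rope_instance; the snippet
# below is a generic rotary-embedding sketch using the usual conventions (pairwise
# rotation, base 10000), which are assumptions here rather than details read from that
# module, and the helper name rope_rotate is ours. It also mirrors the fix above:
# fractional box positions in [0, 1) are scaled by 100 to obtain integer position
# indices, while frame indices are used as-is.
import torch

def rope_rotate(x: torch.Tensor, positions: torch.Tensor, base: float = 10000.0) -> torch.Tensor:
    """Rotate consecutive feature pairs of x by position-dependent angles (standard RoPE)."""
    n, d = x.shape                                                      # (num_tokens, embed_dim), d even
    theta = base ** (-torch.arange(0, d, 2, dtype=torch.float32) / d)   # (d/2,) per-pair frequencies
    angles = positions.float()[:, None] * theta[None, :]                # (n, d/2)
    cos, sin = angles.cos(), angles.sin()
    rotated = torch.empty_like(x)
    rotated[:, 0::2] = x[:, 0::2] * cos - x[:, 1::2] * sin
    rotated[:, 1::2] = x[:, 0::2] * sin + x[:, 1::2] * cos
    return rotated

tokens = torch.rand(6, 16)
box_positions = torch.linspace(0, 0.9, 6)      # fractional positions, as in the "pos" path
position_idx = (box_positions * 100).int()     # scaled to integer indices, as in the code above
assert rope_rotate(tokens, position_idx).shape == tokens.shape
# --- end editor's sketch ---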
would need to update with sleap-io # for label in self.labels: @@ -139,9 +139,6 @@ def get_instances(self, label_idx: list[int], frame_idx: list[int]) -> list[Fram video = self.labels[label_idx] video_name = self.video_files[label_idx] - - vid_reader = self.videos[label_idx] - # img = vid_reader.get_data(0) skeleton = video.skeletons[-1] @@ -162,12 +159,12 @@ def get_instances(self, label_idx: list[int], frame_idx: list[int]) -> list[Fram lf = video[frame_ind] try: - img = vid_reader.get_data(int(lf.frame_idx)) - except IndexError as e: - logger.warning( - f"Could not read frame {frame_ind} from {video_name} due to {e}" - ) - continue + img = lf.image + except FileNotFoundError as e: + if video_name not in self.vid_readers: + self.vid_readers[video_name] = sio.load_video(video_name) + vid_reader = self.vid_readers[video_name] + img = vid_reader[lf.frame_idx] if len(img.shape) == 2: img = img.expand_dims(-1) @@ -370,5 +367,5 @@ def get_instances(self, label_idx: list[int], frame_idx: list[int]) -> list[Fram def __del__(self): """Handle file closing before garbage collection.""" - for reader in self.videos: + for reader in self.vid_readers: reader.close() From 64c970beb8b7a1f4a6dc5cf880fabe398d85bd08 Mon Sep 17 00:00:00 2001 From: aaprasad Date: Fri, 30 Aug 2024 10:47:54 -0400 Subject: [PATCH 32/63] lint --- dreem/io/config.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/dreem/io/config.py b/dreem/io/config.py index dcf0b4b..36d53df 100644 --- a/dreem/io/config.py +++ b/dreem/io/config.py @@ -274,11 +274,10 @@ def get_dataloader( if dataset is None: logger.warn(f"{mode} dataset passed was `None`! Returning `None`") return None - + elif len(dataset) == 0: logger.warn(f"Length of {mode} dataset is {len(dataset)}! Returning `None`") return None - if mode.lower() == "train": dataloader_params = self.cfg.dataloader.train_dataloader From b63f24fb2a76bffed84d4245d3a3dffa84eb0bf9 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Wed, 31 Jul 2024 15:23:56 -0700 Subject: [PATCH 33/63] create notebook for dev --- rope.ipynb | 619 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 619 insertions(+) create mode 100644 rope.ipynb diff --git a/rope.ipynb b/rope.ipynb new file mode 100644 index 0000000..2652e38 --- /dev/null +++ b/rope.ipynb @@ -0,0 +1,619 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "1bd666a7-0ad1-4ae7-a56e-43429a1228d8", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/opt/conda/envs/dreem/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" + ] + } + ], + "source": [ + "import numpy as np\n", + "import dreem\n", + "import os\n", + "import matplotlib.pyplot as plt\n", + "\n", + "import math\n", + "import torch\n", + "import logging\n", + "from dreem.models.mlp import MLP\n", + "from dreem.models.model_utils import *\n", + "from dreem.datasets import SleapDataset" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "a8736593-71f7-4ab6-a594-eb52d2fd94ac", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "\"\"\"Module containing different position and temporal embeddings.\"\"\"\n", + "\n", + "logger = logging.getLogger(\"dreem.models\")\n", + "# todo: add named tensors, clean variable names\n", + "\n", + "\n", + "class Embedding(torch.nn.Module):\n", + " \"\"\"Class that wraps around different embedding types.\n", + "\n", + " Used for both learned and fixed embeddings.\n", + " \"\"\"\n", + "\n", + " EMB_TYPES = {\n", + " \"temp\": {},\n", + " \"pos\": {\"over_boxes\"},\n", + " \"off\": {},\n", + " None: {},\n", + " } # dict of valid args:keyword params\n", + " EMB_MODES = {\n", + " \"fixed\": {\"temperature\", \"scale\", \"normalize\"},\n", + " \"learned\": {\"emb_num\"},\n", + " \"off\": {},\n", + " } # dict of valid args:keyword params\n", + "\n", + " def __init__(\n", + " self,\n", + " emb_type: str,\n", + " mode: str,\n", + " features: int,\n", + " n_points: int = 1,\n", + " emb_num: int = 16,\n", + " over_boxes: bool = True,\n", + " temperature: int = 10000,\n", + " normalize: bool = False,\n", + " scale: float | None = None,\n", + " mlp_cfg: dict | None = None,\n", + " ):\n", + " \"\"\"Initialize embeddings.\n", + "\n", + " Args:\n", + " emb_type: The type of embedding to compute. Must be one of `{\"temp\", \"pos\", \"off\"}`\n", + " mode: The mode or function used to map positions to vector embeddings.\n", + " Must be one of `{\"fixed\", \"learned\", \"off\"}`\n", + " features: The embedding dimensions. 
Must match the dimension of the\n", + " input vectors for the transformer model.\n", + " n_points: the number of points that will be embedded.\n", + " emb_num: the number of embeddings in the `self.lookup` table (Only used in learned embeddings).\n", + " over_boxes: Whether to compute the position embedding for each bbox coordinate (y1x1y2x2) or the centroid + bbox size (yxwh).\n", + " temperature: the temperature constant to be used when computing the sinusoidal position embedding\n", + " normalize: whether or not to normalize the positions (Only used in fixed embeddings).\n", + " scale: factor by which to scale the positions after normalizing (Only used in fixed embeddings).\n", + " mlp_cfg: A dictionary of mlp hyperparameters for projecting embedding to correct space.\n", + " Example: {\"hidden_dims\": 256, \"num_layers\":3, \"dropout\": 0.3}\n", + " \"\"\"\n", + " self._check_init_args(emb_type, mode)\n", + "\n", + " super().__init__()\n", + "\n", + " self.emb_type = emb_type\n", + " self.mode = mode\n", + " self.features = features\n", + " self.emb_num = emb_num\n", + " self.over_boxes = over_boxes\n", + " self.temperature = temperature\n", + " self.normalize = normalize\n", + " self.scale = scale\n", + " self.n_points = n_points\n", + "\n", + " if self.normalize and self.scale is None:\n", + " self.scale = 2 * math.pi\n", + "\n", + " if self.emb_type == \"pos\" and mlp_cfg is not None and mlp_cfg[\"num_layers\"] > 0:\n", + " if self.mode == \"fixed\":\n", + " self.mlp = MLP(\n", + " input_dim=n_points * self.features,\n", + " output_dim=self.features,\n", + " **mlp_cfg,\n", + " )\n", + " else:\n", + " in_dim = (self.features // (4 * n_points)) * (4 * n_points)\n", + " self.mlp = MLP(\n", + " input_dim=in_dim,\n", + " output_dim=self.features,\n", + " **mlp_cfg,\n", + " )\n", + " else:\n", + " self.mlp = torch.nn.Identity()\n", + "\n", + " self._emb_func = lambda tensor: torch.zeros(\n", + " (tensor.shape[0], self.features), dtype=tensor.dtype, device=tensor.device\n", + " ) # turn off embedding by returning zeros\n", + "\n", + " self.lookup = None\n", + "\n", + " if self.mode == \"learned\":\n", + " if self.emb_type == \"pos\":\n", + " self.lookup = torch.nn.Embedding(\n", + " self.emb_num * 4 * self.n_points, self.features // (4 * n_points)\n", + " )\n", + " self._emb_func = self._learned_pos_embedding\n", + " elif self.emb_type == \"temp\":\n", + " self.lookup = torch.nn.Embedding(self.emb_num, self.features)\n", + " self._emb_func = self._learned_temp_embedding\n", + "\n", + " elif self.mode == \"fixed\":\n", + " if self.emb_type == \"pos\":\n", + " self._emb_func = self._sine_box_embedding\n", + " elif self.emb_type == \"temp\":\n", + " self._emb_func = self._sine_temp_embedding\n", + "\n", + " def _check_init_args(self, emb_type: str, mode: str):\n", + " \"\"\"Check whether the correct arguments were passed to initialization.\n", + "\n", + " Args:\n", + " emb_type: The type of embedding to compute. 
Must be one of `{\"temp\", \"pos\", \"\"}`\n", + " mode: The mode or function used to map positions to vector embeddings.\n", + " Must be one of `{\"fixed\", \"learned\"}`\n", + "\n", + " Raises:\n", + " ValueError:\n", + " * if the incorrect `emb_type` or `mode` string are passed\n", + " NotImplementedError: if `emb_type` is `temp` and `mode` is `fixed`.\n", + " \"\"\"\n", + " if emb_type.lower() not in self.EMB_TYPES:\n", + " raise ValueError(\n", + " f\"Embedding `emb_type` must be one of {self.EMB_TYPES} not {emb_type}\"\n", + " )\n", + "\n", + " if mode.lower() not in self.EMB_MODES:\n", + " raise ValueError(\n", + " f\"Embedding `mode` must be one of {self.EMB_MODES} not {mode}\"\n", + " )\n", + "\n", + " def forward(self, seq_positions: torch.Tensor) -> torch.Tensor:\n", + " \"\"\"Get the sequence positional embeddings.\n", + "\n", + " Args:\n", + " seq_positions:\n", + " * An (`N`, 1) tensor where seq_positions[i] represents the temporal position of instance_i in the sequence.\n", + " * An (`N`, n_anchors x 4) tensor where seq_positions[i, j, :] represents the [y1, x1, y2, x2] spatial locations of jth point of instance_i in the sequence.\n", + "\n", + " Returns:\n", + " An `N` x `self.features` tensor representing the corresponding spatial or temporal embedding.\n", + " \"\"\"\n", + " emb = self._emb_func(seq_positions)\n", + "\n", + " if emb.shape[-1] != self.features:\n", + " raise RuntimeError(\n", + " (\n", + " f\"Output embedding dimension is {emb.shape[-1]} but requested {self.features} dimensions! \\n\"\n", + " f\"hint: Try turning the MLP on by passing `mlp_cfg` to the constructor to project to the correct embedding dimensions.\"\n", + " )\n", + " )\n", + " return emb\n", + "\n", + " def _torch_int_div(\n", + " self, tensor1: torch.Tensor, tensor2: torch.Tensor\n", + " ) -> torch.Tensor:\n", + " \"\"\"Perform integer division of two tensors.\n", + "\n", + " Args:\n", + " tensor1: dividend tensor.\n", + " tensor2: divisor tensor.\n", + "\n", + " Returns:\n", + " torch.Tensor, resulting tensor.\n", + " \"\"\"\n", + " return torch.div(tensor1, tensor2, rounding_mode=\"floor\")\n", + "\n", + " def _sine_box_embedding(self, boxes: torch.Tensor) -> torch.Tensor:\n", + " \"\"\"Compute sine positional embeddings for boxes using given parameters.\n", + "\n", + " Args:\n", + " boxes: the input boxes of shape N, n_anchors, 4 or B, N, n_anchors, 4\n", + " where the last dimension is the bbox coords in [y1, x1, y2, x2].\n", + " (Note currently `B=batch_size=1`).\n", + "\n", + " Returns:\n", + " torch.Tensor, the sine positional embeddings\n", + " (embedding[:, 4i] = sin(x)\n", + " embedding[:, 4i+1] = cos(x)\n", + " embedding[:, 4i+2] = sin(y)\n", + " embedding[:, 4i+3] = cos(y)\n", + " )\n", + " \"\"\"\n", + " if self.scale is not None and self.normalize is False:\n", + " raise ValueError(\"normalize should be True if scale is passed\")\n", + "\n", + " if len(boxes.size()) == 3:\n", + " boxes = boxes.unsqueeze(0)\n", + "\n", + " if self.normalize:\n", + " boxes = boxes / (boxes[:, :, -1:] + 1e-6) * self.scale\n", + "\n", + " dim_t = torch.arange(self.features // 4, dtype=torch.float32)\n", + "\n", + " dim_t = self.temperature ** (\n", + " 2 * self._torch_int_div(dim_t, 2) / (self.features // 4)\n", + " )\n", + "\n", + " # (b, n_t, n_anchors, 4, D//4)\n", + " pos_emb = boxes[:, :, :, :, None] / dim_t.to(boxes.device)\n", + "\n", + " pos_emb = torch.stack(\n", + " (pos_emb[:, :, :, :, 0::2].sin(), pos_emb[:, :, :, :, 1::2].cos()), dim=4\n", + " )\n", + " pos_emb = 
pos_emb.flatten(2).squeeze(0) # (N_t, n_anchors * D)\n", + "\n", + " pos_emb = self.mlp(pos_emb)\n", + "\n", + " pos_emb = pos_emb.view(boxes.shape[1], self.features)\n", + "\n", + " return pos_emb\n", + "\n", + " def _sine_temp_embedding(self, times: torch.Tensor) -> torch.Tensor:\n", + " \"\"\"Compute fixed sine temporal embeddings.\n", + "\n", + " Args:\n", + " times: the input times of shape (N,) or (N,1) where N = (sum(instances_per_frame))\n", + " which is the frame index of the instance relative\n", + " to the batch size\n", + " (e.g. `torch.tensor([0, 0, ..., 0, 1, 1, ..., 1, 2, 2, ..., 2,..., B, B, ...B])`).\n", + "\n", + " Returns:\n", + " an n_instances x D embedding representing the temporal embedding.\n", + " \"\"\"\n", + " T = times.int().max().item() + 1\n", + " d = self.features\n", + " n = self.temperature\n", + "\n", + " positions = torch.arange(0, T).unsqueeze(1)\n", + " temp_lookup = torch.zeros(T, d, device=times.device)\n", + "\n", + " denominators = torch.pow(\n", + " n, 2 * torch.arange(0, d // 2) / d\n", + " ) # 10000^(2i/d_model), i is the index of embedding\n", + " temp_lookup[:, 0::2] = torch.sin(\n", + " positions / denominators\n", + " ) # sin(pos/10000^(2i/d_model))\n", + " temp_lookup[:, 1::2] = torch.cos(\n", + " positions / denominators\n", + " ) # cos(pos/10000^(2i/d_model))\n", + "\n", + " temp_emb = temp_lookup[times.int()]\n", + " return temp_emb # .view(len(times), self.features)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "525188c5-1317-4003-90d1-bb1c4b9e9112", + "metadata": { + "jupyter": { + "source_hidden": true + }, + "tags": [] + }, + "outputs": [], + "source": [ + "def _learned_pos_embedding(self, boxes: torch.Tensor) -> torch.Tensor:\n", + " \"\"\"Compute learned positional embeddings for boxes using given parameters.\n", + "\n", + " Args:\n", + " boxes: the input boxes of shape N x 4 or B x N x 4\n", + " where the last dimension is the bbox coords in [y1, x1, y2, x2].\n", + " (Note currently `B=batch_size=1`).\n", + "\n", + " Returns:\n", + " torch.Tensor, the learned positional embeddings.\n", + " \"\"\"\n", + " pos_lookup = self.lookup\n", + "\n", + " N, n_anchors, _ = boxes.shape\n", + " boxes = boxes.view(N, n_anchors, 4)\n", + "\n", + " if self.over_boxes:\n", + " xywh = boxes\n", + " else:\n", + " xywh = torch.cat(\n", + " [\n", + " (boxes[:, :, 2:] + boxes[:, :, :2]) / 2,\n", + " (boxes[:, :, 2:] - boxes[:, :, :2]),\n", + " ],\n", + " dim=1,\n", + " )\n", + "\n", + " left_ind, right_ind, left_weight, right_weight = self._compute_weights(xywh)\n", + " f = pos_lookup.weight.shape[1] # self.features // 4\n", + "\n", + " try:\n", + " pos_emb_table = pos_lookup.weight.view(\n", + " self.emb_num, n_anchors, 4, f\n", + " ) # T x 4 x (D * 4)\n", + " except RuntimeError as e:\n", + " logger.exception(\n", + " f\"Hint: `n_points` ({self.n_points}) may be set incorrectly!\"\n", + " )\n", + " logger.exception(e)\n", + " raise (e)\n", + "\n", + " left_emb = pos_emb_table.gather(\n", + " 0,\n", + " left_ind[:, :, :, None].to(pos_emb_table.device).expand(N, n_anchors, 4, f),\n", + " ) # N x 4 x d\n", + " right_emb = pos_emb_table.gather(\n", + " 0,\n", + " right_ind[:, :, :, None]\n", + " .to(pos_emb_table.device)\n", + " .expand(N, n_anchors, 4, f),\n", + " ) # N x 4 x d\n", + " pos_emb = left_weight[:, :, :, None] * right_emb.to(\n", + " left_weight.device\n", + " ) + right_weight[:, :, :, None] * left_emb.to(right_weight.device)\n", + "\n", + " pos_emb = pos_emb.flatten(1)\n", + " pos_emb = self.mlp(pos_emb)\n", + "\n", 
+ " return pos_emb.view(N, self.features)\n", + "\n", + "\n", + "def _learned_temp_embedding(self, times: torch.Tensor) -> torch.Tensor:\n", + " \"\"\"Compute learned temporal embeddings for times using given parameters.\n", + "\n", + " Args:\n", + " times: the input times of shape (N,) or (N,1) where N = (sum(instances_per_frame))\n", + " which is the frame index of the instance relative\n", + " to the batch size\n", + " (e.g. `torch.tensor([0, 0, ..., 0, 1, 1, ..., 1, 2, 2, ..., 2,..., B, B, ...B])`).\n", + "\n", + " Returns:\n", + " torch.Tensor, the learned temporal embeddings.\n", + " \"\"\"\n", + " temp_lookup = self.lookup\n", + " N = times.shape[0]\n", + "\n", + " left_ind, right_ind, left_weight, right_weight = self._compute_weights(times)\n", + "\n", + " left_emb = temp_lookup.weight[\n", + " left_ind.to(temp_lookup.weight.device)\n", + " ] # T x D --> N x D\n", + " right_emb = temp_lookup.weight[right_ind.to(temp_lookup.weight.device)]\n", + "\n", + " temp_emb = left_weight[:, None] * right_emb.to(\n", + " left_weight.device\n", + " ) + right_weight[:, None] * left_emb.to(right_weight.device)\n", + "\n", + " return temp_emb.view(N, self.features)\n", + "\n", + " def _compute_weights(self, data: torch.Tensor) -> tuple[torch.Tensor, ...]:\n", + " \"\"\"Compute left and right learned embedding weights.\n", + "\n", + " Args:\n", + " data: the input data (e.g boxes or times).\n", + "\n", + " Returns:\n", + " A torch.Tensor for each of the left/right indices and weights, respectively\n", + " \"\"\"\n", + " data = data * self.emb_num\n", + "\n", + " left_ind = data.clamp(min=0, max=self.emb_num - 1).long() # N x 4\n", + " right_ind = (left_ind + 1).clamp(min=0, max=self.emb_num - 1).long() # N x 4\n", + "\n", + " left_weight = data - left_ind.float() # N x 4\n", + "\n", + " right_weight = 1.0 - left_weight\n", + "\n", + " return left_ind, right_ind, left_weight, right_weight" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "fc8aa9bf-7e83-4fa6-892e-8a7703777f95", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# create Embedding object\n", + "emb = Embedding(emb_type=\"temp\",mode=\"fixed\",features=1024,emb_num=16,n_points=1,temperature=10000)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "4903f6c3-1cc8-412b-b988-ebeb2757c3b7", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# get sample crops from training data to pass through the network\n", + "train_path = \"/home/jovyan/talmolab-smb/datasets/mot/microscopy/airyscan_proofread/Final/dreem-train\"\n", + " \n", + "data = SleapDataset([os.path.join(train_path,\"10-1.slp\")], [os.path.join(train_path,\"10-1.mp4\")], crop_size=64,\n", + " mode=\"train\", clip_length=32, anchors=\"centroid\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "27bfdb50-2eee-4207-8a16-6481a9905e90", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# get a list of all instances; this is the format that the model pipeline uses as input data\n", + "ref_instances = []\n", + "for frame in data[0]:\n", + " for instance in frame.instances:\n", + " ref_instances.append(instance)" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "8ea441b2-b12a-4f10-8821-aef889a063ba", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# get the vector of times using the list of crops+labels\n", + "# query_instance is the instances in last frame (set to None)\n", + "ref_times, query_times = get_times(ref_instances, None)" + ] + 
}, + { + "cell_type": "code", + "execution_count": 7, + "id": "b863dbfe-d9fc-4ed1-bf97-3f304d3d03a6", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAi4AAAGiCAYAAADA0E3hAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/TGe4hAAAACXBIWXMAAA9hAAAPYQGoP6dpAAB5bElEQVR4nO39e5Bkd33f/z8/n885fZnLzt6kXS2SQNgiGC84WDgEQgwOIBuDKf+oCo7BGCf8YcwlKECwMamy7DKSTcpAImJSdlFAmRC5UgbHztehELEtTAkMERALcLAdy+hirVbSzs61L+d8Pu/fH59zek73dM90z2Vnevf9qOo63adPn+tMn/ec7nm/jIgISimllFJTwB70CiillFJKjUsLF6WUUkpNDS1clFJKKTU1tHBRSiml1NTQwkUppZRSU0MLF6WUUkpNDS1clFJKKTU1tHBRSiml1NTQwkUppZRSU0MLF6WUUkpNjQMtXH7zN3+TG264gUajwU033cSf/dmfHeTqKKWUUuqQO7DC5Xd/93e55ZZbeM973sPXvvY1/uk//ae87GUv44EHHjioVVJKKaXUIWcOKmTxuc99Lt///d/Phz/84d647/me7+HHf/zHuf322w9ilZRSSil1yCUHsdBut8u9997LL/zCL/SNv/nmm7nnnns2Td/pdOh0Or3HIQQuXLjAiRMnMMbs+/oqpZRSavdEhJWVFc6cOYO1O/vQ50AKl8cffxzvPadOneobf+rUKc6dO7dp+ttvv51f/uVfvlSrp5RSSql99OCDD3Lttdfu6LUHUriUBq+WiMjQKyjvfve7efvb3957vLS0xPXXX8//7w9eTTpb640Psrl6syZsHsfwT8eGjQ9sXp+h43TZumxdti5bl63L1mVvuZz57CK//SP/H/Pz80PnMY4DKVxOnjyJc27T1ZXz589vugoDUK/Xqdfrm8ans7UdFS7O7O6Ae9n5Addl67J12bpsXbYu+0pddq2bApsvXEziQP6rqFarcdNNN3HXXXf1jb/rrrt4/vOffxCrpJRSSqkpcGAfFb397W/nda97Hc95znN43vOex2/91m/xwAMP8MY3vvGgVkkppZRSh9yBFS4/8RM/wRNPPMGv/Mqv8Mgjj3D27Fn+6I/+iCc/+ckHtUpKKaWUOuQO9Mu5b3rTm3jTm950kKuglFJKqSmiWUVKKaWUmhpauCillFJqamjhopRSSqmpcaDfcdmtp8+eozGX9o0b9j/ne2Hw/9v9wOPB/1UffH6iZW0xr1D533vP8PG7NTivrZYz7P/+xzHqOA3Or7ovBvsObLfs7X4WBnsRjJp/YsrHledN2HScRjZlGtHzYNN0xeuryylfW12WM75vfSfpqTBsfYKYTdMGzND9s9U+HbYeo9Zh2DT92yi9ZW6s9/a/25Oug0U2HUdnZNPPgjWh7+dt3N4W+7HsSYyz//eLLvvKWvalNNWFy9FknWaS4Bj2JjJeATNugeEHLk6NLiA2z88Paf6z1fSDrymn6Stg+k7ow8ePY9T2j1MgBTFQnkTHvHg37FhVl2OL+ZXzt737tu+EG0+snp0Y+cu9adcNruvANg6ZzVZFyjgn3k3bWezvrU7u/Ws4efHUK46qRcKYJ9XdrMtgETzpMnezDjtZ7m4KiL20F3+cDdvmUcs6DNusVJX+RCql1AT266quUmo8WrgopZRSampo4aKUGupK+bxcKbV7l/L9QgsXpdRQk35fSil15bqU7xdauCillFJqamjhopRSSqmpoYWLUmoo/Y6LUuow0sJFKTWUfsdlOO1rotTB0t9ApZSawJXUx0WLNHUY6U+lUkqpoa6kIk1Nj6lu+f//PfpMGuspNZuT2EBiAtYEUhOwRkitxyLY8rHxWCO9tvPWCM4ELBtDiHEB1gRc8bj85R18HMeFvjb2w6IG0k1jKsa4Gr85bmAgN2lIPMDw+Wy/sOrrh0UJjBM7sBPj5iONiiLYK9X5J6Z/f4Rtso2qRuUHVQ1v4+8H8nn618EiW+ZS7TRyYFjUwDj5RZuWMUHkwKTRBpPEDYxal93GDYzah3sZNzB0um1a7+90+YPz2C5e4VLGDajpcim/EzfVhctDS0dphITEBVLnSW3A2UBqfa+QSSr3hxU1ifU44riyqCmf6ytqTCAURRDFG8jgL3H1jXirTJ7eNGO8CXixpAxk8pgtspOKwaiT+raFTZH/E8SS9nKIts9NSgdygyYtZIIZOEHIxgnGVbapeuKx9OcaTWrYScxt2rcbx8gysP+2WOQkv8TVaQeLg8Fgw8ECY9t5j3miKacLvZ/t8fKLqs8Nn++YAZNGtl3WsBPrToqWYcsbtcyDsFUBcSVdAdGcJDWK/lQopZRSampo4aKUUkqpqaGFi1JKKaWmhhYuSimllJoaWrgopZRSampo4aKUUkqpqaGFi1JKqQM1Tt8jpUpauCillDpQ2zW+U6pKCxellFJKTQ0tXJRSSik1Naa65X/2jQX8bB1JIDiQROLNCVRuxgnWCdYGrAtYKzgX2/wnzuOskDqPM4KzgZr1OBuKqABPzXkSU0QJVKIDyriAxIZebMBg9lHZ1r+adzSYfdT/fKXF/MDnvn2ZSAORAm6g1b8djAkomdFt8quZSt5Us4Iq+UVmc/v/uNzhl3rHadk9GA9QjRAYmZdkBrZhF1eaq9sKG3ED1WVCGTlQRiKYXVX9ozJzGNxfm7Zr8pbv43x/oLxUv1W8wLD8omEmyUvqy2Ea0vYfhucWjZz/hFlJ27X9Hye/Z7c5QZPkFY27/EvpUuUkKVWa6sLlyN8JZhZCAiE1SGIIaVnExPGSSHzswCdCPqqwSQLWBpyLRY2zgcR5UhcLk9TFzKPUemrW92UglQVNHG4EOW5V0MBAmKO4Ig9pIwfJC32BkB47NAOpmnk0KiNpWHaQG/E+4bH9hY/xvTf4lEqWj/G9E08cP2SGlWlG2fJkVjl5uYFxsDnjCCbPSbL4kXlF1ZNbdbrNmUY7s/mEOlA4bJNjVDXuCWxwntXX9gc8hi0Li03z3UHA47BlwPgZSX3z2kFe0nbbBFufVCcpGkYWqxMuc6xl7TDgcVSw5GHIDNKAR1U6+J9GpZRSSqkxaeGilFJKqamhhYu6rEz6MZFSSqnpou/y6rLi9uBzcKWUUoeXFi5KKaWUmhpauCillFJqamjhoi4r+h0XpZS6vOm7vLqs6HdclFLq8qaFi7qs6BUXpZS6vE1159x0PWAldsYNSey
OGxIIidnonOtMHFpiZ91EkKKTrjhBkjj0xS0ruunaJMSOujaQJAFXdNBNXNFRt+ymazaGg910LRJjAYxs3C866ZbddUfFBJQddXtRABLvZ7hNsQDlyXpU19xJVedT7SALsetn33KqTShHFQ3bXQXZ1OZ+yPTVaarPj/PasVQjDja2Mbb539j2kfthl4bt5/LYl11Lh7WjH8dWXVS3a3E/VmfbHXTMHZxm2+7K23Q83UnX3J0uqzRq/0/a7n5a7EXn2mlxWLoFq+GmunCZ+84aicvBGMQasHEoziLOFEVLMUw3HsdCx2wUOK6IC+jFBFSHQpZANymKnCIPCSeQhBgV4ATrAkniewWOs0UOkpFeXIAzoVfYVIucMvvIGumLDUgrw7Ko6d0vowJg030YP/Oob/yQN6bBaQfb3Kf4vgiAqmpMwKC++fTlJ9ne9B7Te658E6nGCgSxQzONhtnuRDQqRmBkbtFAvtE4bdy3Xv5WsQO+EjswXv7TpvlvddLZIh/JVwooKsseVRBsd3IbPNkPZiQNzn+r7KItC7IJW/GPk1lUXd9Ji5bq+oyzvHIZ+52TNK7DWrRoTtKVSUtKpZRSSk0NLVyUUkopNTW0cFFKKaXU1NDCRSml1GVjnC9Wq+mmhYtSSqnLxuX6X11qgxYuSimllJoaWrgopZRSampo4aKUUkqpqaGFi1JKKaWmxnQXLgYwJt4AqdzfNGnZ+LHohGpEMAJ93+MSNsZVbibOfNN4xCBi4l0h3i9uoXwMxWPTGwaKYXGD/m/CD3ZGrXbZDBi8WILYvi6ykxjsftsbv8MW13vZ+n77ZR3OL97tpLX8fsxjr40bKzDtDuO+V0oNN9Ut/y8+bZ4adVxXcN2AzQSbBUweh3SERARCUaQUxNCLByjjAiSJwzIioIwJCL0ogOJ+OS4tcpHSIvcoAZ8KmStjAcosJAErkAjGCcZtZCA5JzgXcDbeEhdzj8qYgLSIBajZvC8qIB0YOoqYgS2yj2BzNMCwmIBMXDFOeuMGbRUNUC1+3BZ1VUrRqn+wiOq1+DeV1v+2r/V/b7wZXeyVtivuytcNiw4IGFLjN8aZjQIzxW8u9CqLmjQCYGhWERv7otyXMTtpY/wok52IB47nwD7oL162L1InKXYGW+lP0vp/0KSZSYM/M4Ot+Ee14R86/wnXaS/a/k9aVO5l1MBW6zDOf/WMmwd1WDKD9iLyYNxtVts7+J+I3TCVG8VFkcErLmXREi+JQHmlJQgmVMYVj42XeHVG2BgXqAwH79ObnhCv2Jhg4vt7eV8MBNO7KlOs6MYqQeVKzcZt47EdeZLqXbHZ4o1m1JWU8qS+kQO0+x+HYUXNltNPeLVm1BvIqPHbXaEZ5w1p0m2K85WJioedXLUaNv9Jl7vtMg7JFS69IqKUKk134aKUUkqpK4oWLkoppZSaGlq4KKWUUmpqaOGilFJKqamhhYtSSimlpoYWLkoppZSaGlq4KKWUUmpqaOGilFJKqakx1Z1zl24w1LC4LriOw3UE2yV20s0Emws2E4wHkwesL5rOBTYa0VG0+c9jv3+XGyQXrDWIpa+LrrhKJ13HRvfcxCAJRVfd2Ek3duCNXXRD2U3XOSQRvBO8BZIALnbUtUU3XesCzsUuuonzpC52xU1d7KI7rJtuan1vaBGsCaTG95qRjdVRV+LrMhwOwRYN0cpOj30NwPa4F1i1+dpgB9m4vP7ny6ZxloHup6MaxW3XeXOc15XTjLM8Npr+jds4LYjZ6D6M7e+gW7lfnWaS+W9lsDvtpu61lY6fg51uq+O2Xc7AD061i+iwDrrbdbYdXP+hy9ziGI2zjP3sdjpqe3Y1zx10FT4sJunYq65sU124JN+7TMcIvuUwLYdrWVybOOxQ3CQOy2KmWxY0AZvHeACCYHzAlh12q0wRAVBGBDhDcHajkOnFAlSjAWIEQDm+vB8LmqKYSUCs68UDBCf4ajxAErBJEQ2QhF40QC3xsaApYgJq1seipmj/Xy1mLBIjAYyQFIVMWcSUhc2wYqYsZGAjGqAsZByBgOt7I9yu6+u4nWe92OHzMhsnF4vve7N3JmycxCsvCX3RA35jGUNOQtUCyBnfm6Z6YnPF+lnjN07ijO5K3GvLP/ZJrzKfSqFW3fY+Mnmn3a0MFkvQX8CUJ8TBAmYr23XdHZxnOd9R7f/jOKms3+67Io/TUn674mUvi4VxW/DvNENqP4qlS2EvWu5Pi8MSc3CY6d5RSiml1NTQwkUppZRSU2PiwuXzn/88P/ZjP8aZM2cwxvD7v//7fc+LCLfeeitnzpyh2Wzyohe9iG9+85t903Q6Hd761rdy8uRJZmdneeUrX8lDDz20qw1RSiml1OVv4sJlbW2N7/u+7+NDH/rQ0Off97738f73v58PfehDfOUrX+H06dO89KUvZWVlpTfNLbfcwqc//WnuvPNOvvCFL7C6usorXvEKvPdD56mUUkopBTv4cu7LXvYyXvaylw19TkT44Ac/yHve8x5e9apXAfDxj3+cU6dO8clPfpKf/dmfZWlpiY985CP8zu/8Di95yUsA+MQnPsF1113H5z73OX74h394F5ujlFJKqcvZnn7H5f777+fcuXPcfPPNvXH1ep0XvvCF3HPPPQDce++9ZFnWN82ZM2c4e/Zsb5pBnU6H5eXlvptSSimlrjx7WricO3cOgFOnTvWNP3XqVO+5c+fOUavVOHbs2MhpBt1+++0sLCz0btddd91errZSSimlpsS+/FeRMf19AkRk07hBW03z7ne/m6Wlpd7twQcf3LN1VUoppdT02NPC5fTp0wCbrpycP3++dxXm9OnTdLtdFhcXR04zqF6vc+TIkb6bUkoppa48e1q43HDDDZw+fZq77rqrN67b7XL33Xfz/Oc/H4CbbrqJNE37pnnkkUf4xje+0ZtmXPU0I63l2LpH6gHfCPg6+KbgGxQ3g29A3jDkdRMf1w2+ZvF1R6hZJLFI6ghJcd8aMPHWazIpRTRAACOC8YIJxC68vrzRGxoPpryfg83jOJuDyU0chnjf5OXQQDDgDZJbQm4IweJzi/cWHyzd3JF7RxaKx8GReUculjw48mDJxZIFR1aMy4IjF0ceHB5LEEMmLt6C27gvDi+WILbXPba8X3ZyLLvUVrtv+m1+jLzY3m0nBrvpDnYkHdZtd1SnTYfghnQ6rU5ffX7YfKqdUrfrCmyR3m1cB92efU9iBPY6F6I67yLKYutpdt5pdZLt38/t3CsH/fOk1F6b+L+KVldX+Zu/+Zve4/vvv5+vf/3rHD9+nOuvv55bbrmF2267jRtvvJEbb7yR2267jZmZGV7zmtcAsLCwwBve8Abe8Y53cOLECY4fP8473/lOnvnMZ/b+y2hcP3XDl7EzTdZDjaW8ycVshqWswVKnyXK3zmq7zmqrRt5OoGOxLYtrW1zbxBiA9kAkQHGz3TISQDB5iEWKD5hM+iMByuKmEgkgie1lGlVzjoKrRAC4MuMIpBIXIEUGUowIECSNj30i5IkgSYwDwJWRAAFrA0kRCZDYjYwjZ6SXceRM6EUBlLEAZR
xAaooIgEpEQBkHkBo/OhKgyEQqT/LliaJaRAwrMEYVL86ErYuAgfdeV9RNG+3+N/6Vviyqhs1vWDxAfI3tRQN4TO/56vjqybBXyBUxAMNsat0+dKr+eWy01C/GVyIOnNmcUbRn7ds35UFVd3hlP+6w+BxVSFTjBOKSzNDconLanSyjVG2TX22hv120AAxvxb8Xrf6HteDfLmJgVLv/nRZR48YMjLMe+1HIHdZ2/zuNXRich+YzTW7iwuV//+//zQ/90A/1Hr/97W8H4PWvfz0f+9jHeNe73kWr1eJNb3oTi4uLPPe5z+Wzn/0s8/Pzvdd84AMfIEkSXv3qV9NqtXjxi1/Mxz72MZwb9da+S/oXh9pDhyVHZBozZ9TO6AlOqQ0TFy4vetGLkMEgwgpjDLfeeiu33nrryGkajQZ33HEHd9xxx6SL3xn9hVd76LCEoE1rYJ6anBYt02EaPjq8HBz8u++loFdclFJTbC8+llD7b/w0eLUbV0bhopRSU0yvuCi1QQsXpZRSag/oR0WXhhYuSk3oMHy/RSmlrlT6DqyUUkrtAf2Oy6WhhYtSSimlpsbE/w59mFzMZ2iEFC+Wus05mq6TWk/NeppJxlyty0q9zno3pd1OyWcSsrYjb7teEzrbKZrRdUxsRtctm9FZbFZpRucFk8dGdLGLrkDY+DzTlPfzgBGDCbEhXUgMxhusM0XTOSka1EFwhpAItrgvvaZ0RbO6DEwqSNHMLhRN6CQVJDP4xBKSQPCCtYG8aERnres1o3PekdqAs7EJ3bBmdIkJdIIjNYFELLlxJNaTY+NrCL2GdAGDFUswAYshFI3ovDgcQsBuNIyS/mZ0gx12q83qysZ0o5rQOTO8eZ0jbJpvtQHY4L8L9y2TasOxjSZkZVM9j9k0ftM4s3kbevMc8Xn35mZfG9tWbQZWjq82myvXf7AR3U70NVir/Gv14D6tPjfJv4LvtnHYYO+SrZY9SfO5YfMeR/XY7OS7DKMa6Y3zb+3ax2U66HdcLo2pLlz+eu0U19S7HEvWWUjWmbHd3ht7JgltSVj3dVZ9neW8wUrW4GK3yXKnwVqnRqsTCxppOUxZzJS3orNu0paioNnoquu6AZsHTB6LF+MDJkj/6aiMC7AbXXVxhuAsYik67FJ01a121C2KmnSj025IN4oZSSCkUnTYlWI6wTshS0LsqusE62IxY10sZsrOus5Kr7Nu6vymYqbm8l5Rk1aG1gRS4zcVMoMddQGcxGImw2101q0UDL3iwWwuZKon/8EiZmgnXLFDW/7DsA6wA2TI66oHsXqCKZZt2TjxOON70QgwfofXod1nzYjutH3bXBk/bN0L20Uw9OY2UOCNUyDF6fauk+ngPivf+DeKt83Fy7i2+xfi7QqjUV1ttzLq56263uMUf6O62W41/bjrs5Nuvftht8vUfxG/culHRUoppZSaGlq4KKWUUmpqaOGilFJKqamhhYtSSimlpoYWLkoppZSaGlq4KKWUUmpqaOGilFJKqamhhYtSSimlpoYWLkoppZSaGlPdOffLX3kaXJWSzGbMzbY5OtPiWH2dY7UW82mbOdehYTNOpqtcXVsmiCUTRzukrPo6a3mdlbzOcrfBarfOaqdGq1Oj3UkIrQTTtriWjZ10W7bXTdd1hKQXDxBiLEAeowFMLrH9f5BeLEC1q661BjEGDL1uumJN0fLfVrrnmo0uupVuulLpptvrpFt00xXnYiSA2+imS3EzTjBJwFrpddO1VmIUgPM4GzvplvEAqfXUnCcxG7EA9aKrbuyiW3bV9VgjvW66fZ10qx11hY0uuibE+7LRUdQRCLj+Tp9DGmNu1U13sOX+qI66sEVX3UpHWmc2Wt+P6phbHb/xOl8sY3hX0GEddl1l/W0RrRDXZ4wuultswzi27DBcmVd1uq1a1E8SQ1CNEoiPh0UfFBEMY7a8n6Sj6k665w6z1TaP0+133OUMfe0EXXMnsVXMwKRda8fdvt1GG4zTcn/cjr2TxFvsp73oVH0QnZH301QXLumSJdQducCaEYwREhuoOU/d5QTbxRJb06fGY238QZyxXWZsl3XXoekaNF1Gw2XUkwYriWfV1WlbwdsEb0GMRYxAUXBgyh8AgeovWTBYAUTiJMXvkCnGlS8xSHyZszHvyMb8HwkBSQyEeI4yUhmKwQgEqSxTIAC2eD6kxZtDOY0Uiy2GIhaSgGARAeeknA0y4mQXrAHyeJ70CbicBMigWDg4CfF5KUaYEE+45bBUFCyheK68b4sCwRH6Ws+X48ZVFjHDMo02TTsys6j/Dbaa21N9Iytzi8rxMKSAqWQeDTP4umF5TNvlGA2f74iMpFEnoSF5T9vZ7UmxOp9R67WTN9udtIHfq+JlmFEnncNyUlRqGulvjlJKKaWmhhYuSimllJoaWrgopZRSampo4aKUUkqpqaGFi1JKKaWmhhYuSimllJoaWrgopZRSampo4aKUUkqpqaGFi1JKKaWmhhYuSimllJoaU93y/8Q3PfIQZLMJ2VxKa26Wv5s7zt/Me8xsTnO2w5GZNgv1Nsfr6xyttZh1HWZcl7rJWUjWWUjWyUJC1nSshxotn7KSN1jJ6ix1m6x06qx1arRbNdbbCablsC0T84vaFteGpMgvch1wXcF1BZvFm8kDNheM35xhZPOAFCkC5KEvw0gSGzOMUoO4eAtpmV8khMTgU4qsooHsIldmGAmSgDgQJ4SkHAreClkiGBcwTvoyjJKkyCdyoZdflDqPM6Evv6hmPan1fflFcRjzi7bLMIIit0gEawIZDodg2b/8omEG2/9vamdfjUOodn4fnPeoTJEt1sFjhreFH5VRNDK7aMi8B5a7VRRAuQ8G84iq96vT7NSweVRb6k+aWTRui/9hGTbVOIGdtv3fbUbR4Lyqy9jPfJndxBiMnOc+ZSZdCrvNSLpcTEsUxVQXLo0nupClpC1HtwhDzLoGmzmyzLDuDd5bfLBI+cZYi9lFdZf3MoywXYJY5qVN26UcSdosJw1mki6zSZOLSZNl51lPanSTFO+SXiCiWBNDDZ1BrMTHJsYQiQVrLBB6b9NGpO/ka0LxIAjGxNcaa5AgiLOYUBYtNoY1BlPc4nnLB1OMBxMMIYBJiMPisSSChJiBJAmVYSxw4rKEMlJJxCCJj0MxBOcJYkhdDA/s5RcRT2jeelJjYm5RL3xQcBIIxmy8oRXZRZ540hgcX70/SX6RF7upeCkNjt+ukNnuxDoquyg+NzybqHoC2y7PqJznOBlF253Yhu2TYds/evuqBcXwabYzKmtqcB6TZBZNHPA3RvDeQdiLk8ReFguXWxDfXtmLkEO1tw5/aaWUUmrfadGipoUWLkoppZSaGlq4KKWUUmpqaOGilFJKqamhhYtSSik1wjT8l81emZZtnY61VEoppQ7AlfRfRdOyrVq4KKWUUmpqaOGilFJKjTAtH5/shWnZ1ulYS6WUUuoATMvHJ3thWrZVCxellFJKTQ0tXJRSSik1NaY6qwgLxscwQ5cJrguhXQYRWvLE0bUpqzYG/pkyMA6DxxIw1G0W84oKzgRcE
[... base64-encoded image/png data omitted: matplotlib figure output of plt.imshow(emb(ref_times).numpy(), aspect='auto'), a heatmap of the fixed temporal embedding ...]",
+      "text/plain": [
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# call fixed temporal embedding with the vector of 'times'\n", + "plt.imshow(emb(ref_times).numpy(), aspect='auto')" + ] + }, + { + "cell_type": "markdown", + "id": "a972707a-51a7-45ff-987e-80ee0dea4752", + "metadata": {}, + "source": [ + "### Rotary Positional Embeddings" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5d01ca30-c642-4a50-bd5b-802711c4bb16", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import torchtune.modules as tune\n", + "from dreem.models.transformer import TransformerEncoderLayer\n", + "from dreem.models import VisualEncoder\n", + "from dreem.models import GlobalTrackingTransformer" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "id": "87bebe90-d8e7-40bf-8783-ee5c57944632", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "feat_dim = 1024\n", + "xfmr_encoder = TransformerEncoderLayer(d_model=feat_dim, nhead=8)\n", + "visual_encoder = VisualEncoder(d_model=feat_dim, model_name=\"resnet18\")\n", + "rope = tune.RotaryPositionalEmbeddings(16,32,10000)" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "7999fcef-953b-42cf-927c-f3b617f68157", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "def extract_features(\n", + " instances: list[\"Instance\"], \n", + " visual_encoder: \"dreem.models.VisualEncoder\",\n", + " force_recompute: bool = False\n", + " ) -> None:\n", + " \"\"\"Extract features from instances using visual encoder backbone.\n", + "\n", + " Args:\n", + " instances: A list of instances to compute features for\n", + " VisualEncoder : pass an instance of a visual encoder\n", + " force_recompute: indicate whether to compute features for all instances regardless of if they have instances\n", + " \"\"\"\n", + " if not force_recompute:\n", + " instances_to_compute = [\n", + " instance\n", + " for instance in instances\n", + " if instance.has_crop() and not instance.has_features()\n", + " ]\n", + " else:\n", + " instances_to_compute = instances\n", + "\n", + " if len(instances_to_compute) == 0:\n", + " return\n", + " elif len(instances_to_compute) == 1: # handle batch norm error when B=1\n", + " instances_to_compute = instances\n", + "\n", + " crops = torch.concatenate([instance.crop for instance in instances_to_compute])\n", + "\n", + " features = visual_encoder(crops)\n", + "\n", + " for i, z_i in enumerate(features):\n", + " instances_to_compute[i].features = z_i" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "75ec8cab-25b9-4e9e-a64a-b5dbe00cc81a", + "metadata": {}, + "outputs": [], + "source": [ + "# pass instances through visual encoder to get the feature vector (q,k,v)\n", + "x = extract_features()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "dreem", + "language": "python", + "name": "dreem" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From c320eea6c1fe208d5d76a0761c3caca7dca65466 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Wed, 31 Jul 2024 15:51:21 -0700 Subject: [PATCH 34/63] test update of notebook --- rope.ipynb | 180 +++++++++++------------------------------------------ 1 file changed, 36 insertions(+), 144 deletions(-) diff --git a/rope.ipynb 
b/rope.ipynb index 2652e38..310fa2c 100644 --- a/rope.ipynb +++ b/rope.ipynb @@ -12,7 +12,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "/opt/conda/envs/dreem/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + "/opt/miniconda3/envs/dreem/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", " from .autonotebook import tqdm as notebook_tqdm\n" ] } @@ -33,7 +33,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 4, "id": "a8736593-71f7-4ab6-a594-eb52d2fd94ac", "metadata": { "tags": [] @@ -289,129 +289,7 @@ }, { "cell_type": "code", - "execution_count": null, - "id": "525188c5-1317-4003-90d1-bb1c4b9e9112", - "metadata": { - "jupyter": { - "source_hidden": true - }, - "tags": [] - }, - "outputs": [], - "source": [ - "def _learned_pos_embedding(self, boxes: torch.Tensor) -> torch.Tensor:\n", - " \"\"\"Compute learned positional embeddings for boxes using given parameters.\n", - "\n", - " Args:\n", - " boxes: the input boxes of shape N x 4 or B x N x 4\n", - " where the last dimension is the bbox coords in [y1, x1, y2, x2].\n", - " (Note currently `B=batch_size=1`).\n", - "\n", - " Returns:\n", - " torch.Tensor, the learned positional embeddings.\n", - " \"\"\"\n", - " pos_lookup = self.lookup\n", - "\n", - " N, n_anchors, _ = boxes.shape\n", - " boxes = boxes.view(N, n_anchors, 4)\n", - "\n", - " if self.over_boxes:\n", - " xywh = boxes\n", - " else:\n", - " xywh = torch.cat(\n", - " [\n", - " (boxes[:, :, 2:] + boxes[:, :, :2]) / 2,\n", - " (boxes[:, :, 2:] - boxes[:, :, :2]),\n", - " ],\n", - " dim=1,\n", - " )\n", - "\n", - " left_ind, right_ind, left_weight, right_weight = self._compute_weights(xywh)\n", - " f = pos_lookup.weight.shape[1] # self.features // 4\n", - "\n", - " try:\n", - " pos_emb_table = pos_lookup.weight.view(\n", - " self.emb_num, n_anchors, 4, f\n", - " ) # T x 4 x (D * 4)\n", - " except RuntimeError as e:\n", - " logger.exception(\n", - " f\"Hint: `n_points` ({self.n_points}) may be set incorrectly!\"\n", - " )\n", - " logger.exception(e)\n", - " raise (e)\n", - "\n", - " left_emb = pos_emb_table.gather(\n", - " 0,\n", - " left_ind[:, :, :, None].to(pos_emb_table.device).expand(N, n_anchors, 4, f),\n", - " ) # N x 4 x d\n", - " right_emb = pos_emb_table.gather(\n", - " 0,\n", - " right_ind[:, :, :, None]\n", - " .to(pos_emb_table.device)\n", - " .expand(N, n_anchors, 4, f),\n", - " ) # N x 4 x d\n", - " pos_emb = left_weight[:, :, :, None] * right_emb.to(\n", - " left_weight.device\n", - " ) + right_weight[:, :, :, None] * left_emb.to(right_weight.device)\n", - "\n", - " pos_emb = pos_emb.flatten(1)\n", - " pos_emb = self.mlp(pos_emb)\n", - "\n", - " return pos_emb.view(N, self.features)\n", - "\n", - "\n", - "def _learned_temp_embedding(self, times: torch.Tensor) -> torch.Tensor:\n", - " \"\"\"Compute learned temporal embeddings for times using given parameters.\n", - "\n", - " Args:\n", - " times: the input times of shape (N,) or (N,1) where N = (sum(instances_per_frame))\n", - " which is the frame index of the instance relative\n", - " to the batch size\n", - " (e.g. 
`torch.tensor([0, 0, ..., 0, 1, 1, ..., 1, 2, 2, ..., 2,..., B, B, ...B])`).\n", - "\n", - " Returns:\n", - " torch.Tensor, the learned temporal embeddings.\n", - " \"\"\"\n", - " temp_lookup = self.lookup\n", - " N = times.shape[0]\n", - "\n", - " left_ind, right_ind, left_weight, right_weight = self._compute_weights(times)\n", - "\n", - " left_emb = temp_lookup.weight[\n", - " left_ind.to(temp_lookup.weight.device)\n", - " ] # T x D --> N x D\n", - " right_emb = temp_lookup.weight[right_ind.to(temp_lookup.weight.device)]\n", - "\n", - " temp_emb = left_weight[:, None] * right_emb.to(\n", - " left_weight.device\n", - " ) + right_weight[:, None] * left_emb.to(right_weight.device)\n", - "\n", - " return temp_emb.view(N, self.features)\n", - "\n", - " def _compute_weights(self, data: torch.Tensor) -> tuple[torch.Tensor, ...]:\n", - " \"\"\"Compute left and right learned embedding weights.\n", - "\n", - " Args:\n", - " data: the input data (e.g boxes or times).\n", - "\n", - " Returns:\n", - " A torch.Tensor for each of the left/right indices and weights, respectively\n", - " \"\"\"\n", - " data = data * self.emb_num\n", - "\n", - " left_ind = data.clamp(min=0, max=self.emb_num - 1).long() # N x 4\n", - " right_ind = (left_ind + 1).clamp(min=0, max=self.emb_num - 1).long() # N x 4\n", - "\n", - " left_weight = data - left_ind.float() # N x 4\n", - "\n", - " right_weight = 1.0 - left_weight\n", - "\n", - " return left_ind, right_ind, left_weight, right_weight" - ] - }, - { - "cell_type": "code", - "execution_count": 3, + "execution_count": 5, "id": "fc8aa9bf-7e83-4fa6-892e-8a7703777f95", "metadata": { "tags": [] @@ -424,7 +302,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 2, "id": "4903f6c3-1cc8-412b-b988-ebeb2757c3b7", "metadata": { "tags": [] @@ -432,15 +310,15 @@ "outputs": [], "source": [ "# get sample crops from training data to pass through the network\n", - "train_path = \"/home/jovyan/talmolab-smb/datasets/mot/microscopy/airyscan_proofread/Final/dreem-train\"\n", - " \n", + "# train_path = \"/home/jovyan/talmolab-smb/datasets/mot/microscopy/airyscan_proofread/Final/dreem-train\"\n", + "train_path = \"/Users/mustafashaikh/dreem-data/dreem-train\"\n", "data = SleapDataset([os.path.join(train_path,\"10-1.slp\")], [os.path.join(train_path,\"10-1.mp4\")], crop_size=64,\n", " mode=\"train\", clip_length=32, anchors=\"centroid\")" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 6, "id": "27bfdb50-2eee-4207-8a16-6481a9905e90", "metadata": { "tags": [] @@ -456,7 +334,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 7, "id": "8ea441b2-b12a-4f10-8821-aef889a063ba", "metadata": { "tags": [] @@ -470,7 +348,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 8, "id": "b863dbfe-d9fc-4ed1-bf97-3f304d3d03a6", "metadata": { "tags": [] @@ -479,10 +357,10 @@ { "data": { "text/plain": [ - "" + "" ] }, - "execution_count": 7, + "execution_count": 8, "metadata": {}, "output_type": "execute_result" }, @@ -512,7 +390,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 10, "id": "5d01ca30-c642-4a50-bd5b-802711c4bb16", "metadata": { "tags": [] @@ -527,11 +405,9 @@ }, { "cell_type": "code", - "execution_count": 27, - "id": "87bebe90-d8e7-40bf-8783-ee5c57944632", - "metadata": { - "tags": [] - }, + "execution_count": 11, + "id": "8b17fdb7", + "metadata": {}, "outputs": [], "source": [ "feat_dim = 1024\n", @@ -542,7 +418,7 @@ }, { "cell_type": "code", - "execution_count": 28, + 
"execution_count": 12, "id": "7999fcef-953b-42cf-927c-f3b617f68157", "metadata": { "tags": [] @@ -585,13 +461,29 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 14, "id": "75ec8cab-25b9-4e9e-a64a-b5dbe00cc81a", "metadata": {}, - "outputs": [], + "outputs": [ + { + "ename": "AttributeError", + "evalue": "'Tensor' object has no attribute 'has_crop'", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[14], line 2\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;66;03m# pass instances through visual encoder to get the feature vector (q,k,v)\u001b[39;00m\n\u001b[0;32m----> 2\u001b[0m x \u001b[38;5;241m=\u001b[39m \u001b[43mextract_features\u001b[49m\u001b[43m(\u001b[49m\u001b[43mref_times\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvisual_encoder\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 4\u001b[0m \u001b[38;5;66;03m# pass through fixed positional embedding (just to test output)\u001b[39;00m\n", + "Cell \u001b[0;32mIn[12], line 14\u001b[0m, in \u001b[0;36mextract_features\u001b[0;34m(instances, visual_encoder, force_recompute)\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Extract features from instances using visual encoder backbone.\u001b[39;00m\n\u001b[1;32m 7\u001b[0m \n\u001b[1;32m 8\u001b[0m \u001b[38;5;124;03mArgs:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[38;5;124;03m force_recompute: indicate whether to compute features for all instances regardless of if they have instances\u001b[39;00m\n\u001b[1;32m 12\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 13\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m force_recompute:\n\u001b[0;32m---> 14\u001b[0m instances_to_compute \u001b[38;5;241m=\u001b[39m \u001b[43m[\u001b[49m\n\u001b[1;32m 15\u001b[0m \u001b[43m \u001b[49m\u001b[43minstance\u001b[49m\n\u001b[1;32m 16\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43minstance\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43minstances\u001b[49m\n\u001b[1;32m 17\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43minstance\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mhas_crop\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01mand\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;129;43;01mnot\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43minstance\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mhas_features\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 18\u001b[0m \u001b[43m \u001b[49m\u001b[43m]\u001b[49m\n\u001b[1;32m 19\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 20\u001b[0m instances_to_compute \u001b[38;5;241m=\u001b[39m instances\n", + "Cell \u001b[0;32mIn[12], line 17\u001b[0m, in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Extract features from instances using visual encoder backbone.\u001b[39;00m\n\u001b[1;32m 7\u001b[0m \n\u001b[1;32m 8\u001b[0m \u001b[38;5;124;03mArgs:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[38;5;124;03m force_recompute: indicate whether to compute features for all instances regardless of 
if they have instances\u001b[39;00m\n\u001b[1;32m 12\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 13\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m force_recompute:\n\u001b[1;32m 14\u001b[0m instances_to_compute \u001b[38;5;241m=\u001b[39m [\n\u001b[1;32m 15\u001b[0m instance\n\u001b[1;32m 16\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m instance \u001b[38;5;129;01min\u001b[39;00m instances\n\u001b[0;32m---> 17\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[43minstance\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mhas_crop\u001b[49m() \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m instance\u001b[38;5;241m.\u001b[39mhas_features()\n\u001b[1;32m 18\u001b[0m ]\n\u001b[1;32m 19\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 20\u001b[0m instances_to_compute \u001b[38;5;241m=\u001b[39m instances\n", + "\u001b[0;31mAttributeError\u001b[0m: 'Tensor' object has no attribute 'has_crop'" + ] + } + ], "source": [ "# pass instances through visual encoder to get the feature vector (q,k,v)\n", - "x = extract_features()" + "x = extract_features(ref_instances, visual_encoder)\n", + "\n", + "# pass through fixed positional embedding (just to test output)" ] } ], @@ -599,7 +491,7 @@ "kernelspec": { "display_name": "dreem", "language": "python", - "name": "dreem" + "name": "python3" }, "language_info": { "codemirror_mode": { From 21035fbbd9b2761fbed4de3804db8d0fe22b0d03 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Thu, 1 Aug 2024 20:07:24 -0700 Subject: [PATCH 35/63] implement rope embedding - changes to embedding class - add apply() function to Embedding class - remove references to embedding from encoderlayer fwd pass --- dreem/models/embedding.py | 173 +++++++++++++++++++++++++++++- dreem/models/transformer.py | 14 ++- rope.ipynb | 207 +++++++++++++++++++++++++----------- 3 files changed, 320 insertions(+), 74 deletions(-) diff --git a/dreem/models/embedding.py b/dreem/models/embedding.py index c112d48..8f439d1 100644 --- a/dreem/models/embedding.py +++ b/dreem/models/embedding.py @@ -3,15 +3,127 @@ import math import torch import logging +from torch import nn, Tensor +from typing import Optional from dreem.models.mlp import MLP logger = logging.getLogger("dreem.models") # todo: add named tensors, clean variable names -class Embedding(torch.nn.Module): - """Class that wraps around different embedding types. +class RotaryPositionalEmbeddings(nn.Module): + """ + This class implements Rotary Positional Embeddings (RoPE) + proposed in https://arxiv.org/abs/2104.09864. + + Reference implementation (used for correctness verfication) + can be found here: + https://github.com/meta-llama/llama/blob/main/llama/model.py#L80 + + In this implementation we cache the embeddings for each position upto + ``max_seq_len`` by computing this during init. + + Args: + dim (int): Embedding dimension. 
This is usually set to the dim of each + head in the attention module computed as ````embed_dim`` // ``num_heads```` + max_seq_len (int): Maximum expected sequence length for the + model, if exceeded the cached freqs will be recomputed + base (int): The base for the geometric progression used to compute + the rotation angles + """ + + def __init__( + self, + dim: int, + max_seq_len: int = 4096, + base: int = 10_000, + ) -> None: + super().__init__() + self.dim = dim + self.base = base + self.max_seq_len = max_seq_len + self._rope_init() + + # We need to explicitly define reset_parameters for FSDP initialization, see + # https://github.com/pytorch/pytorch/blob/797d4fbdf423dd9320ebe383fb57ffb1135c4a99/torch/distributed/fsdp/_init_utils.py#L885 + def reset_parameters(self): + self._rope_init() + + def _rope_init(self): + theta = 1.0 / ( + self.base + ** (torch.arange(0, self.dim, 2)[: (self.dim // 2)].float() / self.dim) + ) + self.register_buffer("theta", theta, persistent=False) + self.build_rope_cache(self.max_seq_len) + + def build_rope_cache(self, max_seq_len: int = 4096) -> None: + # Create position indexes `[0, 1, ..., max_seq_len - 1]` + seq_idx = torch.arange( + max_seq_len, dtype=self.theta.dtype, device=self.theta.device + ) + + # Outer product of theta and position index; output tensor has + # a shape of [max_seq_len, dim // 2] + idx_theta = torch.einsum("i, j -> ij", seq_idx, self.theta).float() + + # cache includes both the cos and sin components and so the output shape is + # [max_seq_len, dim // 2, 2] + cache = torch.stack([torch.cos(idx_theta), torch.sin(idx_theta)], dim=-1) + self.register_buffer("cache", cache, persistent=False) + + def forward(self, x: Tensor, *, input_pos: Optional[Tensor] = None) -> Tensor: + """ + Args: + x (Tensor): input tensor with shape + [b, s, n_h, h_d] + input_pos (Optional[Tensor]): Optional tensor which contains the position ids + of each token. During training, this is used to indicate the positions + of each token relative to its sample when packed, shape [b, s]. + During inference, this indicates the position of the current token. + If none, assume the index of the token is its position id. Default is None. + Returns: + Tensor: output tensor with RoPE applied + + Notation used for tensor shapes: + - b: batch size + - s: sequence length + - n_h: num heads + - h_d: head dim + + TODO: The implementation below can be made more efficient + for inference. + """ + # input tensor has shape [b, s, n_h, h_d] + seq_len = x.size(1) + + # extract the values based on whether input_pos is set or not + rope_cache = ( + self.cache[:seq_len] if input_pos is None else self.cache[input_pos] + ) + + # reshape input; the last dimension is used for computing the output. + # Cast to float to match the reference implementation + # tensor has shape [b, s, n_h, h_d // 2, 2] + xshaped = x.float().reshape(*x.shape[:-1], -1, 2) + + # reshape the cache for broadcasting + # tensor has shape [b, s, 1, h_d // 2, 2] if packed samples, + # otherwise has shape [1, s, 1, h_d // 2, 2] + rope_cache = rope_cache.view(-1, xshaped.size(1), 1, xshaped.size(3), 2) + + return rope_cache + + + + + + + +class Embedding(torch.nn.Module): + """Class that wraps around different embedding types. + Creates embedding array and transforms the input data Used for both learned and fixed embeddings. 
""" @@ -112,6 +224,10 @@ def __init__( self._emb_func = self._sine_box_embedding elif self.emb_type == "temp": self._emb_func = self._sine_temp_embedding + + elif self.mode == "rope": + self._emb_func = self._rope_embedding + def _check_init_args(self, emb_type: str, mode: str): """Check whether the correct arguments were passed to initialization. @@ -136,7 +252,40 @@ def _check_init_args(self, emb_type: str, mode: str): f"Embedding `mode` must be one of {self.EMB_MODES} not {mode}" ) - def forward(self, seq_positions: torch.Tensor) -> torch.Tensor: + + def _transform(self, x, emb): + + if emb==self._rope_embedding: + return self._apply_rope(x, emb) + else: + return self._apply_additive_embeddings(x, emb) + + + def _apply_rope(self, x, emb): + + + # tensor has shape [b, s, n_h, h_d // 2, 2] + x_out = torch.stack( + [ + x[..., 0] * emb[..., 0] + - x[..., 1] * emb[..., 1], + x[..., 1] * emb[..., 0] + + x[..., 0] * emb[..., 1], + ], + -1, + ) + # tensor has shape [b, s, n_h, h_d] + x_out = x_out.flatten(3) + + return x_out + + + def _apply_additive_embeddings(self, x, emb): + + return x + emb + + + def forward(self, x, seq_positions: torch.Tensor) -> torch.Tensor: """Get the sequence positional embeddings. Args: @@ -147,7 +296,11 @@ def forward(self, seq_positions: torch.Tensor) -> torch.Tensor: Returns: An `N` x `self.features` tensor representing the corresponding spatial or temporal embedding. """ + # create embedding array (_emb_func selects appropriate callback based on config input) emb = self._emb_func(seq_positions) + + # transform the input data with the embedding + x = self._transform(emb, x) if emb.shape[-1] != self.features: raise RuntimeError( @@ -156,7 +309,7 @@ def forward(self, seq_positions: torch.Tensor) -> torch.Tensor: f"hint: Try turning the MLP on by passing `mlp_cfg` to the constructor to project to the correct embedding dimensions." ) ) - return emb + return x, emb def _torch_int_div( self, tensor1: torch.Tensor, tensor2: torch.Tensor @@ -172,6 +325,18 @@ def _torch_int_div( """ return torch.div(tensor1, tensor2, rounding_mode="floor") + + def _rope_embedding(self, x: torch.Tensor, emb_ids: torch.Tensor) -> torch.Tensor: + + # input must be of shape (num_batches, num_instances, num_attn_heads, d_model) + # use num_heads=1 for compatibility with torch ROPE + x_rope = torch.unsqueeze(x, 2) + rope = RotaryPositionalEmbeddings(self.features) + rot_mat = rope(x_rope) + + return rot_mat + + def _sine_box_embedding(self, boxes: torch.Tensor) -> torch.Tensor: """Compute sine positional embeddings for boxes using given parameters. diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index 4be6db6..13e4529 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -19,6 +19,7 @@ import copy import torch import torch.nn.functional as F +from typing import List # todo: add named tensors # todo: add flash attention @@ -298,21 +299,22 @@ def __init__( self.activation = _get_activation_fn(activation) def forward( - self, queries: torch.Tensor, pos_emb: torch.Tensor = None + self, queries: torch.Tensor, embeddings : List[Embedding] ) -> torch.Tensor: """Execute a forward pass of the encoder layer. Args: - queries: Input sequence for encoder (n_query, batch_size, embed_dim). + queries: Input sequence for encoder (n_query, batch_size, embed_dim); transformed with embedding pos_emb: Position embedding, if provided is added to src Returns: The output tensor of shape (n_query, batch_size, embed_dim). 
""" - if pos_emb is None: - pos_emb = torch.zeros_like(queries) + # TODO: delete this section; keep to check that pos_emb None is taken care of automatically by config +# if pos_emb is None: +# pos_emb = torch.zeros_like(queries) - queries = queries + pos_emb +# queries = queries + pos_emb # q = k = src @@ -471,6 +473,8 @@ def forward( The output tensor of shape (n_query, batch_size, embed_dim). """ for layer in self.layers: + # TODO: add embedding object call + # TODO: add the embedding object into the argument list to the forward() call queries = layer(queries, pos_emb=pos_emb) encoder_features = self.norm(queries) diff --git a/rope.ipynb b/rope.ipynb index 310fa2c..ee920e4 100644 --- a/rope.ipynb +++ b/rope.ipynb @@ -2,38 +2,32 @@ "cells": [ { "cell_type": "code", - "execution_count": 1, + "execution_count": 35, "id": "1bd666a7-0ad1-4ae7-a56e-43429a1228d8", "metadata": { "tags": [] }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/opt/miniconda3/envs/dreem/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n" - ] - } - ], + "outputs": [], "source": [ "import numpy as np\n", "import dreem\n", "import os\n", "import matplotlib.pyplot as plt\n", - "\n", "import math\n", "import torch\n", "import logging\n", "from dreem.models.mlp import MLP\n", "from dreem.models.model_utils import *\n", - "from dreem.datasets import SleapDataset" + "from dreem.datasets import SleapDataset\n", + "import torchtune.modules as tune\n", + "from dreem.models.transformer import TransformerEncoderLayer\n", + "from dreem.models import VisualEncoder\n", + "from dreem.models import GlobalTrackingTransformer" ] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 36, "id": "a8736593-71f7-4ab6-a594-eb52d2fd94ac", "metadata": { "tags": [] @@ -289,7 +283,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 62, "id": "fc8aa9bf-7e83-4fa6-892e-8a7703777f95", "metadata": { "tags": [] @@ -297,12 +291,13 @@ "outputs": [], "source": [ "# create Embedding object\n", - "emb = Embedding(emb_type=\"temp\",mode=\"fixed\",features=1024,emb_num=16,n_points=1,temperature=10000)" + "emb_t = Embedding(emb_type=\"temp\",mode=\"fixed\",features=1024,emb_num=16,n_points=1,temperature=10000)\n", + "emb_p = Embedding(emb_type=\"pos\",mode=\"fixed\",features=1024,emb_num=16,n_points=1,temperature=10000)" ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 39, "id": "4903f6c3-1cc8-412b-b988-ebeb2757c3b7", "metadata": { "tags": [] @@ -310,15 +305,15 @@ "outputs": [], "source": [ "# get sample crops from training data to pass through the network\n", - "# train_path = \"/home/jovyan/talmolab-smb/datasets/mot/microscopy/airyscan_proofread/Final/dreem-train\"\n", - "train_path = \"/Users/mustafashaikh/dreem-data/dreem-train\"\n", + "train_path = \"/home/jovyan/talmolab-smb/datasets/mot/microscopy/airyscan_proofread/Final/dreem-train\"\n", + "# train_path = \"/Users/mustafashaikh/dreem-data/dreem-train\"\n", "data = SleapDataset([os.path.join(train_path,\"10-1.slp\")], [os.path.join(train_path,\"10-1.mp4\")], crop_size=64,\n", " mode=\"train\", clip_length=32, anchors=\"centroid\")" ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 40, "id": "27bfdb50-2eee-4207-8a16-6481a9905e90", "metadata": { "tags": [] @@ -334,7 +329,7 @@ }, { "cell_type": 
"code", - "execution_count": 7, + "execution_count": 41, "id": "8ea441b2-b12a-4f10-8821-aef889a063ba", "metadata": { "tags": [] @@ -351,6 +346,10 @@ "execution_count": 8, "id": "b863dbfe-d9fc-4ed1-bf97-3f304d3d03a6", "metadata": { + "collapsed": true, + "jupyter": { + "outputs_hidden": true + }, "tags": [] }, "outputs": [ @@ -380,45 +379,23 @@ "plt.imshow(emb(ref_times).numpy(), aspect='auto')" ] }, - { - "cell_type": "markdown", - "id": "a972707a-51a7-45ff-987e-80ee0dea4752", - "metadata": {}, - "source": [ - "### Rotary Positional Embeddings" - ] - }, { "cell_type": "code", - "execution_count": 10, - "id": "5d01ca30-c642-4a50-bd5b-802711c4bb16", + "execution_count": 43, + "id": "8b17fdb7", "metadata": { "tags": [] }, "outputs": [], - "source": [ - "import torchtune.modules as tune\n", - "from dreem.models.transformer import TransformerEncoderLayer\n", - "from dreem.models import VisualEncoder\n", - "from dreem.models import GlobalTrackingTransformer" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "8b17fdb7", - "metadata": {}, - "outputs": [], "source": [ "feat_dim = 1024\n", "xfmr_encoder = TransformerEncoderLayer(d_model=feat_dim, nhead=8)\n", - "visual_encoder = VisualEncoder(d_model=feat_dim, model_name=\"resnet18\")\n", - "rope = tune.RotaryPositionalEmbeddings(16,32,10000)" + "visual_encoder = VisualEncoder(d_model=feat_dim, model_name=\"resnet18\")" ] }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 44, "id": "7999fcef-953b-42cf-927c-f3b617f68157", "metadata": { "tags": [] @@ -461,29 +438,129 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 92, + "id": "e299e8a0-61eb-4eee-901c-49aa7e678b3b", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# partial forward pass of the transformer - up until the encoder\n", + "\n", + "def prepare_for_xfmr(ref_instances):\n", + " # extract visual encoder features from instance object; shape=(1,n_instances,d=1024)\n", + " ref_features = torch.cat(\n", + " [instance.features for instance in ref_instances], dim=0\n", + " ).unsqueeze(0)\n", + "\n", + " # window_length = len(frames)\n", + " # instances_per_frame = [frame.num_detected for frame in frames]\n", + " total_instances = len(ref_instances)\n", + " embed_dim = ref_features.shape[-1]\n", + " # print(f'T: {window_length}; N: {total_instances}; N_t: {instances_per_frame} n_reid: {reid_features.shape}')\n", + " ref_boxes = get_boxes(ref_instances) # (n_instances,1,4)\n", + " ref_boxes = torch.nan_to_num(ref_boxes, -1.0)\n", + " ref_times, query_times = get_times(ref_instances, query_instances=None)\n", + "\n", + " # clip length \n", + " window_length = len(ref_times.unique())\n", + "\n", + " # computes the temporal embedding vector for each instance\n", + " ref_temp_emb = emb_t(ref_times)\n", + " # computes the positional embedding vector for each instance\n", + " ref_pos_emb = emb_p(ref_boxes)\n", + "\n", + " return_embedding=False\n", + " if return_embedding:\n", + " for i, instance in enumerate(ref_instances):\n", + " instance.add_embedding(\"pos\", ref_pos_emb[i])\n", + " instance.add_embedding(\"temp\", ref_temp_emb[i])\n", + "\n", + " # we need a single vector so average the temporal and spatial embeddings\n", + " ref_emb = (ref_pos_emb + ref_temp_emb) / 2.0\n", + "\n", + " # add a new dim at the beginning to represent the batch size (in our case 1)\n", + " ref_emb = ref_emb.view(1, total_instances, embed_dim)\n", + "\n", + " ref_emb = ref_emb.permute(1, 0, 2) # (total_instances, batch_size, embed_dim)\n", 
+ "\n", + " batch_size, total_instances, embed_dim = ref_features.shape\n", + "\n", + " ref_features = ref_features.permute(\n", + " 1, 0, 2\n", + " ) # (total_instances, batch_size, embed_dim); note batch_size = 1\n", + "\n", + " return ref_features" + ] + }, + { + "cell_type": "code", + "execution_count": 45, "id": "75ec8cab-25b9-4e9e-a64a-b5dbe00cc81a", - "metadata": {}, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# pass instances through visual encoder to get the feature vector (q,k,v)\n", + "extract_features(ref_instances, visual_encoder)" + ] + }, + { + "cell_type": "code", + "execution_count": 123, + "id": "f0823cf1-2a35-4920-a62e-896bd9dbb078", + "metadata": { + "tags": [] + }, "outputs": [ { - "ename": "AttributeError", - "evalue": "'Tensor' object has no attribute 'has_crop'", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[14], line 2\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;66;03m# pass instances through visual encoder to get the feature vector (q,k,v)\u001b[39;00m\n\u001b[0;32m----> 2\u001b[0m x \u001b[38;5;241m=\u001b[39m \u001b[43mextract_features\u001b[49m\u001b[43m(\u001b[49m\u001b[43mref_times\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvisual_encoder\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 4\u001b[0m \u001b[38;5;66;03m# pass through fixed positional embedding (just to test output)\u001b[39;00m\n", - "Cell \u001b[0;32mIn[12], line 14\u001b[0m, in \u001b[0;36mextract_features\u001b[0;34m(instances, visual_encoder, force_recompute)\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Extract features from instances using visual encoder backbone.\u001b[39;00m\n\u001b[1;32m 7\u001b[0m \n\u001b[1;32m 8\u001b[0m \u001b[38;5;124;03mArgs:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[38;5;124;03m force_recompute: indicate whether to compute features for all instances regardless of if they have instances\u001b[39;00m\n\u001b[1;32m 12\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 13\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m force_recompute:\n\u001b[0;32m---> 14\u001b[0m instances_to_compute \u001b[38;5;241m=\u001b[39m \u001b[43m[\u001b[49m\n\u001b[1;32m 15\u001b[0m \u001b[43m \u001b[49m\u001b[43minstance\u001b[49m\n\u001b[1;32m 16\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43minstance\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43minstances\u001b[49m\n\u001b[1;32m 17\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43minstance\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mhas_crop\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01mand\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;129;43;01mnot\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43minstance\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mhas_features\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 18\u001b[0m \u001b[43m \u001b[49m\u001b[43m]\u001b[49m\n\u001b[1;32m 19\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 20\u001b[0m instances_to_compute \u001b[38;5;241m=\u001b[39m instances\n", - "Cell \u001b[0;32mIn[12], line 17\u001b[0m, 
in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Extract features from instances using visual encoder backbone.\u001b[39;00m\n\u001b[1;32m 7\u001b[0m \n\u001b[1;32m 8\u001b[0m \u001b[38;5;124;03mArgs:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[38;5;124;03m force_recompute: indicate whether to compute features for all instances regardless of if they have instances\u001b[39;00m\n\u001b[1;32m 12\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 13\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m force_recompute:\n\u001b[1;32m 14\u001b[0m instances_to_compute \u001b[38;5;241m=\u001b[39m [\n\u001b[1;32m 15\u001b[0m instance\n\u001b[1;32m 16\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m instance \u001b[38;5;129;01min\u001b[39;00m instances\n\u001b[0;32m---> 17\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[43minstance\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mhas_crop\u001b[49m() \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m instance\u001b[38;5;241m.\u001b[39mhas_features()\n\u001b[1;32m 18\u001b[0m ]\n\u001b[1;32m 19\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 20\u001b[0m instances_to_compute \u001b[38;5;241m=\u001b[39m instances\n", - "\u001b[0;31mAttributeError\u001b[0m: 'Tensor' object has no attribute 'has_crop'" - ] + "data": { + "text/plain": [ + "torch.Size([1, 491, 1, 1024])" + ] + }, + "execution_count": 123, + "metadata": {}, + "output_type": "execute_result" } ], "source": [ - "# pass instances through visual encoder to get the feature vector (q,k,v)\n", - "x = extract_features(ref_instances, visual_encoder)\n", - "\n", - "# pass through fixed positional embedding (just to test output)" + "# prepare data and apply rope\n", + "rope = tune.RotaryPositionalEmbeddings(feat_dim)\n", + "\n", + "ref_features = torch.cat(\n", + " [instance.features for instance in ref_instances], dim=0\n", + " ).unsqueeze(0)\n", + "\n", + "# input must be of shape (num_batches, num_instances, num_attn_heads, d_model)\n", + "# use num_heads=1 to use torch ROPE; we pass this into torch multiheadattn later which doesn't \n", + "# use num_heads in the input data\n", + "ref_features = torch.unsqueeze(ref_features, 2)\n", + "rope_ref_feat = rope(ref_features)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 93, + "id": "48894fba-2ffc-4f5a-aceb-26b711b7b51f", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "encoder_queries = prepare_for_xfmr(ref_instances)\n", + "encoder_features = xfmr_encoder(\n", + " encoder_queries, pos_emb=ref_emb\n", + ") # (total_instances, batch_size, embed_dim)" + ] + }, + { + "cell_type": "markdown", + "id": "a972707a-51a7-45ff-987e-80ee0dea4752", + "metadata": {}, + "source": [ + "### Rotary Positional Embeddings" ] } ], @@ -491,7 +568,7 @@ "kernelspec": { "display_name": "dreem", "language": "python", - "name": "python3" + "name": "dreem" }, "language_info": { "codemirror_mode": { From 4d27914c7105bc313c379ab8c9a02dbeb47d7238 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Sun, 4 Aug 2024 22:22:11 -0700 Subject: [PATCH 36/63] minor changes - add batch job file to repo --- dreem/models/embedding.py | 7 +------ rope.ipynb | 18 ++++++++-------- run_batch_job.py | 44 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 54 insertions(+), 15 deletions(-) create mode 100644 run_batch_job.py diff --git a/dreem/models/embedding.py b/dreem/models/embedding.py index 
8f439d1..9edb94d 100644 --- a/dreem/models/embedding.py +++ b/dreem/models/embedding.py @@ -92,8 +92,6 @@ def forward(self, x: Tensor, *, input_pos: Optional[Tensor] = None) -> Tensor: - n_h: num heads - h_d: head dim - TODO: The implementation below can be made more efficient - for inference. """ # input tensor has shape [b, s, n_h, h_d] seq_len = x.size(1) @@ -116,10 +114,7 @@ def forward(self, x: Tensor, *, input_pos: Optional[Tensor] = None) -> Tensor: return rope_cache - - - - + class Embedding(torch.nn.Module): """Class that wraps around different embedding types. diff --git a/rope.ipynb b/rope.ipynb index ee920e4..d5a3120 100644 --- a/rope.ipynb +++ b/rope.ipynb @@ -502,10 +502,18 @@ }, "outputs": [], "source": [ - "# pass instances through visual encoder to get the feature vector (q,k,v)\n", + "# pass instances through visual encoder to get the feature vector (q,k,v); modifies the feature attribute of each Instance in ref_instances\n", "extract_features(ref_instances, visual_encoder)" ] }, + { + "cell_type": "markdown", + "id": "a972707a-51a7-45ff-987e-80ee0dea4752", + "metadata": {}, + "source": [ + "### Rotary Positional Embeddings" + ] + }, { "cell_type": "code", "execution_count": 123, @@ -554,14 +562,6 @@ " encoder_queries, pos_emb=ref_emb\n", ") # (total_instances, batch_size, embed_dim)" ] - }, - { - "cell_type": "markdown", - "id": "a972707a-51a7-45ff-987e-80ee0dea4752", - "metadata": {}, - "source": [ - "### Rotary Positional Embeddings" - ] } ], "metadata": { diff --git a/run_batch_job.py b/run_batch_job.py new file mode 100644 index 0000000..52345da --- /dev/null +++ b/run_batch_job.py @@ -0,0 +1,44 @@ +import os +import subprocess as sp + +gpu = "0.1" +job_name = "mustafa-test-batch-job" + +base = "/home/runner/talmodata-smb/aadi/biogtr_expts/run/animal/eight_flies" #where to run the job from +dreem_repo = base.replace("biogtr_expts/run/animal/eight_flies", "dreem") #where the dreem repo is stored + +config_dir=os.path.join(base, "configs") #where to find the configs +config_name= "base" #base config name +params_cfg = os.path.join(config_dir, "sample_efficiency.yaml") #override config +# if running just 1 job, comment this line out and delete the ++batch_config command in the command below +task_csv = os.path.join(config_dir, "sample_efficiency.csv") # csv for tasks - each pod is a task + +pods = 1 # total number of tasks for job to run; should be number of rows in csv file +par = 1 # number of tasks that can be run in parallel - max. 
= # of pods + +cmd = [ + "runai", + "submit", + "--gpu", + gpu, + "--name", + job_name, + "--preemptible", + "-i", + "asheridan/biogtr", + "-v", + "/data/talmolab-smb:/home/runner/talmodata-smb", + "-e", + f"RUNNER_CMD=cp -r {dreem_repo} ~ && mamba env create -n dreem -f ~/dreem/environment.yml && export WANDB_API_KEY=6cc5012a6ecfb9cd970bd07686dbfcefd3190a04 && cd {base} && conda run -n dreem dreem-train --config-dir={config_dir} --config-name={config_name} ++params_config={params_cfg} ++batch_config={task_csv}", + "--parallelism", + str(par), + "--completions", + str(pods), +] + +print(f"base directory: {base}") +print(f"running with {pods} pods") +print(f"max pods that can run concurrently: {par}") +print(f"runner command: {cmd}") + +sp.run(cmd) \ No newline at end of file From be5e630791a893b0eff0161baea41903a58e9395 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Mon, 5 Aug 2024 14:34:35 -0700 Subject: [PATCH 37/63] add local train run script, minor changes --- dreem/training/configs/base.yaml | 16 +++--- rope.ipynb | 97 ++++++++++++++++---------------- run_trainer.py | 10 ++++ 3 files changed, 68 insertions(+), 55 deletions(-) create mode 100644 run_trainer.py diff --git a/dreem/training/configs/base.yaml b/dreem/training/configs/base.yaml index 7779cd1..f9ed413 100644 --- a/dreem/training/configs/base.yaml +++ b/dreem/training/configs/base.yaml @@ -66,24 +66,24 @@ runner: dataset: train_dataset: - slp_files: ["../../tests/data/sleap/two_flies.slp"] - video_files: ["../../tests/data/sleap/two_flies.mp4"] + slp_files: ["tests/data/sleap/two_flies.slp"] + video_files: ["tests/data/sleap/two_flies.mp4"] padding: 5 crop_size: 128 chunk: true clip_length: 32 val_dataset: - slp_files: ["../../tests/data/sleap/two_flies.slp"] - video_files: ["../../tests/data/sleap/two_flies.mp4"] + slp_files: ["tests/data/sleap/two_flies.slp"] + video_files: ["tests/data/sleap/two_flies.mp4"] padding: 5 crop_size: 128 chunk: True clip_length: 32 test_dataset: - slp_files: ["../../tests/data/sleap/two_flies.slp"] - video_files: ["../../tests/data/sleap/two_flies.mp4"] + slp_files: ["tests/data/sleap/two_flies.slp"] + video_files: ["tests/data/sleap/two_flies.mp4"] padding: 5 crop_size: 128 chunk: True @@ -137,8 +137,8 @@ trainer: limit_test_batches: 1.0 limit_val_batches: 1.0 log_every_n_steps: 1 - max_epochs: 100 - min_epochs: 10 + max_epochs: 1 + min_epochs: 1 view_batch: enable: False diff --git a/rope.ipynb b/rope.ipynb index d5a3120..8213213 100644 --- a/rope.ipynb +++ b/rope.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 35, + "execution_count": 4, "id": "1bd666a7-0ad1-4ae7-a56e-43429a1228d8", "metadata": { "tags": [] @@ -19,15 +19,14 @@ "from dreem.models.mlp import MLP\n", "from dreem.models.model_utils import *\n", "from dreem.datasets import SleapDataset\n", - "import torchtune.modules as tune\n", - "from dreem.models.transformer import TransformerEncoderLayer\n", + "from dreem.models.transformer import *\n", "from dreem.models import VisualEncoder\n", "from dreem.models import GlobalTrackingTransformer" ] }, { "cell_type": "code", - "execution_count": 36, + "execution_count": 5, "id": "a8736593-71f7-4ab6-a594-eb52d2fd94ac", "metadata": { "tags": [] @@ -283,7 +282,7 @@ }, { "cell_type": "code", - "execution_count": 62, + "execution_count": 6, "id": "fc8aa9bf-7e83-4fa6-892e-8a7703777f95", "metadata": { "tags": [] @@ -297,12 +296,35 @@ }, { "cell_type": "code", - "execution_count": 39, + "execution_count": 7, "id": "4903f6c3-1cc8-412b-b988-ebeb2757c3b7", "metadata": { 
"tags": [] }, - "outputs": [], + "outputs": [ + { + "ename": "FileNotFoundError", + "evalue": "[Errno 2] Unable to open file (unable to open file: name = '/home/jovyan/talmolab-smb/datasets/mot/microscopy/airyscan_proofread/Final/dreem-train/10-1.slp', errno = 2, error message = 'No such file or directory', flags = 0, o_flags = 0)", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[7], line 4\u001b[0m\n\u001b[1;32m 2\u001b[0m train_path \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m/home/jovyan/talmolab-smb/datasets/mot/microscopy/airyscan_proofread/Final/dreem-train\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 3\u001b[0m \u001b[38;5;66;03m# train_path = \"/Users/mustafashaikh/dreem-data/dreem-train\"\u001b[39;00m\n\u001b[0;32m----> 4\u001b[0m data \u001b[38;5;241m=\u001b[39m \u001b[43mSleapDataset\u001b[49m\u001b[43m(\u001b[49m\u001b[43m[\u001b[49m\u001b[43mos\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpath\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mjoin\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtrain_path\u001b[49m\u001b[43m,\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m10-1.slp\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m[\u001b[49m\u001b[43mos\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpath\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mjoin\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtrain_path\u001b[49m\u001b[43m,\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m10-1.mp4\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcrop_size\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m64\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 5\u001b[0m \u001b[43m \u001b[49m\u001b[43mmode\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtrain\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mclip_length\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m32\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43manchors\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcentroid\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/Documents/GitHub/dreem/dreem/datasets/sleap_dataset.py:108\u001b[0m, in \u001b[0;36mSleapDataset.__init__\u001b[0;34m(self, slp_files, video_files, padding, crop_size, anchors, chunk, clip_length, mode, handle_missing, augmentations, n_chunks, seed, verbose)\u001b[0m\n\u001b[1;32m 104\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mverbose \u001b[38;5;241m=\u001b[39m verbose\n\u001b[1;32m 106\u001b[0m \u001b[38;5;66;03m# if self.seed is not None:\u001b[39;00m\n\u001b[1;32m 107\u001b[0m \u001b[38;5;66;03m# np.random.seed(self.seed)\u001b[39;00m\n\u001b[0;32m--> 108\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlabels \u001b[38;5;241m=\u001b[39m \u001b[43m[\u001b[49m\u001b[43msio\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload_slp\u001b[49m\u001b[43m(\u001b[49m\u001b[43mslp_file\u001b[49m\u001b[43m)\u001b[49m\u001b[43m 
\u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mslp_file\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mslp_files\u001b[49m\u001b[43m]\u001b[49m\n\u001b[1;32m 109\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mvideos \u001b[38;5;241m=\u001b[39m [imageio\u001b[38;5;241m.\u001b[39mget_reader(vid_file) \u001b[38;5;28;01mfor\u001b[39;00m vid_file \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mvid_files]\n\u001b[1;32m 110\u001b[0m \u001b[38;5;66;03m# do we need this? would need to update with sleap-io\u001b[39;00m\n\u001b[1;32m 111\u001b[0m \n\u001b[1;32m 112\u001b[0m \u001b[38;5;66;03m# for label in self.labels:\u001b[39;00m\n\u001b[1;32m 113\u001b[0m \u001b[38;5;66;03m# label.remove_empty_instances(keep_empty_frames=False)\u001b[39;00m\n", + "File \u001b[0;32m~/Documents/GitHub/dreem/dreem/datasets/sleap_dataset.py:108\u001b[0m, in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 104\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mverbose \u001b[38;5;241m=\u001b[39m verbose\n\u001b[1;32m 106\u001b[0m \u001b[38;5;66;03m# if self.seed is not None:\u001b[39;00m\n\u001b[1;32m 107\u001b[0m \u001b[38;5;66;03m# np.random.seed(self.seed)\u001b[39;00m\n\u001b[0;32m--> 108\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlabels \u001b[38;5;241m=\u001b[39m [\u001b[43msio\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload_slp\u001b[49m\u001b[43m(\u001b[49m\u001b[43mslp_file\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mfor\u001b[39;00m slp_file \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mslp_files]\n\u001b[1;32m 109\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mvideos \u001b[38;5;241m=\u001b[39m [imageio\u001b[38;5;241m.\u001b[39mget_reader(vid_file) \u001b[38;5;28;01mfor\u001b[39;00m vid_file \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mvid_files]\n\u001b[1;32m 110\u001b[0m \u001b[38;5;66;03m# do we need this? 
would need to update with sleap-io\u001b[39;00m\n\u001b[1;32m 111\u001b[0m \n\u001b[1;32m 112\u001b[0m \u001b[38;5;66;03m# for label in self.labels:\u001b[39;00m\n\u001b[1;32m 113\u001b[0m \u001b[38;5;66;03m# label.remove_empty_instances(keep_empty_frames=False)\u001b[39;00m\n", + "File \u001b[0;32m~/miniforge3/envs/dreem/lib/python3.11/site-packages/sleap_io/io/main.py:19\u001b[0m, in \u001b[0;36mload_slp\u001b[0;34m(filename)\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mload_slp\u001b[39m(filename: \u001b[38;5;28mstr\u001b[39m) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Labels:\n\u001b[1;32m 11\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Load a SLEAP dataset.\u001b[39;00m\n\u001b[1;32m 12\u001b[0m \n\u001b[1;32m 13\u001b[0m \u001b[38;5;124;03m Args:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 17\u001b[0m \u001b[38;5;124;03m The dataset as a `Labels` object.\u001b[39;00m\n\u001b[1;32m 18\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m---> 19\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mslp\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mread_labels\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfilename\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/miniforge3/envs/dreem/lib/python3.11/site-packages/sleap_io/io/slp.py:1011\u001b[0m, in \u001b[0;36mread_labels\u001b[0;34m(labels_path)\u001b[0m\n\u001b[1;32m 1002\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mread_labels\u001b[39m(labels_path: \u001b[38;5;28mstr\u001b[39m) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Labels:\n\u001b[1;32m 1003\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Read a SLEAP labels file.\u001b[39;00m\n\u001b[1;32m 1004\u001b[0m \n\u001b[1;32m 1005\u001b[0m \u001b[38;5;124;03m Args:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 1009\u001b[0m \u001b[38;5;124;03m The processed `Labels` object.\u001b[39;00m\n\u001b[1;32m 1010\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m-> 1011\u001b[0m tracks \u001b[38;5;241m=\u001b[39m \u001b[43mread_tracks\u001b[49m\u001b[43m(\u001b[49m\u001b[43mlabels_path\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1012\u001b[0m videos \u001b[38;5;241m=\u001b[39m read_videos(labels_path)\n\u001b[1;32m 1013\u001b[0m skeletons \u001b[38;5;241m=\u001b[39m read_skeletons(labels_path)\n", + "File \u001b[0;32m~/miniforge3/envs/dreem/lib/python3.11/site-packages/sleap_io/io/slp.py:448\u001b[0m, in \u001b[0;36mread_tracks\u001b[0;34m(labels_path)\u001b[0m\n\u001b[1;32m 439\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mread_tracks\u001b[39m(labels_path: \u001b[38;5;28mstr\u001b[39m) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m \u001b[38;5;28mlist\u001b[39m[Track]:\n\u001b[1;32m 440\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Read `Track` dataset in a SLEAP labels file.\u001b[39;00m\n\u001b[1;32m 441\u001b[0m \n\u001b[1;32m 442\u001b[0m \u001b[38;5;124;03m Args:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 446\u001b[0m \u001b[38;5;124;03m A list of `Track` objects.\u001b[39;00m\n\u001b[1;32m 447\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m--> 448\u001b[0m tracks \u001b[38;5;241m=\u001b[39m [json\u001b[38;5;241m.\u001b[39mloads(x) \u001b[38;5;28;01mfor\u001b[39;00m x \u001b[38;5;129;01min\u001b[39;00m \u001b[43mread_hdf5_dataset\u001b[49m\u001b[43m(\u001b[49m\u001b[43mlabels_path\u001b[49m\u001b[43m,\u001b[49m\u001b[43m 
\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtracks_json\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m]\n\u001b[1;32m 449\u001b[0m track_objects \u001b[38;5;241m=\u001b[39m []\n\u001b[1;32m 450\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m track \u001b[38;5;129;01min\u001b[39;00m tracks:\n", + "File \u001b[0;32m~/miniforge3/envs/dreem/lib/python3.11/site-packages/sleap_io/io/utils.py:21\u001b[0m, in \u001b[0;36mread_hdf5_dataset\u001b[0;34m(filename, dataset)\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mread_hdf5_dataset\u001b[39m(filename: \u001b[38;5;28mstr\u001b[39m, dataset: \u001b[38;5;28mstr\u001b[39m) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m np\u001b[38;5;241m.\u001b[39mndarray:\n\u001b[1;32m 12\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Read data from an HDF5 file.\u001b[39;00m\n\u001b[1;32m 13\u001b[0m \n\u001b[1;32m 14\u001b[0m \u001b[38;5;124;03m Args:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 19\u001b[0m \u001b[38;5;124;03m The data as an array.\u001b[39;00m\n\u001b[1;32m 20\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m---> 21\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[43mh5py\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mFile\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfilename\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mr\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mas\u001b[39;00m f:\n\u001b[1;32m 22\u001b[0m data \u001b[38;5;241m=\u001b[39m f[dataset][()]\n\u001b[1;32m 23\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m data\n", + "File \u001b[0;32m~/miniforge3/envs/dreem/lib/python3.11/site-packages/h5py/_hl/files.py:562\u001b[0m, in \u001b[0;36mFile.__init__\u001b[0;34m(self, name, mode, driver, libver, userblock_size, swmr, rdcc_nslots, rdcc_nbytes, rdcc_w0, track_order, fs_strategy, fs_persist, fs_threshold, fs_page_size, page_buf_size, min_meta_keep, min_raw_keep, locking, alignment_threshold, alignment_interval, meta_block_size, **kwds)\u001b[0m\n\u001b[1;32m 553\u001b[0m fapl \u001b[38;5;241m=\u001b[39m make_fapl(driver, libver, rdcc_nslots, rdcc_nbytes, rdcc_w0,\n\u001b[1;32m 554\u001b[0m locking, page_buf_size, min_meta_keep, min_raw_keep,\n\u001b[1;32m 555\u001b[0m alignment_threshold\u001b[38;5;241m=\u001b[39malignment_threshold,\n\u001b[1;32m 556\u001b[0m alignment_interval\u001b[38;5;241m=\u001b[39malignment_interval,\n\u001b[1;32m 557\u001b[0m meta_block_size\u001b[38;5;241m=\u001b[39mmeta_block_size,\n\u001b[1;32m 558\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwds)\n\u001b[1;32m 559\u001b[0m fcpl \u001b[38;5;241m=\u001b[39m make_fcpl(track_order\u001b[38;5;241m=\u001b[39mtrack_order, fs_strategy\u001b[38;5;241m=\u001b[39mfs_strategy,\n\u001b[1;32m 560\u001b[0m fs_persist\u001b[38;5;241m=\u001b[39mfs_persist, fs_threshold\u001b[38;5;241m=\u001b[39mfs_threshold,\n\u001b[1;32m 561\u001b[0m fs_page_size\u001b[38;5;241m=\u001b[39mfs_page_size)\n\u001b[0;32m--> 562\u001b[0m fid \u001b[38;5;241m=\u001b[39m \u001b[43mmake_fid\u001b[49m\u001b[43m(\u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmode\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43muserblock_size\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfapl\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfcpl\u001b[49m\u001b[43m,\u001b[49m\u001b[43m 
\u001b[49m\u001b[43mswmr\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mswmr\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 564\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(libver, \u001b[38;5;28mtuple\u001b[39m):\n\u001b[1;32m 565\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_libver \u001b[38;5;241m=\u001b[39m libver\n", + "File \u001b[0;32m~/miniforge3/envs/dreem/lib/python3.11/site-packages/h5py/_hl/files.py:235\u001b[0m, in \u001b[0;36mmake_fid\u001b[0;34m(name, mode, userblock_size, fapl, fcpl, swmr)\u001b[0m\n\u001b[1;32m 233\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m swmr \u001b[38;5;129;01mand\u001b[39;00m swmr_support:\n\u001b[1;32m 234\u001b[0m flags \u001b[38;5;241m|\u001b[39m\u001b[38;5;241m=\u001b[39m h5f\u001b[38;5;241m.\u001b[39mACC_SWMR_READ\n\u001b[0;32m--> 235\u001b[0m fid \u001b[38;5;241m=\u001b[39m \u001b[43mh5f\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mopen\u001b[49m\u001b[43m(\u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mflags\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfapl\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mfapl\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 236\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m mode \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mr+\u001b[39m\u001b[38;5;124m'\u001b[39m:\n\u001b[1;32m 237\u001b[0m fid \u001b[38;5;241m=\u001b[39m h5f\u001b[38;5;241m.\u001b[39mopen(name, h5f\u001b[38;5;241m.\u001b[39mACC_RDWR, fapl\u001b[38;5;241m=\u001b[39mfapl)\n", + "File \u001b[0;32mh5py/_objects.pyx:54\u001b[0m, in \u001b[0;36mh5py._objects.with_phil.wrapper\u001b[0;34m()\u001b[0m\n", + "File \u001b[0;32mh5py/_objects.pyx:55\u001b[0m, in \u001b[0;36mh5py._objects.with_phil.wrapper\u001b[0;34m()\u001b[0m\n", + "File \u001b[0;32mh5py/h5f.pyx:102\u001b[0m, in \u001b[0;36mh5py.h5f.open\u001b[0;34m()\u001b[0m\n", + "\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] Unable to open file (unable to open file: name = '/home/jovyan/talmolab-smb/datasets/mot/microscopy/airyscan_proofread/Final/dreem-train/10-1.slp', errno = 2, error message = 'No such file or directory', flags = 0, o_flags = 0)" + ] + } + ], "source": [ "# get sample crops from training data to pass through the network\n", "train_path = \"/home/jovyan/talmolab-smb/datasets/mot/microscopy/airyscan_proofread/Final/dreem-train\"\n", @@ -313,7 +335,7 @@ }, { "cell_type": "code", - "execution_count": 40, + "execution_count": null, "id": "27bfdb50-2eee-4207-8a16-6481a9905e90", "metadata": { "tags": [] @@ -329,7 +351,7 @@ }, { "cell_type": "code", - "execution_count": 41, + "execution_count": null, "id": "8ea441b2-b12a-4f10-8821-aef889a063ba", "metadata": { "tags": [] @@ -343,7 +365,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "id": "b863dbfe-d9fc-4ed1-bf97-3f304d3d03a6", "metadata": { "collapsed": true, @@ -381,7 +403,7 @@ }, { "cell_type": "code", - "execution_count": 43, + "execution_count": null, "id": "8b17fdb7", "metadata": { "tags": [] @@ -395,7 +417,7 @@ }, { "cell_type": "code", - "execution_count": 44, + "execution_count": null, "id": "7999fcef-953b-42cf-927c-f3b617f68157", "metadata": { "tags": [] @@ -438,7 +460,7 @@ }, { "cell_type": "code", - "execution_count": 92, + "execution_count": null, "id": "e299e8a0-61eb-4eee-901c-49aa7e678b3b", "metadata": { "tags": [] @@ -495,7 +517,7 @@ }, { "cell_type": "code", - "execution_count": 45, + "execution_count": null, "id": 
"75ec8cab-25b9-4e9e-a64a-b5dbe00cc81a", "metadata": { "tags": [] @@ -516,51 +538,32 @@ }, { "cell_type": "code", - "execution_count": 123, + "execution_count": null, "id": "f0823cf1-2a35-4920-a62e-896bd9dbb078", "metadata": { "tags": [] }, "outputs": [ { - "data": { - "text/plain": [ - "torch.Size([1, 491, 1, 1024])" - ] - }, - "execution_count": 123, - "metadata": {}, - "output_type": "execute_result" + "ename": "NameError", + "evalue": "name 'ref_instances' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[3], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;66;03m# input data for transformer\u001b[39;00m\n\u001b[1;32m 2\u001b[0m ref_features \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mcat(\n\u001b[0;32m----> 3\u001b[0m [instance\u001b[38;5;241m.\u001b[39mfeatures \u001b[38;5;28;01mfor\u001b[39;00m instance \u001b[38;5;129;01min\u001b[39;00m \u001b[43mref_instances\u001b[49m], dim\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0\u001b[39m\n\u001b[1;32m 4\u001b[0m )\u001b[38;5;241m.\u001b[39munsqueeze(\u001b[38;5;241m0\u001b[39m)\n\u001b[1;32m 6\u001b[0m \u001b[38;5;66;03m# create transformer instance to test embeddings \u001b[39;00m\n\u001b[1;32m 7\u001b[0m tfmr \u001b[38;5;241m=\u001b[39m Transformer()\n", + "\u001b[0;31mNameError\u001b[0m: name 'ref_instances' is not defined" + ] } ], "source": [ - "# prepare data and apply rope\n", - "rope = tune.RotaryPositionalEmbeddings(feat_dim)\n", - "\n", + "# input data for transformer\n", "ref_features = torch.cat(\n", " [instance.features for instance in ref_instances], dim=0\n", " ).unsqueeze(0)\n", "\n", - "# input must be of shape (num_batches, num_instances, num_attn_heads, d_model)\n", - "# use num_heads=1 to use torch ROPE; we pass this into torch multiheadattn later which doesn't \n", - "# use num_heads in the input data\n", - "ref_features = torch.unsqueeze(ref_features, 2)\n", - "rope_ref_feat = rope(ref_features)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 93, - "id": "48894fba-2ffc-4f5a-aceb-26b711b7b51f", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "encoder_queries = prepare_for_xfmr(ref_instances)\n", - "encoder_features = xfmr_encoder(\n", - " encoder_queries, pos_emb=ref_emb\n", - ") # (total_instances, batch_size, embed_dim)" + "# create transformer instance to test embeddings \n", + "tfmr = Transformer()\n" ] } ], @@ -568,7 +571,7 @@ "kernelspec": { "display_name": "dreem", "language": "python", - "name": "dreem" + "name": "python3" }, "language_info": { "codemirror_mode": { diff --git a/run_trainer.py b/run_trainer.py new file mode 100644 index 0000000..c69d93f --- /dev/null +++ b/run_trainer.py @@ -0,0 +1,10 @@ +from dreem.training import train +from omegaconf import OmegaConf + +base_config = "dreem/training/configs/base.yaml" +# params_config = "/path/to/override.yaml" + +cfg = OmegaConf.load(base_config) +# cfg["params_config"] = params_config + +train.run(cfg) \ No newline at end of file From dba9f080187bc967e84da4b5b9ae805670e7000a Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Mon, 5 Aug 2024 15:28:39 -0700 Subject: [PATCH 38/63] Update rope.ipynb --- rope.ipynb | 127 ++++++++++++++++++++++++++++------------------------- 1 file changed, 67 insertions(+), 60 deletions(-) diff --git a/rope.ipynb b/rope.ipynb index 8213213..593439b 100644 --- a/rope.ipynb +++ 
b/rope.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 4, + "execution_count": 156, "id": "1bd666a7-0ad1-4ae7-a56e-43429a1228d8", "metadata": { "tags": [] @@ -21,12 +21,13 @@ "from dreem.datasets import SleapDataset\n", "from dreem.models.transformer import *\n", "from dreem.models import VisualEncoder\n", - "from dreem.models import GlobalTrackingTransformer" + "from dreem.models import GlobalTrackingTransformer\n", + "from dreem.models.gtr_runner import GTRRunner" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 130, "id": "a8736593-71f7-4ab6-a594-eb52d2fd94ac", "metadata": { "tags": [] @@ -282,7 +283,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 131, "id": "fc8aa9bf-7e83-4fa6-892e-8a7703777f95", "metadata": { "tags": [] @@ -296,35 +297,12 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 132, "id": "4903f6c3-1cc8-412b-b988-ebeb2757c3b7", "metadata": { "tags": [] }, - "outputs": [ - { - "ename": "FileNotFoundError", - "evalue": "[Errno 2] Unable to open file (unable to open file: name = '/home/jovyan/talmolab-smb/datasets/mot/microscopy/airyscan_proofread/Final/dreem-train/10-1.slp', errno = 2, error message = 'No such file or directory', flags = 0, o_flags = 0)", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[7], line 4\u001b[0m\n\u001b[1;32m 2\u001b[0m train_path \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m/home/jovyan/talmolab-smb/datasets/mot/microscopy/airyscan_proofread/Final/dreem-train\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 3\u001b[0m \u001b[38;5;66;03m# train_path = \"/Users/mustafashaikh/dreem-data/dreem-train\"\u001b[39;00m\n\u001b[0;32m----> 4\u001b[0m data \u001b[38;5;241m=\u001b[39m \u001b[43mSleapDataset\u001b[49m\u001b[43m(\u001b[49m\u001b[43m[\u001b[49m\u001b[43mos\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpath\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mjoin\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtrain_path\u001b[49m\u001b[43m,\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m10-1.slp\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m[\u001b[49m\u001b[43mos\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpath\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mjoin\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtrain_path\u001b[49m\u001b[43m,\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m10-1.mp4\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcrop_size\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m64\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 5\u001b[0m \u001b[43m \u001b[49m\u001b[43mmode\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtrain\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mclip_length\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m32\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m 
\u001b[49m\u001b[43manchors\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcentroid\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/Documents/GitHub/dreem/dreem/datasets/sleap_dataset.py:108\u001b[0m, in \u001b[0;36mSleapDataset.__init__\u001b[0;34m(self, slp_files, video_files, padding, crop_size, anchors, chunk, clip_length, mode, handle_missing, augmentations, n_chunks, seed, verbose)\u001b[0m\n\u001b[1;32m 104\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mverbose \u001b[38;5;241m=\u001b[39m verbose\n\u001b[1;32m 106\u001b[0m \u001b[38;5;66;03m# if self.seed is not None:\u001b[39;00m\n\u001b[1;32m 107\u001b[0m \u001b[38;5;66;03m# np.random.seed(self.seed)\u001b[39;00m\n\u001b[0;32m--> 108\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlabels \u001b[38;5;241m=\u001b[39m \u001b[43m[\u001b[49m\u001b[43msio\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload_slp\u001b[49m\u001b[43m(\u001b[49m\u001b[43mslp_file\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mslp_file\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mslp_files\u001b[49m\u001b[43m]\u001b[49m\n\u001b[1;32m 109\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mvideos \u001b[38;5;241m=\u001b[39m [imageio\u001b[38;5;241m.\u001b[39mget_reader(vid_file) \u001b[38;5;28;01mfor\u001b[39;00m vid_file \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mvid_files]\n\u001b[1;32m 110\u001b[0m \u001b[38;5;66;03m# do we need this? would need to update with sleap-io\u001b[39;00m\n\u001b[1;32m 111\u001b[0m \n\u001b[1;32m 112\u001b[0m \u001b[38;5;66;03m# for label in self.labels:\u001b[39;00m\n\u001b[1;32m 113\u001b[0m \u001b[38;5;66;03m# label.remove_empty_instances(keep_empty_frames=False)\u001b[39;00m\n", - "File \u001b[0;32m~/Documents/GitHub/dreem/dreem/datasets/sleap_dataset.py:108\u001b[0m, in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 104\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mverbose \u001b[38;5;241m=\u001b[39m verbose\n\u001b[1;32m 106\u001b[0m \u001b[38;5;66;03m# if self.seed is not None:\u001b[39;00m\n\u001b[1;32m 107\u001b[0m \u001b[38;5;66;03m# np.random.seed(self.seed)\u001b[39;00m\n\u001b[0;32m--> 108\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlabels \u001b[38;5;241m=\u001b[39m [\u001b[43msio\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload_slp\u001b[49m\u001b[43m(\u001b[49m\u001b[43mslp_file\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mfor\u001b[39;00m slp_file \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mslp_files]\n\u001b[1;32m 109\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mvideos \u001b[38;5;241m=\u001b[39m [imageio\u001b[38;5;241m.\u001b[39mget_reader(vid_file) \u001b[38;5;28;01mfor\u001b[39;00m vid_file \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mvid_files]\n\u001b[1;32m 110\u001b[0m \u001b[38;5;66;03m# do we need this? 
would need to update with sleap-io\u001b[39;00m\n\u001b[1;32m 111\u001b[0m \n\u001b[1;32m 112\u001b[0m \u001b[38;5;66;03m# for label in self.labels:\u001b[39;00m\n\u001b[1;32m 113\u001b[0m \u001b[38;5;66;03m# label.remove_empty_instances(keep_empty_frames=False)\u001b[39;00m\n", - "File \u001b[0;32m~/miniforge3/envs/dreem/lib/python3.11/site-packages/sleap_io/io/main.py:19\u001b[0m, in \u001b[0;36mload_slp\u001b[0;34m(filename)\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mload_slp\u001b[39m(filename: \u001b[38;5;28mstr\u001b[39m) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Labels:\n\u001b[1;32m 11\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Load a SLEAP dataset.\u001b[39;00m\n\u001b[1;32m 12\u001b[0m \n\u001b[1;32m 13\u001b[0m \u001b[38;5;124;03m Args:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 17\u001b[0m \u001b[38;5;124;03m The dataset as a `Labels` object.\u001b[39;00m\n\u001b[1;32m 18\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m---> 19\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mslp\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mread_labels\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfilename\u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/miniforge3/envs/dreem/lib/python3.11/site-packages/sleap_io/io/slp.py:1011\u001b[0m, in \u001b[0;36mread_labels\u001b[0;34m(labels_path)\u001b[0m\n\u001b[1;32m 1002\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mread_labels\u001b[39m(labels_path: \u001b[38;5;28mstr\u001b[39m) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Labels:\n\u001b[1;32m 1003\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Read a SLEAP labels file.\u001b[39;00m\n\u001b[1;32m 1004\u001b[0m \n\u001b[1;32m 1005\u001b[0m \u001b[38;5;124;03m Args:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 1009\u001b[0m \u001b[38;5;124;03m The processed `Labels` object.\u001b[39;00m\n\u001b[1;32m 1010\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m-> 1011\u001b[0m tracks \u001b[38;5;241m=\u001b[39m \u001b[43mread_tracks\u001b[49m\u001b[43m(\u001b[49m\u001b[43mlabels_path\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1012\u001b[0m videos \u001b[38;5;241m=\u001b[39m read_videos(labels_path)\n\u001b[1;32m 1013\u001b[0m skeletons \u001b[38;5;241m=\u001b[39m read_skeletons(labels_path)\n", - "File \u001b[0;32m~/miniforge3/envs/dreem/lib/python3.11/site-packages/sleap_io/io/slp.py:448\u001b[0m, in \u001b[0;36mread_tracks\u001b[0;34m(labels_path)\u001b[0m\n\u001b[1;32m 439\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mread_tracks\u001b[39m(labels_path: \u001b[38;5;28mstr\u001b[39m) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m \u001b[38;5;28mlist\u001b[39m[Track]:\n\u001b[1;32m 440\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Read `Track` dataset in a SLEAP labels file.\u001b[39;00m\n\u001b[1;32m 441\u001b[0m \n\u001b[1;32m 442\u001b[0m \u001b[38;5;124;03m Args:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 446\u001b[0m \u001b[38;5;124;03m A list of `Track` objects.\u001b[39;00m\n\u001b[1;32m 447\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m--> 448\u001b[0m tracks \u001b[38;5;241m=\u001b[39m [json\u001b[38;5;241m.\u001b[39mloads(x) \u001b[38;5;28;01mfor\u001b[39;00m x \u001b[38;5;129;01min\u001b[39;00m \u001b[43mread_hdf5_dataset\u001b[49m\u001b[43m(\u001b[49m\u001b[43mlabels_path\u001b[49m\u001b[43m,\u001b[49m\u001b[43m 
\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtracks_json\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m]\n\u001b[1;32m 449\u001b[0m track_objects \u001b[38;5;241m=\u001b[39m []\n\u001b[1;32m 450\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m track \u001b[38;5;129;01min\u001b[39;00m tracks:\n", - "File \u001b[0;32m~/miniforge3/envs/dreem/lib/python3.11/site-packages/sleap_io/io/utils.py:21\u001b[0m, in \u001b[0;36mread_hdf5_dataset\u001b[0;34m(filename, dataset)\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mread_hdf5_dataset\u001b[39m(filename: \u001b[38;5;28mstr\u001b[39m, dataset: \u001b[38;5;28mstr\u001b[39m) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m np\u001b[38;5;241m.\u001b[39mndarray:\n\u001b[1;32m 12\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Read data from an HDF5 file.\u001b[39;00m\n\u001b[1;32m 13\u001b[0m \n\u001b[1;32m 14\u001b[0m \u001b[38;5;124;03m Args:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 19\u001b[0m \u001b[38;5;124;03m The data as an array.\u001b[39;00m\n\u001b[1;32m 20\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m---> 21\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[43mh5py\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mFile\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfilename\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mr\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mas\u001b[39;00m f:\n\u001b[1;32m 22\u001b[0m data \u001b[38;5;241m=\u001b[39m f[dataset][()]\n\u001b[1;32m 23\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m data\n", - "File \u001b[0;32m~/miniforge3/envs/dreem/lib/python3.11/site-packages/h5py/_hl/files.py:562\u001b[0m, in \u001b[0;36mFile.__init__\u001b[0;34m(self, name, mode, driver, libver, userblock_size, swmr, rdcc_nslots, rdcc_nbytes, rdcc_w0, track_order, fs_strategy, fs_persist, fs_threshold, fs_page_size, page_buf_size, min_meta_keep, min_raw_keep, locking, alignment_threshold, alignment_interval, meta_block_size, **kwds)\u001b[0m\n\u001b[1;32m 553\u001b[0m fapl \u001b[38;5;241m=\u001b[39m make_fapl(driver, libver, rdcc_nslots, rdcc_nbytes, rdcc_w0,\n\u001b[1;32m 554\u001b[0m locking, page_buf_size, min_meta_keep, min_raw_keep,\n\u001b[1;32m 555\u001b[0m alignment_threshold\u001b[38;5;241m=\u001b[39malignment_threshold,\n\u001b[1;32m 556\u001b[0m alignment_interval\u001b[38;5;241m=\u001b[39malignment_interval,\n\u001b[1;32m 557\u001b[0m meta_block_size\u001b[38;5;241m=\u001b[39mmeta_block_size,\n\u001b[1;32m 558\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwds)\n\u001b[1;32m 559\u001b[0m fcpl \u001b[38;5;241m=\u001b[39m make_fcpl(track_order\u001b[38;5;241m=\u001b[39mtrack_order, fs_strategy\u001b[38;5;241m=\u001b[39mfs_strategy,\n\u001b[1;32m 560\u001b[0m fs_persist\u001b[38;5;241m=\u001b[39mfs_persist, fs_threshold\u001b[38;5;241m=\u001b[39mfs_threshold,\n\u001b[1;32m 561\u001b[0m fs_page_size\u001b[38;5;241m=\u001b[39mfs_page_size)\n\u001b[0;32m--> 562\u001b[0m fid \u001b[38;5;241m=\u001b[39m \u001b[43mmake_fid\u001b[49m\u001b[43m(\u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmode\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43muserblock_size\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfapl\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfcpl\u001b[49m\u001b[43m,\u001b[49m\u001b[43m 
\u001b[49m\u001b[43mswmr\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mswmr\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 564\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(libver, \u001b[38;5;28mtuple\u001b[39m):\n\u001b[1;32m 565\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_libver \u001b[38;5;241m=\u001b[39m libver\n", - "File \u001b[0;32m~/miniforge3/envs/dreem/lib/python3.11/site-packages/h5py/_hl/files.py:235\u001b[0m, in \u001b[0;36mmake_fid\u001b[0;34m(name, mode, userblock_size, fapl, fcpl, swmr)\u001b[0m\n\u001b[1;32m 233\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m swmr \u001b[38;5;129;01mand\u001b[39;00m swmr_support:\n\u001b[1;32m 234\u001b[0m flags \u001b[38;5;241m|\u001b[39m\u001b[38;5;241m=\u001b[39m h5f\u001b[38;5;241m.\u001b[39mACC_SWMR_READ\n\u001b[0;32m--> 235\u001b[0m fid \u001b[38;5;241m=\u001b[39m \u001b[43mh5f\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mopen\u001b[49m\u001b[43m(\u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mflags\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfapl\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mfapl\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 236\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m mode \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mr+\u001b[39m\u001b[38;5;124m'\u001b[39m:\n\u001b[1;32m 237\u001b[0m fid \u001b[38;5;241m=\u001b[39m h5f\u001b[38;5;241m.\u001b[39mopen(name, h5f\u001b[38;5;241m.\u001b[39mACC_RDWR, fapl\u001b[38;5;241m=\u001b[39mfapl)\n", - "File \u001b[0;32mh5py/_objects.pyx:54\u001b[0m, in \u001b[0;36mh5py._objects.with_phil.wrapper\u001b[0;34m()\u001b[0m\n", - "File \u001b[0;32mh5py/_objects.pyx:55\u001b[0m, in \u001b[0;36mh5py._objects.with_phil.wrapper\u001b[0;34m()\u001b[0m\n", - "File \u001b[0;32mh5py/h5f.pyx:102\u001b[0m, in \u001b[0;36mh5py.h5f.open\u001b[0;34m()\u001b[0m\n", - "\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] Unable to open file (unable to open file: name = '/home/jovyan/talmolab-smb/datasets/mot/microscopy/airyscan_proofread/Final/dreem-train/10-1.slp', errno = 2, error message = 'No such file or directory', flags = 0, o_flags = 0)" - ] - } - ], + "outputs": [], "source": [ "# get sample crops from training data to pass through the network\n", "train_path = \"/home/jovyan/talmolab-smb/datasets/mot/microscopy/airyscan_proofread/Final/dreem-train\"\n", @@ -335,14 +313,14 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 133, "id": "27bfdb50-2eee-4207-8a16-6481a9905e90", "metadata": { "tags": [] }, "outputs": [], "source": [ - "# get a list of all instances; this is the format that the model pipeline uses as input data\n", + "# get a list of all instances in the first clip; this is the format that the model pipeline uses as input data\n", "ref_instances = []\n", "for frame in data[0]:\n", " for instance in frame.instances:\n", @@ -351,7 +329,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 134, "id": "8ea441b2-b12a-4f10-8821-aef889a063ba", "metadata": { "tags": [] @@ -365,7 +343,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 135, "id": "b863dbfe-d9fc-4ed1-bf97-3f304d3d03a6", "metadata": { "collapsed": true, @@ -378,10 +356,10 @@ { "data": { "text/plain": [ - "" + "" ] }, - "execution_count": 8, + "execution_count": 135, "metadata": {}, "output_type": "execute_result" }, @@ -403,7 +381,7 @@ }, { "cell_type": "code", - "execution_count": null, + 
"execution_count": 136, "id": "8b17fdb7", "metadata": { "tags": [] @@ -417,7 +395,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 137, "id": "7999fcef-953b-42cf-927c-f3b617f68157", "metadata": { "tags": [] @@ -460,7 +438,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 138, "id": "e299e8a0-61eb-4eee-901c-49aa7e678b3b", "metadata": { "tags": [] @@ -517,7 +495,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 139, "id": "75ec8cab-25b9-4e9e-a64a-b5dbe00cc81a", "metadata": { "tags": [] @@ -538,32 +516,61 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 140, "id": "f0823cf1-2a35-4920-a62e-896bd9dbb078", "metadata": { "tags": [] }, - "outputs": [ - { - "ename": "NameError", - "evalue": "name 'ref_instances' is not defined", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[3], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;66;03m# input data for transformer\u001b[39;00m\n\u001b[1;32m 2\u001b[0m ref_features \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mcat(\n\u001b[0;32m----> 3\u001b[0m [instance\u001b[38;5;241m.\u001b[39mfeatures \u001b[38;5;28;01mfor\u001b[39;00m instance \u001b[38;5;129;01min\u001b[39;00m \u001b[43mref_instances\u001b[49m], dim\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0\u001b[39m\n\u001b[1;32m 4\u001b[0m )\u001b[38;5;241m.\u001b[39munsqueeze(\u001b[38;5;241m0\u001b[39m)\n\u001b[1;32m 6\u001b[0m \u001b[38;5;66;03m# create transformer instance to test embeddings \u001b[39;00m\n\u001b[1;32m 7\u001b[0m tfmr \u001b[38;5;241m=\u001b[39m Transformer()\n", - "\u001b[0;31mNameError\u001b[0m: name 'ref_instances' is not defined" - ] - } - ], + "outputs": [], "source": [ - "# input data for transformer\n", - "ref_features = torch.cat(\n", - " [instance.features for instance in ref_instances], dim=0\n", - " ).unsqueeze(0)\n", - "\n", "# create transformer instance to test embeddings \n", - "tfmr = Transformer()\n" + "tfmr = Transformer()" + ] + }, + { + "cell_type": "code", + "execution_count": 143, + "id": "5e0b9d31-34be-40f8-91dc-b91d59aee170", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "assoc = tfmr(ref_instances)" + ] + }, + { + "cell_type": "code", + "execution_count": 157, + "id": "9f29ca35-9ff2-4e9a-bba0-37a3a14ad522", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "gtr = GTRRunner()" + ] + }, + { + "cell_type": "code", + "execution_count": 160, + "id": "0aa3876a-6246-4d02-80a5-013d382f6d38", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "metrics = gtr._shared_eval_step(data[0],\"train\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "aee0d129-83f2-4f76-b452-132391554b4c", + "metadata": {}, + "outputs": [], + "source": [ + "metrics" ] } ], @@ -571,7 +578,7 @@ "kernelspec": { "display_name": "dreem", "language": "python", - "name": "python3" + "name": "dreem" }, "language_info": { "codemirror_mode": { From 0dd6a60a279c07141d24fdfe9cd92dac7d7b1735 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Tue, 6 Aug 2024 12:32:46 -0700 Subject: [PATCH 39/63] refactor transformer encoder - add support for stack/avg/concatenate - move embedding processing out of transformer and into encoder --- dreem/io/config.py | 2 + dreem/models/embedding.py | 1 + dreem/models/transformer.py | 102 
+++++++++++++++++++++---------- dreem/training/configs/base.yaml | 19 +++--- dreem/training/train.py | 2 + run_trainer.py | 4 +- 6 files changed, 90 insertions(+), 40 deletions(-) diff --git a/dreem/io/config.py b/dreem/io/config.py index 36d53df..3be43c5 100644 --- a/dreem/io/config.py +++ b/dreem/io/config.py @@ -40,6 +40,8 @@ def __init__(self, cfg: DictConfig, params_cfg: DictConfig | None = None): else: self.cfg = cfg + OmegaConf.set_struct(self.cfg, False) + def __repr__(self): """Object representation of config class.""" return f"Config({self.cfg})" diff --git a/dreem/models/embedding.py b/dreem/models/embedding.py index 9edb94d..56dbda8 100644 --- a/dreem/models/embedding.py +++ b/dreem/models/embedding.py @@ -221,6 +221,7 @@ def __init__( self._emb_func = self._sine_temp_embedding elif self.mode == "rope": + # TODO: pos/temp uses the same processing but takes the input differently self._emb_func = self._rope_embedding diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index 13e4529..6139b43 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -19,7 +19,7 @@ import copy import torch import torch.nn.functional as F -from typing import List +from typing import Dict, Tuple # todo: add named tensors # todo: add flash attention @@ -161,7 +161,6 @@ def forward( [instance.features for instance in ref_instances], dim=0 ).unsqueeze(0) - # window_length = len(frames) # instances_per_frame = [frame.num_detected for frame in frames] total_instances = len(ref_instances) embed_dim = ref_features.shape[-1] @@ -170,23 +169,6 @@ def forward( ref_boxes = torch.nan_to_num(ref_boxes, -1.0) ref_times, query_times = get_times(ref_instances, query_instances) - window_length = len(ref_times.unique()) - - ref_temp_emb = self.temp_emb(ref_times) - - ref_pos_emb = self.pos_emb(ref_boxes) - - if self.return_embedding: - for i, instance in enumerate(ref_instances): - instance.add_embedding("pos", ref_pos_emb[i]) - instance.add_embedding("temp", ref_temp_emb[i]) - - ref_emb = (ref_pos_emb + ref_temp_emb) / 2.0 - - ref_emb = ref_emb.view(1, total_instances, embed_dim) - - ref_emb = ref_emb.permute(1, 0, 2) # (total_instances, batch_size, embed_dim) - batch_size, total_instances, embed_dim = ref_features.shape ref_features = ref_features.permute( @@ -195,10 +177,20 @@ def forward( encoder_queries = ref_features - encoder_features = self.encoder( - encoder_queries, pos_emb=ref_emb + encoder_features, ref_pos_emb, ref_temp_emb = self.encoder( + encoder_queries, + embedding_map={"pos": self.pos_emb, "temp": self.temp_emb}, + ref_boxes=ref_boxes, + ref_times=ref_times, + embedding_agg_method=self.embedding_meta["embedding_agg_method"] ) # (total_instances, batch_size, embed_dim) + # TODO: check if instance.add_embedding() supports rotation matrices + if self.return_embedding: + for i, instance in enumerate(ref_instances): + instance.add_embedding("pos", ref_pos_emb[i]) + instance.add_embedding("temp", ref_temp_emb[i]) + n_query = total_instances query_features = ref_features @@ -299,13 +291,13 @@ def __init__( self.activation = _get_activation_fn(activation) def forward( - self, queries: torch.Tensor, embeddings : List[Embedding] + self, queries: torch.Tensor ) -> torch.Tensor: """Execute a forward pass of the encoder layer. 
Args: - queries: Input sequence for encoder (n_query, batch_size, embed_dim); transformed with embedding - pos_emb: Position embedding, if provided is added to src + queries: Input sequence for encoder (n_query, batch_size, embed_dim); + data is already transformed with embedding Returns: The output tensor of shape (n_query, batch_size, embed_dim). @@ -461,24 +453,71 @@ def __init__( self.norm = norm if norm is not None else nn.Identity() def forward( - self, queries: torch.Tensor, pos_emb: torch.Tensor = None - ) -> torch.Tensor: + self, queries: torch.Tensor, embedding_map: Dict[str, Embedding], + ref_boxes: torch.Tensor, ref_times: torch.Tensor, + embedding_agg_method: str + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """Execute a forward pass of encoder layer. Args: queries: The input tensor of shape (n_query, batch_size, embed_dim). - pos_emb: The positional embedding tensor of shape (n_query, embed_dim). + embedding_map: Dict of Embedding objects defining the pos/temp embeddings to be applied to + the input data before it passes to the EncoderLayer + ref_boxes: + ref_times: + embedding_agg_method: Returns: The output tensor of shape (n_query, batch_size, embed_dim). """ + for layer in self.layers: - # TODO: add embedding object call - # TODO: add the embedding object into the argument list to the forward() call - queries = layer(queries, pos_emb=pos_emb) + pos_emb, temp_emb = embedding_map["pos"], embedding_map["temp"] + # queries is of shape (n_query, batch_size, embed_dim); transpose for embeddings + queries = queries.permute(1,0,2) + # queries is now of shape (batch_size, n_query, embed_dim) + # calculate temporal embeddings and transform queries + queries_t, ref_temp_emb = temp_emb(queries, ref_times) + # if avg temp and pos, need bounding boxes + if embedding_agg_method == "average": + _, ref_pos_emb = pos_emb(queries, ref_boxes) + ref_emb = (ref_pos_emb + ref_temp_emb) / 2 + queries = queries + ref_emb + queries = queries.permute(1, 0, 2) + else: + # todo: input for x,y should be different (not ref_boxes) + # just extract the x,y coordinates from ref_boxes? 
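The open question in the todo above — how to pull separate x and y positions out of `ref_boxes` — is what the later `_spatial_emb_from_bb` helper answers by using bounding-box centroids. A minimal sketch of that idea, assuming boxes are ordered [ymin, xmin, ymax, xmax] with shape (n_query, batch_size, 4); the function name here is illustrative, not the final API:

import torch

def centroids_from_boxes(boxes: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
    """Split [ymin, xmin, ymax, xmax] boxes into per-instance x and y centroids."""
    # boxes: (n_query, batch_size, 4); average xmin/xmax and ymin/ymax
    centroid_x = boxes[..., [1, 3]].mean(dim=-1).squeeze()
    centroid_y = boxes[..., [0, 2]].mean(dim=-1).squeeze()
    return centroid_x, centroid_y

boxes = torch.rand(5, 1, 4)  # toy example: 5 instances, batch of 1
cx, cy = centroids_from_boxes(boxes)
assert cx.shape == cy.shape == (5,)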
+ # calculate spatial embedding for x, y separately + queries_x, ref_pos_emb = pos_emb(queries, ref_x) + queries_y, ref_pos_emb = pos_emb(queries, ref_y) + + # concatenate, stack, or average the queries + queries = self.collate_queries( + (queries, queries_t, queries_x, queries_y), + embedding_agg_method) + + # todo: encoderLayer needs to be made compatible with stack/concatenate; + # need to pass in embedding_agg_method + queries = layer(queries) encoder_features = self.norm(queries) - return encoder_features + + return encoder_features, ref_pos_emb, ref_temp_emb + + def collate_queries(self, _queries: Tuple[torch.Tensor], embedding_agg_method + ) -> torch.Tensor: + queries, queries_t, queries_x, queries_y = _queries + + if embedding_agg_method == "average": + return queries + elif embedding_agg_method == "stack": + # stacked of shape (3, batch_size, n_query, embed_dim) + stacked = torch.stack((queries_t, queries_x, queries_y), dim=-1) + # transpose for input to EncoderLayer + return stacked.permute(0, 2, 1, 3) + elif embedding_agg_method == "concatenate": + # todo: complete this, and transpose output + return class TransformerDecoder(nn.Module): @@ -577,3 +616,4 @@ def _get_activation_fn(activation: str) -> callable: if activation == "glu": return F.glu raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.") + diff --git a/dreem/training/configs/base.yaml b/dreem/training/configs/base.yaml index f9ed413..6a507cc 100644 --- a/dreem/training/configs/base.yaml +++ b/dreem/training/configs/base.yaml @@ -16,10 +16,11 @@ model: dropout_attn_head: 0.1 embedding_meta: pos: - mode: "fixed" + mode: "fixed" # supports fixed, learned, rope normalize: true temp: - mode: "fixed" + mode: "fixed" # supports fixed, learned, rope + embedding_agg_method: "stack" # supports stack, average, concatenate return_embedding: False decoder_self_attn: False @@ -66,24 +67,24 @@ runner: dataset: train_dataset: - slp_files: ["tests/data/sleap/two_flies.slp"] - video_files: ["tests/data/sleap/two_flies.mp4"] + slp_files: ["../../tests/data/sleap/two_flies.slp"] + video_files: ["../../tests/data/sleap/two_flies.mp4"] padding: 5 crop_size: 128 chunk: true clip_length: 32 val_dataset: - slp_files: ["tests/data/sleap/two_flies.slp"] - video_files: ["tests/data/sleap/two_flies.mp4"] + slp_files: ["../../tests/data/sleap/two_flies.slp"] + video_files: ["../../tests/data/sleap/two_flies.mp4"] padding: 5 crop_size: 128 chunk: True clip_length: 32 test_dataset: - slp_files: ["tests/data/sleap/two_flies.slp"] - video_files: ["tests/data/sleap/two_flies.mp4"] + slp_files: ["../../tests/data/sleap/two_flies.slp"] + video_files: ["../../tests/data/sleap/two_flies.mp4"] padding: 5 crop_size: 128 chunk: True @@ -130,6 +131,8 @@ checkpointing: every_n_epochs: 10 trainer: + accelerator: "mps" + devices: 1 check_val_every_n_epoch: 1 enable_checkpointing: true gradient_clip_val: null diff --git a/dreem/training/train.py b/dreem/training/train.py index 372bfa6..c34b499 100644 --- a/dreem/training/train.py +++ b/dreem/training/train.py @@ -53,6 +53,7 @@ def run(cfg: DictConfig): logger.info(f"Final train config: {train_cfg}") model = train_cfg.get_model() + train_dataset = train_cfg.get_dataset(mode="train") train_dataloader = train_cfg.get_dataloader(train_dataset, mode="train") @@ -83,6 +84,7 @@ def run(cfg: DictConfig): _ = callbacks.append(train_cfg.get_early_stopping()) accelerator = "gpu" if torch.cuda.is_available() else "cpu" + devices = torch.cuda.device_count() if torch.cuda.is_available() else cpu_count() 
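The trainer block above pins `accelerator: "mps"` with a single device for local Apple Silicon runs, while train.py now derives GPU/CPU device counts at runtime. A hedged sketch of how both cases could be resolved in one helper (illustrative only; the repo keeps these as separate config and code paths):

import torch
from multiprocessing import cpu_count

def select_accelerator() -> tuple[str, int]:
    """Pick a Lightning-style (accelerator, devices) pair for the current machine."""
    if torch.cuda.is_available():
        return "gpu", torch.cuda.device_count()
    if torch.backends.mps.is_available():
        # Apple Silicon: a single MPS device
        return "mps", 1
    return "cpu", cpu_count()

accelerator, devices = select_accelerator()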
trainer = train_cfg.get_trainer( diff --git a/run_trainer.py b/run_trainer.py index c69d93f..5b129ab 100644 --- a/run_trainer.py +++ b/run_trainer.py @@ -1,7 +1,9 @@ from dreem.training import train from omegaconf import OmegaConf +import os -base_config = "dreem/training/configs/base.yaml" +os.chdir("./dreem/training") +base_config = "./configs/base.yaml" # params_config = "/path/to/override.yaml" cfg = OmegaConf.load(base_config) From e492909b90bf3e8f132a2e7de83c517cb083be7e Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Tue, 6 Aug 2024 13:51:21 -0700 Subject: [PATCH 40/63] further changes for rope - get centroid from x,y for spatial embedding - complete stack agg method - add docstrings --- dreem/models/transformer.py | 60 +++++++++++++++++++++++++++---------- 1 file changed, 45 insertions(+), 15 deletions(-) diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index 6139b43..9e9d18d 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -191,6 +191,8 @@ def forward( instance.add_embedding("pos", ref_pos_emb[i]) instance.add_embedding("temp", ref_temp_emb[i]) + # -------------- Begin decoder pre-processing --------------- # + n_query = total_instances query_features = ref_features @@ -243,6 +245,7 @@ def forward( asso_output = [] for frame_features in decoder_features: + # todo: this needs to handle the 3x queries that come out of the encoder/decoder asso_matrix = self.attn_head(frame_features, encoder_features).view( n_query, total_instances ) @@ -474,22 +477,20 @@ def forward( for layer in self.layers: pos_emb, temp_emb = embedding_map["pos"], embedding_map["temp"] # queries is of shape (n_query, batch_size, embed_dim); transpose for embeddings - queries = queries.permute(1,0,2) - # queries is now of shape (batch_size, n_query, embed_dim) + queries = queries.permute(1,0,2) # queries is shape (batch_size, n_query, embed_dim) # calculate temporal embeddings and transform queries queries_t, ref_temp_emb = temp_emb(queries, ref_times) - # if avg temp and pos, need bounding boxes + # if avg. of temp and pos, need bounding boxes if embedding_agg_method == "average": _, ref_pos_emb = pos_emb(queries, ref_boxes) ref_emb = (ref_pos_emb + ref_temp_emb) / 2 queries = queries + ref_emb - queries = queries.permute(1, 0, 2) + queries = queries.permute(1, 0, 2) # transpose back before input to EncoderLayer else: - # todo: input for x,y should be different (not ref_boxes) - # just extract the x,y coordinates from ref_boxes? 
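Patches 40 and 41 flesh out the "rope" embedding mode wired up above. For intuition, here is a toy version of the rotation that rotary position embeddings apply to consecutive feature pairs, written from the standard RoPE formulation rather than the torchtune-based module used in embedding.py:

import torch

def toy_rope(x: torch.Tensor, positions: torch.Tensor, base: float = 10000.0) -> torch.Tensor:
    """Rotate consecutive feature pairs of x by position-dependent angles (toy RoPE)."""
    b, n, d = x.shape  # (batch_size, n_query, embed_dim); d must be even
    theta = 1.0 / (base ** (torch.arange(0, d, 2, dtype=torch.float32) / d))  # (d/2,)
    angles = positions.float()[:, None] * theta[None, :]                      # (n, d/2)
    cos, sin = angles.cos(), angles.sin()
    x1, x2 = x.reshape(b, n, d // 2, 2).unbind(-1)
    rotated = torch.stack((x1 * cos - x2 * sin, x1 * sin + x2 * cos), dim=-1)
    return rotated.reshape(b, n, d)

queries = torch.randn(1, 6, 8)  # 6 tokens, embed_dim 8
times = torch.arange(6)         # e.g. frame indices as temporal positions
assert toy_rope(queries, times).shape == queries.shape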
- # calculate spatial embedding for x, y separately - queries_x, ref_pos_emb = pos_emb(queries, ref_x) - queries_y, ref_pos_emb = pos_emb(queries, ref_y) + # calculate embedding array for x,y from bounding box centroids + ref_x, ref_y = self._spatial_emb_from_bb(ref_boxes) + queries_x, ref_pos_emb_x = pos_emb(queries, ref_x) + queries_y, ref_pos_emb_y = pos_emb(queries, ref_y) # concatenate, stack, or average the queries queries = self.collate_queries( @@ -504,20 +505,49 @@ def forward( return encoder_features, ref_pos_emb, ref_temp_emb - def collate_queries(self, _queries: Tuple[torch.Tensor], embedding_agg_method + def collate_queries(self, _queries: Tuple[torch.Tensor], embedding_agg_method: str ) -> torch.Tensor: + """ + + Args: + _queries: 3-tuple of queries (already transformed by embeddings) for x, y, t + each of shape (batch_size, n_query, embed_dim) + embedding_agg_method: String representing the aggregation method for embeddings + + Returns: Tensor of aggregated queries; can be concatenated (increased length of tokens), + stacked (increased number of tokens), or averaged (original token number and length) + """ + queries, queries_t, queries_x, queries_y = _queries if embedding_agg_method == "average": return queries elif embedding_agg_method == "stack": - # stacked of shape (3, batch_size, n_query, embed_dim) - stacked = torch.stack((queries_t, queries_x, queries_y), dim=-1) + # stacked is of shape (batch_size, 3*n_query, embed_dim) + stacked = torch.cat((queries_t, queries_x, queries_y), dim=1) # transpose for input to EncoderLayer - return stacked.permute(0, 2, 1, 3) + return stacked.permute(1, 0, 2) elif embedding_agg_method == "concatenate": - # todo: complete this, and transpose output - return + # todo: complete this, pass it through an MLP and transpose output + + return concatenated.permute(1, 0, 2) + + + def _spatial_emb_from_bb(self, bb: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Computes embedding arrays for x,y spatial dimensions using centroids from bounding boxes + Args: + bb: Bounding boxes of shape (n_query, batch_size, 4) from which to compute x,y centroids; + each bounding box is [ymin, xmin, ymax, xmax] + + Returns: + A tuple of tensors containing the emebdding array for x,y dimensions + """ + + centroid_x, centroid_y = bb[:,:,[1,3]].mean(axis=2), bb[:,:,[0,2]].mean(axis=2) + + return + class TransformerDecoder(nn.Module): From 4140524f19b3d3129e1b07624c1135effaa9bbc0 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Tue, 6 Aug 2024 16:49:48 -0700 Subject: [PATCH 41/63] complete encoder section of rope - concatenation method with mlp - complete pre-processing for input to EncoderLayer - fix shape issues in rope/additive_embedding/forward modules in embedding.py --- dreem/models/embedding.py | 75 ++++++++++++++++++++++++++----------- dreem/models/mlp.py | 2 + dreem/models/transformer.py | 56 ++++++++++++++------------- 3 files changed, 84 insertions(+), 49 deletions(-) diff --git a/dreem/models/embedding.py b/dreem/models/embedding.py index 56dbda8..16dd8da 100644 --- a/dreem/models/embedding.py +++ b/dreem/models/embedding.py @@ -36,7 +36,7 @@ def __init__( self, dim: int, max_seq_len: int = 4096, - base: int = 10_000, + base: int = 10000, ) -> None: super().__init__() self.dim = dim @@ -258,9 +258,20 @@ def _transform(self, x, emb): def _apply_rope(self, x, emb): + """ + Applies Rotary Positional Embedding to input queries + + Args: + x: Input queries of shape (batch_size, n_query, embed_dim) + emb: Rotation matrix of shape (batch_size, n_query, 
num_heads, embed_dim // 2, 2) - - # tensor has shape [b, s, n_h, h_d // 2, 2] + Returns: + Tensor of input queries transformed by RoPE + """ + x_out = torch.unsqueeze(x, 2) + # input needs shape [batch_size, n_query, num_heads, embed_dim // 2, 2] + x_out = x_out.float().reshape(*x_out.shape[:-1], -1, 2) + # apply RoPE to each query token x_out = torch.stack( [ x[..., 0] * emb[..., 0] @@ -270,15 +281,24 @@ def _apply_rope(self, x, emb): ], -1, ) - # tensor has shape [b, s, n_h, h_d] + # output has shape [batch_size, n_query, num_heads, embed_dim] x_out = x_out.flatten(3) return x_out def _apply_additive_embeddings(self, x, emb): + """ + Applies additive embeddings to input queries + + Args: + x: Input tensor of shape (batch_size, N, embed_dim) + emb: Embedding array of shape (N, embed_dim) - return x + emb + Returns: + Tensor: Input queries with embeddings added - shape (batch_size, N, embed_dim) + """ + return x + emb.unsqueeze(0) def forward(self, x, seq_positions: torch.Tensor) -> torch.Tensor: @@ -286,25 +306,29 @@ def forward(self, x, seq_positions: torch.Tensor) -> torch.Tensor: Args: seq_positions: - * An (`N`, 1) tensor where seq_positions[i] represents the temporal position of instance_i in the sequence. - * An (`N`, n_anchors x 4) tensor where seq_positions[i, j, :] represents the [y1, x1, y2, x2] spatial locations of jth point of instance_i in the sequence. + * An (N,) tensor where seq_positions[i] represents the temporal position of instance_i in the sequence. + * An (N, n_anchors x 4) tensor where seq_positions[i, j, :] represents the [y1, x1, y2, x2] spatial locations of jth point of instance_i in the sequence. + x: Input data of shape ((batch_size, N, embed_dim)) Returns: - An `N` x `self.features` tensor representing the corresponding spatial or temporal embedding. + - Tensor: input queries transformed by embedding + - An `N` x `self.features` tensor representing the corresponding spatial or temporal embedding. """ - # create embedding array (_emb_func selects appropriate callback based on config input) + # create embedding array; either rotation matrix of shape + # (batch_size, n_query, num_heads, embed_dim // 2, 2), + # or (N, embed_dim) array emb = self._emb_func(seq_positions) # transform the input data with the embedding - x = self._transform(emb, x) - - if emb.shape[-1] != self.features: - raise RuntimeError( - ( - f"Output embedding dimension is {emb.shape[-1]} but requested {self.features} dimensions! \n" - f"hint: Try turning the MLP on by passing `mlp_cfg` to the constructor to project to the correct embedding dimensions." - ) - ) + x = self._transform(x, emb) + + # if emb.shape[-1] != self.features: + # raise RuntimeError( + # ( + # f"Output embedding dimension is {emb.shape[-1]} but requested {self.features} dimensions! \n" + # f"hint: Try turning the MLP on by passing `mlp_cfg` to the constructor to project to the correct embedding dimensions." 
+ # ) + # ) return x, emb def _torch_int_div( @@ -322,12 +346,19 @@ def _torch_int_div( return torch.div(tensor1, tensor2, rounding_mode="floor") - def _rope_embedding(self, x: torch.Tensor, emb_ids: torch.Tensor) -> torch.Tensor: - - # input must be of shape (num_batches, num_instances, num_attn_heads, d_model) + def _rope_embedding(self, x: torch.Tensor) -> torch.Tensor: + """ + Computes the rotation matrix to apply RoPE to input queries + Args: + x: Input queries of shape (num_batches, n_queries, embed_dim) + Returns: + Tensor: (N, embed_dim) rotation matrix + """ + # input must be of shape (num_batches, num_instances, num_attn_heads, embed_dim) # use num_heads=1 for compatibility with torch ROPE x_rope = torch.unsqueeze(x, 2) - rope = RotaryPositionalEmbeddings(self.features) + # RoPE module takes in dimension, num_queries as input to calculate rotation matrix + rope = RotaryPositionalEmbeddings(self.features, x.shape[1]) rot_mat = rope(x_rope) return rot_mat diff --git a/dreem/models/mlp.py b/dreem/models/mlp.py index 872d715..4f09551 100644 --- a/dreem/models/mlp.py +++ b/dreem/models/mlp.py @@ -34,8 +34,10 @@ def __init__( self.layers = torch.nn.ModuleList( [ torch.nn.Linear(n, k) + # list concatenations to ensure layer shape compability for n, k in zip([input_dim] + h, h + [output_dim]) ] + ) if self.dropout > 0.0: self.dropouts = torch.nn.ModuleList( diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index 9e9d18d..c40cc33 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -14,6 +14,7 @@ from dreem.io import AssociationMatrix from dreem.models.attention_head import ATTWeightHead from dreem.models import Embedding +from dreem.models.mlp import MLP from dreem.models.model_utils import get_boxes, get_times from torch import nn import copy @@ -245,7 +246,7 @@ def forward( asso_output = [] for frame_features in decoder_features: - # todo: this needs to handle the 3x queries that come out of the encoder/decoder + # TODO: this needs to handle the 3x queries that come out of the encoder/decoder asso_matrix = self.attn_head(frame_features, encoder_features).view( n_query, total_instances ) @@ -460,13 +461,13 @@ def forward( ref_boxes: torch.Tensor, ref_times: torch.Tensor, embedding_agg_method: str ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """Execute a forward pass of encoder layer. + """Execute a forward pass of encoder layer. Computes and applies embeddings before input to EncoderLayer Args: queries: The input tensor of shape (n_query, batch_size, embed_dim). embedding_map: Dict of Embedding objects defining the pos/temp embeddings to be applied to the input data before it passes to the EncoderLayer - ref_boxes: + ref_boxes: Bounding box based embedding ids of shape (n_query, batch_size, 4) ref_times: embedding_agg_method: @@ -485,52 +486,55 @@ def forward( _, ref_pos_emb = pos_emb(queries, ref_boxes) ref_emb = (ref_pos_emb + ref_temp_emb) / 2 queries = queries + ref_emb - queries = queries.permute(1, 0, 2) # transpose back before input to EncoderLayer else: - # calculate embedding array for x,y from bounding box centroids + # calculate embedding array for x,y from bb centroids; ref_x, ref_y of shape (n_query,) ref_x, ref_y = self._spatial_emb_from_bb(ref_boxes) + # forward pass of Embedding object transforms input queries with embeddings queries_x, ref_pos_emb_x = pos_emb(queries, ref_x) queries_y, ref_pos_emb_y = pos_emb(queries, ref_y) - # concatenate, stack, or average the queries + # concatenate or stack the queries (avg. 
method done above since it applies differently) queries = self.collate_queries( (queries, queries_t, queries_x, queries_y), embedding_agg_method) - - # todo: encoderLayer needs to be made compatible with stack/concatenate; - # need to pass in embedding_agg_method + # transpose for input to EncoderLayer to (n_queries, batch_size, embed_dim) + queries = queries.permute(1, 0, 2) + # pass through EncoderLayer queries = layer(queries) encoder_features = self.norm(queries) return encoder_features, ref_pos_emb, ref_temp_emb - def collate_queries(self, _queries: Tuple[torch.Tensor], embedding_agg_method: str + + def collate_queries(self, queries: Tuple[torch.Tensor], embedding_agg_method: str ) -> torch.Tensor: """ Args: - _queries: 3-tuple of queries (already transformed by embeddings) for x, y, t + _queries: 4-tuple of queries (already transformed by embeddings) for _, x, y, t each of shape (batch_size, n_query, embed_dim) embedding_agg_method: String representing the aggregation method for embeddings - Returns: Tensor of aggregated queries; can be concatenated (increased length of tokens), + Returns: Tensor of aggregated queries of shape; can be concatenated (increased length of tokens), stacked (increased number of tokens), or averaged (original token number and length) """ - queries, queries_t, queries_x, queries_y = _queries + queries_t, queries_x, queries_y = queries + + mlp = MLP(input_dim=queries_t.shape[-1]*3, hidden_dim=queries_t.shape[-1]*2, + output_dim=queries_t.shape[-1], num_layers=1, dropout=0.) - if embedding_agg_method == "average": - return queries - elif embedding_agg_method == "stack": + if embedding_agg_method == "stack": # stacked is of shape (batch_size, 3*n_query, embed_dim) - stacked = torch.cat((queries_t, queries_x, queries_y), dim=1) - # transpose for input to EncoderLayer - return stacked.permute(1, 0, 2) + collated_queries = torch.cat((queries_t, queries_x, queries_y), dim=1) elif embedding_agg_method == "concatenate": - # todo: complete this, pass it through an MLP and transpose output - - return concatenated.permute(1, 0, 2) + # concatenated is of shape (batch_size, n_query, 3*embed_dim) + collated_queries = torch.cat((queries_t, queries_x, queries_y), dim=2) + # pass through MLP to project into space of (batch_size, n_query, embed_dim) + collated_queries = mlp(collated_queries) + + return collated_queries def _spatial_emb_from_bb(self, bb: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: @@ -541,12 +545,10 @@ def _spatial_emb_from_bb(self, bb: torch.Tensor) -> Tuple[torch.Tensor, torch.Te each bounding box is [ymin, xmin, ymax, xmax] Returns: - A tuple of tensors containing the emebdding array for x,y dimensions + A tuple of tensors containing the emebdding array for x,y dimensions, each of shape (n_query,) """ - - centroid_x, centroid_y = bb[:,:,[1,3]].mean(axis=2), bb[:,:,[0,2]].mean(axis=2) - - return + # compute avg of xmin,xmax and ymin,ymax + return bb[:,:,[1,3]].mean(axis=2).squeeze(), bb[:,:,[0,2]].mean(axis=2).squeeze() From a1ca23e6d1813a2a649629ef274e3b7af9c4eafd Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Wed, 7 Aug 2024 11:35:04 -0700 Subject: [PATCH 42/63] setup batch training --- dreem/training/configs/override.yaml | 142 ++++++++++++++++++++ dreem/training/configs/test_batch_train.csv | 4 - dreem/training/demo_batch.csv | 3 + run_batch_job.py | 28 ++-- 4 files changed, 165 insertions(+), 12 deletions(-) create mode 100644 dreem/training/configs/override.yaml delete mode 100644 dreem/training/configs/test_batch_train.csv create mode 100644 
dreem/training/demo_batch.csv diff --git a/dreem/training/configs/override.yaml b/dreem/training/configs/override.yaml new file mode 100644 index 0000000..6d1ccb4 --- /dev/null +++ b/dreem/training/configs/override.yaml @@ -0,0 +1,142 @@ +model: + ckpt_path: null + encoder_cfg: + model_name: "resnet18" + in_chans: 3 + backend: "torchvision" + pretrained: false + d_model: 128 + nhead: 1 + num_encoder_layers: 1 + num_decoder_layers: 1 + dropout: 0.1 + activation: "relu" + return_intermediate_dec: True + norm: False + num_layers_attn_head: 1 + dropout_attn_head: 0.1 + embedding_meta: + pos: + mode: "fixed" + normalize: true + n_points: 1 + temp: + mode: "fixed" + return_embedding: False + decoder_self_attn: True + +loss: + epsilon: 0.0001 + asso_weight: 10.0 + +optimizer: + lr: 0.0001 + weight_decay: 0 + +scheduler: + factor: 0.5 + patience: 5 + threshold: 0.001 + +dataset: + train_dataset: + dir: + # note: if using batch runner, use format: /home/runner/talmodata-smb/... + # if using interactive, use format: "/home/jovyan/talmolab-smb/datasets/..." + path: "/home/runner/talmolab-smb/datasets/mot/animal/sleap/btc/large_run/als/train" + labels_suffix: ".slp" + vid_suffix: ".mp4" + clip_length: 32 + crop_size: 64 + padding: 0 + anchors: "centroid" + augmentations: + Rotate: + limit: 45 + p: 0.3 + GaussianBlur: + blur_limit: [3,7] + sigma_limit: 0 + p: 0.3 + RandomBrightnessContrast: + brightness_limit: 0.1 + contrast_limit: 0.3 + p: 0.3 + MotionBlur: + blur_limit: [3,7] + p: 0.3 + NodeDropout: + p: 0.3 + n: 5 + InstanceDropout: + p: 0.3 + n: 1 + n_chunks: 1000 + handle_missing: "centroid" + + val_dataset: + dir: + # note: if using batch runner, use format: /home/runner/talmodata-smb/... + path: "/home/runner/talmolab-smb/datasets/mot/animal/sleap/btc/large_run/als/val" + labels_suffix: ".slp" + vid_suffix: ".mp4" + crop_size: 64 + padding: 0 + anchors: "centroid" + n_chunks: 300 + handle_missing: "centroid" + + # to not run test, just use empty lists to override the paths in the base.yaml + test_dataset: + slp_files: [] + video_files: [] + +dataloader: + train_dataloader: + num_workers: 0 + val_dataloader: + num_workers: 0 + test_dataloader: + num_workers: 0 + +checkpointing: + save_top_k: -1 + +trainer: + max_epochs: 50 + min_epochs: -1 + # limit_train_batches: 0.001 + # limit_test_batches: 1.0 + # limit_val_batches: 0.004 + # profiler: "advanced" + + +logging: + project: "dreem" + group: "test-batch-job" # experiment/test + entity: "mushaikh" + name: "sample-efficiency" # name of the run (within a group) + notes: "test `dreem-train" + logger_type: "WandbLogger" + +tracker: + window_size: 8 + use_vis_feats: true + overlap_thresh: 0.1 + mult_thresh: true + decay_time: null + iou: null + max_center_dist: null + +runner: + persistent_tracking: + train: false + val: false + test: false + metrics: + train: [] + +# view_batch: +# enable: True +# num_frames: 5 +# no_train: True \ No newline at end of file diff --git a/dreem/training/configs/test_batch_train.csv b/dreem/training/configs/test_batch_train.csv deleted file mode 100644 index a0303c7..0000000 --- a/dreem/training/configs/test_batch_train.csv +++ /dev/null @@ -1,4 +0,0 @@ -model.d_model,model.dim_feedforward,model.feature_dim_attn_head,model.num_encoder_layers,model.num_decoder_layers -256,256,256,1,1 -512,512,512,2,2 -1024,1024,1024,4,4 diff --git a/dreem/training/demo_batch.csv b/dreem/training/demo_batch.csv new file mode 100644 index 0000000..67f08f5 --- /dev/null +++ b/dreem/training/demo_batch.csv @@ -0,0 +1,3 @@ 
+logging.name,dataset.train_dataset.n_chunks +n_chunks:1.1,1.1 +n_chunks:10,10 diff --git a/run_batch_job.py b/run_batch_job.py index 52345da..f5910c9 100644 --- a/run_batch_job.py +++ b/run_batch_job.py @@ -1,21 +1,31 @@ import os import subprocess as sp +import pandas as pd -gpu = "0.1" +# to use this, just run python run_batch_job.py in cmd + +gpu = "0.1" # amount of GPU to use per task job_name = "mustafa-test-batch-job" -base = "/home/runner/talmodata-smb/aadi/biogtr_expts/run/animal/eight_flies" #where to run the job from -dreem_repo = base.replace("biogtr_expts/run/animal/eight_flies", "dreem") #where the dreem repo is stored +base = "/home/runner/talmodata-smb/mustafa/dreem-experiments/run/mice-btc" #where to run the job from +dreem_repo = "/home/runner/talmodata-smb/mustafa/dreem-experiments/src/dreem" #where the dreem repo is stored config_dir=os.path.join(base, "configs") #where to find the configs config_name= "base" #base config name -params_cfg = os.path.join(config_dir, "sample_efficiency.yaml") #override config +params_cfg = os.path.join(config_dir, "override.yaml") #override config + # if running just 1 job, comment this line out and delete the ++batch_config command in the command below -task_csv = os.path.join(config_dir, "sample_efficiency.csv") # csv for tasks - each pod is a task +# each row in this file is a separate run with overrides +# naming method: have the first column as logging.name (wandb logging); this creates the directory ./models/logging.name +task_csv = os.path.join(config_dir, "demo_batch.csv") # csv for tasks - each pod is a task -pods = 1 # total number of tasks for job to run; should be number of rows in csv file -par = 1 # number of tasks that can be run in parallel - max. = # of pods +# number of VMs that are spun up (also the number of tasks that you are running) +# note that the server must be mounted locally as a network location to use this if the csv is on the cluster +pods = len(pd.read_csv(task_csv.replace("/home/runner/talmodata-smb", "/Volumes/talmodata"))) +par = min(int(1/float(gpu)), pods) #number of tasks that can be run in parallel (always smaller than pods) +# enter your WANDB API KEY in the cmd section +# mount both smb and vast volumes cmd = [ "runai", "submit", @@ -27,7 +37,9 @@ "-i", "asheridan/biogtr", "-v", - "/data/talmolab-smb:/home/runner/talmodata-smb", + "/data/talmolab-smb:/home/runner/talmodata-smb", + "-v", + "/talmo:/home/runner/vast" "-e", f"RUNNER_CMD=cp -r {dreem_repo} ~ && mamba env create -n dreem -f ~/dreem/environment.yml && export WANDB_API_KEY=6cc5012a6ecfb9cd970bd07686dbfcefd3190a04 && cd {base} && conda run -n dreem dreem-train --config-dir={config_dir} --config-name={config_name} ++params_config={params_cfg} ++batch_config={task_csv}", "--parallelism", From b5fa58d12af73f551b45d831358bf1dd440a4409 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Wed, 7 Aug 2024 12:08:34 -0700 Subject: [PATCH 43/63] remove batch run commands from repo --- run_batch_job.py => dreem/training/configs/run_batch_job.py | 0 dreem/training/demo_batch.csv | 3 --- 2 files changed, 3 deletions(-) rename run_batch_job.py => dreem/training/configs/run_batch_job.py (100%) delete mode 100644 dreem/training/demo_batch.csv diff --git a/run_batch_job.py b/dreem/training/configs/run_batch_job.py similarity index 100% rename from run_batch_job.py rename to dreem/training/configs/run_batch_job.py diff --git a/dreem/training/demo_batch.csv b/dreem/training/demo_batch.csv deleted file mode 100644 index 67f08f5..0000000 --- 
a/dreem/training/demo_batch.csv +++ /dev/null @@ -1,3 +0,0 @@ -logging.name,dataset.train_dataset.n_chunks -n_chunks:1.1,1.1 -n_chunks:10,10 From c721e90795d0a60e8e75fcdd99eab018ac739d39 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Wed, 7 Aug 2024 12:11:04 -0700 Subject: [PATCH 44/63] Update base.yaml --- dreem/training/configs/base.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/dreem/training/configs/base.yaml b/dreem/training/configs/base.yaml index 6a507cc..a93c21d 100644 --- a/dreem/training/configs/base.yaml +++ b/dreem/training/configs/base.yaml @@ -73,6 +73,7 @@ dataset: crop_size: 128 chunk: true clip_length: 32 + mode: "train" val_dataset: slp_files: ["../../tests/data/sleap/two_flies.slp"] @@ -81,6 +82,7 @@ dataset: crop_size: 128 chunk: True clip_length: 32 + mode: "val" test_dataset: slp_files: ["../../tests/data/sleap/two_flies.slp"] @@ -89,6 +91,7 @@ dataset: crop_size: 128 chunk: True clip_length: 32 + mode: "test" dataloader: train_dataloader: @@ -131,6 +134,7 @@ checkpointing: every_n_epochs: 10 trainer: + # only use mps and devices params for apple silicon runs accelerator: "mps" devices: 1 check_val_every_n_epoch: 1 From 671169746c4b0e692a73ce75274d0324709d5bac Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Wed, 7 Aug 2024 12:10:01 -0700 Subject: [PATCH 45/63] remove batch training script --- dreem/training/configs/base.yaml | 10 ++--- dreem/training/configs/run_batch_job.py | 56 ------------------------- 2 files changed, 5 insertions(+), 61 deletions(-) delete mode 100644 dreem/training/configs/run_batch_job.py diff --git a/dreem/training/configs/base.yaml b/dreem/training/configs/base.yaml index a93c21d..15d2898 100644 --- a/dreem/training/configs/base.yaml +++ b/dreem/training/configs/base.yaml @@ -114,7 +114,7 @@ logging: group: "example" save_dir: './logs' project: "GTR" - log_model: "all" + log_model: null early_stopping: monitor: "val_loss" @@ -131,12 +131,12 @@ checkpointing: save_last: true dirpath: null auto_insert_metric_name: true - every_n_epochs: 10 + every_n_epochs: 1 trainer: - # only use mps and devices params for apple silicon runs - accelerator: "mps" - devices: 1 + # only use this for local apple silicon runs; change for cluster runs + # accelerator: "mps" + # devices: 1 check_val_every_n_epoch: 1 enable_checkpointing: true gradient_clip_val: null diff --git a/dreem/training/configs/run_batch_job.py b/dreem/training/configs/run_batch_job.py deleted file mode 100644 index f5910c9..0000000 --- a/dreem/training/configs/run_batch_job.py +++ /dev/null @@ -1,56 +0,0 @@ -import os -import subprocess as sp -import pandas as pd - -# to use this, just run python run_batch_job.py in cmd - -gpu = "0.1" # amount of GPU to use per task -job_name = "mustafa-test-batch-job" - -base = "/home/runner/talmodata-smb/mustafa/dreem-experiments/run/mice-btc" #where to run the job from -dreem_repo = "/home/runner/talmodata-smb/mustafa/dreem-experiments/src/dreem" #where the dreem repo is stored - -config_dir=os.path.join(base, "configs") #where to find the configs -config_name= "base" #base config name -params_cfg = os.path.join(config_dir, "override.yaml") #override config - -# if running just 1 job, comment this line out and delete the ++batch_config command in the command below -# each row in this file is a separate run with overrides -# naming method: have the first column as logging.name (wandb logging); this creates the directory ./models/logging.name -task_csv = os.path.join(config_dir, "demo_batch.csv") # csv for tasks - each pod is a task - -# number of 
VMs that are spun up (also the number of tasks that you are running) -# note that the server must be mounted locally as a network location to use this if the csv is on the cluster -pods = len(pd.read_csv(task_csv.replace("/home/runner/talmodata-smb", "/Volumes/talmodata"))) -par = min(int(1/float(gpu)), pods) #number of tasks that can be run in parallel (always smaller than pods) - -# enter your WANDB API KEY in the cmd section -# mount both smb and vast volumes -cmd = [ - "runai", - "submit", - "--gpu", - gpu, - "--name", - job_name, - "--preemptible", - "-i", - "asheridan/biogtr", - "-v", - "/data/talmolab-smb:/home/runner/talmodata-smb", - "-v", - "/talmo:/home/runner/vast" - "-e", - f"RUNNER_CMD=cp -r {dreem_repo} ~ && mamba env create -n dreem -f ~/dreem/environment.yml && export WANDB_API_KEY=6cc5012a6ecfb9cd970bd07686dbfcefd3190a04 && cd {base} && conda run -n dreem dreem-train --config-dir={config_dir} --config-name={config_name} ++params_config={params_cfg} ++batch_config={task_csv}", - "--parallelism", - str(par), - "--completions", - str(pods), -] - -print(f"base directory: {base}") -print(f"running with {pods} pods") -print(f"max pods that can run concurrently: {par}") -print(f"runner command: {cmd}") - -sp.run(cmd) \ No newline at end of file From 20fd4a717bff5ec31596fc986bcdb72918bb7d71 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Wed, 7 Aug 2024 12:11:44 -0700 Subject: [PATCH 46/63] Update run_trainer.py --- run_trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/run_trainer.py b/run_trainer.py index 5b129ab..c538cc3 100644 --- a/run_trainer.py +++ b/run_trainer.py @@ -4,7 +4,7 @@ os.chdir("./dreem/training") base_config = "./configs/base.yaml" -# params_config = "/path/to/override.yaml" +# params_config = "./configs/override.yaml" cfg = OmegaConf.load(base_config) # cfg["params_config"] = params_config From 9ac41a875a74b0ec9bc3c92b758075b3ff05910f Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Wed, 7 Aug 2024 12:11:56 -0700 Subject: [PATCH 47/63] Update .gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index fb6ee36..4e1fa69 100644 --- a/.gitignore +++ b/.gitignore @@ -142,3 +142,4 @@ dreem/training/models/* # docs site/ +*.xml From c43ee75ba1d560fdc7fa523998d9fee891014923 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Wed, 7 Aug 2024 14:43:36 -0700 Subject: [PATCH 48/63] comments for tracker.py --- dreem/inference/tracker.py | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/dreem/inference/tracker.py b/dreem/inference/tracker.py index f7c29b4..8426e84 100644 --- a/dreem/inference/tracker.py +++ b/dreem/inference/tracker.py @@ -138,8 +138,10 @@ def track( # asso_preds, pred_boxes, pred_time, embeddings = self.model( # instances, reid_features # ) + # get reference and query instances from TrackQueue and calls _run_global_tracker() instances_pred = self.sliding_inference(model, frames) + # e.g. during train/val, don't track across batches so persistent_tracking is switched off if not self.persistent_tracking: logger.debug(f"Clearing Queue after tracking") self.track_queue.end_tracks() @@ -164,7 +166,9 @@ def sliding_inference( # H: height. # W: width. 
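As a usage-level companion to the comments being added to tracker.py here, running inference with persistent tracking looks roughly like the sketch below; the import path, constructor arguments, and the `clips` iterable are assumptions for illustration, not the exact API:

from dreem.inference.tracker import Tracker
from dreem.models.gtr_runner import GTRRunner  # hypothetical import path

gtr = GTRRunner()  # assumption: default construction, as in the dev notebook
tracker = Tracker(window_size=8, persistent_tracking=True)  # assumption: kwargs mirror the tracker config

clips = []  # fill with lists of untracked Frame objects, e.g. batches from a SleapDataset
for frames in clips:
    # track() runs sliding_inference() and fills instance.pred_track_id in place;
    # with persistent_tracking=True the track queue is kept across clips,
    # otherwise it is cleared after every call via end_tracks()
    frames = tracker.track(gtr.model, frames)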
+ # frames is untracked clip for inference for batch_idx, frame_to_track in enumerate(frames): + # tracked_frames is a list of reference frames that have been tracked (associated) tracked_frames = self.track_queue.collate_tracks( device=frame_to_track.frame_id.device ) @@ -188,10 +192,11 @@ def sliding_inference( ) curr_track_id = 0 + # if track ids exist from another tracking program i.e. sleap, init with those for i, instance in enumerate(frames[batch_idx].instances): instance.pred_track_id = instance.gt_track_id curr_track_id = max(curr_track_id, instance.pred_track_id) - + # if no track ids, then assign new ones for i, instance in enumerate(frames[batch_idx].instances): if instance.pred_track_id == -1: curr_track += 1 @@ -201,6 +206,7 @@ def sliding_inference( if ( frame_to_track.has_instances() ): # Check if there are detections. If there are skip and increment gap count + # combine the tracked frames with the latest frame; inference pipeline uses latest frame as pred frames_to_track = tracked_frames + [ frame_to_track ] # better var name? @@ -217,7 +223,7 @@ def sliding_inference( self.track_queue.add_frame(frame_to_track) else: self.track_queue.increment_gaps([]) - + # update the frame object from the input inference untracked clip frames[batch_idx] = frame_to_track return frames @@ -252,7 +258,7 @@ def _run_global_tracker( # E.g.: instances_per_frame: [4, 5, 6, 7]; window of length 4 with 4 detected instances in the first frame of the window. _ = model.eval() - + # get the last frame in the clip to perform inference on query_frame = frames[query_ind] query_instances = query_frame.instances @@ -279,8 +285,10 @@ def _run_global_tracker( # (L=1, n_query, total_instances) with torch.no_grad(): + # GTR knows this is for inference since query_instances is not None asso_matrix = model(all_instances, query_instances) + # GTR output is n_query x n_instances - split this into per-frame to softmax each frame separately asso_output = asso_matrix[-1].matrix.split( instances_per_frame, dim=1 ) # (window_size, n_query, N_i) @@ -296,7 +304,7 @@ def _run_global_tracker( asso_output_df.index.name = "Instances" asso_output_df.columns.name = "Instances" - + # save the association matrix to the Frame object query_frame.add_traj_score("asso_output", asso_output_df) query_frame.asso_output = asso_matrix[-1] @@ -374,7 +382,7 @@ def _run_global_tracker( query_frame.add_traj_score("decay_time", decay_time_traj_score) ################################################################################ - + # reduce association matrix - aggregating reference instance association scores by tracks # (n_query x n_nonquery) x (n_nonquery x n_traj) --> n_query x n_traj traj_score = torch.mm(traj_score, id_inds.cpu()) # (n_query, n_traj) @@ -387,6 +395,7 @@ def _run_global_tracker( query_frame.add_traj_score("traj_score", traj_score_df) ################################################################################ + # IOU-based post-processing; add a weighted IOU across successive frames to association scores # with iou -> combining with location in tracker, they set to True # todo -> should also work without pos_embed @@ -421,6 +430,7 @@ def _run_global_tracker( query_frame.add_traj_score("weight_iou", iou_traj_score) ################################################################################ + # filters association matrix such that instances too far from each other get scores=0 # threshold for continuing a tracking or starting a new track -> they use 1.0 # todo -> should also work without pos_embed @@ -439,6 
+449,7 @@ def _run_global_tracker( query_frame.add_traj_score("max_center_dist", max_center_dist_traj_score) ################################################################################ + # softmax along tracks for each instance, for interpretability scaled_traj_score = torch.softmax(traj_score, dim=1) scaled_traj_score_df = pd.DataFrame( scaled_traj_score.numpy(), columns=unique_ids.cpu().numpy() @@ -449,8 +460,10 @@ def _run_global_tracker( query_frame.add_traj_score("scaled", scaled_traj_score_df) ################################################################################ + # hungarian matching match_i, match_j = linear_sum_assignment((-traj_score)) + track_ids = instance_ids.new_full((n_query,), -1) for i, j in zip(match_i, match_j): # The overlap threshold is multiplied by the number of times the unique track j is matched to an @@ -462,6 +475,7 @@ def _run_global_tracker( thresh = ( overlap_thresh * id_inds[:, j].sum() if mult_thresh else overlap_thresh ) + # if the association score for a query instance is lower than the threshold, create a new track for it if n_traj >= self.max_tracks or traj_score[i, j] > thresh: logger.debug( f"Assigning instance {i} to track {j} with id {unique_ids[j]}" From fe1eecab09f95eb2aac7d91363889d44e926be1c Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Wed, 7 Aug 2024 18:48:03 -0700 Subject: [PATCH 49/63] embedding bug fixes for encoder - bounding box embedding only for method "average" - modify emb_funcs routing - temporarily remove support for adding embeddings into instance objects - need to make compatible with x,y,t embeddings - remove config yamls from updates - current versions serve as templates - runs through to end of encoder forward pass --- .gitignore | 4 ++++ dreem/models/embedding.py | 42 ++++++++++++++++++++++++++++++++++--- dreem/models/transformer.py | 37 +++++++++++++++++++------------- 3 files changed, 65 insertions(+), 18 deletions(-) diff --git a/.gitignore b/.gitignore index 4e1fa69..3af1399 100644 --- a/.gitignore +++ b/.gitignore @@ -143,3 +143,7 @@ dreem/training/models/* # docs site/ *.xml +dreem/training/configs/base.yaml +dreem/training/configs/override.yaml +dreem/training/configs/override.yaml +dreem/training/configs/base.yaml diff --git a/dreem/models/embedding.py b/dreem/models/embedding.py index 16dd8da..0c68a24 100644 --- a/dreem/models/embedding.py +++ b/dreem/models/embedding.py @@ -146,6 +146,7 @@ def __init__( normalize: bool = False, scale: float | None = None, mlp_cfg: dict | None = None, + embedding_agg_method: str = "average" ): """Initialize embeddings. @@ -164,12 +165,14 @@ def __init__( mlp_cfg: A dictionary of mlp hyperparameters for projecting embedding to correct space. 
Example: {"hidden_dims": 256, "num_layers":3, "dropout": 0.3} """ + self._check_init_args(emb_type, mode) super().__init__() self.emb_type = emb_type self.mode = mode + self.embedding_agg_method = embedding_agg_method self.features = features self.emb_num = emb_num self.over_boxes = over_boxes @@ -216,12 +219,15 @@ def __init__( elif self.mode == "fixed": if self.emb_type == "pos": - self._emb_func = self._sine_box_embedding + if self.embedding_agg_method == "average": + self._emb_func = self._sine_box_embedding + else: + self._emb_func = self._sine_pos_embedding elif self.emb_type == "temp": self._emb_func = self._sine_temp_embedding elif self.mode == "rope": - # TODO: pos/temp uses the same processing but takes the input differently + # pos/temp embeddings processed the same way with different embedding array inputs self._emb_func = self._rope_embedding @@ -363,7 +369,37 @@ def _rope_embedding(self, x: torch.Tensor) -> torch.Tensor: return rot_mat - + + def _sine_pos_embedding(self, centroids: torch.Tensor) -> torch.Tensor: + """Compute fixed sine temporal embeddings per dimension (x,y) + + Args: + centroids: the input centroids for either the x,y dimension represented + by fraction of distance of original image that the instance centroid lies at; + of shape (N,) or (N,1) where N = # of query tokens (i.e. instances) + values between [0,1] + + Returns: + an n_instances x D embedding representing the temporal embedding. + """ + d = self.features + n = self.temperature + + positions = centroids.unsqueeze(1) + temp_lookup = torch.zeros(len(centroids), d, device=centroids.device) + + denominators = torch.pow( + n, 2 * torch.arange(0, d // 2, device=centroids.device) / d + ) # 10000^(2i/d_model), i is the index of embedding + temp_lookup[:, 0::2] = torch.sin( + positions / denominators + ) # sin(pos/10000^(2i/d_model)) + temp_lookup[:, 1::2] = torch.cos( + positions / denominators + ) # cos(pos/10000^(2i/d_model)) + + return temp_lookup # .view(len(times), self.features) + def _sine_box_embedding(self, boxes: torch.Tensor) -> torch.Tensor: """Compute sine positional embeddings for boxes using given parameters. 
diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index c40cc33..6ff0eee 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -85,13 +85,17 @@ def __init__( pos_emb_cfg = self.embedding_meta["pos"] if pos_emb_cfg: self.pos_emb = Embedding( - emb_type="pos", features=self.d_model, **pos_emb_cfg - ) + emb_type="pos", features=self.d_model, + embedding_agg_method=self.embedding_meta["embedding_agg_method"], + **pos_emb_cfg + ) # agg method must be the same for pos and temp embeddings if "temp" in self.embedding_meta: temp_emb_cfg = self.embedding_meta["temp"] if temp_emb_cfg: self.temp_emb = Embedding( - emb_type="temp", features=self.d_model, **temp_emb_cfg + emb_type="temp", features=self.d_model, + embedding_agg_method=self.embedding_meta["embedding_agg_method"], + **temp_emb_cfg ) # Transformer Encoder @@ -178,7 +182,8 @@ def forward( encoder_queries = ref_features - encoder_features, ref_pos_emb, ref_temp_emb = self.encoder( + # (encoder_features, ref_pos_emb, ref_temp_emb) \ + encoder_features = self.encoder( encoder_queries, embedding_map={"pos": self.pos_emb, "temp": self.temp_emb}, ref_boxes=ref_boxes, @@ -187,10 +192,11 @@ def forward( ) # (total_instances, batch_size, embed_dim) # TODO: check if instance.add_embedding() supports rotation matrices - if self.return_embedding: - for i, instance in enumerate(ref_instances): - instance.add_embedding("pos", ref_pos_emb[i]) - instance.add_embedding("temp", ref_temp_emb[i]) + # TODO: include support for adding x,y,t embeddings to the instance + # if self.return_embedding: + # for i, instance in enumerate(ref_instances): + # instance.add_embedding("pos", ref_pos_emb[i]) + # instance.add_embedding("temp", ref_temp_emb[i]) # -------------- Begin decoder pre-processing --------------- # @@ -225,10 +231,11 @@ def forward( else: query_instances = ref_instances - if self.return_embedding: - for i, instance in enumerate(query_instances): - instance.add_embedding("pos", query_pos_emb[i]) - instance.add_embedding("temp", query_temp_emb[i]) + # TODO: include support for x,y,t embeddings and uncomment this + # if self.return_embedding: + # for i, instance in enumerate(query_instances): + # instance.add_embedding("pos", query_pos_emb[i]) + # instance.add_embedding("temp", query_temp_emb[i]) decoder_features = self.decoder( query_features, @@ -481,7 +488,7 @@ def forward( queries = queries.permute(1,0,2) # queries is shape (batch_size, n_query, embed_dim) # calculate temporal embeddings and transform queries queries_t, ref_temp_emb = temp_emb(queries, ref_times) - # if avg. of temp and pos, need bounding boxes + # if avg. of temp and pos, need bounding boxes; bb only used for method "average" if embedding_agg_method == "average": _, ref_pos_emb = pos_emb(queries, ref_boxes) ref_emb = (ref_pos_emb + ref_temp_emb) / 2 @@ -495,7 +502,7 @@ def forward( # concatenate or stack the queries (avg. 
method done above since it applies differently) queries = self.collate_queries( - (queries, queries_t, queries_x, queries_y), + (queries_t, queries_x, queries_y), embedding_agg_method) # transpose for input to EncoderLayer to (n_queries, batch_size, embed_dim) queries = queries.permute(1, 0, 2) @@ -504,7 +511,7 @@ def forward( encoder_features = self.norm(queries) - return encoder_features, ref_pos_emb, ref_temp_emb + return encoder_features# , ref_pos_emb, ref_temp_emb def collate_queries(self, queries: Tuple[torch.Tensor], embedding_agg_method: str From 2da8c09891e54114eecc5bfd8c18987d020d1cca Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Thu, 8 Aug 2024 17:58:50 -0700 Subject: [PATCH 50/63] implement rope for decoder - implement embeddings for decoder + refactor - add 1x1 conv to final attn head to deal with stacked embeddings (3x tokens) and create channels for each dim - bug fix in rope rotation matrix product with input data --- dreem/models/attention_head.py | 21 ++- dreem/models/embedding.py | 14 +- dreem/models/transformer.py | 257 ++++++++++++++++----------------- run_trainer.py | 3 +- tests/test_models.py | 3 +- 5 files changed, 155 insertions(+), 143 deletions(-) diff --git a/dreem/models/attention_head.py b/dreem/models/attention_head.py index 2b16055..3dde1f5 100644 --- a/dreem/models/attention_head.py +++ b/dreem/models/attention_head.py @@ -14,6 +14,7 @@ def __init__( feature_dim: int, num_layers: int, dropout: float, + embedding_agg_method: str ): """Initialize an instance of ATTWeightHead. @@ -21,11 +22,20 @@ def __init__( feature_dim: The dimensionality of input features. num_layers: The number of hidden layers in the MLP. dropout: Dropout probability. + embedding_agg_method: how the embeddings are aggregated; average/stack/concatenate """ super().__init__() - - self.q_proj = MLP(feature_dim, feature_dim, feature_dim, num_layers, dropout) - self.k_proj = MLP(feature_dim, feature_dim, feature_dim, num_layers, dropout) + self.embedding_agg_method = embedding_agg_method + + # if using stacked embeddings, use 1x1 conv with x,y,t embeddings as channels + if self.embedding_agg_method == "stack": + self.conv_1x1 = torch.nn.Conv2d(in_channels=3,out_channels=1, + kernel_size=1,stride=1,padding=0) + self.q_proj = self.conv_1x1 + self.k_proj = self.conv_1x1 + else: + self.q_proj = MLP(feature_dim, feature_dim, feature_dim, num_layers, dropout) + self.k_proj = MLP(feature_dim, feature_dim, feature_dim, num_layers, dropout) def forward( self, @@ -41,6 +51,11 @@ def forward( Returns: Output tensor of shape (batch_size, num_frame_instances, num_window_instances). 
""" + # if stacked embeddings, create channels for each x,y,t embedding dimension + if self.embedding_agg_method == "stack": + key = + query = + k = self.k_proj(key) q = self.q_proj(query) attn_weights = torch.bmm(q, k.transpose(1, 2)) diff --git a/dreem/models/embedding.py b/dreem/models/embedding.py index 0c68a24..a21d5ab 100644 --- a/dreem/models/embedding.py +++ b/dreem/models/embedding.py @@ -274,16 +274,16 @@ def _apply_rope(self, x, emb): Returns: Tensor of input queries transformed by RoPE """ - x_out = torch.unsqueeze(x, 2) + xout = torch.unsqueeze(x, 2) # input needs shape [batch_size, n_query, num_heads, embed_dim // 2, 2] - x_out = x_out.float().reshape(*x_out.shape[:-1], -1, 2) + xout = xout.float().reshape(*xout.shape[:-1], -1, 2) # apply RoPE to each query token - x_out = torch.stack( + xout = torch.stack( [ - x[..., 0] * emb[..., 0] - - x[..., 1] * emb[..., 1], - x[..., 1] * emb[..., 0] - + x[..., 0] * emb[..., 1], + xout[..., 0] * emb[..., 0] + - xout[..., 1] * emb[..., 1], + xout[..., 1] * emb[..., 0] + + xout[..., 0] * emb[..., 1], ], -1, ) diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index 6ff0eee..33c904b 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -184,12 +184,11 @@ def forward( # (encoder_features, ref_pos_emb, ref_temp_emb) \ encoder_features = self.encoder( - encoder_queries, - embedding_map={"pos": self.pos_emb, "temp": self.temp_emb}, - ref_boxes=ref_boxes, - ref_times=ref_times, + encoder_queries, embedding_map={"pos": self.pos_emb, "temp": self.temp_emb}, + boxes=ref_boxes, times=ref_times, embedding_agg_method=self.embedding_meta["embedding_agg_method"] - ) # (total_instances, batch_size, embed_dim) + ) # (total_instances, batch_size, embed_dim) or + # (3*total_instances,batch_size,embed_dim) if using stacked embeddings # TODO: check if instance.add_embedding() supports rotation matrices # TODO: include support for adding x,y,t embeddings to the instance @@ -198,18 +197,11 @@ def forward( # instance.add_embedding("pos", ref_pos_emb[i]) # instance.add_embedding("temp", ref_temp_emb[i]) - # -------------- Begin decoder pre-processing --------------- # - - n_query = total_instances - - query_features = ref_features - query_pos_emb = ref_pos_emb - query_temp_emb = ref_temp_emb - query_emb = ref_emb + # -------------- Begin decoder --------------- # + # for inference, query_instances is not None if query_instances is not None: n_query = len(query_instances) - query_features = torch.cat( [instance.features for instance in query_instances], dim=0 ).unsqueeze(0) @@ -218,18 +210,15 @@ def forward( 1, 0, 2 ) # (n_query, batch_size, embed_dim) + # just get boxes, we already have query_times from above query_boxes = get_boxes(query_instances) query_boxes = torch.nan_to_num(query_boxes, -1.0) - query_temp_emb = self.temp_emb(query_times) - - query_pos_emb = self.pos_emb(query_boxes) - - query_emb = (query_pos_emb + query_temp_emb) / 2.0 - query_emb = query_emb.view(1, n_query, embed_dim) - query_emb = query_emb.permute(1, 0, 2) # (n_query, batch_size, embed_dim) - - else: + else: # for training, query_instances is None so just pass in the ref data + n_query = total_instances query_instances = ref_instances + query_features = ref_features + query_boxes = ref_boxes + query_times = ref_times # TODO: include support for x,y,t embeddings and uncomment this # if self.return_embedding: @@ -238,25 +227,28 @@ def forward( # instance.add_embedding("temp", query_temp_emb[i]) decoder_features = self.decoder( - query_features, 
- encoder_features, - ref_pos_emb=ref_emb, - query_pos_emb=query_emb, + query_features, encoder_features, + embedding_map={"pos": self.pos_emb, "temp": self.temp_emb}, + boxes=query_boxes, times=query_times, + embedding_agg_method=self.embedding_meta["embedding_agg_method"] ) # (L, n_query, batch_size, embed_dim) + decoder_features = decoder_features.transpose( 1, 2 - ) # # (L, batch_size, n_query, embed_dim) - encoder_features = encoder_features.permute(1, 0, 2).view( - batch_size, total_instances, embed_dim - ) # (batch_size, total_instances, embed_dim) + ) # # (L, batch_size, n_query, embed_dim) or ((L, batch_size, 3*n_query, embed_dim)) if using stacked embeddings + encoder_features = encoder_features.permute(1, 0, 2) + # (batch_size, total_instances, embed_dim) or (batch_size, 3*total_instances, embed_dim) asso_output = [] for frame_features in decoder_features: - # TODO: this needs to handle the 3x queries that come out of the encoder/decoder + # TODO: attn_head handles the 3x queries that can come out of the encoder/decoder if using stacked embeddings; + # does this by altering the MLP dimensions prior to attention outer product + # n_query should be the number of instances in the last frame if running inference, + # or number of ref instances for training. total_instances is always the number of reference instances asso_matrix = self.attn_head(frame_features, encoder_features).view( n_query, total_instances - ) + ) # call to view() just removes the batch dimension; output of attn_head is (1,n_query,total_instances) asso_matrix = AssociationMatrix(asso_matrix, ref_instances, query_instances) asso_output.append(asso_matrix) @@ -313,13 +305,6 @@ def forward( Returns: The output tensor of shape (n_query, batch_size, embed_dim). """ - # TODO: delete this section; keep to check that pos_emb None is taken care of automatically by config -# if pos_emb is None: -# pos_emb = torch.zeros_like(queries) - -# queries = queries + pos_emb - - # q = k = src attn_features = self.self_attn( query=queries, @@ -386,8 +371,6 @@ def forward( self, decoder_queries: torch.Tensor, encoder_features: torch.Tensor, - ref_pos_emb: torch.Tensor | None = None, - query_pos_emb: torch.Tensor | None = None, ) -> torch.Tensor: """Execute forward pass of decoder layer. @@ -395,19 +378,10 @@ def forward( decoder_queries: Target sequence for decoder to generate (n_query, batch_size, embed_dim). encoder_features: Output from encoder, that decoder uses to attend to relevant parts of input sequence (total_instances, batch_size, embed_dim) - ref_pos_emb: The input positional embedding tensor of shape (n_query, embed_dim). - query_pos_emb: The target positional embedding of shape (n_query, embed_dim) Returns: The output tensor of shape (n_query, batch_size, embed_dim). 
""" - if query_pos_emb is None: - query_pos_emb = torch.zeros_like(decoder_queries) - if ref_pos_emb is None: - ref_pos_emb = torch.zeros_like(encoder_features) - - decoder_queries = decoder_queries + query_pos_emb - encoder_features = encoder_features + ref_pos_emb if self.decoder_self_attn: self_attn_features = self.self_attn( @@ -416,6 +390,7 @@ def forward( decoder_queries = decoder_queries + self.dropout1(self_attn_features) decoder_queries = self.norm1(decoder_queries) + # cross attention x_attn_features = self.multihead_attn( query=decoder_queries, # (n_query, batch_size, embed_dim) key=encoder_features, # (total_instances, batch_size, embed_dim) @@ -465,7 +440,7 @@ def __init__( def forward( self, queries: torch.Tensor, embedding_map: Dict[str, Embedding], - ref_boxes: torch.Tensor, ref_times: torch.Tensor, + boxes: torch.Tensor, times: torch.Tensor, embedding_agg_method: str ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """Execute a forward pass of encoder layer. Computes and applies embeddings before input to EncoderLayer @@ -474,8 +449,8 @@ def forward( queries: The input tensor of shape (n_query, batch_size, embed_dim). embedding_map: Dict of Embedding objects defining the pos/temp embeddings to be applied to the input data before it passes to the EncoderLayer - ref_boxes: Bounding box based embedding ids of shape (n_query, batch_size, 4) - ref_times: + boxes: Bounding box based embedding ids of shape (n_query, batch_size, 4) + times: embedding_agg_method: Returns: @@ -483,29 +458,8 @@ def forward( """ for layer in self.layers: - pos_emb, temp_emb = embedding_map["pos"], embedding_map["temp"] - # queries is of shape (n_query, batch_size, embed_dim); transpose for embeddings - queries = queries.permute(1,0,2) # queries is shape (batch_size, n_query, embed_dim) - # calculate temporal embeddings and transform queries - queries_t, ref_temp_emb = temp_emb(queries, ref_times) - # if avg. of temp and pos, need bounding boxes; bb only used for method "average" - if embedding_agg_method == "average": - _, ref_pos_emb = pos_emb(queries, ref_boxes) - ref_emb = (ref_pos_emb + ref_temp_emb) / 2 - queries = queries + ref_emb - else: - # calculate embedding array for x,y from bb centroids; ref_x, ref_y of shape (n_query,) - ref_x, ref_y = self._spatial_emb_from_bb(ref_boxes) - # forward pass of Embedding object transforms input queries with embeddings - queries_x, ref_pos_emb_x = pos_emb(queries, ref_x) - queries_y, ref_pos_emb_y = pos_emb(queries, ref_y) - - # concatenate or stack the queries (avg. 
method done above since it applies differently) - queries = self.collate_queries( - (queries_t, queries_x, queries_y), - embedding_agg_method) - # transpose for input to EncoderLayer to (n_queries, batch_size, embed_dim) - queries = queries.permute(1, 0, 2) + # compute embeddings and apply to the input queries + queries = apply_embeddings(queries, embedding_map, boxes, times, embedding_agg_method) # pass through EncoderLayer queries = layer(queries) @@ -514,51 +468,6 @@ def forward( return encoder_features# , ref_pos_emb, ref_temp_emb - def collate_queries(self, queries: Tuple[torch.Tensor], embedding_agg_method: str - ) -> torch.Tensor: - """ - - Args: - _queries: 4-tuple of queries (already transformed by embeddings) for _, x, y, t - each of shape (batch_size, n_query, embed_dim) - embedding_agg_method: String representing the aggregation method for embeddings - - Returns: Tensor of aggregated queries of shape; can be concatenated (increased length of tokens), - stacked (increased number of tokens), or averaged (original token number and length) - """ - - queries_t, queries_x, queries_y = queries - - mlp = MLP(input_dim=queries_t.shape[-1]*3, hidden_dim=queries_t.shape[-1]*2, - output_dim=queries_t.shape[-1], num_layers=1, dropout=0.) - - if embedding_agg_method == "stack": - # stacked is of shape (batch_size, 3*n_query, embed_dim) - collated_queries = torch.cat((queries_t, queries_x, queries_y), dim=1) - elif embedding_agg_method == "concatenate": - # concatenated is of shape (batch_size, n_query, 3*embed_dim) - collated_queries = torch.cat((queries_t, queries_x, queries_y), dim=2) - # pass through MLP to project into space of (batch_size, n_query, embed_dim) - collated_queries = mlp(collated_queries) - - return collated_queries - - - def _spatial_emb_from_bb(self, bb: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Computes embedding arrays for x,y spatial dimensions using centroids from bounding boxes - Args: - bb: Bounding boxes of shape (n_query, batch_size, 4) from which to compute x,y centroids; - each bounding box is [ymin, xmin, ymax, xmax] - - Returns: - A tuple of tensors containing the emebdding array for x,y dimensions, each of shape (n_query,) - """ - # compute avg of xmin,xmax and ymin,ymax - return bb[:,:,[1,3]].mean(axis=2).squeeze(), bb[:,:,[0,2]].mean(axis=2).squeeze() - - - class TransformerDecoder(nn.Module): """Transformer Decoder Block composed of Transformer Decoder Layers.""" @@ -587,8 +496,9 @@ def forward( self, decoder_queries: torch.Tensor, encoder_features: torch.Tensor, - ref_pos_emb: torch.Tensor | None = None, - query_pos_emb: torch.Tensor | None = None, + embedding_map: Dict[str, Embedding], + boxes: torch.Tensor, times: torch.Tensor, + embedding_agg_method: str ) -> torch.Tensor: """Execute a forward pass of the decoder block. @@ -596,22 +506,28 @@ def forward( decoder_queries: Query sequence for decoder to generate (n_query, batch_size, embed_dim). encoder_features: Output from encoder, that decoder uses to attend to relevant parts of input sequence (total_instances, batch_size, embed_dim) - ref_pos_emb: The input positional embedding tensor of shape (total_instances, batch_size, embed_dim). - query_pos_emb: The query positional embedding of shape (n_query, batch_size, embed_dim) + Returns: The output tensor of shape (L, n_query, batch_size, embed_dim). 
""" decoder_features = decoder_queries - intermediate = [] + # since the encoder output doesn't change for any number of decoder layer inputs, + # we can process its embedding outside the loop + if embedding_agg_method == "average": + encoder_features = apply_embeddings(encoder_features, embedding_map, + boxes, times, embedding_agg_method) + # TODO: ^ should embeddings really be applied to encoder output again before cross attention? + # switched off for stack and concatenate methods as those further split the tokens. Kept for "average" + # for backward compatibility + for layer in self.layers: + decoder_features = apply_embeddings(decoder_features, embedding_map, + boxes, times, embedding_agg_method) decoder_features = layer( - decoder_features, - encoder_features, - ref_pos_emb=ref_pos_emb, - query_pos_emb=query_pos_emb, + decoder_features, encoder_features ) if self.return_intermediate: intermediate.append(self.norm(decoder_features)) @@ -626,6 +542,40 @@ def forward( return decoder_features.unsqueeze(0) +def apply_embeddings(queries: torch.Tensor, embedding_map: Dict[str, Embedding], + boxes: torch.Tensor, times: torch.Tensor, + embedding_agg_method: str): + """ + Enter docstring here + """ + + pos_emb, temp_emb = embedding_map["pos"], embedding_map["temp"] + # queries is of shape (n_query, batch_size, embed_dim); transpose for embeddings + queries = queries.permute(1,0,2) # queries is shape (batch_size, n_query, embed_dim) + # calculate temporal embeddings and transform queries + queries_t, ref_temp_emb = temp_emb(queries, times) + # if avg. of temp and pos, need bounding boxes; bb only used for method "average" + if embedding_agg_method == "average": + _, ref_pos_emb = pos_emb(queries, boxes) + ref_emb = (ref_pos_emb + ref_temp_emb) / 2 + queries = queries + ref_emb + else: + # calculate embedding array for x,y from bb centroids; ref_x, ref_y of shape (n_query,) + ref_x, ref_y = spatial_emb_from_bb(boxes) + # forward pass of Embedding object transforms input queries with embeddings + queries_x, ref_pos_emb_x = pos_emb(queries, ref_x) + queries_y, ref_pos_emb_y = pos_emb(queries, ref_y) + + # concatenate or stack the queries (avg. method done above since it applies differently) + queries = collate_queries( + (queries_t, queries_x, queries_y), + embedding_agg_method) + # transpose for input to EncoderLayer to (n_queries, batch_size, embed_dim) + queries = queries.permute(1, 0, 2) + + return queries + + def _get_clones(module: nn.Module, N: int) -> nn.ModuleList: """Generate repeated clones of same layer type. @@ -656,3 +606,48 @@ def _get_activation_fn(activation: str) -> callable: return F.glu raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.") + +def collate_queries(queries: Tuple[torch.Tensor], embedding_agg_method: str + ) -> torch.Tensor: + """ + + Args: + _queries: 4-tuple of queries (already transformed by embeddings) for _, x, y, t + each of shape (batch_size, n_query, embed_dim) + embedding_agg_method: String representing the aggregation method for embeddings + + Returns: Tensor of aggregated queries of shape; can be concatenated (increased length of tokens), + stacked (increased number of tokens), or averaged (original token number and length) + """ + + queries_t, queries_x, queries_y = queries + + mlp = MLP(input_dim=queries_t.shape[-1]*3, hidden_dim=queries_t.shape[-1]*2, + output_dim=queries_t.shape[-1], num_layers=1, dropout=0.) 
+ + if embedding_agg_method == "stack": + # TODO: try changing order of stacking so that order is by query token (x1,y1,t1),(x2,y2,t2) rather than + # (t1,t2,t3...),(x1,x2,x3...),(y1,y2,y3...) + # stacked is of shape (batch_size, 3*n_query, embed_dim) + collated_queries = torch.cat((queries_t, queries_x, queries_y), dim=1) + elif embedding_agg_method == "concatenate": + # concatenated is of shape (batch_size, n_query, 3*embed_dim) + collated_queries = torch.cat((queries_t, queries_x, queries_y), dim=2) + # pass through MLP to project into space of (batch_size, n_query, embed_dim) + collated_queries = mlp(collated_queries) + + return collated_queries + + +def spatial_emb_from_bb(bb: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Computes embedding arrays for x,y spatial dimensions using centroids from bounding boxes + Args: + bb: Bounding boxes of shape (n_query, batch_size, 4) from which to compute x,y centroids; + each bounding box is [ymin, xmin, ymax, xmax] + + Returns: + A tuple of tensors containing the emebdding array for x,y dimensions, each of shape (n_query,) + """ + # compute avg of xmin,xmax and ymin,ymax + return bb[:,:,[1,3]].mean(axis=2).squeeze(), bb[:,:,[0,2]].mean(axis=2).squeeze() \ No newline at end of file diff --git a/run_trainer.py b/run_trainer.py index c538cc3..684a727 100644 --- a/run_trainer.py +++ b/run_trainer.py @@ -2,7 +2,8 @@ from omegaconf import OmegaConf import os -os.chdir("./dreem/training") +os.chdir("/Users/main/Documents/GitHub/dreem/dreem/training") + base_config = "./configs/base.yaml" # params_config = "./configs/override.yaml" diff --git a/tests/test_models.py b/tests/test_models.py index 3eaf9c2..187ea21 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -166,7 +166,8 @@ def test_embedding_validity(): _ = Embedding(emb_type="pos", mode="learned", features=128) - +# TODO: create test_embedding_rope and test the xshaped vs xout in the apply_rope function; +# how did the shapes match if i was using x vs xshaped? 
def test_embedding_basic(): """Test embedding logic.""" From 65a4ae0d4b3fccb45b5294f5c495b54bab9edc2f Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Thu, 8 Aug 2024 20:01:04 -0700 Subject: [PATCH 51/63] final attn head supports stack embeddings - 1x1 conv for stack embedding - stack into 3 channels for x,y,t --- dreem/models/attention_head.py | 34 +++++++++++++++++++++++++--------- dreem/models/transformer.py | 4 ++-- run_trainer.py | 4 +++- 3 files changed, 30 insertions(+), 12 deletions(-) diff --git a/dreem/models/attention_head.py b/dreem/models/attention_head.py index 3dde1f5..559292c 100644 --- a/dreem/models/attention_head.py +++ b/dreem/models/attention_head.py @@ -27,12 +27,15 @@ def __init__( super().__init__() self.embedding_agg_method = embedding_agg_method - # if using stacked embeddings, use 1x1 conv with x,y,t embeddings as channels + # if using stacked embeddings, use 1x1 conv with x,y,t embeddings as channels + # ensures output represents ref instances by query instances if self.embedding_agg_method == "stack": - self.conv_1x1 = torch.nn.Conv2d(in_channels=3,out_channels=1, - kernel_size=1,stride=1,padding=0) - self.q_proj = self.conv_1x1 - self.k_proj = self.conv_1x1 + self.q_proj = torch.nn.Conv1d(in_channels=3, out_channels=1, + kernel_size=1, stride=1, padding=0 + ) + self.k_proj = torch.nn.Conv1d(in_channels=3, out_channels=1, + kernel_size=1, stride=1, padding=0 + ) else: self.q_proj = MLP(feature_dim, feature_dim, feature_dim, num_layers, dropout) self.k_proj = MLP(feature_dim, feature_dim, feature_dim, num_layers, dropout) @@ -51,13 +54,26 @@ def forward( Returns: Output tensor of shape (batch_size, num_frame_instances, num_window_instances). """ + batch_size, num_query_instances, feature_dim = query.size() + num_window_instances = key.shape[1] + # if stacked embeddings, create channels for each x,y,t embedding dimension + # maps shape (1,192,1024) -> (1,64,3,1024) if self.embedding_agg_method == "stack": - key = - query = + key = key.view( + batch_size, 3, num_window_instances//3, feature_dim + ).permute(0, 2, 1, 3).squeeze(0) + query = query.view( + batch_size, 3, num_query_instances//3, feature_dim + ).permute(0, 2, 1, 3).squeeze(0) + # key, query of shape (batch_size, num_instances, 3, feature_dim) + k = self.k_proj(key).transpose(1, 0) + q = self.q_proj(query).transpose(1, 0) + # k,q of shape (batch_size, num_instances, feature_dim) + else: + k = self.k_proj(key) + q = self.q_proj(query) - k = self.k_proj(key) - q = self.q_proj(query) attn_weights = torch.bmm(q, k.transpose(1, 2)) return attn_weights # (B, N_t, N) diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index 33c904b..adfb371 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -131,6 +131,7 @@ def __init__( feature_dim=feature_dim_attn_head, num_layers=num_layers_attn_head, dropout=dropout_attn_head, + embedding_agg_method=self.embedding_meta["embedding_agg_method"] ) self._reset_parameters() @@ -242,8 +243,7 @@ def forward( asso_output = [] for frame_features in decoder_features: - # TODO: attn_head handles the 3x queries that can come out of the encoder/decoder if using stacked embeddings; - # does this by altering the MLP dimensions prior to attention outer product + # attn_head handles the 3x queries that can come out of the encoder/decoder if using stacked embeddings # n_query should be the number of instances in the last frame if running inference, # or number of ref instances for training. 
total_instances is always the number of reference instances asso_matrix = self.attn_head(frame_features, encoder_features).view( diff --git a/run_trainer.py b/run_trainer.py index 684a727..fcf38ff 100644 --- a/run_trainer.py +++ b/run_trainer.py @@ -2,7 +2,9 @@ from omegaconf import OmegaConf import os -os.chdir("/Users/main/Documents/GitHub/dreem/dreem/training") +# /Users/mustafashaikh/dreem/dreem/training +# /Users/main/Documents/GitHub/dreem/dreem/training +os.chdir("/Users/mustafashaikh/dreem/dreem/training") base_config = "./configs/base.yaml" # params_config = "./configs/override.yaml" From 7c38ad461f856744e4a2c25208887d26cebcf438 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Fri, 9 Aug 2024 18:29:13 -0700 Subject: [PATCH 52/63] Update tests, add new unit tests for rope - add unit tests for rope - Update existing tests to use new args/return params related to tfmr - Modify test to remove return_embedding=True support - need to address this --- dreem/models/attention_head.py | 2 +- dreem/models/embedding.py | 70 +++++------ dreem/models/transformer.py | 44 +++---- tests/test_models.py | 204 ++++++++++++++++++++++----------- 4 files changed, 201 insertions(+), 119 deletions(-) diff --git a/dreem/models/attention_head.py b/dreem/models/attention_head.py index 559292c..537cee1 100644 --- a/dreem/models/attention_head.py +++ b/dreem/models/attention_head.py @@ -14,7 +14,7 @@ def __init__( feature_dim: int, num_layers: int, dropout: float, - embedding_agg_method: str + embedding_agg_method: str = "average" ): """Initialize an instance of ATTWeightHead. diff --git a/dreem/models/embedding.py b/dreem/models/embedding.py index a21d5ab..74d99b0 100644 --- a/dreem/models/embedding.py +++ b/dreem/models/embedding.py @@ -72,7 +72,7 @@ def build_rope_cache(self, max_seq_len: int = 4096) -> None: cache = torch.stack([torch.cos(idx_theta), torch.sin(idx_theta)], dim=-1) self.register_buffer("cache", cache, persistent=False) - def forward(self, x: Tensor, *, input_pos: Optional[Tensor] = None) -> Tensor: + def forward(self, x: Tensor, input_pos: Optional[Tensor] = None) -> Tensor: """ Args: x (Tensor): input tensor with shape @@ -131,6 +131,7 @@ class Embedding(torch.nn.Module): EMB_MODES = { "fixed": {"temperature", "scale", "normalize"}, "learned": {"emb_num"}, + "rope": {"embedding_agg_method"}, "off": {}, } # dict of valid args:keyword params @@ -166,8 +167,6 @@ def __init__( Example: {"hidden_dims": 256, "num_layers":3, "dropout": 0.3} """ - self._check_init_args(emb_type, mode) - super().__init__() self.emb_type = emb_type @@ -181,6 +180,8 @@ def __init__( self.scale = scale self.n_points = n_points + self._check_init_args(emb_type, mode) + if self.normalize and self.scale is None: self.scale = 2 * math.pi @@ -201,8 +202,8 @@ def __init__( else: self.mlp = torch.nn.Identity() - self._emb_func = lambda tensor: torch.zeros( - (tensor.shape[0], self.features), dtype=tensor.dtype, device=tensor.device + self._emb_func = lambda seq, x: torch.zeros( + (seq.shape[0], self.features), dtype=seq.dtype, device=seq.device ) # turn off embedding by returning zeros self.lookup = None @@ -254,10 +255,15 @@ def _check_init_args(self, emb_type: str, mode: str): f"Embedding `mode` must be one of {self.EMB_MODES} not {mode}" ) + if mode.lower() == "rope" and self.embedding_agg_method == "average": + raise ValueError( + f"Cannot use aggregation method 'average' for rope embedding; must use 'stack' or 'concatenate'" + ) + def _transform(self, x, emb): - if emb==self._rope_embedding: + if self._emb_func == 
self._rope_embedding: return self._apply_rope(x, emb) else: return self._apply_additive_embeddings(x, emb) @@ -274,6 +280,7 @@ def _apply_rope(self, x, emb): Returns: Tensor of input queries transformed by RoPE """ + xout = torch.unsqueeze(x, 2) # input needs shape [batch_size, n_query, num_heads, embed_dim // 2, 2] xout = xout.float().reshape(*xout.shape[:-1], -1, 2) @@ -288,9 +295,9 @@ def _apply_rope(self, x, emb): -1, ) # output has shape [batch_size, n_query, num_heads, embed_dim] - x_out = x_out.flatten(3) - - return x_out + xout = xout.flatten(3).squeeze(2) + + return xout def _apply_additive_embeddings(self, x, emb): @@ -320,22 +327,15 @@ def forward(self, x, seq_positions: torch.Tensor) -> torch.Tensor: - Tensor: input queries transformed by embedding - An `N` x `self.features` tensor representing the corresponding spatial or temporal embedding. """ + # create embedding array; either rotation matrix of shape # (batch_size, n_query, num_heads, embed_dim // 2, 2), # or (N, embed_dim) array - emb = self._emb_func(seq_positions) - + emb = self._emb_func(seq_positions, x.size()) # transform the input data with the embedding - x = self._transform(x, emb) + xout = self._transform(x, emb) - # if emb.shape[-1] != self.features: - # raise RuntimeError( - # ( - # f"Output embedding dimension is {emb.shape[-1]} but requested {self.features} dimensions! \n" - # f"hint: Try turning the MLP on by passing `mlp_cfg` to the constructor to project to the correct embedding dimensions." - # ) - # ) - return x, emb + return xout, emb def _torch_int_div( self, tensor1: torch.Tensor, tensor2: torch.Tensor @@ -352,25 +352,29 @@ def _torch_int_div( return torch.div(tensor1, tensor2, rounding_mode="floor") - def _rope_embedding(self, x: torch.Tensor) -> torch.Tensor: + def _rope_embedding(self, seq_positions: torch.Tensor, input_shape: torch.Size) -> torch.Tensor: """ Computes the rotation matrix to apply RoPE to input queries Args: - x: Input queries of shape (num_batches, n_queries, embed_dim) + seq_positions: Pos array of shape (embed_dim,) used to compute rotational embedding + input_shape: Shape of the input queries; needed for rope Returns: Tensor: (N, embed_dim) rotation matrix """ - # input must be of shape (num_batches, num_instances, num_attn_heads, embed_dim) + # create dummy input of shape (num_batches, num_instances, num_attn_heads, embed_dim) # use num_heads=1 for compatibility with torch ROPE - x_rope = torch.unsqueeze(x, 2) + x_rope = torch.rand(input_shape).unsqueeze(2) # RoPE module takes in dimension, num_queries as input to calculate rotation matrix - rope = RotaryPositionalEmbeddings(self.features, x.shape[1]) - rot_mat = rope(x_rope) - + rope = RotaryPositionalEmbeddings(self.features, input_shape[1]) + # convert seq_positions (indicates relative position in frame) to int + # to index into the theta array for rope + seq_pos = 100*seq_positions.unsqueeze(0) + rot_mat = rope(x_rope, seq_pos.int()) + return rot_mat - def _sine_pos_embedding(self, centroids: torch.Tensor) -> torch.Tensor: + def _sine_pos_embedding(self, centroids: torch.Tensor, *args) -> torch.Tensor: """Compute fixed sine temporal embeddings per dimension (x,y) Args: @@ -400,7 +404,7 @@ def _sine_pos_embedding(self, centroids: torch.Tensor) -> torch.Tensor: return temp_lookup # .view(len(times), self.features) - def _sine_box_embedding(self, boxes: torch.Tensor) -> torch.Tensor: + def _sine_box_embedding(self, boxes: torch.Tensor, *args) -> torch.Tensor: """Compute sine positional embeddings for boxes using given 
parameters. Args: @@ -445,7 +449,7 @@ def _sine_box_embedding(self, boxes: torch.Tensor) -> torch.Tensor: return pos_emb - def _sine_temp_embedding(self, times: torch.Tensor) -> torch.Tensor: + def _sine_temp_embedding(self, times: torch.Tensor, *args) -> torch.Tensor: """Compute fixed sine temporal embeddings. Args: @@ -477,7 +481,7 @@ def _sine_temp_embedding(self, times: torch.Tensor) -> torch.Tensor: temp_emb = temp_lookup[times.int()] return temp_emb # .view(len(times), self.features) - def _learned_pos_embedding(self, boxes: torch.Tensor) -> torch.Tensor: + def _learned_pos_embedding(self, boxes: torch.Tensor, *args) -> torch.Tensor: """Compute learned positional embeddings for boxes using given parameters. Args: @@ -537,7 +541,7 @@ def _learned_pos_embedding(self, boxes: torch.Tensor) -> torch.Tensor: return pos_emb.view(N, self.features) - def _learned_temp_embedding(self, times: torch.Tensor) -> torch.Tensor: + def _learned_temp_embedding(self, times: torch.Tensor, *args) -> torch.Tensor: """Compute learned temporal embeddings for times using given parameters. Args: @@ -566,7 +570,7 @@ def _learned_temp_embedding(self, times: torch.Tensor) -> torch.Tensor: return temp_emb.view(N, self.features) - def _compute_weights(self, data: torch.Tensor) -> tuple[torch.Tensor, ...]: + def _compute_weights(self, data: torch.Tensor, *args) -> tuple[torch.Tensor, ...]: """Compute left and right learned embedding weights. Args: diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index adfb371..fb8f424 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -79,6 +79,7 @@ def __init__( self.pos_emb = Embedding(emb_type="off", mode="off", features=self.d_model) self.temp_emb = Embedding(emb_type="off", mode="off", features=self.d_model) + self.embedding_agg_method = "average" # default arg in case it's not passed into configs if self.embedding_meta: if "pos" in self.embedding_meta: @@ -86,7 +87,7 @@ def __init__( if pos_emb_cfg: self.pos_emb = Embedding( emb_type="pos", features=self.d_model, - embedding_agg_method=self.embedding_meta["embedding_agg_method"], + embedding_agg_method=self.embedding_agg_method, **pos_emb_cfg ) # agg method must be the same for pos and temp embeddings if "temp" in self.embedding_meta: @@ -94,9 +95,11 @@ def __init__( if temp_emb_cfg: self.temp_emb = Embedding( emb_type="temp", features=self.d_model, - embedding_agg_method=self.embedding_meta["embedding_agg_method"], + embedding_agg_method=self.embedding_agg_method, **temp_emb_cfg ) + self.embedding_agg_method = embedding_meta["embedding_agg_method"] \ + if "embedding_agg_method" in embedding_meta else "average" # Transformer Encoder encoder_layer = TransformerEncoderLayer( @@ -131,7 +134,7 @@ def __init__( feature_dim=feature_dim_attn_head, num_layers=num_layers_attn_head, dropout=dropout_attn_head, - embedding_agg_method=self.embedding_meta["embedding_agg_method"] + embedding_agg_method=self.embedding_agg_method ) self._reset_parameters() @@ -187,11 +190,10 @@ def forward( encoder_features = self.encoder( encoder_queries, embedding_map={"pos": self.pos_emb, "temp": self.temp_emb}, boxes=ref_boxes, times=ref_times, - embedding_agg_method=self.embedding_meta["embedding_agg_method"] + embedding_agg_method=self.embedding_agg_method ) # (total_instances, batch_size, embed_dim) or # (3*total_instances,batch_size,embed_dim) if using stacked embeddings - # TODO: check if instance.add_embedding() supports rotation matrices # TODO: include support for adding x,y,t embeddings to the 
instance # if self.return_embedding: # for i, instance in enumerate(ref_instances): @@ -221,19 +223,19 @@ def forward( query_boxes = ref_boxes query_times = ref_times - # TODO: include support for x,y,t embeddings and uncomment this - # if self.return_embedding: - # for i, instance in enumerate(query_instances): - # instance.add_embedding("pos", query_pos_emb[i]) - # instance.add_embedding("temp", query_temp_emb[i]) decoder_features = self.decoder( query_features, encoder_features, embedding_map={"pos": self.pos_emb, "temp": self.temp_emb}, boxes=query_boxes, times=query_times, - embedding_agg_method=self.embedding_meta["embedding_agg_method"] + embedding_agg_method=self.embedding_agg_method ) # (L, n_query, batch_size, embed_dim) + # TODO: include support for x,y,t embeddings and uncomment this + # if self.return_embedding: + # for i, instance in enumerate(query_instances): + # instance.add_embedding("pos", query_pos_emb[i]) + # instance.add_embedding("temp", query_temp_emb[i]) decoder_features = decoder_features.transpose( 1, 2 @@ -558,7 +560,8 @@ def apply_embeddings(queries: torch.Tensor, embedding_map: Dict[str, Embedding], if embedding_agg_method == "average": _, ref_pos_emb = pos_emb(queries, boxes) ref_emb = (ref_pos_emb + ref_temp_emb) / 2 - queries = queries + ref_emb + queries_avg = queries + ref_emb + queries_t = queries_x = queries_y = None else: # calculate embedding array for x,y from bb centroids; ref_x, ref_y of shape (n_query,) ref_x, ref_y = spatial_emb_from_bb(boxes) @@ -568,7 +571,7 @@ def apply_embeddings(queries: torch.Tensor, embedding_map: Dict[str, Embedding], # concatenate or stack the queries (avg. method done above since it applies differently) queries = collate_queries( - (queries_t, queries_x, queries_y), + (queries_avg, queries_t, queries_x, queries_y), embedding_agg_method) # transpose for input to EncoderLayer to (n_queries, batch_size, embed_dim) queries = queries.permute(1, 0, 2) @@ -612,7 +615,7 @@ def collate_queries(queries: Tuple[torch.Tensor], embedding_agg_method: str """ Args: - _queries: 4-tuple of queries (already transformed by embeddings) for _, x, y, t + _queries: 4-tuple of queries (already transformed by embeddings) for _, x, y, t each of shape (batch_size, n_query, embed_dim) embedding_agg_method: String representing the aggregation method for embeddings @@ -620,17 +623,18 @@ def collate_queries(queries: Tuple[torch.Tensor], embedding_agg_method: str stacked (increased number of tokens), or averaged (original token number and length) """ - queries_t, queries_x, queries_y = queries - - mlp = MLP(input_dim=queries_t.shape[-1]*3, hidden_dim=queries_t.shape[-1]*2, - output_dim=queries_t.shape[-1], num_layers=1, dropout=0.) + queries_avg, queries_t, queries_x, queries_y = queries - if embedding_agg_method == "stack": + if embedding_agg_method == "average": + collated_queries = queries_avg + elif embedding_agg_method == "stack": # TODO: try changing order of stacking so that order is by query token (x1,y1,t1),(x2,y2,t2) rather than # (t1,t2,t3...),(x1,x2,x3...),(y1,y2,y3...) # stacked is of shape (batch_size, 3*n_query, embed_dim) collated_queries = torch.cat((queries_t, queries_x, queries_y), dim=1) elif embedding_agg_method == "concatenate": + mlp = MLP(input_dim=queries_t.shape[-1] * 3, hidden_dim=queries_t.shape[-1] * 2, + output_dim=queries_t.shape[-1], num_layers=1, dropout=0.) 
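        # Note: instantiating the MLP here gives it fresh, randomly initialized weights on
        # every forward pass, and those weights are never registered as parameters of the
        # Transformer, so the 3*D -> D projection is not trained. It is also built on the
        # default device, which is the mismatch the later `layer.to(x.device)` change in
        # dreem/models/mlp.py compensates for. A trainable variant (sketch only, assuming
        # the owning module knows `embed_dim` at construction time) would create it once in
        # that module's __init__, e.g.
        #     self.collate_mlp = MLP(input_dim=3 * embed_dim, hidden_dim=2 * embed_dim,
        #                            output_dim=embed_dim, num_layers=1, dropout=0.0)
        # and pass it into collate_queries.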
# concatenated is of shape (batch_size, n_query, 3*embed_dim) collated_queries = torch.cat((queries_t, queries_x, queries_y), dim=2) # pass through MLP to project into space of (batch_size, n_query, embed_dim) @@ -643,7 +647,7 @@ def spatial_emb_from_bb(bb: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """ Computes embedding arrays for x,y spatial dimensions using centroids from bounding boxes Args: - bb: Bounding boxes of shape (n_query, batch_size, 4) from which to compute x,y centroids; + bb: Bounding boxes of shape (n_query, n_anchors, 4) from which to compute x,y centroids; each bounding box is [ymin, xmin, ymax, xmax] Returns: diff --git a/tests/test_models.py b/tests/test_models.py index 187ea21..51d3a72 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -14,6 +14,8 @@ from dreem.models.transformer import ( TransformerEncoderLayer, TransformerDecoderLayer, + spatial_emb_from_bb, + apply_embeddings ) @@ -33,7 +35,7 @@ def test_att_weight_head(): """Test self-attention head logic.""" b, n, f = 1, 10, 1024 # batch size, num instances, features - att_weight_head = ATTWeightHead(feature_dim=f, num_layers=2, dropout=0.1) + att_weight_head = ATTWeightHead(feature_dim=f, num_layers=2, dropout=0.1, embedding_agg_method="average") q = k = torch.rand(size=(b, n, f)) @@ -161,10 +163,77 @@ def test_embedding_validity(): with pytest.raises(Exception): _ = Embedding(emb_type="temporal", mode="learn", features=128) + with pytest.raises(Exception): + # embedding_agg_method cannot be average for rope + _ = Embedding(emb_type="pos", mode="rope", features=128, embedding_agg_method="average") + _ = Embedding(emb_type="pos", mode="rope", features=128, embedding_agg_method="stacked") + + _ = Embedding(emb_type="pos", mode="rope", features=128, embedding_agg_method="stack") + _ = Embedding(emb_type="pos", mode="rope", features=128, embedding_agg_method="concatenate") + + _ = Embedding(emb_type="pos", mode="fixed", features=128, embedding_agg_method="average") + _ = Embedding(emb_type="pos", mode="fixed", features=128, embedding_agg_method="stack") + _ = Embedding(emb_type="pos", mode="fixed", features=128, embedding_agg_method="concatenate") + + _ = Embedding(emb_type="pos", mode="learned", features=128, embedding_agg_method="average") + _ = Embedding(emb_type="pos", mode="learned", features=128, embedding_agg_method="stack") + _ = Embedding(emb_type="pos", mode="learned", features=128, embedding_agg_method="concatenate") + _ = Embedding(emb_type="temp", mode="learned", features=128) _ = Embedding(emb_type="pos", mode="learned", features=128) - _ = Embedding(emb_type="pos", mode="learned", features=128) + +def test_rope_embedding(): + "Test RoPE embedding" + frames = 32 + objects = 10 + d_model = 256 + n_anchors = 1 + + N = frames * objects + + boxes = torch.rand(size=(N, n_anchors, 4)) + times = torch.rand(size=(N,)) + # input data of shape (batch_size, N, num_heads, embed_dim) + x = torch.rand(size=(1, N, d_model)) + + pos_emb = Embedding( + emb_type="pos", + mode="rope", + features=d_model, + embedding_agg_method="stack" + ) + temp_emb = Embedding( + emb_type="temp", + mode="rope", + features=d_model, + embedding_agg_method="stack" + ) + + ref_x, ref_y = spatial_emb_from_bb(boxes) + x_rope, rot_mat_x = pos_emb(x, ref_x) + y_rope, rot_mat_y = pos_emb(x, ref_y) + t_rope, ref_temp_emb = temp_emb(x, times) + + assert x_rope.size() == (1, N, d_model) + assert y_rope.size() == (1, N, d_model) + assert t_rope.size() == (1, N, d_model) + + assert not torch.equal(x, x_rope) + assert not 
torch.equal(x, y_rope) + assert not torch.equal(x, t_rope) + + assert not torch.equal(x_rope, y_rope) + assert not torch.equal(x_rope, t_rope) + assert not torch.equal(y_rope, t_rope) + + assert ref_x.size() == ref_y.size() + assert x_rope.size() == x.size() + assert y_rope.size() == x.size() + +def test_embedding_aggregation(): + """Test stack, concatenate agg methods""" + # TODO: create test_embedding_rope and test the xshaped vs xout in the apply_rope function; # how did the shapes match if i was using x vs xshaped? @@ -180,6 +249,8 @@ def test_embedding_basic(): boxes = torch.rand(size=(N, n_anchors, 4)) times = torch.rand(size=(N,)) + # input data of shape (batch_size, N, embed_dim) + x = torch.rand(size=(1, N, d_model)) pos_emb = Embedding( emb_type="pos", @@ -190,31 +261,31 @@ def test_embedding_basic(): scale=10, ) - sine_pos_emb = pos_emb(boxes) + _, sine_pos_emb = pos_emb(x, boxes) pos_emb = Embedding(emb_type="pos", mode="learned", features=d_model, emb_num=100) - learned_pos_emb = pos_emb(boxes) + _, learned_pos_emb = pos_emb(x, boxes) temp_emb = Embedding(emb_type="temp", mode="learned", features=d_model, emb_num=16) - learned_temp_emb = temp_emb(times) + _, learned_temp_emb = temp_emb(x, times) pos_emb_off = Embedding(emb_type="pos", mode="off", features=d_model) - off_pos_emb = pos_emb_off(boxes) + _, off_pos_emb = pos_emb_off(x, boxes) temp_emb_off = Embedding(emb_type="temp", mode="off", features=d_model) - off_temp_emb = temp_emb_off(times) + _, off_temp_emb = temp_emb_off(x, times) learned_emb_off = Embedding(emb_type="off", mode="learned", features=d_model) - off_learned_emb_boxes = learned_emb_off(boxes) - off_learned_emb_times = learned_emb_off(times) + _, off_learned_emb_boxes = learned_emb_off(x, boxes) + _, off_learned_emb_times = learned_emb_off(x, times) fixed_emb_off = Embedding(emb_type="off", mode="fixed", features=d_model) - off_fixed_emb_boxes = fixed_emb_off(boxes) - off_fixed_emb_times = fixed_emb_off(times) + _, off_fixed_emb_boxes = fixed_emb_off(x, boxes) + _, off_fixed_emb_times = fixed_emb_off(x, times) off_emb = Embedding(emb_type="off", mode="off", features=d_model) - off_emb_boxes = off_emb(boxes) - off_emb_times = off_emb(times) + _, off_emb_boxes = off_emb(x, boxes) + _, off_emb_times = off_emb(x, times) assert sine_pos_emb.size() == (N, d_model) assert learned_pos_emb.size() == (N, d_model) @@ -248,12 +319,14 @@ def test_embedding_kwargs(): frames = 32 objects = 10 + d_model = 128 N = frames * objects n_anchors = 1 boxes = torch.rand(N, n_anchors, 4) - + # input data of shape (batch_size, N, embed_dim) + x = torch.rand(size=(1, N, d_model)) # sine embedding sine_args = { @@ -261,32 +334,32 @@ def test_embedding_kwargs(): "scale": frames, "normalize": True, } - sine_no_args = Embedding("pos", "fixed", 128) - sine_with_args = Embedding("pos", "fixed", 128, **sine_args) + sine_no_args = Embedding("pos", "fixed", d_model) + sine_with_args = Embedding("pos", "fixed", d_model, **sine_args) assert sine_no_args.temperature != sine_with_args.temperature - sine_no_args = sine_no_args(boxes) - sine_with_args = sine_with_args(boxes) + _, sine_no_args = sine_no_args(x, boxes) + _, sine_with_args = sine_with_args(x, boxes) assert not torch.equal(sine_no_args, sine_with_args) # learned pos embedding - lp_no_args = Embedding("pos", "learned", 128) + lp_no_args = Embedding("pos", "learned", d_model) lp_args = {"emb_num": 100, "over_boxes": False} - lp_with_args = Embedding("pos", "learned", 128, **lp_args) + lp_with_args = Embedding("pos", "learned", d_model, 
**lp_args) assert lp_no_args.lookup.weight.shape != lp_with_args.lookup.weight.shape # learned temp embedding - lt_no_args = Embedding("temp", "learned", 128) + lt_no_args = Embedding("temp", "learned", d_model) lt_args = {"emb_num": 100} - lt_with_args = Embedding("temp", "learned", 128, **lt_args) + lt_with_args = Embedding("temp", "learned", d_model, **lt_args) assert lt_no_args.lookup.weight.shape != lt_with_args.lookup.weight.shape @@ -300,6 +373,8 @@ def test_multianchor_embedding(): N = frames * objects boxes = torch.rand(size=(N, n_anchors, 4)) + # input data of shape (batch_size, N, embed_dim) + x = torch.rand(size=(1, N, d_model)) fixed_emb = Embedding( "pos", @@ -318,18 +393,18 @@ def test_multianchor_embedding(): assert not isinstance(fixed_emb.mlp, torch.nn.Identity) assert not isinstance(learned_emb.mlp, torch.nn.Identity) - emb = fixed_emb(boxes) + _, emb = fixed_emb(x, boxes) assert emb.size() == (N, features) - emb = learned_emb(boxes) + _, emb = learned_emb(x, boxes) assert emb.size() == (N, features) fixed_emb = Embedding("pos", "fixed", features=features) learned_emb = Embedding("pos", "learned", features=features) with pytest.raises(RuntimeError): - _ = fixed_emb(boxes) + _, _ = fixed_emb(x, boxes) with pytest.raises(RuntimeError): - _ = learned_emb(boxes) + _, _ = learned_emb(x, boxes) def test_transformer_encoder(): @@ -352,7 +427,7 @@ def test_transformer_encoder(): # with position pos_emb = torch.ones_like(queries) - encoder_features = transformer_encoder(queries, pos_emb=pos_emb) + encoder_features = transformer_encoder(queries) assert encoder_features.size() == encoder_features.size() @@ -384,9 +459,7 @@ def test_transformer_decoder(): decoder_features = transformer_decoder( decoder_queries, - encoder_features, - ref_pos_emb=pos_emb, - query_pos_emb=query_pos_emb, + encoder_features ) assert decoder_features.size() == decoder_queries.size() @@ -445,14 +518,15 @@ def test_transformer_embedding(): embedding_meta = { "pos": {"mode": "learned", "emb_num": 16, "normalize": True}, "temp": {"mode": "learned", "emb_num": 16, "normalize": True}, + "embedding_agg_method": "average" } - + # TODO: add support for return_embedding=True transformer = Transformer( d_model=feats, num_encoder_layers=1, num_decoder_layers=1, embedding_meta=embedding_meta, - return_embedding=True, + return_embedding=False, ) assert transformer.pos_emb.mode == "learned" @@ -462,22 +536,22 @@ def test_transformer_embedding(): assert asso_preds[0].matrix.size() == (num_detected * num_frames,) * 2 - pos_emb = torch.concat( - [instance.get_embedding("pos") for instance in instances], axis=0 - ) - temp_emb = torch.concat( - [instance.get_embedding("pos") for instance in instances], axis=0 - ) - - assert pos_emb.size() == ( - len(instances), - feats, - ), pos_emb.shape - - assert temp_emb.size() == ( - len(instances), - feats, - ), temp_emb.shape + # pos_emb = torch.concat( + # [instance.get_embedding("pos") for instance in instances], axis=0 + # ) + # temp_emb = torch.concat( + # [instance.get_embedding("pos") for instance in instances], axis=0 + # ) + # + # assert pos_emb.size() == ( + # len(instances), + # feats, + # ), pos_emb.shape + # + # assert temp_emb.size() == ( + # len(instances), + # feats, + # ), temp_emb.shape def test_tracking_transformer(): @@ -512,7 +586,7 @@ def test_tracking_transformer(): } encoder_cfg = {"model_name": "resnet18", "pretrained": False, "in_chans": 3} - + # TODO: add support for return_embedding=True and uncomment lines below tracking_transformer = 
GlobalTrackingTransformer( encoder_cfg=encoder_cfg, d_model=feats, @@ -526,19 +600,19 @@ def test_tracking_transformer(): assert asso_preds[0].matrix.size() == (num_detected * num_frames,) * 2 - pos_emb = torch.concat( - [instance.get_embedding("pos") for instance in instances], axis=0 - ) - temp_emb = torch.concat( - [instance.get_embedding("pos") for instance in instances], axis=0 - ) - - assert pos_emb.size() == ( - len(instances), - feats, - ), pos_emb.shape - - assert temp_emb.size() == ( - len(instances), - feats, - ), temp_emb.shape + # pos_emb = torch.concat( + # [instance.get_embedding("pos") for instance in instances], axis=0 + # ) + # temp_emb = torch.concat( + # [instance.get_embedding("pos") for instance in instances], axis=0 + # ) + # + # assert pos_emb.size() == ( + # len(instances), + # feats, + # ), pos_emb.shape + # + # assert temp_emb.size() == ( + # len(instances), + # feats, + # ), temp_emb.shape From 8b552ef1f56f2b3f09a714175e5ec396b17cf999 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Mon, 12 Aug 2024 15:07:58 -0700 Subject: [PATCH 53/63] rope bug fixes - create rope isntance once rather than each fwd pass - construct embedding lookup array each fwd pass based on num instances passed in to embedding - scale only pos embs * 100 rather than also temp embs --- dreem/models/embedding.py | 30 +++++++++++++++++++----------- dreem/models/transformer.py | 8 ++++---- tests/test_models.py | 3 +-- 3 files changed, 24 insertions(+), 17 deletions(-) diff --git a/dreem/models/embedding.py b/dreem/models/embedding.py index 74d99b0..17682b5 100644 --- a/dreem/models/embedding.py +++ b/dreem/models/embedding.py @@ -35,13 +35,13 @@ class RotaryPositionalEmbeddings(nn.Module): def __init__( self, dim: int, - max_seq_len: int = 4096, + # max_seq_len: int, base: int = 10000, ) -> None: super().__init__() self.dim = dim self.base = base - self.max_seq_len = max_seq_len + # self.max_seq_len = max_seq_len self._rope_init() # We need to explicitly define reset_parameters for FSDP initialization, see @@ -55,10 +55,10 @@ def _rope_init(self): ** (torch.arange(0, self.dim, 2)[: (self.dim // 2)].float() / self.dim) ) self.register_buffer("theta", theta, persistent=False) - self.build_rope_cache(self.max_seq_len) - def build_rope_cache(self, max_seq_len: int = 4096) -> None: + def build_rope_cache(self, max_seq_len: int) -> None: # Create position indexes `[0, 1, ..., max_seq_len - 1]` + seq_idx = torch.arange( max_seq_len, dtype=self.theta.dtype, device=self.theta.device ) @@ -96,6 +96,12 @@ def forward(self, x: Tensor, input_pos: Optional[Tensor] = None) -> Tensor: # input tensor has shape [b, s, n_h, h_d] seq_len = x.size(1) + # create the lookup array based on how many instances there are + # max(101, seq_len) is for positional vs temporal; pos can only have idx up to + # 100 since it's a fraction of [0,1]*100. 
temp is from [0, clip_len]; since clip_len + # not available, we use # of instances from input x; this is always >= clip_len + self.build_rope_cache(max(101, seq_len)) # registers cache + self.cache = self.cache.to(input_pos.device) # extract the values based on whether input_pos is set or not rope_cache = ( self.cache[:seq_len] if input_pos is None else self.cache[input_pos] @@ -113,7 +119,6 @@ def forward(self, x: Tensor, input_pos: Optional[Tensor] = None) -> Tensor: return rope_cache - class Embedding(torch.nn.Module): @@ -222,7 +227,7 @@ def __init__( if self.emb_type == "pos": if self.embedding_agg_method == "average": self._emb_func = self._sine_box_embedding - else: + else: # if using stacked/concatenated agg method self._emb_func = self._sine_pos_embedding elif self.emb_type == "temp": self._emb_func = self._sine_temp_embedding @@ -230,6 +235,8 @@ def __init__( elif self.mode == "rope": # pos/temp embeddings processed the same way with different embedding array inputs self._emb_func = self._rope_embedding + # create instance so embedding lookup array is created only once + self.rope_instance = RotaryPositionalEmbeddings(self.features) def _check_init_args(self, emb_type: str, mode: str): @@ -364,12 +371,13 @@ def _rope_embedding(self, seq_positions: torch.Tensor, input_shape: torch.Size) # create dummy input of shape (num_batches, num_instances, num_attn_heads, embed_dim) # use num_heads=1 for compatibility with torch ROPE x_rope = torch.rand(input_shape).unsqueeze(2) + # infer whether it is a positional or temporal embedding + is_pos_emb = 1 if seq_positions.max() <= 1 else 0 + # if it is positional, scale seq_positions since these are fractions + # in [0,1] and we need int indexes for embedding lookup + seq_positions = seq_positions*100 if is_pos_emb else seq_positions # RoPE module takes in dimension, num_queries as input to calculate rotation matrix - rope = RotaryPositionalEmbeddings(self.features, input_shape[1]) - # convert seq_positions (indicates relative position in frame) to int - # to index into the theta array for rope - seq_pos = 100*seq_positions.unsqueeze(0) - rot_mat = rope(x_rope, seq_pos.int()) + rot_mat = self.rope_instance(x_rope, seq_positions.unsqueeze(0).int()) return rot_mat diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index fb8f424..e88bfe0 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -79,9 +79,10 @@ def __init__( self.pos_emb = Embedding(emb_type="off", mode="off", features=self.d_model) self.temp_emb = Embedding(emb_type="off", mode="off", features=self.d_model) - self.embedding_agg_method = "average" # default arg in case it's not passed into configs if self.embedding_meta: + self.embedding_agg_method = embedding_meta["embedding_agg_method"] \ + if "embedding_agg_method" in embedding_meta else "average" if "pos" in self.embedding_meta: pos_emb_cfg = self.embedding_meta["pos"] if pos_emb_cfg: @@ -98,8 +99,6 @@ def __init__( embedding_agg_method=self.embedding_agg_method, **temp_emb_cfg ) - self.embedding_agg_method = embedding_meta["embedding_agg_method"] \ - if "embedding_agg_method" in embedding_meta else "average" # Transformer Encoder encoder_layer = TransformerEncoderLayer( @@ -568,6 +567,7 @@ def apply_embeddings(queries: torch.Tensor, embedding_map: Dict[str, Embedding], # forward pass of Embedding object transforms input queries with embeddings queries_x, ref_pos_emb_x = pos_emb(queries, ref_x) queries_y, ref_pos_emb_y = pos_emb(queries, ref_y) + queries_avg = None # pass dummy var in to 
collate_queries # concatenate or stack the queries (avg. method done above since it applies differently) queries = collate_queries( @@ -613,7 +613,7 @@ def _get_activation_fn(activation: str) -> callable: def collate_queries(queries: Tuple[torch.Tensor], embedding_agg_method: str ) -> torch.Tensor: """ - + Aggregates queries transformed by embeddings Args: _queries: 4-tuple of queries (already transformed by embeddings) for _, x, y, t each of shape (batch_size, n_query, embed_dim) diff --git a/tests/test_models.py b/tests/test_models.py index 51d3a72..80d3277 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -235,8 +235,7 @@ def test_embedding_aggregation(): """Test stack, concatenate agg methods""" -# TODO: create test_embedding_rope and test the xshaped vs xout in the apply_rope function; -# how did the shapes match if i was using x vs xshaped? + def test_embedding_basic(): """Test embedding logic.""" From 8fdfba179e8901f6a0462ca71711a668856805e8 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Mon, 12 Aug 2024 15:52:36 -0700 Subject: [PATCH 54/63] minor update to previous commit --- dreem/models/transformer.py | 1 - tests/test_models.py | 9 +++------ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index e88bfe0..2235c04 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -628,7 +628,6 @@ def collate_queries(queries: Tuple[torch.Tensor], embedding_agg_method: str if embedding_agg_method == "average": collated_queries = queries_avg elif embedding_agg_method == "stack": - # TODO: try changing order of stacking so that order is by query token (x1,y1,t1),(x2,y2,t2) rather than # (t1,t2,t3...),(x1,x2,x3...),(y1,y2,y3...) # stacked is of shape (batch_size, 3*n_query, embed_dim) collated_queries = torch.cat((queries_t, queries_x, queries_y), dim=1) diff --git a/tests/test_models.py b/tests/test_models.py index 80d3277..8193ee6 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -231,10 +231,6 @@ def test_rope_embedding(): assert x_rope.size() == x.size() assert y_rope.size() == x.size() -def test_embedding_aggregation(): - """Test stack, concatenate agg methods""" - - def test_embedding_basic(): """Test embedding logic.""" @@ -470,8 +466,9 @@ def test_transformer_basic(): num_frames = 32 num_detected = 10 img_shape = (1, 100, 100) - - transformer = Transformer(d_model=feats, num_encoder_layers=1, num_decoder_layers=1) + embedding_meta = {"embedding_agg_method": "stack"} + transformer = Transformer(d_model=feats, num_encoder_layers=1, num_decoder_layers=1, + embedding_meta=embedding_meta) frames = [] From 03df33f1e55442aabdba312c02696831e615017c Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Wed, 14 Aug 2024 17:16:01 -0700 Subject: [PATCH 55/63] fix device mismatch in mlp module --- dreem/models/mlp.py | 1 + 1 file changed, 1 insertion(+) diff --git a/dreem/models/mlp.py b/dreem/models/mlp.py index 4f09551..a6c5ab3 100644 --- a/dreem/models/mlp.py +++ b/dreem/models/mlp.py @@ -56,6 +56,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: Output tensor of shape (batch_size, num_instances, output_dim). 
""" for i, layer in enumerate(self.layers): + layer.to(x.device) x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) if i < self.num_layers - 1 and self.dropout > 0.0: x = self.dropouts[i](x) From 1d2f5a54bf0481cf5f96344897eca4c42e937429 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Wed, 14 Aug 2024 22:26:06 -0700 Subject: [PATCH 56/63] support for adding embedding to instance --- dreem/io/instance.py | 4 ++- dreem/models/transformer.py | 52 ++++++++++++++++----------- tests/test_models.py | 70 ++++++++++++++++++------------------- 3 files changed, 70 insertions(+), 56 deletions(-) diff --git a/dreem/io/instance.py b/dreem/io/instance.py index 65be3c0..c3aa568 100644 --- a/dreem/io/instance.py +++ b/dreem/io/instance.py @@ -565,7 +565,9 @@ def add_embedding(self, emb_type: str, embedding: torch.Tensor) -> None: emb_type: Key/embedding type to be saved to dictionary embedding: The actual torch tensor embedding. """ - embedding = _expand_to_rank(embedding, 2) + if type(embedding) != dict: # for embedding agg method "average", input is array + # for method stack and concatenate, input is dict + embedding = _expand_to_rank(embedding, 2) self._embeddings[emb_type] = embedding @property diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index 2235c04..421d70c 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -186,18 +186,22 @@ def forward( encoder_queries = ref_features # (encoder_features, ref_pos_emb, ref_temp_emb) \ - encoder_features = self.encoder( + encoder_features, pos_emb_traceback, temp_emb_traceback = self.encoder( encoder_queries, embedding_map={"pos": self.pos_emb, "temp": self.temp_emb}, boxes=ref_boxes, times=ref_times, embedding_agg_method=self.embedding_agg_method ) # (total_instances, batch_size, embed_dim) or # (3*total_instances,batch_size,embed_dim) if using stacked embeddings - # TODO: include support for adding x,y,t embeddings to the instance - # if self.return_embedding: - # for i, instance in enumerate(ref_instances): - # instance.add_embedding("pos", ref_pos_emb[i]) - # instance.add_embedding("temp", ref_temp_emb[i]) + if self.return_embedding: + for i, instance in enumerate(ref_instances): + if self.embedding_agg_method == "average": + ref_pos_emb = pos_emb_traceback[0][i] # array + else: + ref_pos_emb = {"x": pos_emb_traceback[0][0][i], "y": pos_emb_traceback[1][0][i]} # dict + + instance.add_embedding("pos", ref_pos_emb) # can be an array or a dict + instance.add_embedding("temp", temp_emb_traceback) # -------------- Begin decoder --------------- # @@ -223,18 +227,22 @@ def forward( query_times = ref_times - decoder_features = self.decoder( + decoder_features, pos_emb_traceback, temp_emb_traceback = self.decoder( query_features, encoder_features, embedding_map={"pos": self.pos_emb, "temp": self.temp_emb}, boxes=query_boxes, times=query_times, embedding_agg_method=self.embedding_agg_method ) # (L, n_query, batch_size, embed_dim) - # TODO: include support for x,y,t embeddings and uncomment this - # if self.return_embedding: - # for i, instance in enumerate(query_instances): - # instance.add_embedding("pos", query_pos_emb[i]) - # instance.add_embedding("temp", query_temp_emb[i]) + if self.return_embedding: + for i, instance in enumerate(ref_instances): + if self.embedding_agg_method == "average": + ref_pos_emb = pos_emb_traceback[0][i] # array + else: + ref_pos_emb = {"x": pos_emb_traceback[0][0][i], "y": pos_emb_traceback[1][0][i]} # dict + + instance.add_embedding("pos", ref_pos_emb) # can be an array or a 
dict + instance.add_embedding("temp", temp_emb_traceback) decoder_features = decoder_features.transpose( 1, 2 @@ -460,13 +468,15 @@ def forward( for layer in self.layers: # compute embeddings and apply to the input queries - queries = apply_embeddings(queries, embedding_map, boxes, times, embedding_agg_method) + queries, pos_emb_traceback, temp_emb_traceback = apply_embeddings( + queries, embedding_map, boxes, times, embedding_agg_method + ) # pass through EncoderLayer queries = layer(queries) encoder_features = self.norm(queries) - return encoder_features# , ref_pos_emb, ref_temp_emb + return encoder_features, pos_emb_traceback, temp_emb_traceback class TransformerDecoder(nn.Module): @@ -518,15 +528,16 @@ def forward( # since the encoder output doesn't change for any number of decoder layer inputs, # we can process its embedding outside the loop if embedding_agg_method == "average": - encoder_features = apply_embeddings(encoder_features, embedding_map, + encoder_features, *_ = apply_embeddings(encoder_features, embedding_map, boxes, times, embedding_agg_method) # TODO: ^ should embeddings really be applied to encoder output again before cross attention? # switched off for stack and concatenate methods as those further split the tokens. Kept for "average" # for backward compatibility for layer in self.layers: - decoder_features = apply_embeddings(decoder_features, embedding_map, - boxes, times, embedding_agg_method) + decoder_features, pos_emb_traceback, temp_emb_traceback = apply_embeddings( + decoder_features, embedding_map, boxes, times, embedding_agg_method + ) decoder_features = layer( decoder_features, encoder_features ) @@ -537,10 +548,9 @@ def forward( if self.return_intermediate: intermediate.pop() intermediate.append(decoder_features) - return torch.stack(intermediate) - return decoder_features.unsqueeze(0) + return decoder_features.unsqueeze(0), pos_emb_traceback, temp_emb_traceback def apply_embeddings(queries: torch.Tensor, embedding_map: Dict[str, Embedding], @@ -561,6 +571,7 @@ def apply_embeddings(queries: torch.Tensor, embedding_map: Dict[str, Embedding], ref_emb = (ref_pos_emb + ref_temp_emb) / 2 queries_avg = queries + ref_emb queries_t = queries_x = queries_y = None + pos_emb_traceback = (ref_pos_emb,) else: # calculate embedding array for x,y from bb centroids; ref_x, ref_y of shape (n_query,) ref_x, ref_y = spatial_emb_from_bb(boxes) @@ -568,6 +579,7 @@ def apply_embeddings(queries: torch.Tensor, embedding_map: Dict[str, Embedding], queries_x, ref_pos_emb_x = pos_emb(queries, ref_x) queries_y, ref_pos_emb_y = pos_emb(queries, ref_y) queries_avg = None # pass dummy var in to collate_queries + pos_emb_traceback = (ref_pos_emb_x, ref_pos_emb_y) # concatenate or stack the queries (avg. 
method done above since it applies differently) queries = collate_queries( @@ -576,7 +588,7 @@ def apply_embeddings(queries: torch.Tensor, embedding_map: Dict[str, Embedding], # transpose for input to EncoderLayer to (n_queries, batch_size, embed_dim) queries = queries.permute(1, 0, 2) - return queries + return queries, pos_emb_traceback, ref_temp_emb def _get_clones(module: nn.Module, N: int) -> nn.ModuleList: diff --git a/tests/test_models.py b/tests/test_models.py index 8193ee6..76ef074 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -516,13 +516,13 @@ def test_transformer_embedding(): "temp": {"mode": "learned", "emb_num": 16, "normalize": True}, "embedding_agg_method": "average" } - # TODO: add support for return_embedding=True + transformer = Transformer( d_model=feats, num_encoder_layers=1, num_decoder_layers=1, embedding_meta=embedding_meta, - return_embedding=False, + return_embedding=True, ) assert transformer.pos_emb.mode == "learned" @@ -532,22 +532,22 @@ def test_transformer_embedding(): assert asso_preds[0].matrix.size() == (num_detected * num_frames,) * 2 - # pos_emb = torch.concat( - # [instance.get_embedding("pos") for instance in instances], axis=0 - # ) - # temp_emb = torch.concat( - # [instance.get_embedding("pos") for instance in instances], axis=0 - # ) - # - # assert pos_emb.size() == ( - # len(instances), - # feats, - # ), pos_emb.shape - # - # assert temp_emb.size() == ( - # len(instances), - # feats, - # ), temp_emb.shape + pos_emb = torch.concat( + [instance.get_embedding("pos") for instance in instances], axis=0 + ) + temp_emb = torch.concat( + [instance.get_embedding("pos") for instance in instances], axis=0 + ) + + assert pos_emb.size() == ( + len(instances), + feats, + ), pos_emb.shape + + assert temp_emb.size() == ( + len(instances), + feats, + ), temp_emb.shape def test_tracking_transformer(): @@ -582,7 +582,7 @@ def test_tracking_transformer(): } encoder_cfg = {"model_name": "resnet18", "pretrained": False, "in_chans": 3} - # TODO: add support for return_embedding=True and uncomment lines below + tracking_transformer = GlobalTrackingTransformer( encoder_cfg=encoder_cfg, d_model=feats, @@ -596,19 +596,19 @@ def test_tracking_transformer(): assert asso_preds[0].matrix.size() == (num_detected * num_frames,) * 2 - # pos_emb = torch.concat( - # [instance.get_embedding("pos") for instance in instances], axis=0 - # ) - # temp_emb = torch.concat( - # [instance.get_embedding("pos") for instance in instances], axis=0 - # ) - # - # assert pos_emb.size() == ( - # len(instances), - # feats, - # ), pos_emb.shape - # - # assert temp_emb.size() == ( - # len(instances), - # feats, - # ), temp_emb.shape + pos_emb = torch.concat( + [instance.get_embedding("pos") for instance in instances], axis=0 + ) + temp_emb = torch.concat( + [instance.get_embedding("pos") for instance in instances], axis=0 + ) + + assert pos_emb.size() == ( + len(instances), + feats, + ), pos_emb.shape + + assert temp_emb.size() == ( + len(instances), + feats, + ), temp_emb.shape From 5a5f75ffed732482d115f51c8a5777f149808883 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Thu, 15 Aug 2024 17:14:51 -0700 Subject: [PATCH 57/63] bug fixes to pass unit tests - times array for embedding for encoder queries inside decoder was of query size rather than ref size --- dreem/models/attention_head.py | 7 +++-- dreem/models/embedding.py | 3 +- dreem/models/transformer.py | 55 +++++++++++++++++++++------------- tests/test_training.py | 6 ++-- 4 files changed, 44 insertions(+), 27 deletions(-) diff --git 
a/dreem/models/attention_head.py b/dreem/models/attention_head.py index 537cee1..8ea04b2 100644 --- a/dreem/models/attention_head.py +++ b/dreem/models/attention_head.py @@ -14,7 +14,7 @@ def __init__( feature_dim: int, num_layers: int, dropout: float, - embedding_agg_method: str = "average" + **kwargs ): """Initialize an instance of ATTWeightHead. @@ -25,7 +25,10 @@ def __init__( embedding_agg_method: how the embeddings are aggregated; average/stack/concatenate """ super().__init__() - self.embedding_agg_method = embedding_agg_method + if 'embedding_agg_method' in kwargs: + self.embedding_agg_method = kwargs['embedding_agg_method'] + else: + self.embedding_agg_method = None # if using stacked embeddings, use 1x1 conv with x,y,t embeddings as channels # ensures output represents ref instances by query instances diff --git a/dreem/models/embedding.py b/dreem/models/embedding.py index 17682b5..134960e 100644 --- a/dreem/models/embedding.py +++ b/dreem/models/embedding.py @@ -318,7 +318,8 @@ def _apply_additive_embeddings(self, x, emb): Returns: Tensor: Input queries with embeddings added - shape (batch_size, N, embed_dim) """ - return x + emb.unsqueeze(0) + _emb = emb.unsqueeze(0) + return x + _emb def forward(self, x, seq_positions: torch.Tensor) -> torch.Tensor: diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index 421d70c..f64d6b2 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -99,6 +99,9 @@ def __init__( embedding_agg_method=self.embedding_agg_method, **temp_emb_cfg ) + else: + self.embedding_meta = {} + self.embedding_agg_method = None # Transformer Encoder encoder_layer = TransformerEncoderLayer( @@ -133,7 +136,7 @@ def __init__( feature_dim=feature_dim_attn_head, num_layers=num_layers_attn_head, dropout=dropout_attn_head, - embedding_agg_method=self.embedding_agg_method + **self.embedding_meta ) self._reset_parameters() @@ -230,6 +233,7 @@ def forward( decoder_features, pos_emb_traceback, temp_emb_traceback = self.decoder( query_features, encoder_features, embedding_map={"pos": self.pos_emb, "temp": self.temp_emb}, + enc_boxes=ref_boxes, enc_times=ref_times, boxes=query_boxes, times=query_times, embedding_agg_method=self.embedding_agg_method ) # (L, n_query, batch_size, embed_dim) @@ -450,7 +454,7 @@ def __init__( def forward( self, queries: torch.Tensor, embedding_map: Dict[str, Embedding], boxes: torch.Tensor, times: torch.Tensor, - embedding_agg_method: str + embedding_agg_method: str = None ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """Execute a forward pass of encoder layer. Computes and applies embeddings before input to EncoderLayer @@ -508,8 +512,9 @@ def forward( decoder_queries: torch.Tensor, encoder_features: torch.Tensor, embedding_map: Dict[str, Embedding], + enc_boxes: torch.Tensor, enc_times: torch.Tensor, boxes: torch.Tensor, times: torch.Tensor, - embedding_agg_method: str + embedding_agg_method: str = None ) -> torch.Tensor: """Execute a forward pass of the decoder block. @@ -529,7 +534,7 @@ def forward( # we can process its embedding outside the loop if embedding_agg_method == "average": encoder_features, *_ = apply_embeddings(encoder_features, embedding_map, - boxes, times, embedding_agg_method) + enc_boxes, enc_times, embedding_agg_method) # TODO: ^ should embeddings really be applied to encoder output again before cross attention? # switched off for stack and concatenate methods as those further split the tokens. 
Kept for "average" # for backward compatibility @@ -565,25 +570,31 @@ def apply_embeddings(queries: torch.Tensor, embedding_map: Dict[str, Embedding], queries = queries.permute(1,0,2) # queries is shape (batch_size, n_query, embed_dim) # calculate temporal embeddings and transform queries queries_t, ref_temp_emb = temp_emb(queries, times) - # if avg. of temp and pos, need bounding boxes; bb only used for method "average" - if embedding_agg_method == "average": - _, ref_pos_emb = pos_emb(queries, boxes) - ref_emb = (ref_pos_emb + ref_temp_emb) / 2 - queries_avg = queries + ref_emb - queries_t = queries_x = queries_y = None - pos_emb_traceback = (ref_pos_emb,) + + if embedding_agg_method is None: + pos_emb_traceback = (torch.zeros_like(queries),) + queries_avg = queries_t = queries_x = queries_y = None else: - # calculate embedding array for x,y from bb centroids; ref_x, ref_y of shape (n_query,) - ref_x, ref_y = spatial_emb_from_bb(boxes) - # forward pass of Embedding object transforms input queries with embeddings - queries_x, ref_pos_emb_x = pos_emb(queries, ref_x) - queries_y, ref_pos_emb_y = pos_emb(queries, ref_y) - queries_avg = None # pass dummy var in to collate_queries - pos_emb_traceback = (ref_pos_emb_x, ref_pos_emb_y) + # if avg. of temp and pos, need bounding boxes; bb only used for method "average" + if embedding_agg_method == "average": + _, ref_pos_emb = pos_emb(queries, boxes) + ref_emb = (ref_pos_emb + ref_temp_emb) / 2 + queries_avg = queries + ref_emb + queries_t = queries_x = queries_y = None + pos_emb_traceback = (ref_pos_emb,) + else: + # calculate embedding array for x,y from bb centroids; ref_x, ref_y of shape (n_query,) + ref_x, ref_y = spatial_emb_from_bb(boxes) + # forward pass of Embedding object transforms input queries with embeddings + queries_x, ref_pos_emb_x = pos_emb(queries, ref_x) + queries_y, ref_pos_emb_y = pos_emb(queries, ref_y) + queries_avg = None # pass dummy var in to collate_queries + pos_emb_traceback = (ref_pos_emb_x, ref_pos_emb_y) + # concatenate or stack the queries (avg. 
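
For the stack and concatenate paths, apply_embeddings derives per-instance x and y ids from bounding-box centroids via spatial_emb_from_bb, documented further down in transformer.py as taking (n_query, n_anchors, 4) boxes in [ymin, xmin, ymax, xmax] order and returning ref_x, ref_y of shape (n_query,). A rough sketch of that centroid computation; averaging over the anchor dimension is an assumption here, not something the patch states:

import torch

bb = torch.tensor(
    [[[10.0, 20.0, 30.0, 40.0]],   # instance 0, one anchor
     [[ 0.0,  5.0, 10.0, 15.0]]]   # instance 1, one anchor -> shape (n_query=2, n_anchors=1, 4)
)
ref_y = ((bb[..., 0] + bb[..., 2]) / 2).mean(dim=1)  # centroid y per instance, shape (n_query,)
ref_x = ((bb[..., 1] + bb[..., 3]) / 2).mean(dim=1)  # centroid x per instance, shape (n_query,)
assert ref_x.shape == ref_y.shape == (bb.shape[0],)
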
method done above since it applies differently) queries = collate_queries( - (queries_avg, queries_t, queries_x, queries_y), + (queries_avg, queries_t, queries_x, queries_y, queries), embedding_agg_method) # transpose for input to EncoderLayer to (n_queries, batch_size, embed_dim) queries = queries.permute(1, 0, 2) @@ -627,7 +638,7 @@ def collate_queries(queries: Tuple[torch.Tensor], embedding_agg_method: str """ Aggregates queries transformed by embeddings Args: - _queries: 4-tuple of queries (already transformed by embeddings) for _, x, y, t + _queries: 5-tuple of queries (already transformed by embeddings) for _, x, y, t, original input each of shape (batch_size, n_query, embed_dim) embedding_agg_method: String representing the aggregation method for embeddings @@ -635,7 +646,7 @@ def collate_queries(queries: Tuple[torch.Tensor], embedding_agg_method: str stacked (increased number of tokens), or averaged (original token number and length) """ - queries_avg, queries_t, queries_x, queries_y = queries + queries_avg, queries_t, queries_x, queries_y, orig_queries = queries if embedding_agg_method == "average": collated_queries = queries_avg @@ -650,6 +661,8 @@ def collate_queries(queries: Tuple[torch.Tensor], embedding_agg_method: str collated_queries = torch.cat((queries_t, queries_x, queries_y), dim=2) # pass through MLP to project into space of (batch_size, n_query, embed_dim) collated_queries = mlp(collated_queries) + else: + collated_queries = orig_queries return collated_queries diff --git a/tests/test_training.py b/tests/test_training.py index bd8bbe7..8c5206e 100644 --- a/tests/test_training.py +++ b/tests/test_training.py @@ -138,9 +138,9 @@ def test_config_gtr_runner(tmp_path, base_config, params_config, two_flies): "dataset.clip_length": 8, "trainer.min_epochs": 1, "checkpointing.dirpath": model_dir, - "logging.save_dir": logs_dir, + "logging.save_dir": logs_dir } cfg.set_hparams(hparams) - with torch.autograd.set_detect_anomaly(True): - run(cfg.cfg) + # with torch.autograd.set_detect_anomaly(True): + run(cfg.cfg) From 3ff1ab0fa999e1c7cf1068af688016d7bacfdc90 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Fri, 16 Aug 2024 12:24:04 -0700 Subject: [PATCH 58/63] minor updates from PR review --- dreem/models/embedding.py | 22 +- dreem/models/transformer.py | 23 +- rope.ipynb | 598 ----------------------- run_trainer.py => scripts/run_trainer.py | 2 +- 4 files changed, 30 insertions(+), 615 deletions(-) delete mode 100644 rope.ipynb rename run_trainer.py => scripts/run_trainer.py (86%) diff --git a/dreem/models/embedding.py b/dreem/models/embedding.py index 134960e..7ef9b0b 100644 --- a/dreem/models/embedding.py +++ b/dreem/models/embedding.py @@ -99,8 +99,9 @@ def forward(self, x: Tensor, input_pos: Optional[Tensor] = None) -> Tensor: # create the lookup array based on how many instances there are # max(101, seq_len) is for positional vs temporal; pos can only have idx up to # 100 since it's a fraction of [0,1]*100. 
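
In collate_queries above, the concatenate branch fuses the t/x/y-embedded copies along the feature dimension and relies on an MLP to project back to embed_dim, while the new else branch simply returns the untouched input queries. A small sketch of the concatenate path, with a single Linear standing in for the repo's MLP (an assumption made for brevity):

import torch

batch_size, n_query, embed_dim = 1, 5, 32
queries_t = torch.randn(batch_size, n_query, embed_dim)
queries_x = torch.randn(batch_size, n_query, embed_dim)
queries_y = torch.randn(batch_size, n_query, embed_dim)

collated = torch.cat((queries_t, queries_x, queries_y), dim=2)  # (B, n_query, 3 * embed_dim)
mlp = torch.nn.Linear(3 * embed_dim, embed_dim)                 # stand-in for the repo's MLP
projected = mlp(collated)                                       # back to (B, n_query, embed_dim)
assert projected.shape == (batch_size, n_query, embed_dim)
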
temp is from [0, clip_len]; since clip_len - # not available, we use # of instances from input x; this is always >= clip_len - self.build_rope_cache(max(101, seq_len)) # registers cache + # not available, we use the last value in the indexing array since this will be the + # last possible frame that we would need to index since no instances in a frame after that + self.build_rope_cache(max(101, input_pos[:, -1].max() + 1)) # registers cache self.cache = self.cache.to(input_pos.device) # extract the values based on whether input_pos is set or not rope_cache = ( @@ -269,7 +270,13 @@ def _check_init_args(self, emb_type: str, mode: str): def _transform(self, x, emb): - + """Routes to the relevant embedding function to transform the input queries + + Args: + x: Input queries of shape (batch_size, N, embed_dim) + emb: Embedding array to apply to data; can be (N, embed_dim) or + (batch_size, n_query, num_heads, embed_dim // 2, 2) if using RoPE + """ if self._emb_func == self._rope_embedding: return self._apply_rope(x, emb) else: @@ -277,8 +284,7 @@ def _transform(self, x, emb): def _apply_rope(self, x, emb): - """ - Applies Rotary Positional Embedding to input queries + """Applies Rotary Positional Embedding to input queries Args: x: Input queries of shape (batch_size, n_query, embed_dim) @@ -308,8 +314,7 @@ def _apply_rope(self, x, emb): def _apply_additive_embeddings(self, x, emb): - """ - Applies additive embeddings to input queries + """Applies additive embeddings to input queries Args: x: Input tensor of shape (batch_size, N, embed_dim) @@ -361,8 +366,7 @@ def _torch_int_div( def _rope_embedding(self, seq_positions: torch.Tensor, input_shape: torch.Size) -> torch.Tensor: - """ - Computes the rotation matrix to apply RoPE to input queries + """Computes the rotation matrix to apply RoPE to input queries Args: seq_positions: Pos array of shape (embed_dim,) used to compute rotational embedding input_shape: Shape of the input queries; needed for rope diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index f64d6b2..272d688 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -229,7 +229,6 @@ def forward( query_boxes = ref_boxes query_times = ref_times - decoder_features, pos_emb_traceback, temp_emb_traceback = self.decoder( query_features, encoder_features, embedding_map={"pos": self.pos_emb, "temp": self.temp_emb}, @@ -553,7 +552,7 @@ def forward( if self.return_intermediate: intermediate.pop() intermediate.append(decoder_features) - return torch.stack(intermediate) + return torch.stack(intermediate), pos_emb_traceback, temp_emb_traceback return decoder_features.unsqueeze(0), pos_emb_traceback, temp_emb_traceback @@ -561,8 +560,16 @@ def forward( def apply_embeddings(queries: torch.Tensor, embedding_map: Dict[str, Embedding], boxes: torch.Tensor, times: torch.Tensor, embedding_agg_method: str): - """ - Enter docstring here + """ Applies embeddings to input queries for various aggregation methods. This function + is called from the transformer encoder and decoder + + Args: + queries: The input tensor of shape (n_query, batch_size, embed_dim). + embedding_map: Dict of Embedding objects defining the pos/temp embeddings to be applied + to the input data + boxes: Bounding box based embedding ids of shape (n_query, n_anchors, 4) + times: Times based embedding ids of shape (n_query,) + embedding_agg_method: method of aggregation of embeddings e.g. 
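
The _apply_rope and _rope_embedding docstrings above describe rotating the query features pair-wise by position-dependent angles. A self-contained sketch of that rotate-in-pairs idea using standard sinusoidal angles; the repo's cache layout and head-dimension handling are more involved, so the shapes and pairing below are illustrative assumptions only:

import torch

n_query, embed_dim = 6, 8
x = torch.randn(1, n_query, embed_dim)                      # (batch, n_query, embed_dim)
positions = torch.arange(n_query, dtype=torch.float32)

theta = 1.0 / (10000 ** (torch.arange(0, embed_dim, 2, dtype=torch.float32) / embed_dim))
angles = torch.outer(positions, theta)                      # (n_query, embed_dim // 2)
cos, sin = angles.cos(), angles.sin()

x_pairs = x.reshape(1, n_query, embed_dim // 2, 2)          # group features into pairs
x_rot = torch.stack(
    (
        x_pairs[..., 0] * cos - x_pairs[..., 1] * sin,
        x_pairs[..., 1] * cos + x_pairs[..., 0] * sin,
    ),
    dim=-1,
)
x_out = x_rot.flatten(2)                                    # back to (1, n_query, embed_dim)
assert x_out.shape == x.shape
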
stack/concatenate/average """ pos_emb, temp_emb = embedding_map["pos"], embedding_map["temp"] @@ -635,14 +642,15 @@ def _get_activation_fn(activation: str) -> callable: def collate_queries(queries: Tuple[torch.Tensor], embedding_agg_method: str ) -> torch.Tensor: - """ - Aggregates queries transformed by embeddings + """Aggregates queries transformed by embeddings + Args: _queries: 5-tuple of queries (already transformed by embeddings) for _, x, y, t, original input each of shape (batch_size, n_query, embed_dim) embedding_agg_method: String representing the aggregation method for embeddings - Returns: Tensor of aggregated queries of shape; can be concatenated (increased length of tokens), + Returns: + Tensor of aggregated queries of shape; can be concatenated (increased length of tokens), stacked (increased number of tokens), or averaged (original token number and length) """ @@ -670,6 +678,7 @@ def collate_queries(queries: Tuple[torch.Tensor], embedding_agg_method: str def spatial_emb_from_bb(bb: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """ Computes embedding arrays for x,y spatial dimensions using centroids from bounding boxes + Args: bb: Bounding boxes of shape (n_query, n_anchors, 4) from which to compute x,y centroids; each bounding box is [ymin, xmin, ymax, xmax] diff --git a/rope.ipynb b/rope.ipynb deleted file mode 100644 index 593439b..0000000 --- a/rope.ipynb +++ /dev/null @@ -1,598 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 156, - "id": "1bd666a7-0ad1-4ae7-a56e-43429a1228d8", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "import numpy as np\n", - "import dreem\n", - "import os\n", - "import matplotlib.pyplot as plt\n", - "import math\n", - "import torch\n", - "import logging\n", - "from dreem.models.mlp import MLP\n", - "from dreem.models.model_utils import *\n", - "from dreem.datasets import SleapDataset\n", - "from dreem.models.transformer import *\n", - "from dreem.models import VisualEncoder\n", - "from dreem.models import GlobalTrackingTransformer\n", - "from dreem.models.gtr_runner import GTRRunner" - ] - }, - { - "cell_type": "code", - "execution_count": 130, - "id": "a8736593-71f7-4ab6-a594-eb52d2fd94ac", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "\"\"\"Module containing different position and temporal embeddings.\"\"\"\n", - "\n", - "logger = logging.getLogger(\"dreem.models\")\n", - "# todo: add named tensors, clean variable names\n", - "\n", - "\n", - "class Embedding(torch.nn.Module):\n", - " \"\"\"Class that wraps around different embedding types.\n", - "\n", - " Used for both learned and fixed embeddings.\n", - " \"\"\"\n", - "\n", - " EMB_TYPES = {\n", - " \"temp\": {},\n", - " \"pos\": {\"over_boxes\"},\n", - " \"off\": {},\n", - " None: {},\n", - " } # dict of valid args:keyword params\n", - " EMB_MODES = {\n", - " \"fixed\": {\"temperature\", \"scale\", \"normalize\"},\n", - " \"learned\": {\"emb_num\"},\n", - " \"off\": {},\n", - " } # dict of valid args:keyword params\n", - "\n", - " def __init__(\n", - " self,\n", - " emb_type: str,\n", - " mode: str,\n", - " features: int,\n", - " n_points: int = 1,\n", - " emb_num: int = 16,\n", - " over_boxes: bool = True,\n", - " temperature: int = 10000,\n", - " normalize: bool = False,\n", - " scale: float | None = None,\n", - " mlp_cfg: dict | None = None,\n", - " ):\n", - " \"\"\"Initialize embeddings.\n", - "\n", - " Args:\n", - " emb_type: The type of embedding to compute. 
Must be one of `{\"temp\", \"pos\", \"off\"}`\n", - " mode: The mode or function used to map positions to vector embeddings.\n", - " Must be one of `{\"fixed\", \"learned\", \"off\"}`\n", - " features: The embedding dimensions. Must match the dimension of the\n", - " input vectors for the transformer model.\n", - " n_points: the number of points that will be embedded.\n", - " emb_num: the number of embeddings in the `self.lookup` table (Only used in learned embeddings).\n", - " over_boxes: Whether to compute the position embedding for each bbox coordinate (y1x1y2x2) or the centroid + bbox size (yxwh).\n", - " temperature: the temperature constant to be used when computing the sinusoidal position embedding\n", - " normalize: whether or not to normalize the positions (Only used in fixed embeddings).\n", - " scale: factor by which to scale the positions after normalizing (Only used in fixed embeddings).\n", - " mlp_cfg: A dictionary of mlp hyperparameters for projecting embedding to correct space.\n", - " Example: {\"hidden_dims\": 256, \"num_layers\":3, \"dropout\": 0.3}\n", - " \"\"\"\n", - " self._check_init_args(emb_type, mode)\n", - "\n", - " super().__init__()\n", - "\n", - " self.emb_type = emb_type\n", - " self.mode = mode\n", - " self.features = features\n", - " self.emb_num = emb_num\n", - " self.over_boxes = over_boxes\n", - " self.temperature = temperature\n", - " self.normalize = normalize\n", - " self.scale = scale\n", - " self.n_points = n_points\n", - "\n", - " if self.normalize and self.scale is None:\n", - " self.scale = 2 * math.pi\n", - "\n", - " if self.emb_type == \"pos\" and mlp_cfg is not None and mlp_cfg[\"num_layers\"] > 0:\n", - " if self.mode == \"fixed\":\n", - " self.mlp = MLP(\n", - " input_dim=n_points * self.features,\n", - " output_dim=self.features,\n", - " **mlp_cfg,\n", - " )\n", - " else:\n", - " in_dim = (self.features // (4 * n_points)) * (4 * n_points)\n", - " self.mlp = MLP(\n", - " input_dim=in_dim,\n", - " output_dim=self.features,\n", - " **mlp_cfg,\n", - " )\n", - " else:\n", - " self.mlp = torch.nn.Identity()\n", - "\n", - " self._emb_func = lambda tensor: torch.zeros(\n", - " (tensor.shape[0], self.features), dtype=tensor.dtype, device=tensor.device\n", - " ) # turn off embedding by returning zeros\n", - "\n", - " self.lookup = None\n", - "\n", - " if self.mode == \"learned\":\n", - " if self.emb_type == \"pos\":\n", - " self.lookup = torch.nn.Embedding(\n", - " self.emb_num * 4 * self.n_points, self.features // (4 * n_points)\n", - " )\n", - " self._emb_func = self._learned_pos_embedding\n", - " elif self.emb_type == \"temp\":\n", - " self.lookup = torch.nn.Embedding(self.emb_num, self.features)\n", - " self._emb_func = self._learned_temp_embedding\n", - "\n", - " elif self.mode == \"fixed\":\n", - " if self.emb_type == \"pos\":\n", - " self._emb_func = self._sine_box_embedding\n", - " elif self.emb_type == \"temp\":\n", - " self._emb_func = self._sine_temp_embedding\n", - "\n", - " def _check_init_args(self, emb_type: str, mode: str):\n", - " \"\"\"Check whether the correct arguments were passed to initialization.\n", - "\n", - " Args:\n", - " emb_type: The type of embedding to compute. 
Must be one of `{\"temp\", \"pos\", \"\"}`\n", - " mode: The mode or function used to map positions to vector embeddings.\n", - " Must be one of `{\"fixed\", \"learned\"}`\n", - "\n", - " Raises:\n", - " ValueError:\n", - " * if the incorrect `emb_type` or `mode` string are passed\n", - " NotImplementedError: if `emb_type` is `temp` and `mode` is `fixed`.\n", - " \"\"\"\n", - " if emb_type.lower() not in self.EMB_TYPES:\n", - " raise ValueError(\n", - " f\"Embedding `emb_type` must be one of {self.EMB_TYPES} not {emb_type}\"\n", - " )\n", - "\n", - " if mode.lower() not in self.EMB_MODES:\n", - " raise ValueError(\n", - " f\"Embedding `mode` must be one of {self.EMB_MODES} not {mode}\"\n", - " )\n", - "\n", - " def forward(self, seq_positions: torch.Tensor) -> torch.Tensor:\n", - " \"\"\"Get the sequence positional embeddings.\n", - "\n", - " Args:\n", - " seq_positions:\n", - " * An (`N`, 1) tensor where seq_positions[i] represents the temporal position of instance_i in the sequence.\n", - " * An (`N`, n_anchors x 4) tensor where seq_positions[i, j, :] represents the [y1, x1, y2, x2] spatial locations of jth point of instance_i in the sequence.\n", - "\n", - " Returns:\n", - " An `N` x `self.features` tensor representing the corresponding spatial or temporal embedding.\n", - " \"\"\"\n", - " emb = self._emb_func(seq_positions)\n", - "\n", - " if emb.shape[-1] != self.features:\n", - " raise RuntimeError(\n", - " (\n", - " f\"Output embedding dimension is {emb.shape[-1]} but requested {self.features} dimensions! \\n\"\n", - " f\"hint: Try turning the MLP on by passing `mlp_cfg` to the constructor to project to the correct embedding dimensions.\"\n", - " )\n", - " )\n", - " return emb\n", - "\n", - " def _torch_int_div(\n", - " self, tensor1: torch.Tensor, tensor2: torch.Tensor\n", - " ) -> torch.Tensor:\n", - " \"\"\"Perform integer division of two tensors.\n", - "\n", - " Args:\n", - " tensor1: dividend tensor.\n", - " tensor2: divisor tensor.\n", - "\n", - " Returns:\n", - " torch.Tensor, resulting tensor.\n", - " \"\"\"\n", - " return torch.div(tensor1, tensor2, rounding_mode=\"floor\")\n", - "\n", - " def _sine_box_embedding(self, boxes: torch.Tensor) -> torch.Tensor:\n", - " \"\"\"Compute sine positional embeddings for boxes using given parameters.\n", - "\n", - " Args:\n", - " boxes: the input boxes of shape N, n_anchors, 4 or B, N, n_anchors, 4\n", - " where the last dimension is the bbox coords in [y1, x1, y2, x2].\n", - " (Note currently `B=batch_size=1`).\n", - "\n", - " Returns:\n", - " torch.Tensor, the sine positional embeddings\n", - " (embedding[:, 4i] = sin(x)\n", - " embedding[:, 4i+1] = cos(x)\n", - " embedding[:, 4i+2] = sin(y)\n", - " embedding[:, 4i+3] = cos(y)\n", - " )\n", - " \"\"\"\n", - " if self.scale is not None and self.normalize is False:\n", - " raise ValueError(\"normalize should be True if scale is passed\")\n", - "\n", - " if len(boxes.size()) == 3:\n", - " boxes = boxes.unsqueeze(0)\n", - "\n", - " if self.normalize:\n", - " boxes = boxes / (boxes[:, :, -1:] + 1e-6) * self.scale\n", - "\n", - " dim_t = torch.arange(self.features // 4, dtype=torch.float32)\n", - "\n", - " dim_t = self.temperature ** (\n", - " 2 * self._torch_int_div(dim_t, 2) / (self.features // 4)\n", - " )\n", - "\n", - " # (b, n_t, n_anchors, 4, D//4)\n", - " pos_emb = boxes[:, :, :, :, None] / dim_t.to(boxes.device)\n", - "\n", - " pos_emb = torch.stack(\n", - " (pos_emb[:, :, :, :, 0::2].sin(), pos_emb[:, :, :, :, 1::2].cos()), dim=4\n", - " )\n", - " pos_emb = 
pos_emb.flatten(2).squeeze(0) # (N_t, n_anchors * D)\n", - "\n", - " pos_emb = self.mlp(pos_emb)\n", - "\n", - " pos_emb = pos_emb.view(boxes.shape[1], self.features)\n", - "\n", - " return pos_emb\n", - "\n", - " def _sine_temp_embedding(self, times: torch.Tensor) -> torch.Tensor:\n", - " \"\"\"Compute fixed sine temporal embeddings.\n", - "\n", - " Args:\n", - " times: the input times of shape (N,) or (N,1) where N = (sum(instances_per_frame))\n", - " which is the frame index of the instance relative\n", - " to the batch size\n", - " (e.g. `torch.tensor([0, 0, ..., 0, 1, 1, ..., 1, 2, 2, ..., 2,..., B, B, ...B])`).\n", - "\n", - " Returns:\n", - " an n_instances x D embedding representing the temporal embedding.\n", - " \"\"\"\n", - " T = times.int().max().item() + 1\n", - " d = self.features\n", - " n = self.temperature\n", - "\n", - " positions = torch.arange(0, T).unsqueeze(1)\n", - " temp_lookup = torch.zeros(T, d, device=times.device)\n", - "\n", - " denominators = torch.pow(\n", - " n, 2 * torch.arange(0, d // 2) / d\n", - " ) # 10000^(2i/d_model), i is the index of embedding\n", - " temp_lookup[:, 0::2] = torch.sin(\n", - " positions / denominators\n", - " ) # sin(pos/10000^(2i/d_model))\n", - " temp_lookup[:, 1::2] = torch.cos(\n", - " positions / denominators\n", - " ) # cos(pos/10000^(2i/d_model))\n", - "\n", - " temp_emb = temp_lookup[times.int()]\n", - " return temp_emb # .view(len(times), self.features)" - ] - }, - { - "cell_type": "code", - "execution_count": 131, - "id": "fc8aa9bf-7e83-4fa6-892e-8a7703777f95", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# create Embedding object\n", - "emb_t = Embedding(emb_type=\"temp\",mode=\"fixed\",features=1024,emb_num=16,n_points=1,temperature=10000)\n", - "emb_p = Embedding(emb_type=\"pos\",mode=\"fixed\",features=1024,emb_num=16,n_points=1,temperature=10000)" - ] - }, - { - "cell_type": "code", - "execution_count": 132, - "id": "4903f6c3-1cc8-412b-b988-ebeb2757c3b7", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# get sample crops from training data to pass through the network\n", - "train_path = \"/home/jovyan/talmolab-smb/datasets/mot/microscopy/airyscan_proofread/Final/dreem-train\"\n", - "# train_path = \"/Users/mustafashaikh/dreem-data/dreem-train\"\n", - "data = SleapDataset([os.path.join(train_path,\"10-1.slp\")], [os.path.join(train_path,\"10-1.mp4\")], crop_size=64,\n", - " mode=\"train\", clip_length=32, anchors=\"centroid\")" - ] - }, - { - "cell_type": "code", - "execution_count": 133, - "id": "27bfdb50-2eee-4207-8a16-6481a9905e90", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# get a list of all instances in the first clip; this is the format that the model pipeline uses as input data\n", - "ref_instances = []\n", - "for frame in data[0]:\n", - " for instance in frame.instances:\n", - " ref_instances.append(instance)" - ] - }, - { - "cell_type": "code", - "execution_count": 134, - "id": "8ea441b2-b12a-4f10-8821-aef889a063ba", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# get the vector of times using the list of crops+labels\n", - "# query_instance is the instances in last frame (set to None)\n", - "ref_times, query_times = get_times(ref_instances, None)" - ] - }, - { - "cell_type": "code", - "execution_count": 135, - "id": "b863dbfe-d9fc-4ed1-bf97-3f304d3d03a6", - "metadata": { - "collapsed": true, - "jupyter": { - "outputs_hidden": true - }, - "tags": [] - }, - "outputs": [ - { - "data": { - "text/plain": [ - "" - ] - }, - 
"execution_count": 135, - "metadata": {}, - "output_type": "execute_result" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAi4AAAGiCAYAAADA0E3hAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/TGe4hAAAACXBIWXMAAA9hAAAPYQGoP6dpAAB5bElEQVR4nO39e5Bkd33f/z8/n885fZnLzt6kXS2SQNgiGC84WDgEQgwOIBuDKf+oCo7BGCf8YcwlKECwMamy7DKSTcpAImJSdlFAmRC5UgbHztehELEtTAkMERALcLAdy+hirVbSzs61L+d8Pu/fH59zek73dM90z2Vnevf9qOo63adPn+tMn/ec7nm/jIgISimllFJTwB70CiillFJKjUsLF6WUUkpNDS1clFJKKTU1tHBRSiml1NTQwkUppZRSU0MLF6WUUkpNDS1clFJKKTU1tHBRSiml1NTQwkUppZRSU0MLF6WUUkpNjQMtXH7zN3+TG264gUajwU033cSf/dmfHeTqKKWUUuqQO7DC5Xd/93e55ZZbeM973sPXvvY1/uk//ae87GUv44EHHjioVVJKKaXUIWcOKmTxuc99Lt///d/Phz/84d647/me7+HHf/zHuf322w9ilZRSSil1yCUHsdBut8u9997LL/zCL/SNv/nmm7nnnns2Td/pdOh0Or3HIQQuXLjAiRMnMMbs+/oqpZRSavdEhJWVFc6cOYO1O/vQ50AKl8cffxzvPadOneobf+rUKc6dO7dp+ttvv51f/uVfvlSrp5RSSql99OCDD3Lttdfu6LUHUriUBq+WiMjQKyjvfve7efvb3957vLS0xPXXX8//7w9eTTpb640Psrl6syZsHsfwT8eGjQ9sXp+h43TZumxdti5bl63L1mVvuZz57CK//SP/H/Pz80PnMY4DKVxOnjyJc27T1ZXz589vugoDUK/Xqdfrm8ans7UdFS7O7O6Ae9n5Addl67J12bpsXbYu+0pddq2bApsvXEziQP6rqFarcdNNN3HXXXf1jb/rrrt4/vOffxCrpJRSSqkpcGAfFb397W/nda97Hc95znN43vOex2/91m/xwAMP8MY3vvGgVkkppZRSh9yBFS4/8RM/wRNPPMGv/Mqv8Mgjj3D27Fn+6I/+iCc/+ckHtUpKKaWUOuQO9Mu5b3rTm3jTm950kKuglFJKqSmiWUVKKaWUmhpauCillFJqamjhopRSSqmpcaDfcdmtp8+eozGX9o0b9j/ne2Hw/9v9wOPB/1UffH6iZW0xr1D533vP8PG7NTivrZYz7P/+xzHqOA3Or7ovBvsObLfs7X4WBnsRjJp/YsrHledN2HScRjZlGtHzYNN0xeuryylfW12WM75vfSfpqTBsfYKYTdMGzND9s9U+HbYeo9Zh2DT92yi9ZW6s9/a/25Oug0U2HUdnZNPPgjWh7+dt3N4W+7HsSYyz//eLLvvKWvalNNWFy9FknWaS4Bj2JjJeATNugeEHLk6NLiA2z88Paf6z1fSDrymn6Stg+k7ow8ePY9T2j1MgBTFQnkTHvHg37FhVl2OL+ZXzt737tu+EG0+snp0Y+cu9adcNruvANg6ZzVZFyjgn3k3bWezvrU7u/Ws4efHUK46qRcKYJ9XdrMtgETzpMnezDjtZ7m4KiL20F3+cDdvmUcs6DNusVJX+RCql1AT266quUmo8WrgopZRSampo4aKUGupK+bxcKbV7l/L9QgsXpdRQk35fSil15bqU7xdauCillFJqamjhopRSSqmpoYWLUmoo/Y6LUuow0sJFKTWUfsdlOO1rotTB0t9ApZSawJXUx0WLNHUY6U+lUkqpoa6kIk1Nj6lu+f//PfpMGuspNZuT2EBiAtYEUhOwRkitxyLY8rHxWCO9tvPWCM4ELBtDiHEB1gRc8bj85R18HMeFvjb2w6IG0k1jKsa4Gr85bmAgN2lIPMDw+Wy/sOrrh0UJjBM7sBPj5iONiiLYK9X5J6Z/f4Rtso2qRuUHVQ1v4+8H8nn618EiW+ZS7TRyYFjUwDj5RZuWMUHkwKTRBpPEDYxal93GDYzah3sZNzB0um1a7+90+YPz2C5e4VLGDajpcim/EzfVhctDS0dphITEBVLnSW3A2UBqfa+QSSr3hxU1ifU44riyqCmf6ytqTCAURRDFG8jgL3H1jXirTJ7eNGO8CXixpAxk8pgtspOKwaiT+raFTZH/E8SS9nKIts9NSgdygyYtZIIZOEHIxgnGVbapeuKx9OcaTWrYScxt2rcbx8gysP+2WOQkv8TVaQeLg8Fgw8ECY9t5j3miKacLvZ/t8fKLqs8Nn++YAZNGtl3WsBPrToqWYcsbtcyDsFUBcSVdAdGcJDWK/lQopZRSampo4aKUUkqpqaGFi1JKKaWmhhYuSimllJoaWrgopZRSampo4aKUUkqpqaGFi1JKqQM1Tt8jpUpauCillDpQ2zW+U6pKCxellFJKTQ0tXJRSSik1Naa65X/2jQX8bB1JIDiQROLNCVRuxgnWCdYGrAtYKzgX2/wnzuOskDqPM4KzgZr1OBuKqABPzXkSU0QJVKIDyriAxIZebMBg9lHZ1r+adzSYfdT/fKXF/MDnvn2ZSAORAm6g1b8djAkomdFt8quZSt5Us4Iq+UVmc/v/uNzhl3rHadk9GA9QjRAYmZdkBrZhF1eaq9sKG3ED1WVCGTlQRiKYXVX9ozJzGNxfm7Zr8pbv43x/oLxUv1W8wLD8omEmyUvqy2Ea0vYfhucWjZz/hFlJ27X9Hye/Z7c5QZPkFY27/EvpUuUkKVWa6sLlyN8JZhZCAiE1SGIIaVnExPGSSHzswCdCPqqwSQLWBpyLRY2zgcR5UhcLk9TFzKPUemrW92UglQVNHG4EOW5V0MBAmKO4Ig9pIwfJC32BkB47NAOpmnk0KiNpWHaQG/E+4bH9hY/xvTf4lEqWj/G9E08cP2SGlWlG2fJkVjl5uYFxsDnjCCbPSbL4kXlF1ZNbdbrNmUY7s/mEOlA4bJNjVDXuCWxwntXX9gc8hi0Li03z3UHA47BlwPgZSX3z2kFe0nbbBFufVCcpGkYWqxMuc6xl7TDgcVSw5GHIDNKAR1U6+J9GpZRSSqkxaeGilFJKqamhhYu6rEz6MZFSSqnpou/y6rLi9uBzcKWUUoeXFi5KKaWUmhpauCillFJqamjhoi4r+h0XpZS6vOm7vLqs6HdclFLq8qaFi7qs6BUXpZS6vE1159x0PWAldsYNSeyOGxIIidnonOtMHFpiZ91EkKKTrjhBkjj0xS0ruunaJMSOujaQJAFXdNBNXNFRt+ymazaGg910LRJjAYxs3C866ZbddUfFBJQddXtRABLvZ7hNsQDlyXpU19xJVedT7SALsetn33KqTShHFQ3bXQXZ1OZ+yPTVaarPj/PasVQjDja2Mbb539j2kfthl4bt5/LYl11Lh7
WjH8dWXVS3a3E/VmfbHXTMHZxm2+7K23Q83UnX3J0uqzRq/0/a7n5a7EXn2mlxWLoFq+GmunCZ+84aicvBGMQasHEoziLOFEVLMUw3HsdCx2wUOK6IC+jFBFSHQpZANymKnCIPCSeQhBgV4ATrAkniewWOs0UOkpFeXIAzoVfYVIucMvvIGumLDUgrw7Ko6d0vowJg030YP/Oob/yQN6bBaQfb3Kf4vgiAqmpMwKC++fTlJ9ne9B7Te658E6nGCgSxQzONhtnuRDQqRmBkbtFAvtE4bdy3Xv5WsQO+EjswXv7TpvlvddLZIh/JVwooKsseVRBsd3IbPNkPZiQNzn+r7KItC7IJW/GPk1lUXd9Ji5bq+oyzvHIZ+52TNK7DWrRoTtKVSUtKpZRSSk0NLVyUUkopNTW0cFFKKaXU1NDCRSml1GVjnC9Wq+mmhYtSSqnLxuX6X11qgxYuSimllJoaWrgopZRSampo4aKUUkqpqaGFi1JKKaWmxnQXLgYwJt4AqdzfNGnZ+LHohGpEMAJ93+MSNsZVbibOfNN4xCBi4l0h3i9uoXwMxWPTGwaKYXGD/m/CD3ZGrXbZDBi8WILYvi6ykxjsftsbv8MW13vZ+n77ZR3OL97tpLX8fsxjr40bKzDtDuO+V0oNN9Ut/y8+bZ4adVxXcN2AzQSbBUweh3SERARCUaQUxNCLByjjAiSJwzIioIwJCL0ogOJ+OS4tcpHSIvcoAZ8KmStjAcosJAErkAjGCcZtZCA5JzgXcDbeEhdzj8qYgLSIBajZvC8qIB0YOoqYgS2yj2BzNMCwmIBMXDFOeuMGbRUNUC1+3BZ1VUrRqn+wiOq1+DeV1v+2r/V/b7wZXeyVtivuytcNiw4IGFLjN8aZjQIzxW8u9CqLmjQCYGhWERv7otyXMTtpY/wok52IB47nwD7oL162L1InKXYGW+lP0vp/0KSZSYM/M4Ot+Ee14R86/wnXaS/a/k9aVO5l1MBW6zDOf/WMmwd1WDKD9iLyYNxtVts7+J+I3TCVG8VFkcErLmXREi+JQHmlJQgmVMYVj42XeHVG2BgXqAwH79ObnhCv2Jhg4vt7eV8MBNO7KlOs6MYqQeVKzcZt47EdeZLqXbHZ4o1m1JWU8qS+kQO0+x+HYUXNltNPeLVm1BvIqPHbXaEZ5w1p0m2K85WJioedXLUaNv9Jl7vtMg7JFS69IqKUKk134aKUUkqpK4oWLkoppZSaGlq4KKWUUmpqaOGilFJKqamhhYtSSimlpoYWLkoppZSaGlq4KKWUUmpqaOGilFJKqakx1Z1zl24w1LC4LriOw3UE2yV20s0Emws2E4wHkwesL5rOBTYa0VG0+c9jv3+XGyQXrDWIpa+LrrhKJ13HRvfcxCAJRVfd2Ek3duCNXXRD2U3XOSQRvBO8BZIALnbUtUU3XesCzsUuuonzpC52xU1d7KI7rJtuan1vaBGsCaTG95qRjdVRV+LrMhwOwRYN0cpOj30NwPa4F1i1+dpgB9m4vP7ny6ZxloHup6MaxW3XeXOc15XTjLM8Npr+jds4LYjZ6D6M7e+gW7lfnWaS+W9lsDvtpu61lY6fg51uq+O2Xc7AD061i+iwDrrbdbYdXP+hy9ziGI2zjP3sdjpqe3Y1zx10FT4sJunYq65sU124JN+7TMcIvuUwLYdrWVybOOxQ3CQOy2KmWxY0AZvHeACCYHzAlh12q0wRAVBGBDhDcHajkOnFAlSjAWIEQDm+vB8LmqKYSUCs68UDBCf4ajxAErBJEQ2QhF40QC3xsaApYgJq1seipmj/Xy1mLBIjAYyQFIVMWcSUhc2wYqYsZGAjGqAsZByBgOt7I9yu6+u4nWe92OHzMhsnF4vve7N3JmycxCsvCX3RA35jGUNOQtUCyBnfm6Z6YnPF+lnjN07ijO5K3GvLP/ZJrzKfSqFW3fY+Mnmn3a0MFkvQX8CUJ8TBAmYr23XdHZxnOd9R7f/jOKms3+67Io/TUn674mUvi4VxW/DvNENqP4qlS2EvWu5Pi8MSc3CY6d5RSiml1NTQwkUppZRSU2PiwuXzn/88P/ZjP8aZM2cwxvD7v//7fc+LCLfeeitnzpyh2Wzyohe9iG9+85t903Q6Hd761rdy8uRJZmdneeUrX8lDDz20qw1RSiml1OVv4sJlbW2N7/u+7+NDH/rQ0Off97738f73v58PfehDfOUrX+H06dO89KUvZWVlpTfNLbfcwqc//WnuvPNOvvCFL7C6usorXvEKvPdD56mUUkopBTv4cu7LXvYyXvaylw19TkT44Ac/yHve8x5e9apXAfDxj3+cU6dO8clPfpKf/dmfZWlpiY985CP8zu/8Di95yUsA+MQnPsF1113H5z73OX74h394F5ujlFJKqcvZnn7H5f777+fcuXPcfPPNvXH1ep0XvvCF3HPPPQDce++9ZFnWN82ZM2c4e/Zsb5pBnU6H5eXlvptSSimlrjx7WricO3cOgFOnTvWNP3XqVO+5c+fOUavVOHbs2MhpBt1+++0sLCz0btddd91errZSSimlpsS+/FeRMf19AkRk07hBW03z7ne/m6Wlpd7twQcf3LN1VUoppdT02NPC5fTp0wCbrpycP3++dxXm9OnTdLtdFhcXR04zqF6vc+TIkb6bUkoppa48e1q43HDDDZw+fZq77rqrN67b7XL33Xfz/Oc/H4CbbrqJNE37pnnkkUf4xje+0ZtmXPU0I63l2LpH6gHfCPg6+KbgGxQ3g29A3jDkdRMf1w2+ZvF1R6hZJLFI6ghJcd8aMPHWazIpRTRAACOC8YIJxC68vrzRGxoPpryfg83jOJuDyU0chnjf5OXQQDDgDZJbQm4IweJzi/cWHyzd3JF7RxaKx8GReUculjw48mDJxZIFR1aMy4IjF0ceHB5LEEMmLt6C27gvDi+WILbXPba8X3ZyLLvUVrtv+m1+jLzY3m0nBrvpDnYkHdZtd1SnTYfghnQ6rU5ffX7YfKqdUrfrCmyR3m1cB92efU9iBPY6F6I67yLKYutpdt5pdZLt38/t3CsH/fOk1F6b+L+KVldX+Zu/+Zve4/vvv5+vf/3rHD9+nOuvv55bbrmF2267jRtvvJEbb7yR2267jZmZGV7zmtcAsLCwwBve8Abe8Y53cOLECY4fP8473/lOnvnMZ/b+y2hcP3XDl7EzTdZDjaW8ycVshqWswVKnyXK3zmq7zmqrRt5OoGOxLYtrW1zbxBiA9kAkQHGz3TISQDB5iEWKD5hM+iMByuKmEgkgie1lGlVzjoKrRAC4MuMIpBIXIEUGUowIECSNj30i5IkgSYwDwJWRAAFrA0kRCZDYjYwjZ6SXceRM6EUBlLEAZRxAaooIgEpEQBkHkBo/OhKgyEQqT/LliaJaRAwrMEYVL86ErYuAgfdeV9RNG+3+N/6Vviyqhs1vWDxAfI3tRQN4TO/56vjqybBXyBUxAMNsat0+dKr+eWy01C/GVyIOnNmcUbRn7ds35UFVd3hlP+6w+BxVSFTjBOKSzNDconLanSyjVG2TX22hv120AAxvxb8Xrf6Ht
eDfLmJgVLv/nRZR48YMjLMe+1HIHdZ2/zuNXRich+YzTW7iwuV//+//zQ/90A/1Hr/97W8H4PWvfz0f+9jHeNe73kWr1eJNb3oTi4uLPPe5z+Wzn/0s8/Pzvdd84AMfIEkSXv3qV9NqtXjxi1/Mxz72MZwb9da+S/oXh9pDhyVHZBozZ9TO6AlOqQ0TFy4vetGLkMEgwgpjDLfeeiu33nrryGkajQZ33HEHd9xxx6SL3xn9hVd76LCEoE1rYJ6anBYt02EaPjq8HBz8u++loFdclFJTbC8+llD7b/w0eLUbV0bhopRSU0yvuCi1QQsXpZRSag/oR0WXhhYuSk3oMHy/RSmlrlT6DqyUUkrtAf2Oy6WhhYtSSimlpsbE/w59mFzMZ2iEFC+Wus05mq6TWk/NeppJxlyty0q9zno3pd1OyWcSsrYjb7teEzrbKZrRdUxsRtctm9FZbFZpRucFk8dGdLGLrkDY+DzTlPfzgBGDCbEhXUgMxhusM0XTOSka1EFwhpAItrgvvaZ0RbO6DEwqSNHMLhRN6CQVJDP4xBKSQPCCtYG8aERnres1o3PekdqAs7EJ3bBmdIkJdIIjNYFELLlxJNaTY+NrCL2GdAGDFUswAYshFI3ovDgcQsBuNIyS/mZ0gx12q83qysZ0o5rQOTO8eZ0jbJpvtQHY4L8L9y2TasOxjSZkZVM9j9k0ftM4s3kbevMc8Xn35mZfG9tWbQZWjq82myvXf7AR3U70NVir/Gv14D6tPjfJv4LvtnHYYO+SrZY9SfO5YfMeR/XY7OS7DKMa6Y3zb+3ax2U66HdcLo2pLlz+eu0U19S7HEvWWUjWmbHd3ht7JgltSVj3dVZ9neW8wUrW4GK3yXKnwVqnRqsTCxppOUxZzJS3orNu0paioNnoquu6AZsHTB6LF+MDJkj/6aiMC7AbXXVxhuAsYik67FJ01a121C2KmnSj025IN4oZSSCkUnTYlWI6wTshS0LsqusE62IxY10sZsrOus5Kr7Nu6vymYqbm8l5Rk1aG1gRS4zcVMoMddQGcxGImw2101q0UDL3iwWwuZKon/8EiZmgnXLFDW/7DsA6wA2TI66oHsXqCKZZt2TjxOON70QgwfofXod1nzYjutH3bXBk/bN0L20Uw9OY2UOCNUyDF6fauk+ngPivf+DeKt83Fy7i2+xfi7QqjUV1ttzLq56263uMUf6O62W41/bjrs5Nuvftht8vUfxG/culHRUoppZSaGlq4KKWUUmpqaOGilFJKqamhhYtSSimlpoYWLkoppZSaGlq4KKWUUmpqaOGilFJKqamhhYtSSimlpoYWLkoppZSaGlPdOffLX3kaXJWSzGbMzbY5OtPiWH2dY7UW82mbOdehYTNOpqtcXVsmiCUTRzukrPo6a3mdlbzOcrfBarfOaqdGq1Oj3UkIrQTTtriWjZ10W7bXTdd1hKQXDxBiLEAeowFMLrH9f5BeLEC1q661BjEGDL1uumJN0fLfVrrnmo0uupVuulLpptvrpFt00xXnYiSA2+imS3EzTjBJwFrpddO1VmIUgPM4GzvplvEAqfXUnCcxG7EA9aKrbuyiW3bV9VgjvW66fZ10qx11hY0uuibE+7LRUdQRCLj+Tp9DGmNu1U13sOX+qI66sEVX3UpHWmc2Wt+P6phbHb/xOl8sY3hX0GEddl1l/W0RrRDXZ4wuultswzi27DBcmVd1uq1a1E8SQ1CNEoiPh0UfFBEMY7a8n6Sj6k665w6z1TaP0+133OUMfe0EXXMnsVXMwKRda8fdvt1GG4zTcn/cjr2TxFvsp73oVH0QnZH301QXLumSJdQducCaEYwREhuoOU/d5QTbxRJb06fGY238QZyxXWZsl3XXoekaNF1Gw2XUkwYriWfV1WlbwdsEb0GMRYxAUXBgyh8AgeovWTBYAUTiJMXvkCnGlS8xSHyZszHvyMb8HwkBSQyEeI4yUhmKwQgEqSxTIAC2eD6kxZtDOY0Uiy2GIhaSgGARAeeknA0y4mQXrAHyeJ70CbicBMigWDg4CfF5KUaYEE+45bBUFCyheK68b4sCwRH6Ws+X48ZVFjHDMo02TTsys6j/Dbaa21N9Iytzi8rxMKSAqWQeDTP4umF5TNvlGA2f74iMpFEnoSF5T9vZ7UmxOp9R67WTN9udtIHfq+JlmFEnncNyUlRqGulvjlJKKaWmhhYuSimllJoaWrgopZRSampo4aKUUkqpqaGFi1JKKaWmhhYuSimllJoaWrgopZRSampo4aKUUkqpqaGFi1JKKaWmhhYuSimllJoaU93y/8Q3PfIQZLMJ2VxKa26Wv5s7zt/Me8xsTnO2w5GZNgv1Nsfr6xyttZh1HWZcl7rJWUjWWUjWyUJC1nSshxotn7KSN1jJ6ix1m6x06qx1arRbNdbbCablsC0T84vaFteGpMgvch1wXcF1BZvFm8kDNheM35xhZPOAFCkC5KEvw0gSGzOMUoO4eAtpmV8khMTgU4qsooHsIldmGAmSgDgQJ4SkHAreClkiGBcwTvoyjJKkyCdyoZdflDqPM6Evv6hmPan1fflFcRjzi7bLMIIit0gEawIZDodg2b/8omEG2/9vamdfjUOodn4fnPeoTJEt1sFjhreFH5VRNDK7aMi8B5a7VRRAuQ8G84iq96vT7NSweVRb6k+aWTRui/9hGTbVOIGdtv3fbUbR4Lyqy9jPfJndxBiMnOc+ZSZdCrvNSLpcTEsUxVQXLo0nupClpC1HtwhDzLoGmzmyzLDuDd5bfLBI+cZYi9lFdZf3MoywXYJY5qVN26UcSdosJw1mki6zSZOLSZNl51lPanSTFO+SXiCiWBNDDZ1BrMTHJsYQiQVrLBB6b9NGpO/ka0LxIAjGxNcaa5AgiLOYUBYtNoY1BlPc4nnLB1OMBxMMIYBJiMPisSSChJiBJAmVYSxw4rKEMlJJxCCJj0MxBOcJYkhdDA/s5RcRT2jeelJjYm5RL3xQcBIIxmy8oRXZRZ540hgcX70/SX6RF7upeCkNjt+ukNnuxDoquyg+NzybqHoC2y7PqJznOBlF253Yhu2TYds/evuqBcXwabYzKmtqcB6TZBZNHPA3RvDeQdiLk8ReFguXWxDfXtmLkEO1tw5/aaWUUmrfadGipoUWLkoppZSaGlq4KKWUUmpqaOGilFJKqamhhYtSSik1wjT8l81emZZtnY61VEoppQ7AlfRfRdOyrVq4KKWUUmpqaOGilFJKjTAtH5/shWnZ1ulYS6WUUuoATMvHJ3thWrZVCxellFJKTQ0tXJRSSik1NaY6qwgLxscwQ5cJrguhXQYRWvLE0bUpqzYG/pkyMA6DxxIw1G0W84oKzgRcERRYt55GktHwCVnNkgdL8JasXmQCedvLCCpzg2LYT9E6W2L0TkwqClhTrDQBjAEfMFu12RbBFNlBJghiDMaDKbbbWIP1EEw5Ls4WUwwrjykHhmLZIEbAbzwp3hCwGBPw3mJMkY1kYvCeDUUAXwBrHIS4OVaEYIQ8OLAecCQUK1oEFMZ24haKkMXeuEpGUV9uzw7yiqrH
cD9tlVcUn5dNeUWlSfJpqrlEo0yaLzNpAOXI+YzIKxqVTTTJPGDvwt72M6doLwMWDwPNKlLTYqoLF1931NuedDWj8WgMJZTEks848llHd9aRzTmy+QYX5+Z5bF4Ic550rsvcbJtjMy1ONNY4VlvnaNpiwbWYcR3mXJtr0vjGk0nCeqix4hss5w0uZk2Wu00udpost+ust2u02ymynmDbMejRtQyuY3CbUqPBdUMlOVqwWYghiT7EoMVqgrQpAu6sAWuKQEeLJKZIgLZFMrQhlCnRiSlSoInD6rhhydE2hjD2JUc7gSRgk5gc7SZMjbZGSEwgtR6LkFiPM0JSpEY7AokNWyZHV1Ojy4hKS+idGKup0VsVMaXyhL1dUVO+dtMJ2AzMc1RidMEy+otuzvjNhc02qc+usm6bnp/gBN8fVlidT2Uefds2fN7VtOjdGAx1LOcdH28UL33F7jbGLVYG5zlOQvRY852gaNkuhXqrYuJSpDEPS02eJOSyui7DgjS1WFI7oR8VKaWUUmpqaOGi1BVkPz86UZObln8/Veow0d8apa4gelleKTXttHBR6gqiV1yUUtPu8itcLr8tUkoppVTh8jvNT99/ISp1yehHRUqpaXf5FS5KKaWUumxdfoXL5bdFSu0Z/Y6LUmraXX6nef2oSKmR9KOiw2UaO+wqddAuv8JFKaWUUpctLVyUuoLoR0WHizagU2pyU51VtHY6JXRrJOsB1wkYH9+UbS64VqAmYL3B5mC7Bts15B1D3rEsthPW2zWWZxosNmc4Vl/nWG2dI0mbOdehUYQv2iJwccZ2IYl5G6kJJNaTOk8t8awmgXYS8GmCpC5mA9WKDKGawdcgaYNPwaU25hZlgu0KkpiYWeTjUIrcIiNFYCPEoMUgxUX+AGKwYhGJ942YXtCjCTHnMCTxcQgCobwPJimG3iC+yCySGN4YkmL2CRAsIRhMEpchwSBiCC7gQ8AHg3eBPFhS5xEx5NaSSCAxnmBNLxgxYEhMwBtDagLBGHywvcyi6nSp8X3Bi4iNWY7F/fKTjmrwYjWob1R+zrghjKOCDQcDAau5K+UyBwMDy48Bhp2c+gIlB6athjRWs2zKdRvMd5k0OLFavGzk9Gxs9+C2VbOENnKENufObLnMgXyb6mu32reThC3uRVG2XV7Rtusw4Uc/exUmuROTHkN1eRmWQzUtprpwuXBWaK476ouO+sVAbTWQrnpc25OsZQCIs4SaJW86slkbb3OO7rwjm6/xxNwMj88doT7X4chsm+PNdU421jheW+NYus6cazNju8y5NgCZODr1tBe8eDFrspTF0MWlToPVdp12q0a3nUDbbgQvtk0MW2ybgeDFeLOZxXYDNhdMbmL6sw8YIaZEBynSnmP0s+QBsQZrY9iiuBimKC7eYvCiIRRhi72QxYRi2koIo6s8l5gY0JgKkgiSWLwTfCLkScA6wTqPc0lf6GLiYpBi6uI4Z2JRkhRFXnk/FoJhI3hR+oMXM1wMZ5TB8EVDQLAYHEKgSI+WjcJhsIiB4YWMrxQCw4wqBAYLlOrJeFTooC/Wc5iySNmY1mxMWyy3Oi6I3Vi3gZNdtQDZymBhUw0aLF/rxY5dvOzUYKDiuMXLboqT6nHoCzbcVAjurHjZrmgpwwm3O1mME7R4UFfORgUsXur1mSTocTcO63eQLtX2H1Z6nVIppZRSU0MLF6WUUkpNDS1clFJKKTU1tHBRSiml1NTQwkUppZRSU0MLF6WUUkpNDS1clFJKKTU1tHBRSiml1NSYqHC5/fbb+YEf+AHm5+e5+uqr+fEf/3G+/e1v900jItx6662cOXOGZrPJi170Ir75zW/2TdPpdHjrW9/KyZMnmZ2d5ZWvfCUPPfTQ7rdGKaWUUpe1iQqXu+++mze/+c186Utf4q677iLPc26++WbW1tZ607zvfe/j/e9/Px/60If4yle+wunTp3npS1/KyspKb5pbbrmFT3/609x555184QtfYHV1lVe84hV47/duy5RSSil12Zmo5f9nPvOZvscf/ehHufrqq7n33nv5wR/8QUSED37wg7znPe/hVa96FQAf//jHOXXqFJ/85Cf52Z/9WZaWlvjIRz7C7/zO7/CSl7wEgE984hNcd911fO5zn+OHf/iH92jTlFJKKXW52VVW0dLSEgDHjx8H4P777+fcuXPcfPPNvWnq9TovfOELueeee/jZn/1Z7r33XrIs65vmzJkznD17lnvuuWdo4dLpdOh0Or3Hy8vLABx56kVWc0N7sUZ60VJbSqgtO2orQroecO2AzWLOj2vHHKCkJaRrhnTFks0a8llHNufI5xIem21yYX6Ov59tc7TZ5nhjnRP1NY6m68wlHWZsl7rNsCYw59q98MUjSZuFtMVSvcnFepPVZp2Vdp12JyXrJGQtR952A3lF4Dr9uUU2s7iOYHPBZgGbB0wuEPpzixDBeOJ4a5AgiDUYbxFriuBGg03K/CKJuUUJBGcIiWATeuNsmVXkDCEFSWJekiRxWilvzuCTcl6Czy15EnAu4KwjcZ6ud6TV3CJTPGdj+GJiA7kJ2JCQWk9iXC+/KJMYaJmJKwIuhdR4AjFUMlQzi0zAi4tZP0WMSzW3CDZnFw1mCW2XWVQ+N5jvMyxUcTB7Z9gytwthHBauWA1jHJZZ5LfI8BnMtxm2rdXXDwYuVrepus2jtnUnRmUiDT63mzDCYXkzg/Mblge022VUVbNldhvkuPV67C7DZth+uNRhfDtd3pWefH4lbf+OCxcR4e1vfzsveMELOHv2LADnzp0D4NSpU33Tnjp1iu985zu9aWq1GseOHds0Tfn6Qbfffju//Mu/vGn8P7nmfs6ZNR46dpQnFufILtTIFh35oqF+0VBbMaRrAdf2uHYOAomBtOao1S35jCObMWRzhu68JZ+zZGuWlfmE1lyd1bkaKzN1rmrWOFlbw6frWBNomJzU5MwkXYIYFpKUY0mdpbTJ8VqDi90my40Gy50GK+06rUZKt52Stxy+7XD1onhpg6sZXI1eIROSInjRgWQGawMmC4AFHzAUqdGheHsJBmOKcMWigMEbjLOIjwWMOIMtC45e6KLBOundlyQmR/eSpZMYtGi8QXKzEboYBPEG74v5BYt3HudsTHy2MTna2fjm44zgbKBmfV96dGIDuVgSE0itj0WJCQRjSWz8yNAST1yp9b031GoBE4uW0AsctGbjxFemR8fx5cl/47kqXw0vHGKwSOiN36KAgfGLmMFk6Pi82TR+VGL0sHWD8YqZvlToIcVLuU2jAhd3qi9EcB+Kl92G401SXExStOxU9djs5AQ1KqlcE6LVNNpxmf+Wt7yFv/iLv+C//tf/uuk5Y/p/EURk07hBW03z7ne/m6Wlpd7twQcf3OlqK6WUUmqK7ahweetb38of/MEf8Cd/8idce+21vfGnT58G2HTl5Pz5872rMKdPn6bb7bK4uDhymkH1ep0jR4703dQlduVchVRKKXWITVS4iAhvectb+NSnPsUf//Efc8MNN/Q9f8MNN3D69Gnuuuuu3rhut8vdd9/N85//fAB
uuukm0jTtm+aRRx7hG9/4Rm8adQjp1WR1APRjDKXUoIm+4/LmN7+ZT37yk/z3//7fmZ+f711ZWVhYoNlsYozhlltu4bbbbuPGG2/kxhtv5LbbbmNmZobXvOY1vWnf8IY38I53vIMTJ05w/Phx3vnOd/LMZz6z919GSikF+h0MpdRmExUuH/7whwF40Yte1Df+ox/9KD/zMz8DwLve9S5arRZvetObWFxc5LnPfS6f/exnmZ+f703/gQ98gCRJePWrX02r1eLFL34xH/vYx3DO7W5r1P7Rj4qUUkodAhMVLiLbn72MMdx6663ceuutI6dpNBrccccd3HHHHZMsXh0k/aNXHQC92qKUGqRZRWo8esVFHYDd9iVRSl1+tHBR49E/fJVSSh0CWrgopZRSampo4aKUOrT0Oy5KqUG7yio6aE3b5fqZRWaSjEcbbR6dm2N1oUn3aI3ORUttyVFbttRWHUkrZhe5bgAB143ZPzazuK7BtS3ZuiFZt2RrhmzOsTiXsjLX5MLsDI/OtDjRWCuyi1rMuzZ1m5Eaj0No2AxnAg2b0bRdZpMuM0mXmbTBSr3Oaq1Oux6zi3qt//uyi4oIgDLDqGti6/+uwWYWmwVMbmNmURCMLz77H8guwlkQsCEgwcQso8QQvMHkYNKirX8eW/8bb7B5bP1vytb/KRgfn5MEQiKYYAi5iW3/00rr/14OUsB709f6P/eWxIUYA2DjuMQGnCmziwKJ9Zta/wcMwZheVpEPdiOzqGh9ngIUeUWbsoug99HWuK3/d5pbVJ3P5iyiydr/97XynyC3aNh6D1tP6G8XP7S9/4Rt/3dqMO9oq1iBsEet94dNv9u8or1q979pXYb8G/h2rf73+/tAk0YXHObvJ+02m0odrKkuXGZdh6sa63xX4zFW5hqcWzjCA8eO8+DyAheXZuleqFFbdDG3aKnILlo1JC2P7QZs25MYqKUWX3fkM5ZsxtKdM2Tztrg5Lh5JWZurszxXZ2WmwYnGGifrqxxL15lzbZzJmbExBHLOtVlwKcfSdVZqDVbq/dlFq50arWaNbjshbyX4tsW1bSxa6pUCpg1JB0LqcN2A7Zq4zrnB5AFjiuDFanaRMZjgY+CiNYg3vewiaw3BGWxuEVcGLJriPr3gxJBQBDRCSNkoaHIwaSxWggdJzEb4ojd4bwjOEpzgbcC6GL6Yh1is5FZIivDFxAZS58lNIBFLEvoLmDh0RSFjYtFSDK2pZLaUGUN9gYvFiaQ8CVTfmyQ+t10Bs5Pcoup8hp3UtytiqsVLnGZzoTKqqNmYbyVQcoxCpjwRBkzfdg2OH1W89Oa9iyKmP4to/OJlr2xVvGyVVzTOOmx1or+UwYV7sb+22pZJspNGFWTTVkTsRf6U2jn9qEgptWO7vfKilFKT0ncdpZQqTNtf/pPay6tVSh0ULVyUUjtW/chIHX6jvh+k1DTRn2KllFJKTQ0tXJRSO6bfcVFKXWr6rqOU2jH9qEgpdalp4aKUUoVJ/rVXKXUwtHBRSqmC/leRUoefFi5KKVW43K+46H8VqcuB/hQrpXbscvtyrl5xUerwu7zedZRSl5R+OXe66BUXdTmY6qyi3//O93H9NW2eNHORE+lazA460uHamYucOzrPuZNHeGJlluWLDdzFJIYuLjlqKwm11UCyHoMXrQ+4Ir8oWTXUlmNuUXfWkM2VmUUJi/MNFufneGiuw9HZFscaLY7X1zheW2chaTFju73gxbkihHHetTmarLPq66w0GqxkdZa6TVa7NVbbdTrtlKydkHcctmVjVlHLxODFtiFpC67jcB3BdS2uK9hMsFnA5gGTC4Qit6jMFxTAF+OLzCJT5hflgjiDdTG7SJwhpAbphSXSf0uLoMUUQtcQUno5RuJivlHMLIrziKGLFuMCuROsDTgnOBewtghXdAHnHakNveDF1PoieNFTc/2ZRbn1WITM+l5ekTcxU8iKxCFCMAFb/MW8VfCil40TrjWyKbPIb5H5Mzh+q8wi2D63CGJ20WDO0WBm0WCwoqt8pDEst6icbqvt2DZYsRjfl2M0EI44uL3jqu6XneQV7cSovKi+DKKB3JxheUXjGCfLZqsspHJ9x93mcQMNtwqp7E0zhdlB6soy1YXL0uIMj8ylNFzGnOvE4iHpcjJd5Wi6zkLa5qH0KI+k86wkM3RcCsYCBsRiAhgv2AxMHmJBYA0uC9jMYXOHzS3GG0wwmGDJJKUlBqm80TojOAI2iWnGzsQTbGo8DZNTtxl1m1O3OTWb907UzghrNrBuhcwJwTjEFqckY+KJ1hjECGx6IylSoAmYHBCDBIlFi8Q3sXi/uCOmdzNiCGKxQRCxGBFCauK0UoQ2SvEY6W1rea6Mj4UgYMX0TltxcoOIIAJGBJwBCYiAcxuvF1cUGJV9GcSAAzxxGOJmEiC1njw4sJDiyShmZkIMVCyHperjgeBFa+IJsUyOroYuTnoSHpUY3Xt+i+DF0jjJy8PSooc91z/f/gTprdZ9u+Jlq3Xeia0Spve6eJk0pXi3J+7DFMA3TqG1F0XhpXSY9u+kxv3ZGqfAvJLpnpkm0/Peoi5z03SiU0pdXrRwUUoppdTU0MJlmkzvFVJ1mZn04xel1PQ5rP+FpoXLNNGr80opddk7LN9vOSzrMehwrpUaTv/IVYeEfsdFqf1zWK90HBZauCilJqYfFSm1fw7rlY7DQvfONNE/ctUhoVdclNo/esVla1q4TBP9I1cdEnrFRan9c1iuuBzWAupw7B01Hv0jVymlLnuHpWA4LAXUoMO5VkqpQ00/KlLq8ndYCqhBU93yf+6bdVYuHuHekwv85YlTnF5Y4UmzF7mqtkrTZVzTWGI+bXNVc5VH5+d59Ngcq0tN2ksptYu2yC6y1FYS0rVA0vLYPIAXknWP6wZcy5GuW7IVS3fekC07snlLez7l7+caPD43x5HZNseb65xoxNyiI0mLGbeRW5Qaz7xrkxrPjOvGeIK0zkqtwXIj3lY7NVrNGt12Qt5K8G2La9leZpFvg2sTs4w6QtIRbGawXVvkFllMHjBBMKHIKRqVXRQM1sd2/OKF4AzGW2wvr6iSWZTHPCKTg01iVlFIwOSmyCwSjI8t/CURQgI4QbxBrOCTYv7O4q1gXcC5QB4CNnckzpO6QBYsqY2t5p1x5OKH5BYFAoYcR2I9wcR2/Y4YoWARMnFFhpGN2UXVvKKB1v+9K1iyfW7RqMyi8rmt2v5X5wej84uqbe7L6YblEw3mFsXX9H90U40AGPbmEwa2y4vFFvMo84nGySzqn+dkxcxgO//qPHba9n/8zJ7++WzVYn0wU2grB9GO/jB+bLfbdZpkn++V3S7T6mf5Q+1H9tV0Fy4Pe7IArU7Kej7L34f4hpqaQNMtsZCsczxZ42S6yvHaGgu1BR6uL/BEbY5OWickLp5wLWBiZo9ZjwGGJvfQAdsNRcihw2UW1zWxYMgMWZ7Q9YalYPDBENi4QTxgzsb8orQoYhriqJucpivyi1zML6onDVYSz6qr03aCdwneglhbrJ9BrOlFDmFiDFDMM4qBPhYLeTy5GN
icXYTpFTA42xtaAQkBSQyEeH43AkZM3CdiMAKhmmEkRZRQ8VgkngisCBKKcS5OJ8GCCMEFBBujkMTgXIizEkPiNn6wgzWbhjHHKIdQnIiLHCMnoZdnFHOjxssuckjMaxoztygWVFsXL+V02xlVxAyeTPtP2KGvSBkMVhw86VaLnGE2FT5Diq9xsoz65zn6jXtUsbFVZtEoe3Wi3qoI2smb7U6Klr0MWhw06q/lvczBmYaT9bRlMant6UdFSimllJoaWrgopZRSampo4aKUUkqpqaGFi1JKKaWmhhYuSimllJoaWrgopZRSampo4aKUUkqpqaGFi1JKKaWmhhYuSimllJoaU905N2kLZlkIqUGco02TB4Khkye0fMrVjRXmXAdnAnWbs5C2aTdTcu9YFENGDXCIid1zxTpCYkjWDK4TML5oBe8DrmuQ9kYnW7Gm6FgLGTVWis6MQQx5sGQ1R5Y65lyHhs2o2yzOywRSm9PAkDlHEItPN15bdnhsUzS5ja8itqstutZWO3oaQ6+NrBWsAXIpW9+CD7Fj7gAjgmBiPEDJF01oTbWiNb1liyk7ZVa63FI2rjUb3UYrs5TKUCR2J44/dQERi3PDj601UnTDdRubFxzY+PJghLx43FsJKe5s0T237Jhbbf+/V91zYbz2/33TD3SO3Un33MHn+ue/dQfdYcbpkjupwW63o7dRtm37v1920/Z/nK65+9H6fL/a/e/HuvbmfQk72R7GOAS1e1NduNQvdEiWDHN/7+gcTWmdcLSunuPhq2f4+6sXOHV8mevmL/KkxkVOpGtcXVvmu2YeY/HIDI8cX+DhtQXOL8+xttikvZhSX7TULlrqS5b6ciBZ87h2jm15bCsnWbXUao76jCObc3TnDN15S3bEks0nLB6psTw/w2OzsxxttjneWOdEfY2j6TpHkjYzrkPD5KS2y4ztMufadJKUdkhZ9XWW8wZLWZOLnSYr3TorM3XarRpZO8G3HL5lcS0Tbx2DbxtcW2IcQVeKm8Hkgs0E6wPkJrb+96HX/t8IMbfIxGLEWIPkBqxBkhhnEJzB5BbrDCGNeUU227gfMuL9DEIKNofgivyiRJCkyC9yRX5RIoiTIr/IElzAOOnlF+UukCSe3FsyF0iL7KIsOFLrqTlPHiyJddSs72UXZWJJTcDjcQSskV5+URkBUL4Jj8ov2rhf/GDtQXZRddrtVLOJyuUCQ0/ag8VLfF1/+/9S9SQ8KsuoLwtpILdosHgZzDIaNEmBs/U2XrriZZK2/8OKl3Hb/Jf7blhBcBC5PIOmvS3+NEQPjHIYjv+00Y+KlFJKKTU1tHBRSqkDpH9tKzUZLVyUUuoA7SRVWqkrmRYuSil1gPSKi1KT0cJFKaUOkF5xUWoyWrgopdQB0isuSk1GCxellFJKTQ0tXJRSSik1NbRwUUqpA6TfcVFqMlq4KKXUAdLvuCg1malu+Z/N17AhZg3ZTEhaQrpq8A1Dp1bjcTfX+2vGY1lIWlhiS/jZpMNCrU2rmZJ1E7LckuUO4w3WG0ywvaAd2/UYHx8YL9hccN1A0raEtGh9nxhCYvEuoWXqOCM4G9vLV/MygrXUbdZrwZ4ajzeWus1puoxuSOgmDi+WzDu8t4Rg8d4QvMH4mC9kgsEHIBSPBYwYjBisgAgELJaAyUGw/blFUmyPECMBbDHKA8bE+YXYBt8EsD7mORkfowKMAWMFa2KwUTBFbJIHawzBlC30DcZIjBbAxK32xfqYgBiQYAjG4L3FAMYI1mzc92LJgxRxTEIutpdPtCm3qGjXHzdodHZRzCcylawis2Vu0aD9yC3acl4j2v7H52RoFtGW2TsjXtP3+gkziyaNOrjUBqMVhtlqn22a34Tt/quPt4oSGFyHUe34t8rhGYx/UOpyMtWFy+KNKUcu1mhcyKkvdqlfEGYfSeguJDG36Ko5Hr5qhoevOsqJ46tcf2SRa2cucjJd5WS6wpMbF1g60uT88XkeWjvKueV5VhdnaC8m1C5Y6heL27Lr5RaVt2TFUqsPyS2aj7lFF4/UWJ5r8tjsHAszLU40Y27RQtpiIWkx59pFblEMYJyXNscTx3paY6XeYCVvcLHR5GK3yVKnwUq7yC1qpeRti1u3uHbMLco7hqTMLepu5BbZrmBzi81CLDhyE4ciGE9f8VLNLhIvYA02MQRnsXnMLtrIKiLesiK3KKUo3ECK56wrHwumklsUM4wESQTJDD4RgrMYF/BOisyimFeU2EDiHKnzpDYOc2vjeOOpuZhXNJhbVBaLjjiPsgCpBkHa4sQQTMBierlFvVDGgdyiMlBzMHSxNKqIGRy/1Ul9nMyi8vlqxlB8buMkNhi+WDUsv8hjdpxZNM42D+NlYxsnzSsq98HQZbP9soft5zCicBhWZExivzJ0dhseOEmBNnIeI7ZtJ+u2n6GO00wL0OEO559GSimllFJDaOGilFJKqamhhYtSakf262MQpZTaihYuSimllJoaWrgopXZEv0yplDoIWrgopXZEPypSSh0ELVyUUjuiV1yUUgdBCxellFJKTQ0tXJRSO6IfFSmlDoIWLkoppZSaGhMVLh/+8Id51rOexZEjRzhy5AjPe97z+J//83/2nhcRbr31Vs6cOUOz2eRFL3oR3/zmN/vm0el0eOtb38rJkyeZnZ3lla98JQ899NCOVj6bh+68IZtz5E2HpEWb7lxwHUhakKwbwlrKynqDJ9qzPN6ZYylvsu7rBAyOQM3mNJOMZi0jaeT4ZsDPCHkT8qYhb1p83RJqjpBaxMXP9mPrfMFmgs2IbfYzsF2wHUvoOLqdhFY3ZS2rsZLVWcvrrPsa675OWxKC2P5cEoTUeBLjqbucmvXUnCd1HpcEbOrBlW32QdKNNvshgeA2bpLEDvbiDGKLMCFrkKK1fwwX2sxIL+wo5iB5KfKRqjcqw8H7UHbQN8HEocT79PKVDIiBYJBgEClvEELMZyrHhcrNB1vctxvjes9bAhvPQWyX77HFdPH5cjgoiN02v2dUq3kvdqyMnnHa4Q+2rZ+0hbrb4krITlqI7/eVler2Vbe9On5wHzjC0NthclBXpC7nNvGapK0AjIiM/ZPwh3/4hzjn+O7v/m4APv7xj/Pv//2/52tf+xrf+73fy6//+q/z3ve+l4997GM87WlP41d/9Vf5/Oc/z7e//W3m5+cB+Lmf+zn+8A//kI997GOcOHGCd7zjHVy4cIF7770X59xY67G8vMzCwgI/8Km3cX71FMnDdWb/3jBzPlC/kOHaHgDfcHQXEtrHLa2rDO2rAlzV4arjK1x/ZJHrmoucSNdo2IxMHBfyWc53Ym7RI0tHWFts4i6k1C8Y6otCfUmoL3uSdY9te2xehASmDl935DOObM7SnTNkc4ZsHrJ5IZ/32LmM5myXozMtjo/KLTI5AJkkZOJYDzWW8ibLeYPF7gxLnSbL3TorrQbtdopvJZiWw7Ysrg1Jy+A64NrgOoLrMpBbJJXcomLoQ8wqGvwxMEW+UDHExdwiSYpAyV6wZJldVM0q6s8tEkcvt0h6RVW8L0nMLcIKJIJJAsYJz
gWsCyRJwNlA4jxpkV+UOk/NepwN1Kwnsb7IL4o5RakJJNbjjGCReJ+YX2RNLAydCVikf1i84TsEa0JvGMfF4aiT7DDjFCmwdX5RtVAaDNobVkSNyp8ZVZBVp69O0ze+zHWqPD9OkTbOl3er86lu36jtHhY2uBtb7d/qPpjki8jjFiyD8xxMiR48ltXspi2XP+bP3aj5b7WOfcvZRVbRuMsa3CejCpdx1mXT8R1jeaMMW4+dHvdRyw1iRx7LnSx/kuX25rlHy68ue767yH/6p7/P0tISR44c2XKdR5noisuP/diP8aM/+qM87WlP42lPexrvfe97mZub40tf+hIiwgc/+EHe85738KpXvYqzZ8/y8Y9/nPX1dT75yU8CsLS0xEc+8hF+4zd+g5e85CU8+9nP5hOf+AT33Xcfn/vc53a0AUoppZS6cuz4Oy7ee+68807W1tZ43vOex/3338+5c+e4+eabe9PU63Ve+MIXcs899wBw7733kmVZ3zRnzpzh7NmzvWmG6XQ6LC8v992UUkopdeWZuHC57777mJubo16v88Y3vpFPf/rTPOMZz+DcuXMAnDp1qm/6U6dO9Z47d+4ctVqNY8eOjZxmmNtvv52FhYXe7brrrpt0tZVSSil1GZi4cPkH/+Af8PWvf50vfelL/NzP/Ryvf/3r+da3vtV73gx84VNENo0btN007373u1laWurdHnzwwUlXWymllFKXgYkLl1qtxnd/93fznOc8h9tvv53v+77v4z/8h//A6dOnATZdOTl//nzvKszp06fpdrssLi6OnGaYer3e+0+m8qaUUkqpK8+u+7iICJ1OhxtuuIHTp09z11139Z7rdrvcfffdPP/5zwfgpptuIk3TvmkeeeQRvvGNb/SmUUoppZQaJZlk4l/8xV/kZS97Gddddx0rKyvceeed/Omf/imf+cxnMMZwyy23cNttt3HjjTdy4403cttttzEzM8NrXvMaABYWFnjDG97AO97xDk6cOMHx48d55zvfyTOf+Uxe8pKX7MsGKqWUUuryMVHh8uijj/K6172ORx55hIWFBZ71rGfxmc98hpe+9KUAvOtd76LVavGmN72JxcVFnvvc5/LZz36218MF4AMf+ABJkvDqV7+aVqvFi1/8Yj72sY+N3cNFKaWUUleuiQqXj3zkI1s+b4zh1ltv5dZbbx05TaPR4I477uCOO+6YZNFKKaWUUtOdVTRpK3SllFJKTbeJrrgcNi85/X9Ze/I5/vopV/M3j51k6ZFZmo/UaT4qNC8Eass5zce71Bcts+cc7aOO9okmiycbPHryON84uc41R5e5fm6Ra+pLHE/WOJmu8JTmEzx2ZJ6/P3mEh1aOcuHiHK3FGrULlvpiSn0xobYSSFdj+3+TB5L1DNfOSVcd9YYjm7Vks0X7//mEbM7Rmq+zNtfk/Ow8c7Ntjs20ONFY41htnaNpiwXXYsZ1SI1nxuTUbcaca3MsqXMiXWO53uRi1mR5psHFdpOVdp31do2sneBbCXnL4tom3jqmaP1PvN8tIwBMka1kMWUEQIit/ylyiAAIASMGYwziQZzFWUEyg00MoWt6uU2x/b9UIgCk0u7f9HKUJDEEB7b3GMQaJBFCAiSCOIskAUksxgV8LkXrf0tWtPyvJZaudaQ2kLnYvr/mYr5TYgNdIyQhkFofW/6LxRkhMR5rhBxLYkMvBsCZgJWN9v/BBCyGgGAxOIRA0X5bNlr/V9vFD2v/X21nv1X7//K5YW30HaG3nLJQL1uXl8usrke1RXe1dfdgflHZ3r+cPojtTeMx/eNNwIvttfQOmE3bM2zdx2kBXs673L7qtlW3u9rufi/b/o9aTnwcevvQItu2/Z80m2hwns7Ili3nx/lDbZKcour2lfMfd9/upt3/pMvaqW2jEcY4pupwmuorLjOuy5GkzZG0TbPeRWqBkG6ECwLgBesDNisze8B2DSY35Lkl844sOHxxYkqNp2Ezmq5Lw+U0khyX+N7JtXcSthShhcVyyvDBIpDQ+iIbKBdsDtYbTA54QwgWX9zyYDcCA4uAwFIMjhNSm5MWmTypjSfomN8TsFYwNmb9iI25hX1Da4rHphhXCVmE3vrLqD46IjEgsQxehBig2LtJcas+pghojNOW0w8bDj5frAxxcWZjVBG4GHf15uyaavBiyfees33DcvoyfHEnJn3dbgIYtwtdHJWZtNVJbKsgxktt3Eyn0k6vtJZZVfthPwIVL+ewRKV2Y6oLF6WUUkpdWbRwUUoppdTU0MJFKaWUUlNDCxellFJKTQ0tXJRSSik1NbRwUUoppdTU0MJFKaWUUlNDCxellFJKTQ0tXJRSSik1Naa65f/Xlq/n+rTFNY0l0qs899e7PDR/lIsLDTrHHI3HLI2LgXQtYPJAfQlsLiRtS7Lu6KzO8uBKnQsnZnh0YZ7rZxc5WV9l3rU5lqyTznhmky5ztQ6PNI6w1JzBz9TIm5Z8xpHNWOorlmTNk7Q9JgsYH3AtMF56rfVd1+I6Btux5B1D1nYstx3tTsrabI2VZoMTjTprtTpHk3XmXKfX+t+aQIMc51qkxlO3OTWbU3OeusupJTkraZ12UiNLE/KaI7RsbMOfGkJKrw1/SAyhC86BSyziBMkN1gZMLmCIHYD9QBdQEYwv7wcQgxEDEtv4I2B87JRrgondgxNDCMRuwgFCMHGYxOmCNxhPjAcQgwQIAQix8zHBxm7FAhJMvBXdc30I+GDwRQRAcAYvgdwEahiCeHIT2/oHE1v/B29IrCcEE1v/IzgJBGMgFC3IMaTGb7RdNiHeLzuYiqXsEO4IBDG9Tqxl2/hRXWwhds/drktstQV+3/hKa3rY3DJ9WPv/ON1G6/7N85S+1v/lNMPGV2MJhrVKHycCoG+9RrRbH6ftf/l4p4btu1HL2W6d96Nj7k7tR6fdaWiLf6mPgbvCM/IOw/ZPdeHy8OoCVx31XNe4wNW1ZY6na8ykXf6fO0nLzoBYrLe4rpB2BNPJi9yeBJs7rDcQEtaY4ZwRUhuLgXnXZqYoHmZctygWPA/awEU7S5c6EPvqm2B7rf6dF2weMMFjfMB412t9b4LtO+FnJHSBZSBUfg4s0svPcVZITWz3n0qOLbJ0HAFnpDdtVQbxFGaKk6wxlaHE4SYWS4jbVJwAjQeqbf7LcUaKbQdLiBk+xat6p6reS2Lr/tjWPxYoSNzejdcYRKQYZ3qn/V6r/2LKYCzk2/xAWOh6CNaQlG/ixYKsCcUCPeCwUqyEBDIcKb63BcOKF2tCPJn37ttNxQvQGz/KfhYvw6bZmHZ4ATNJ8dI3v0pu0ahtKI0qYqonxUkzi3Zr3HltyvOprPPE2UQj9uOlKAzKk81WWUjjOkzF2uVMIx9G04+KlFI7ctj/Er+S6Enu8jPsKqmKdM8opXbkSv7L+0redqUOmhYuSqkduZKvuBy2bde/zi8/ehVtNP1pV0rtyJV81eFK3nZ1aWgxOpruGaWUUkpNDS1clFI7ctg+LrmS6ccKlx89pqNp4aKU2hH9uESp/aMfFY2me2YMWvkqpQ4zPcmpK4n+
tI9B3xSU2kw/Kjo89I8rdSWZ6s655y4c4a+POAKG4+ka1ghHay0W5lo8fjSl065hM4PxDrGQrPnYPDYXkraQrEHaMISaY63W4NFkjpqN7eCvqllmXAeLMJt0OFJrcbRZp91NWe848m6KzQw2B+ttbHkvgDEYX3SfFcFmAdc1hFRw7aL9vjNIArlzZC6l5QIrLramT0zAmoArhgBp0TLWIaQ2py6Opu3ScQndxJEFR+Yd3ltCsPiiRX4o2/B7g6kV7fiDxKEU61vegsEKUHS3xQimPDGV3XOlGCcSRwWDMQIejI2deY0HY0xsUGuEYGKrf+shGInPGQimaOLri30WJHYZ9mWn37gqeBBjCSbEF3iDMfEWgsUbMEawoSguLViJyw1iyIMFG3/QgxHy4OLKGGLnVGNBAr4YOlN0ei3b/UNf91yIhex+d8/djVHdc2F499btuuT2TVvtcLtN99zqa2D7GIDDYruuujv5iKz82dl+/8qedLfdqb3sTrzXDkOr+UtJi9HRprpwqX91lr9ZOcbD1y/wjKvP8bS58zx74QFunDvP/zt2FX911VU8dm6BziMpM+cSmo9Z6kuedD0nXc+pX3Q0LzhaT1jaFxpcvLrG0lWzPHZilifPL/KUmSc4ma6ykKxzKl3m2sZFHpo9ygPzx3l0YZ71I02yI45s0VJfNNSWLbUVT9Ly2G7AdjymG3CdgGtZ0jVHtmbprhmyNUM2Hx+3Wo7OXMrqbJ3lmQarzTpr9TrHa2ssJC3mXJuGyajbjFRyGiZnxnWYSzrMp03m0w6zyQxLtQar9Tpr9RqdeoqvJYSOxdcsrg0uNbiawdXAdSjiDwSXCK5rkEyKYixAbsCHSkZRpXgRAzYWGxIs4uP94AzGW2wuhNTEgsmDzWPBZnzMcAreYHKwScwuCh5MHos5k8bnxcd8IxJBghRZRgZxBgmWkPgio8j0cotS5/HBktuYU5QWwyCeYA25WBITCBgsgrcGZ4TEeAhsyi3qO0EVxUugeEMZUrzARoZOtXAYVsRsV7yMOtkPZhJVC6Zh2UWD6wL9b4hb5hNRbovpjwMYiCSo7qetiphhMQDV4ucg2/5PWuyNv4zdnXzGWfZ2y9jqhL+bbds8r90VFochF2mnheOV/n2vS7390/EnkFJKKaUOpUtdcGrhopRSSu3CQV8putJo4aKUUkrtwpX+UdGlpoWLUkoptQt6xeXS0sJFKaWU2gW94nJpaeGilFJK7YJecbm0tHBRSimlduFKv+Ki/w6tlFJKTZEr/YqL/ju0UkoppdQIWrgopZRSampMdcv/dF3IVwxrSw0enllgNukS6vGSVdNlzNc7XJzJyOcSuvMG1zK4zGKCw2ZFdkgusf19G9y6IVtLWGo2eSLtMp+2Sa1ngdj2PLWeuaTLXK3Dcr1OZybFty15y+CaBtslzt9LzN7Bxlb5IkVmkMTldQ2+C7a8dSwhdXSThFaasJbWaCQZTZ9Rtzmp8TgrvcwiawJOhNR46janZnMaSUbbx+yiTuLIE4uksR2/pBKHHkKZG+RNXB8P4gwhoWjhH3OOMEIMFRqSWUR8WspxRcaRCUAQxBbt/U2cZ5lFFLOSwPqYJSShyFIKxGyiQMwrssQcpKLVP8XjeN8gEtv/G2OxVhAxiBh8sBgbsBJzinwlv4hAbP9PjAmIWUUWZzxBLN7IpswiKC6BVjKLXLW9/UDb/zjOjN36fL8zi0pbtbOfxFjt58ds2z4YGzBs/Kj2/pciT2c/lzG4Hw9Dq/vD6qCzm8al33G5tNs/1YXLsb9ssfBYyupDNR6//hTnrl/gKdc8wfcsPMqTm09wbWORRxeO8O2rruaBq4/TPtegc87RfNTSvOBJV3PqT3hqSznNCwntxx2tkyntq47wV1c3OX9yjicfXeQpc09wTW2Jq9NlTiarnKot8+jsEf5u/jgPH1lgeWGG7EJKbdGSLTrqS4baqiVZD7iWx+QB18pxHUPSsiRrjnTdkq1auvNFbtFcSjafcGE9ZXWuwdJcg4vNJicba5ysr3IsWWfetWNekfGkzlO3GTO2y4ztciRpM590WKo1WK41WKo3WGvUaLdq5I2ErO3wLYtrG0LN4DoQ2obQyy2CkFhcJtiOwTqDzQVrDBIE40MsMIpCrMwsMqbMD4oFiwSL8XGc8QabF0VRL7OozC2SjQyjhJhTVIQsSgIhjYVOyIvCq8ws8gafhDhNEEKRU5SHQGIDmQ2kLuBsILUBJwEfQswvkkBiPEmRWeSt78svSkzMOkorw3Eyi+L4ONgqt2ivM4s2ZxANzy0aXHb1ddXgyJHZRMNyjIbkDvXmOeRNbNiJeTCwsZxmWPEyuL27ycWp7pv9yivaTUbR4Ml6q2VPmlO0k0Kgemx2EzA5uA2HOdBRHW76UdEol6CA1N9Zpca3VeK2UurKoYXLAbrCUtqV2pW9+LhLKTX99J1gFL0aopRSSh06WrgopaaCflSklAItXJRSU0I/KlJKgRYuo+n3T5Q6VPSKi1IKtHAZTb/jotSholdclFKghYtSakroFRelFGjhopSaEnrFRSkFWriMpt9xUepQ0SsuSimY8sJFUouRImuoZQhrKRdbDS50Z1jKm2TisAjNJKNWzwjNQN6AvAl5w+BrNrarL7J0bCa4LriOwXQcrU7KSlZnJWuwHmp0QorH4ExsHd9wGfU0x9UCoS6EmuDr4GsGnxpCYpDExMyf8jszEvN7jN/ILrIZ2MxgMyCzeG/JckfHJ3SDoxsSOiEhE0cQSyiyeCC2006NxxohtZ6a9SRFu/vEBqwLGBfACuIEccShpbhvKkODWBN/Kop1lnLdrSnCh7b+8o8ps4uKbUXiuBgXUG4//Y+HjO8NBSgyjMrpKfOUxBTpAzGrKFSGfTdMX2vxjXEbP/5eNh6X0/a9BoMXG3ONtvgCVPWqwGA7872+YrDfJ/Jx29aPk7c0jVkuO40V2E27f6XU9qY6q6h1ImX+oufoxYzZcylrDyasXneSL16/wFVPusjTjz/K9c1F/vHxJb57/jH+9uRJ/vbUCRbPzdJ+JKH5qGXm8UBtKSddykiXcxqLCZ3HHa3HHK2r5/nbq5s8ctURrjt2lO868jhn6hdZcC0Wmi1O15a5fnaRB+aP8eDCMS4cnaV7oUZ30VG/6KhdtNRXLMlaIGl5bBawHY/tepK2J1l31NYs2YqlO2fIVi3ZvCVbs1yc38gsWpyd4URjjZO1NY6m6ywk6zRMTmpyUutJjadhM+Zdm/mkwZG0zcW0yXK9wXK9wWqnxnq9RtZJyFsJvm1xbRuDJdsG1zHxficWbS41uK7gOkVeUddg8oAxRWaRmC0yiwzWC3iDcTYWSInFOiGkMXsoJPRyikIiMcsoiRlFpsgskjwGNYZEMN4QvEF8kVmUFJlFXop5BLw3OGdxldwiX80ssqHIBXIkNpCEQFLJKkqtJ4yRWRQwpAAm4MURkBi0KG4jgHHC3KIyl2enmUXV+ZYmzS3aNptoIMcozndjmVtlF/XWqRpQWcklKl+zXV5Rue67LQDLfVM9NrvNK5q0WNmLoMV
Jc4rGXpchGULbFZ67yY4ax7jbUtrv9dkNDdXcvam+4qKUUkqpK4sWLkoppZSaGlq4KKWUUmpqaOGilFJKqamhhYtSSimlpoYWLkoppZSaGlq4KKWUUmpqaOGilFLqsneYe7uoyWjhopRS6rI32FRPTS8tXJRSSik1Naa65f+jL4D1cw3mHww0nsg48kCH5hMJa48krDzpKv7s2qMcv2aJG48/znXNRZ618DDXzlzk744d5++uPs7io7O0HnE0z1uaT8TW/8laTtLKqS0lNC44Wo8ntK+a56+unuGhqxa47thFnjr/BGfqF5lzbZ7iOhxL1jndXOGh+aM8vHCEi8dm6S7WqC06uhcdtSVLbcWSrgaStsd2AyYLpLngOhbXsqTrlu6aJVs1dFdt0f7fsVi0/r8w12Rxpmj9X1/lSNLua/0/YzukxjNju0Xr/zYreWP81v/1Suv/NiQdCKnDdQM2MdhubP9vcoPxA63/Ibb+90AQjK20/neGEEAsGF9p/e9M0fZ/c+t/44nt/wOYvIgCSGOb/+BBvIlt/0PZ+t8QnCU4wRf5TL7S+j+3QuI8zgip83gJ5CaQiN3U+j8OXREBYGK7/2JozUarbis2tquvtvsvW7CX7dOrf+BJfG671v/b5f5UW+H3jR/R+r+6TBje/r/a9j9Os7nF/6g4gHL6wXWsGra+ZQv5wRb/1fFbtf0fta2TqM5zVNv/wfb3e5lDtFWEgDOCH1hukP7jtJWtWuQPzns/7cX+2mpbJsnAGhVlMG3t9yeNP7gcTfUVF6l7QhrDAgFMFnDdQNKOwYuma+nmCXmIm5kaz6zrMJd2aNQypBYItZh303v/EMHkgs0CLhNct8jqyQx5EXzY8clGvgxCw2Y0bZeGy6glHpeEXqZOSIowwyK8UMqQQiGe5H15i/k9Jo/DeDPgDSFYcu/oBkcullxcESA4mE8T4g3BEYMgE+tJTMzqcVZwLmAsYCWujy0CFyu3ON4gpljfXshisaBRv+ci9H6nKgGJplLc9Acrbjwux5lyHmXoYuX5eDMDjzfGiVRXxfRu5eNyWA1j3E55AqueJL3Y3ptd9RiMOglVbbfMUTk/49pJ8OI4r9nqBLR9Zs7+hA5ebmnR0xhEOQkNn1R7ZaoLF6WUUkpdWbRwUUoppdTU0MJFKaWUUlNjV4XL7bffjjGGW265pTdORLj11ls5c+YMzWaTF73oRXzzm9/se12n0+Gtb30rJ0+eZHZ2lle+8pU89NBDu1kVpZRSSl0Bdly4fOUrX+G3fuu3eNazntU3/n3vex/vf//7+dCHPsRXvvIVTp8+zUtf+lJWVlZ609xyyy18+tOf5s477+QLX/gCq6urvOIVr8B7v/MtUUoppdRlb0eFy+rqKq997Wv57d/+bY4dO9YbLyJ88IMf5D3veQ+vetWrOHv2LB//+MdZX1/nk5/8JABLS0t85CMf4Td+4zd4yUtewrOf/Ww+8YlPcN999/G5z31ub7ZKKaWUUpelHRUub37zm3n5y1/OS17ykr7x999/P+fOnePmm2/ujavX67zwhS/knnvuAeDee+8ly7K+ac6cOcPZs2d70wzqdDosLy/33ZRSSil15Zm4Ad2dd97JV7/6Vb7yla9seu7cuXMAnDp1qm/8qVOn+M53vtObplar9V2pKacpXz/o9ttv55d/+ZcnXVWllFJKXWYmuuLy4IMP8ra3vY1PfOITNBqNkdMZ099kS0Q2jRu01TTvfve7WVpa6t0efPDBSVZbKaWUUpeJia643HvvvZw/f56bbrqpN857z+c//3k+9KEP8e1vfxuIV1Wuueaa3jTnz5/vXYU5ffo03W6XxcXFvqsu58+f5/nPf/7Q5dbrder1+qbxT7n+MR5cqJHP1JmbqzHzWCBZ9zQuejBgc8d65wh/0U5ZvGqGp8xd4Gi6zunGMhyH+63weDJPqNUIdYevG+oXHelajssCtWUfO7h6i/GOdj7DA5mjnSes+RrXNRc5lqyT2pxj6ToQ20o7IzzuAh3XICQJITH41BBSqKUGWTe4jsfksXW+6xRdY4NgxMWOscFAMBAceYDVELuu+mDp+oSs4cjEsZC0mLHdoiV96HXyLdcFYotoa2L7e2cD1gptUyO34J1FrC265hbdcY1BjCCm6ChsbPEYrDHYrOiAGYo2//0ta+M4F7vcG4A8NuoVgUCsliUQO94iRcfcoqusQGyDazZmKwAGKUbErrfFZOXkYkEEL0XXXBcq0xoSF+cfGxYbgjXUgC6xvXwQD46NFSyGVmKH4RRPRtmiOfTa+ve6nRb7Pojd1PrfmthCvuz0GsT0tf4f7AC7Xev/UW3/e89XWtYPKpe7Xev/US3+yzgAP9A+eVT7/+o6V7ev97pKy/Vh2zWq7X91vbcySSzAuG3/L5Vhbf/Hfd2ltJepy9PYgl9dehMVLi9+8Yu57777+sb9y3/5L3n605/Oz//8z/PUpz6V06dPc9ddd/HsZz8bgG63y913382v//qvA3DTTTeRpil33XUXr371qwF45JFH+MY3vsH73ve+iVb+OccfoDmf8FfhNK1unaRtSdY96WqOzQTjE8RY1l2TR1LPbNplPm1zIl2j6TISEzBGeEwWMD7F5BabgevamFmUZbG4kASMBWNp2TpPuDnqztN0GXWbc9xmLLgWqen/r6jHxNAVUxQjButNUQSB8YL1vsjjCfEE2fv9d9Brt2/AOHIL66YojGzM17EmxIIFwbqABVKT91r/b3WEyxb4OQlBwIjFCPGNsmijH9vvG3ygaMkfi4n42vLszujixQiC7Xsb2nhVebnPEHpTCVBGCwjBGMpdYHOKx3E5powiMDHXCF/uPkswgY2LidX7RUXlPARLF6hVVswG6StaCMTMIjEbLf4ptr0sXipFTLV4icWKGXg8fvGynd0UL7D7k/Gw4iXOd+sCBrZfdxhd0Ey63sP2a3W/XKqiZFRe1FZ5Rftpqyyk+PhgijWlxjFR4TI/P8/Zs2f7xs3OznLixIne+FtuuYXbbruNG2+8kRtvvJHbbruNmZkZXvOa1wCwsLDAG97wBt7xjndw4sQJjh8/zjvf+U6e+cxnbvqyr1JKqb13qUIWldoPe54O/a53vYtWq8Wb3vQmFhcXee5zn8tnP/tZ5ufne9N84AMfIEkSXv3qV9NqtXjxi1/Mxz72MZxze706SimlBlzKhGil9tquC5c//dM/7XtsjOHWW2/l1ltvHfmaRqPBHXfcwR133LHbxSullJqQFi1qmmlWkVJKKaWmhhYuSil1hbnU/3mk1F7SwkUppa4w+lGRmmZauCillFJqamjhopRSSqmpoYWLUkpdYfQ7LmqaTXXhkonjSK1Nc75N51igfczQXUjIGw4x4LpCuiYkK4bWcoPza3Ocb8+znDcIYmi6jPl6h3SmSzYnZHPQnTPkMxZfd4g1mCC4TiBpCUkLknVDtpayuN7kic4sF7MZ1kONTGIPmrrNmXVd5modGvUM08zxDcE3IG8a8obB1w2+ZpHEgo2fNfda/ueCzQTXLW9gu2A6ltB1dLoJa90a63
[... remainder of base64-encoded image/png notebook output (matplotlib figure of the fixed temporal embedding) elided ...]", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "# call fixed temporal embedding with the vector of 'times'\n", - "plt.imshow(emb(ref_times).numpy(), aspect='auto')" - ] - }, - { - "cell_type": "code", - "execution_count": 136, - "id": "8b17fdb7", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "feat_dim = 1024\n", - "xfmr_encoder = TransformerEncoderLayer(d_model=feat_dim, nhead=8)\n", - "visual_encoder = VisualEncoder(d_model=feat_dim, model_name=\"resnet18\")" - ] - }, - { - "cell_type": "code", - "execution_count": 137, - "id": "7999fcef-953b-42cf-927c-f3b617f68157", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "def extract_features(\n", - " instances: list[\"Instance\"], \n", - " visual_encoder: \"dreem.models.VisualEncoder\",\n", - " force_recompute: bool = False\n", - " ) -> None:\n", - " \"\"\"Extract features from instances using visual encoder backbone.\n", - "\n", - " Args:\n", - " instances: A list of instances to compute features for\n", - " VisualEncoder : pass an instance of a visual encoder\n", - " force_recompute: indicate whether to compute features for all instances regardless of if they have instances\n", - " \"\"\"\n", - " if not force_recompute:\n", - " instances_to_compute = [\n", - " instance\n", - " for instance in instances\n", - " if instance.has_crop() and not instance.has_features()\n", - " ]\n", - " else:\n", - " instances_to_compute = instances\n", - "\n", - " if len(instances_to_compute) == 0:\n", - " return\n", - " elif len(instances_to_compute) == 1: # handle batch norm error when B=1\n", - " instances_to_compute = instances\n", - "\n", - " crops = torch.concatenate([instance.crop for instance in instances_to_compute])\n", - "\n", - " features = visual_encoder(crops)\n", - "\n", - " for i, z_i in enumerate(features):\n", - " instances_to_compute[i].features = z_i" - ] - }, - { - "cell_type": "code", - "execution_count": 138, - "id": "e299e8a0-61eb-4eee-901c-49aa7e678b3b", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# partial forward pass of the transformer - up until the encoder\n", - "\n", - "def prepare_for_xfmr(ref_instances):\n", - " # extract visual encoder features from instance object; shape=(1,n_instances,d=1024)\n", - " ref_features = torch.cat(\n", - " [instance.features for instance in ref_instances], dim=0\n", - " ).unsqueeze(0)\n", - "\n", - " # window_length = len(frames)\n", - " # instances_per_frame = [frame.num_detected for frame in frames]\n", - " total_instances = len(ref_instances)\n", - " embed_dim = ref_features.shape[-1]\n", - " # print(f'T: {window_length}; N: {total_instances}; N_t: {instances_per_frame} n_reid: {reid_features.shape}')\n", - " ref_boxes = get_boxes(ref_instances) # (n_instances,1,4)\n", - " ref_boxes = torch.nan_to_num(ref_boxes, -1.0)\n", - " ref_times, query_times = get_times(ref_instances, query_instances=None)\n", - "\n", - " # clip length \n", - " window_length = len(ref_times.unique())\n", - "\n", - " # computes the temporal embedding vector for each instance\n", - " ref_temp_emb = emb_t(ref_times)\n", - " # computes the positional embedding vector for each instance\n", - " ref_pos_emb = emb_p(ref_boxes)\n", - "\n", - " return_embedding=False\n", - " if return_embedding:\n", - " for i, instance in enumerate(ref_instances):\n", - " instance.add_embedding(\"pos\", ref_pos_emb[i])\n", - " instance.add_embedding(\"temp\", ref_temp_emb[i])\n", - "\n", - " # we need a single vector so average the temporal and 
spatial embeddings\n", - " ref_emb = (ref_pos_emb + ref_temp_emb) / 2.0\n", - "\n", - " # add a new dim at the beginning to represent the batch size (in our case 1)\n", - " ref_emb = ref_emb.view(1, total_instances, embed_dim)\n", - "\n", - " ref_emb = ref_emb.permute(1, 0, 2) # (total_instances, batch_size, embed_dim)\n", - "\n", - " batch_size, total_instances, embed_dim = ref_features.shape\n", - "\n", - " ref_features = ref_features.permute(\n", - " 1, 0, 2\n", - " ) # (total_instances, batch_size, embed_dim); note batch_size = 1\n", - "\n", - " return ref_features" - ] - }, - { - "cell_type": "code", - "execution_count": 139, - "id": "75ec8cab-25b9-4e9e-a64a-b5dbe00cc81a", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# pass instances through visual encoder to get the feature vector (q,k,v); modifies the feature attribute of each Instance in ref_instances\n", - "extract_features(ref_instances, visual_encoder)" - ] - }, - { - "cell_type": "markdown", - "id": "a972707a-51a7-45ff-987e-80ee0dea4752", - "metadata": {}, - "source": [ - "### Rotary Positional Embeddings" - ] - }, - { - "cell_type": "code", - "execution_count": 140, - "id": "f0823cf1-2a35-4920-a62e-896bd9dbb078", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# create transformer instance to test embeddings \n", - "tfmr = Transformer()" - ] - }, - { - "cell_type": "code", - "execution_count": 143, - "id": "5e0b9d31-34be-40f8-91dc-b91d59aee170", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "assoc = tfmr(ref_instances)" - ] - }, - { - "cell_type": "code", - "execution_count": 157, - "id": "9f29ca35-9ff2-4e9a-bba0-37a3a14ad522", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "gtr = GTRRunner()" - ] - }, - { - "cell_type": "code", - "execution_count": 160, - "id": "0aa3876a-6246-4d02-80a5-013d382f6d38", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "metrics = gtr._shared_eval_step(data[0],\"train\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "aee0d129-83f2-4f76-b452-132391554b4c", - "metadata": {}, - "outputs": [], - "source": [ - "metrics" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "dreem", - "language": "python", - "name": "dreem" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/run_trainer.py b/scripts/run_trainer.py similarity index 86% rename from run_trainer.py rename to scripts/run_trainer.py index fcf38ff..5046222 100644 --- a/run_trainer.py +++ b/scripts/run_trainer.py @@ -4,7 +4,7 @@ # /Users/mustafashaikh/dreem/dreem/training # /Users/main/Documents/GitHub/dreem/dreem/training -os.chdir("/Users/mustafashaikh/dreem/dreem/training") +os.chdir("./dreem/training") base_config = "./configs/base.yaml" # params_config = "./configs/override.yaml" From fe2c88e7ccfb3dc0391fd4365443c2cea4e3b80f Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Sun, 18 Aug 2024 17:29:57 -0700 Subject: [PATCH 59/63] linting --- dreem/inference/tracker.py | 1 - dreem/io/instance.py | 4 +- dreem/models/attention_head.py | 48 ++++---- dreem/models/embedding.py | 46 ++++---- dreem/models/mlp.py | 1 - dreem/models/transformer.py | 200 +++++++++++++++++++-------------- tests/test_models.py | 71 +++++++----- tests/test_training.py | 2 +- 8 
files changed, 210 insertions(+), 163 deletions(-) diff --git a/dreem/inference/tracker.py b/dreem/inference/tracker.py index 8426e84..58480f4 100644 --- a/dreem/inference/tracker.py +++ b/dreem/inference/tracker.py @@ -463,7 +463,6 @@ def _run_global_tracker( # hungarian matching match_i, match_j = linear_sum_assignment((-traj_score)) - track_ids = instance_ids.new_full((n_query,), -1) for i, j in zip(match_i, match_j): # The overlap threshold is multiplied by the number of times the unique track j is matched to an diff --git a/dreem/io/instance.py b/dreem/io/instance.py index c3aa568..ba97182 100644 --- a/dreem/io/instance.py +++ b/dreem/io/instance.py @@ -565,7 +565,9 @@ def add_embedding(self, emb_type: str, embedding: torch.Tensor) -> None: emb_type: Key/embedding type to be saved to dictionary embedding: The actual torch tensor embedding. """ - if type(embedding) != dict: # for embedding agg method "average", input is array + if ( + type(embedding) != dict + ): # for embedding agg method "average", input is array # for method stack and concatenate, input is dict embedding = _expand_to_rank(embedding, 2) self._embeddings[emb_type] = embedding diff --git a/dreem/models/attention_head.py b/dreem/models/attention_head.py index 8ea04b2..35e7b59 100644 --- a/dreem/models/attention_head.py +++ b/dreem/models/attention_head.py @@ -9,13 +9,7 @@ class ATTWeightHead(torch.nn.Module): """Single attention head.""" - def __init__( - self, - feature_dim: int, - num_layers: int, - dropout: float, - **kwargs - ): + def __init__(self, feature_dim: int, num_layers: int, dropout: float, **kwargs): """Initialize an instance of ATTWeightHead. Args: @@ -25,23 +19,27 @@ def __init__( embedding_agg_method: how the embeddings are aggregated; average/stack/concatenate """ super().__init__() - if 'embedding_agg_method' in kwargs: - self.embedding_agg_method = kwargs['embedding_agg_method'] + if "embedding_agg_method" in kwargs: + self.embedding_agg_method = kwargs["embedding_agg_method"] else: self.embedding_agg_method = None # if using stacked embeddings, use 1x1 conv with x,y,t embeddings as channels # ensures output represents ref instances by query instances if self.embedding_agg_method == "stack": - self.q_proj = torch.nn.Conv1d(in_channels=3, out_channels=1, - kernel_size=1, stride=1, padding=0 - ) - self.k_proj = torch.nn.Conv1d(in_channels=3, out_channels=1, - kernel_size=1, stride=1, padding=0 - ) + self.q_proj = torch.nn.Conv1d( + in_channels=3, out_channels=1, kernel_size=1, stride=1, padding=0 + ) + self.k_proj = torch.nn.Conv1d( + in_channels=3, out_channels=1, kernel_size=1, stride=1, padding=0 + ) else: - self.q_proj = MLP(feature_dim, feature_dim, feature_dim, num_layers, dropout) - self.k_proj = MLP(feature_dim, feature_dim, feature_dim, num_layers, dropout) + self.q_proj = MLP( + feature_dim, feature_dim, feature_dim, num_layers, dropout + ) + self.k_proj = MLP( + feature_dim, feature_dim, feature_dim, num_layers, dropout + ) def forward( self, @@ -63,12 +61,16 @@ def forward( # if stacked embeddings, create channels for each x,y,t embedding dimension # maps shape (1,192,1024) -> (1,64,3,1024) if self.embedding_agg_method == "stack": - key = key.view( - batch_size, 3, num_window_instances//3, feature_dim - ).permute(0, 2, 1, 3).squeeze(0) - query = query.view( - batch_size, 3, num_query_instances//3, feature_dim - ).permute(0, 2, 1, 3).squeeze(0) + key = ( + key.view(batch_size, 3, num_window_instances // 3, feature_dim) + .permute(0, 2, 1, 3) + .squeeze(0) + ) + query = ( + 
query.view(batch_size, 3, num_query_instances // 3, feature_dim) + .permute(0, 2, 1, 3) + .squeeze(0) + ) # key, query of shape (batch_size, num_instances, 3, feature_dim) k = self.k_proj(key).transpose(1, 0) q = self.q_proj(query).transpose(1, 0) diff --git a/dreem/models/embedding.py b/dreem/models/embedding.py index 7ef9b0b..57ace35 100644 --- a/dreem/models/embedding.py +++ b/dreem/models/embedding.py @@ -101,7 +101,7 @@ def forward(self, x: Tensor, input_pos: Optional[Tensor] = None) -> Tensor: # 100 since it's a fraction of [0,1]*100. temp is from [0, clip_len]; since clip_len # not available, we use the last value in the indexing array since this will be the # last possible frame that we would need to index since no instances in a frame after that - self.build_rope_cache(max(101, input_pos[:, -1].max() + 1)) # registers cache + self.build_rope_cache(max(101, input_pos[:, -1].max() + 1)) # registers cache self.cache = self.cache.to(input_pos.device) # extract the values based on whether input_pos is set or not rope_cache = ( @@ -121,9 +121,8 @@ def forward(self, x: Tensor, input_pos: Optional[Tensor] = None) -> Tensor: return rope_cache - class Embedding(torch.nn.Module): - """Class that wraps around different embedding types. + """Class that wraps around different embedding types. Creates embedding array and transforms the input data Used for both learned and fixed embeddings. """ @@ -153,7 +152,7 @@ def __init__( normalize: bool = False, scale: float | None = None, mlp_cfg: dict | None = None, - embedding_agg_method: str = "average" + embedding_agg_method: str = "average", ): """Initialize embeddings. @@ -228,18 +227,17 @@ def __init__( if self.emb_type == "pos": if self.embedding_agg_method == "average": self._emb_func = self._sine_box_embedding - else: # if using stacked/concatenated agg method + else: # if using stacked/concatenated agg method self._emb_func = self._sine_pos_embedding elif self.emb_type == "temp": self._emb_func = self._sine_temp_embedding - + elif self.mode == "rope": # pos/temp embeddings processed the same way with different embedding array inputs self._emb_func = self._rope_embedding # create instance so embedding lookup array is created only once self.rope_instance = RotaryPositionalEmbeddings(self.features) - def _check_init_args(self, emb_type: str, mode: str): """Check whether the correct arguments were passed to initialization. 
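For orientation, the rotation applied by _apply_rope in the hunks that follow is the standard pairwise RoPE rotation: adjacent feature dimensions are treated as (real, imaginary) pairs and rotated by position-dependent angles taken from the cached rotation matrix built by RotaryPositionalEmbeddings. A minimal standalone sketch of that operation, assuming queries of shape (batch, n_query, embed_dim) and a cache of shape (batch, n_query, 1, embed_dim // 2, 2); the helper name is illustrative, not part of the patch:

import torch

def apply_rope_sketch(x: torch.Tensor, rope_cache: torch.Tensor) -> torch.Tensor:
    # group adjacent feature dims into pairs: (b, n, d) -> (b, n, 1, d // 2, 2)
    b, n, d = x.shape
    x_pairs = x.float().reshape(b, n, 1, d // 2, 2)
    cos, sin = rope_cache[..., 0], rope_cache[..., 1]
    # rotate each pair by its cached angle (complex multiplication)
    x_rot = torch.stack(
        [
            x_pairs[..., 0] * cos - x_pairs[..., 1] * sin,
            x_pairs[..., 1] * cos + x_pairs[..., 0] * sin,
        ],
        dim=-1,
    )
    # flatten the rotated pairs back to (b, n, d)
    return x_rot.flatten(3).squeeze(2)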
@@ -268,7 +266,6 @@ def _check_init_args(self, emb_type: str, mode: str): f"Cannot use aggregation method 'average' for rope embedding; must use 'stack' or 'concatenate'" ) - def _transform(self, x, emb): """Routes to the relevant embedding function to transform the input queries @@ -281,15 +278,14 @@ def _transform(self, x, emb): return self._apply_rope(x, emb) else: return self._apply_additive_embeddings(x, emb) - - - def _apply_rope(self, x, emb): + + def _apply_rope(self, x, emb): """Applies Rotary Positional Embedding to input queries Args: x: Input queries of shape (batch_size, n_query, embed_dim) emb: Rotation matrix of shape (batch_size, n_query, num_heads, embed_dim // 2, 2) - + Returns: Tensor of input queries transformed by RoPE """ @@ -300,10 +296,8 @@ def _apply_rope(self, x, emb): # apply RoPE to each query token xout = torch.stack( [ - xout[..., 0] * emb[..., 0] - - xout[..., 1] * emb[..., 1], - xout[..., 1] * emb[..., 0] - + xout[..., 0] * emb[..., 1], + xout[..., 0] * emb[..., 0] - xout[..., 1] * emb[..., 1], + xout[..., 1] * emb[..., 0] + xout[..., 0] * emb[..., 1], ], -1, ) @@ -311,22 +305,20 @@ def _apply_rope(self, x, emb): xout = xout.flatten(3).squeeze(2) return xout - - + def _apply_additive_embeddings(self, x, emb): """Applies additive embeddings to input queries Args: x: Input tensor of shape (batch_size, N, embed_dim) emb: Embedding array of shape (N, embed_dim) - + Returns: Tensor: Input queries with embeddings added - shape (batch_size, N, embed_dim) """ _emb = emb.unsqueeze(0) return x + _emb - - + def forward(self, x, seq_positions: torch.Tensor) -> torch.Tensor: """Get the sequence positional embeddings. @@ -341,8 +333,8 @@ def forward(self, x, seq_positions: torch.Tensor) -> torch.Tensor: - An `N` x `self.features` tensor representing the corresponding spatial or temporal embedding. 
""" - # create embedding array; either rotation matrix of shape - # (batch_size, n_query, num_heads, embed_dim // 2, 2), + # create embedding array; either rotation matrix of shape + # (batch_size, n_query, num_heads, embed_dim // 2, 2), # or (N, embed_dim) array emb = self._emb_func(seq_positions, x.size()) # transform the input data with the embedding @@ -364,8 +356,9 @@ def _torch_int_div( """ return torch.div(tensor1, tensor2, rounding_mode="floor") - - def _rope_embedding(self, seq_positions: torch.Tensor, input_shape: torch.Size) -> torch.Tensor: + def _rope_embedding( + self, seq_positions: torch.Tensor, input_shape: torch.Size + ) -> torch.Tensor: """Computes the rotation matrix to apply RoPE to input queries Args: seq_positions: Pos array of shape (embed_dim,) used to compute rotational embedding @@ -380,12 +373,11 @@ def _rope_embedding(self, seq_positions: torch.Tensor, input_shape: torch.Size) is_pos_emb = 1 if seq_positions.max() <= 1 else 0 # if it is positional, scale seq_positions since these are fractions # in [0,1] and we need int indexes for embedding lookup - seq_positions = seq_positions*100 if is_pos_emb else seq_positions + seq_positions = seq_positions * 100 if is_pos_emb else seq_positions # RoPE module takes in dimension, num_queries as input to calculate rotation matrix rot_mat = self.rope_instance(x_rope, seq_positions.unsqueeze(0).int()) return rot_mat - def _sine_pos_embedding(self, centroids: torch.Tensor, *args) -> torch.Tensor: """Compute fixed sine temporal embeddings per dimension (x,y) diff --git a/dreem/models/mlp.py b/dreem/models/mlp.py index a6c5ab3..c497ab8 100644 --- a/dreem/models/mlp.py +++ b/dreem/models/mlp.py @@ -37,7 +37,6 @@ def __init__( # list concatenations to ensure layer shape compability for n, k in zip([input_dim] + h, h + [output_dim]) ] - ) if self.dropout > 0.0: self.dropouts = torch.nn.ModuleList( diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index 272d688..460d911 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -81,23 +81,28 @@ def __init__( self.temp_emb = Embedding(emb_type="off", mode="off", features=self.d_model) if self.embedding_meta: - self.embedding_agg_method = embedding_meta["embedding_agg_method"] \ - if "embedding_agg_method" in embedding_meta else "average" + self.embedding_agg_method = ( + embedding_meta["embedding_agg_method"] + if "embedding_agg_method" in embedding_meta + else "average" + ) if "pos" in self.embedding_meta: pos_emb_cfg = self.embedding_meta["pos"] if pos_emb_cfg: self.pos_emb = Embedding( - emb_type="pos", features=self.d_model, + emb_type="pos", + features=self.d_model, embedding_agg_method=self.embedding_agg_method, - **pos_emb_cfg - ) # agg method must be the same for pos and temp embeddings + **pos_emb_cfg, + ) # agg method must be the same for pos and temp embeddings if "temp" in self.embedding_meta: temp_emb_cfg = self.embedding_meta["temp"] if temp_emb_cfg: self.temp_emb = Embedding( - emb_type="temp", features=self.d_model, + emb_type="temp", + features=self.d_model, embedding_agg_method=self.embedding_agg_method, - **temp_emb_cfg + **temp_emb_cfg, ) else: self.embedding_meta = {} @@ -136,7 +141,7 @@ def __init__( feature_dim=feature_dim_attn_head, num_layers=num_layers_attn_head, dropout=dropout_attn_head, - **self.embedding_meta + **self.embedding_meta, ) self._reset_parameters() @@ -190,20 +195,25 @@ def forward( # (encoder_features, ref_pos_emb, ref_temp_emb) \ encoder_features, pos_emb_traceback, temp_emb_traceback = self.encoder( 
- encoder_queries, embedding_map={"pos": self.pos_emb, "temp": self.temp_emb}, - boxes=ref_boxes, times=ref_times, - embedding_agg_method=self.embedding_agg_method - ) # (total_instances, batch_size, embed_dim) or - # (3*total_instances,batch_size,embed_dim) if using stacked embeddings + encoder_queries, + embedding_map={"pos": self.pos_emb, "temp": self.temp_emb}, + boxes=ref_boxes, + times=ref_times, + embedding_agg_method=self.embedding_agg_method, + ) # (total_instances, batch_size, embed_dim) or + # (3*total_instances,batch_size,embed_dim) if using stacked embeddings if self.return_embedding: for i, instance in enumerate(ref_instances): if self.embedding_agg_method == "average": - ref_pos_emb = pos_emb_traceback[0][i] # array + ref_pos_emb = pos_emb_traceback[0][i] # array else: - ref_pos_emb = {"x": pos_emb_traceback[0][0][i], "y": pos_emb_traceback[1][0][i]} # dict + ref_pos_emb = { + "x": pos_emb_traceback[0][0][i], + "y": pos_emb_traceback[1][0][i], + } # dict - instance.add_embedding("pos", ref_pos_emb) # can be an array or a dict + instance.add_embedding("pos", ref_pos_emb) # can be an array or a dict instance.add_embedding("temp", temp_emb_traceback) # -------------- Begin decoder --------------- # @@ -222,7 +232,7 @@ def forward( # just get boxes, we already have query_times from above query_boxes = get_boxes(query_instances) query_boxes = torch.nan_to_num(query_boxes, -1.0) - else: # for training, query_instances is None so just pass in the ref data + else: # for training, query_instances is None so just pass in the ref data n_query = total_instances query_instances = ref_instances query_features = ref_features @@ -230,11 +240,14 @@ def forward( query_times = ref_times decoder_features, pos_emb_traceback, temp_emb_traceback = self.decoder( - query_features, encoder_features, + query_features, + encoder_features, embedding_map={"pos": self.pos_emb, "temp": self.temp_emb}, - enc_boxes=ref_boxes, enc_times=ref_times, - boxes=query_boxes, times=query_times, - embedding_agg_method=self.embedding_agg_method + enc_boxes=ref_boxes, + enc_times=ref_times, + boxes=query_boxes, + times=query_times, + embedding_agg_method=self.embedding_agg_method, ) # (L, n_query, batch_size, embed_dim) if self.return_embedding: @@ -242,7 +255,10 @@ def forward( if self.embedding_agg_method == "average": ref_pos_emb = pos_emb_traceback[0][i] # array else: - ref_pos_emb = {"x": pos_emb_traceback[0][0][i], "y": pos_emb_traceback[1][0][i]} # dict + ref_pos_emb = { + "x": pos_emb_traceback[0][0][i], + "y": pos_emb_traceback[1][0][i], + } # dict instance.add_embedding("pos", ref_pos_emb) # can be an array or a dict instance.add_embedding("temp", temp_emb_traceback) @@ -250,7 +266,7 @@ def forward( decoder_features = decoder_features.transpose( 1, 2 ) # # (L, batch_size, n_query, embed_dim) or ((L, batch_size, 3*n_query, embed_dim)) if using stacked embeddings - encoder_features = encoder_features.permute(1, 0, 2) + encoder_features = encoder_features.permute(1, 0, 2) # (batch_size, total_instances, embed_dim) or (batch_size, 3*total_instances, embed_dim) asso_output = [] @@ -260,7 +276,7 @@ def forward( # or number of ref instances for training. 
total_instances is always the number of reference instances asso_matrix = self.attn_head(frame_features, encoder_features).view( n_query, total_instances - ) # call to view() just removes the batch dimension; output of attn_head is (1,n_query,total_instances) + ) # call to view() just removes the batch dimension; output of attn_head is (1,n_query,total_instances) asso_matrix = AssociationMatrix(asso_matrix, ref_instances, query_instances) asso_output.append(asso_matrix) @@ -305,9 +321,7 @@ def __init__( self.activation = _get_activation_fn(activation) - def forward( - self, queries: torch.Tensor - ) -> torch.Tensor: + def forward(self, queries: torch.Tensor) -> torch.Tensor: """Execute a forward pass of the encoder layer. Args: @@ -451,9 +465,12 @@ def __init__( self.norm = norm if norm is not None else nn.Identity() def forward( - self, queries: torch.Tensor, embedding_map: Dict[str, Embedding], - boxes: torch.Tensor, times: torch.Tensor, - embedding_agg_method: str = None + self, + queries: torch.Tensor, + embedding_map: Dict[str, Embedding], + boxes: torch.Tensor, + times: torch.Tensor, + embedding_agg_method: str = None, ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """Execute a forward pass of encoder layer. Computes and applies embeddings before input to EncoderLayer @@ -511,9 +528,11 @@ def forward( decoder_queries: torch.Tensor, encoder_features: torch.Tensor, embedding_map: Dict[str, Embedding], - enc_boxes: torch.Tensor, enc_times: torch.Tensor, - boxes: torch.Tensor, times: torch.Tensor, - embedding_agg_method: str = None + enc_boxes: torch.Tensor, + enc_times: torch.Tensor, + boxes: torch.Tensor, + times: torch.Tensor, + embedding_agg_method: str = None, ) -> torch.Tensor: """Execute a forward pass of the decoder block. @@ -521,7 +540,7 @@ def forward( decoder_queries: Query sequence for decoder to generate (n_query, batch_size, embed_dim). encoder_features: Output from encoder, that decoder uses to attend to relevant parts of input sequence (total_instances, batch_size, embed_dim) - + Returns: The output tensor of shape (L, n_query, batch_size, embed_dim). @@ -529,11 +548,16 @@ def forward( decoder_features = decoder_queries intermediate = [] - # since the encoder output doesn't change for any number of decoder layer inputs, + # since the encoder output doesn't change for any number of decoder layer inputs, # we can process its embedding outside the loop if embedding_agg_method == "average": - encoder_features, *_ = apply_embeddings(encoder_features, embedding_map, - enc_boxes, enc_times, embedding_agg_method) + encoder_features, *_ = apply_embeddings( + encoder_features, + embedding_map, + enc_boxes, + enc_times, + embedding_agg_method, + ) # TODO: ^ should embeddings really be applied to encoder output again before cross attention? # switched off for stack and concatenate methods as those further split the tokens. 
Kept for "average" # for backward compatibility @@ -542,9 +566,7 @@ def forward( decoder_features, pos_emb_traceback, temp_emb_traceback = apply_embeddings( decoder_features, embedding_map, boxes, times, embedding_agg_method ) - decoder_features = layer( - decoder_features, encoder_features - ) + decoder_features = layer(decoder_features, encoder_features) if self.return_intermediate: intermediate.append(self.norm(decoder_features)) @@ -557,10 +579,14 @@ def forward( return decoder_features.unsqueeze(0), pos_emb_traceback, temp_emb_traceback -def apply_embeddings(queries: torch.Tensor, embedding_map: Dict[str, Embedding], - boxes: torch.Tensor, times: torch.Tensor, - embedding_agg_method: str): - """ Applies embeddings to input queries for various aggregation methods. This function +def apply_embeddings( + queries: torch.Tensor, + embedding_map: Dict[str, Embedding], + boxes: torch.Tensor, + times: torch.Tensor, + embedding_agg_method: str, +): + """Applies embeddings to input queries for various aggregation methods. This function is called from the transformer encoder and decoder Args: @@ -574,7 +600,9 @@ def apply_embeddings(queries: torch.Tensor, embedding_map: Dict[str, Embedding], pos_emb, temp_emb = embedding_map["pos"], embedding_map["temp"] # queries is of shape (n_query, batch_size, embed_dim); transpose for embeddings - queries = queries.permute(1,0,2) # queries is shape (batch_size, n_query, embed_dim) + queries = queries.permute( + 1, 0, 2 + ) # queries is shape (batch_size, n_query, embed_dim) # calculate temporal embeddings and transform queries queries_t, ref_temp_emb = temp_emb(queries, times) @@ -595,14 +623,13 @@ def apply_embeddings(queries: torch.Tensor, embedding_map: Dict[str, Embedding], # forward pass of Embedding object transforms input queries with embeddings queries_x, ref_pos_emb_x = pos_emb(queries, ref_x) queries_y, ref_pos_emb_y = pos_emb(queries, ref_y) - queries_avg = None # pass dummy var in to collate_queries + queries_avg = None # pass dummy var in to collate_queries pos_emb_traceback = (ref_pos_emb_x, ref_pos_emb_y) - # concatenate or stack the queries (avg. 
method done above since it applies differently) queries = collate_queries( - (queries_avg, queries_t, queries_x, queries_y, queries), - embedding_agg_method) + (queries_avg, queries_t, queries_x, queries_y, queries), embedding_agg_method + ) # transpose for input to EncoderLayer to (n_queries, batch_size, embed_dim) queries = queries.permute(1, 0, 2) @@ -640,40 +667,46 @@ def _get_activation_fn(activation: str) -> callable: raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.") -def collate_queries(queries: Tuple[torch.Tensor], embedding_agg_method: str - ) -> torch.Tensor: - """Aggregates queries transformed by embeddings +def collate_queries( + queries: Tuple[torch.Tensor], embedding_agg_method: str +) -> torch.Tensor: + """Aggregates queries transformed by embeddings - Args: - _queries: 5-tuple of queries (already transformed by embeddings) for _, x, y, t, original input - each of shape (batch_size, n_query, embed_dim) - embedding_agg_method: String representing the aggregation method for embeddings + Args: + _queries: 5-tuple of queries (already transformed by embeddings) for _, x, y, t, original input + each of shape (batch_size, n_query, embed_dim) + embedding_agg_method: String representing the aggregation method for embeddings - Returns: - Tensor of aggregated queries of shape; can be concatenated (increased length of tokens), - stacked (increased number of tokens), or averaged (original token number and length) - """ + Returns: + Tensor of aggregated queries of shape; can be concatenated (increased length of tokens), + stacked (increased number of tokens), or averaged (original token number and length) + """ + + queries_avg, queries_t, queries_x, queries_y, orig_queries = queries + + if embedding_agg_method == "average": + collated_queries = queries_avg + elif embedding_agg_method == "stack": + # (t1,t2,t3...),(x1,x2,x3...),(y1,y2,y3...) + # stacked is of shape (batch_size, 3*n_query, embed_dim) + collated_queries = torch.cat((queries_t, queries_x, queries_y), dim=1) + elif embedding_agg_method == "concatenate": + mlp = MLP( + input_dim=queries_t.shape[-1] * 3, + hidden_dim=queries_t.shape[-1] * 2, + output_dim=queries_t.shape[-1], + num_layers=1, + dropout=0.0, + ) + # concatenated is of shape (batch_size, n_query, 3*embed_dim) + collated_queries = torch.cat((queries_t, queries_x, queries_y), dim=2) + # pass through MLP to project into space of (batch_size, n_query, embed_dim) + collated_queries = mlp(collated_queries) + else: + collated_queries = orig_queries - queries_avg, queries_t, queries_x, queries_y, orig_queries = queries + return collated_queries - if embedding_agg_method == "average": - collated_queries = queries_avg - elif embedding_agg_method == "stack": - # (t1,t2,t3...),(x1,x2,x3...),(y1,y2,y3...) - # stacked is of shape (batch_size, 3*n_query, embed_dim) - collated_queries = torch.cat((queries_t, queries_x, queries_y), dim=1) - elif embedding_agg_method == "concatenate": - mlp = MLP(input_dim=queries_t.shape[-1] * 3, hidden_dim=queries_t.shape[-1] * 2, - output_dim=queries_t.shape[-1], num_layers=1, dropout=0.) 
- # concatenated is of shape (batch_size, n_query, 3*embed_dim) - collated_queries = torch.cat((queries_t, queries_x, queries_y), dim=2) - # pass through MLP to project into space of (batch_size, n_query, embed_dim) - collated_queries = mlp(collated_queries) - else: - collated_queries = orig_queries - - return collated_queries - def spatial_emb_from_bb(bb: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """ @@ -682,9 +715,12 @@ def spatial_emb_from_bb(bb: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: Args: bb: Bounding boxes of shape (n_query, n_anchors, 4) from which to compute x,y centroids; each bounding box is [ymin, xmin, ymax, xmax] - - Returns: + + Returns: A tuple of tensors containing the emebdding array for x,y dimensions, each of shape (n_query,) """ # compute avg of xmin,xmax and ymin,ymax - return bb[:,:,[1,3]].mean(axis=2).squeeze(), bb[:,:,[0,2]].mean(axis=2).squeeze() \ No newline at end of file + return ( + bb[:, :, [1, 3]].mean(axis=2).squeeze(), + bb[:, :, [0, 2]].mean(axis=2).squeeze(), + ) diff --git a/tests/test_models.py b/tests/test_models.py index 76ef074..bdf17f0 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -15,7 +15,7 @@ TransformerEncoderLayer, TransformerDecoderLayer, spatial_emb_from_bb, - apply_embeddings + apply_embeddings, ) @@ -35,7 +35,9 @@ def test_att_weight_head(): """Test self-attention head logic.""" b, n, f = 1, 10, 1024 # batch size, num instances, features - att_weight_head = ATTWeightHead(feature_dim=f, num_layers=2, dropout=0.1, embedding_agg_method="average") + att_weight_head = ATTWeightHead( + feature_dim=f, num_layers=2, dropout=0.1, embedding_agg_method="average" + ) q = k = torch.rand(size=(b, n, f)) @@ -165,19 +167,39 @@ def test_embedding_validity(): with pytest.raises(Exception): # embedding_agg_method cannot be average for rope - _ = Embedding(emb_type="pos", mode="rope", features=128, embedding_agg_method="average") - _ = Embedding(emb_type="pos", mode="rope", features=128, embedding_agg_method="stacked") + _ = Embedding( + emb_type="pos", mode="rope", features=128, embedding_agg_method="average" + ) + _ = Embedding( + emb_type="pos", mode="rope", features=128, embedding_agg_method="stacked" + ) - _ = Embedding(emb_type="pos", mode="rope", features=128, embedding_agg_method="stack") - _ = Embedding(emb_type="pos", mode="rope", features=128, embedding_agg_method="concatenate") + _ = Embedding( + emb_type="pos", mode="rope", features=128, embedding_agg_method="stack" + ) + _ = Embedding( + emb_type="pos", mode="rope", features=128, embedding_agg_method="concatenate" + ) - _ = Embedding(emb_type="pos", mode="fixed", features=128, embedding_agg_method="average") - _ = Embedding(emb_type="pos", mode="fixed", features=128, embedding_agg_method="stack") - _ = Embedding(emb_type="pos", mode="fixed", features=128, embedding_agg_method="concatenate") + _ = Embedding( + emb_type="pos", mode="fixed", features=128, embedding_agg_method="average" + ) + _ = Embedding( + emb_type="pos", mode="fixed", features=128, embedding_agg_method="stack" + ) + _ = Embedding( + emb_type="pos", mode="fixed", features=128, embedding_agg_method="concatenate" + ) - _ = Embedding(emb_type="pos", mode="learned", features=128, embedding_agg_method="average") - _ = Embedding(emb_type="pos", mode="learned", features=128, embedding_agg_method="stack") - _ = Embedding(emb_type="pos", mode="learned", features=128, embedding_agg_method="concatenate") + _ = Embedding( + emb_type="pos", mode="learned", features=128, embedding_agg_method="average" + 
) + _ = Embedding( + emb_type="pos", mode="learned", features=128, embedding_agg_method="stack" + ) + _ = Embedding( + emb_type="pos", mode="learned", features=128, embedding_agg_method="concatenate" + ) _ = Embedding(emb_type="temp", mode="learned", features=128) _ = Embedding(emb_type="pos", mode="learned", features=128) @@ -198,16 +220,10 @@ def test_rope_embedding(): x = torch.rand(size=(1, N, d_model)) pos_emb = Embedding( - emb_type="pos", - mode="rope", - features=d_model, - embedding_agg_method="stack" + emb_type="pos", mode="rope", features=d_model, embedding_agg_method="stack" ) temp_emb = Embedding( - emb_type="temp", - mode="rope", - features=d_model, - embedding_agg_method="stack" + emb_type="temp", mode="rope", features=d_model, embedding_agg_method="stack" ) ref_x, ref_y = spatial_emb_from_bb(boxes) @@ -452,10 +468,7 @@ def test_transformer_decoder(): # with position pos_emb = query_pos_emb = torch.ones_like(encoder_features) - decoder_features = transformer_decoder( - decoder_queries, - encoder_features - ) + decoder_features = transformer_decoder(decoder_queries, encoder_features) assert decoder_features.size() == decoder_queries.size() @@ -467,8 +480,12 @@ def test_transformer_basic(): num_detected = 10 img_shape = (1, 100, 100) embedding_meta = {"embedding_agg_method": "stack"} - transformer = Transformer(d_model=feats, num_encoder_layers=1, num_decoder_layers=1, - embedding_meta=embedding_meta) + transformer = Transformer( + d_model=feats, + num_encoder_layers=1, + num_decoder_layers=1, + embedding_meta=embedding_meta, + ) frames = [] @@ -514,7 +531,7 @@ def test_transformer_embedding(): embedding_meta = { "pos": {"mode": "learned", "emb_num": 16, "normalize": True}, "temp": {"mode": "learned", "emb_num": 16, "normalize": True}, - "embedding_agg_method": "average" + "embedding_agg_method": "average", } transformer = Transformer( diff --git a/tests/test_training.py b/tests/test_training.py index 8c5206e..5729510 100644 --- a/tests/test_training.py +++ b/tests/test_training.py @@ -138,7 +138,7 @@ def test_config_gtr_runner(tmp_path, base_config, params_config, two_flies): "dataset.clip_length": 8, "trainer.min_epochs": 1, "checkpointing.dirpath": model_dir, - "logging.save_dir": logs_dir + "logging.save_dir": logs_dir, } cfg.set_hparams(hparams) From de2ace9aad8c02cb7f7b5b1e411e5a49f761440f Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Mon, 26 Aug 2024 14:54:00 -0700 Subject: [PATCH 60/63] add cross attn for rope-stack before final asso matrix output --- dreem/models/attention_head.py | 36 +++++++++++++++++++++++++--------- dreem/models/transformer.py | 2 -- scripts/run_eval.py | 12 ++++++++++++ scripts/run_trainer.py | 3 +-- 4 files changed, 40 insertions(+), 13 deletions(-) create mode 100644 scripts/run_eval.py diff --git a/dreem/models/attention_head.py b/dreem/models/attention_head.py index 35e7b59..701dc6f 100644 --- a/dreem/models/attention_head.py +++ b/dreem/models/attention_head.py @@ -33,6 +33,9 @@ def __init__(self, feature_dim: int, num_layers: int, dropout: float, **kwargs): self.k_proj = torch.nn.Conv1d( in_channels=3, out_channels=1, kernel_size=1, stride=1, padding=0 ) + self.attn_x = torch.nn.MultiheadAttention(feature_dim, 1) + self.attn_y = torch.nn.MultiheadAttention(feature_dim, 1) + self.attn_t = torch.nn.MultiheadAttention(feature_dim, 1) else: self.q_proj = MLP( feature_dim, feature_dim, feature_dim, num_layers, dropout @@ -59,26 +62,41 @@ def forward( num_window_instances = key.shape[1] # if stacked embeddings, create channels for each x,y,t 
embedding dimension - # maps shape (1,192,1024) -> (1,64,3,1024) + # maps shape (1,num_instances*3,feature_dim) -> (num_instances,3,feature_dim) if self.embedding_agg_method == "stack": - key = ( - key.view(batch_size, 3, num_window_instances // 3, feature_dim) + key_stacked = ( + key + .view(batch_size, 3, num_window_instances // 3, feature_dim) .permute(0, 2, 1, 3) - .squeeze(0) + .squeeze(0) # keep as (num_instances*3, feature_dim) ) + key_orig = key.squeeze(0) # keep as (num_instances*3, feature_dim) + query = ( query.view(batch_size, 3, num_query_instances // 3, feature_dim) .permute(0, 2, 1, 3) .squeeze(0) ) - # key, query of shape (batch_size, num_instances, 3, feature_dim) - k = self.k_proj(key).transpose(1, 0) - q = self.q_proj(query).transpose(1, 0) - # k,q of shape (batch_size, num_instances, feature_dim) + # pass t,x,y frame features through cross attention with entire encoder 3*num_window_instances tokens before MLP; + # note order is t,x,y + out_t, _ = self.attn_t(query=query[:,0,:], key=key_orig, value=key_orig) + out_x, _ = self.attn_x(query=query[:,1,:], key=key_orig, value=key_orig) + out_y, _ = self.attn_y(query=query[:,2,:], key=key_orig, value=key_orig) + # combine each attention output to (num_instances, 3, feature_dim) + collated = torch.stack((out_t, out_x, out_y), dim=0).permute(1,0,2) + # mlp_out has shape (1, num_window_instances, feature_dim) + mlp_out = self.q_proj(collated).transpose(1,0) + + # key, query of shape (num_instances, 3, feature_dim) + # TODO: uncomment this if not using modified attention heads for t,x,y + k = self.k_proj(key_stacked).transpose(1, 0) + # q = self.q_proj(query).transpose(1, 0) + # k,q of shape (num_instances, feature_dim) + attn_weights = torch.bmm(mlp_out, k.transpose(1, 2)) else: k = self.k_proj(key) q = self.q_proj(query) + attn_weights = torch.bmm(q, k.transpose(1, 2)) - attn_weights = torch.bmm(q, k.transpose(1, 2)) return attn_weights # (B, N_t, N) diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index 460d911..4cc2041 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -186,11 +186,9 @@ def forward( ref_times, query_times = get_times(ref_instances, query_instances) batch_size, total_instances, embed_dim = ref_features.shape - ref_features = ref_features.permute( 1, 0, 2 ) # (total_instances, batch_size, embed_dim) - encoder_queries = ref_features # (encoder_features, ref_pos_emb, ref_temp_emb) \ diff --git a/scripts/run_eval.py b/scripts/run_eval.py new file mode 100644 index 0000000..a433852 --- /dev/null +++ b/scripts/run_eval.py @@ -0,0 +1,12 @@ +from dreem.training import train +from omegaconf import OmegaConf + +# /Users/mustafashaikh/dreem/dreem/training +# /Users/main/Documents/GitHub/dreem/dreem/training + + +inference_config = "tests/configs/inference.yaml" + +cfg = OmegaConf.load(inference_config) + +eval.run(cfg) \ No newline at end of file diff --git a/scripts/run_trainer.py b/scripts/run_trainer.py index 5046222..397fca9 100644 --- a/scripts/run_trainer.py +++ b/scripts/run_trainer.py @@ -4,8 +4,7 @@ # /Users/mustafashaikh/dreem/dreem/training # /Users/main/Documents/GitHub/dreem/dreem/training -os.chdir("./dreem/training") - +os.chdir("/Users/main/Documents/GitHub/dreem/dreem/training") base_config = "./configs/base.yaml" # params_config = "./configs/override.yaml" From 9b2917152bf2b913d925027d212c3888a45ae11d Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Tue, 27 Aug 2024 16:14:41 -0700 Subject: [PATCH 61/63] minor bug fix in rope embedding for single instance 
clips --- dreem/models/embedding.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/dreem/models/embedding.py b/dreem/models/embedding.py index 57ace35..dfcf9ec 100644 --- a/dreem/models/embedding.py +++ b/dreem/models/embedding.py @@ -101,6 +101,7 @@ def forward(self, x: Tensor, input_pos: Optional[Tensor] = None) -> Tensor: # 100 since it's a fraction of [0,1]*100. temp is from [0, clip_len]; since clip_len # not available, we use the last value in the indexing array since this will be the # last possible frame that we would need to index since no instances in a frame after that + if input_pos.dim() <= 1: input_pos = input_pos.unsqueeze(0) self.build_rope_cache(max(101, input_pos[:, -1].max() + 1)) # registers cache self.cache = self.cache.to(input_pos.device) # extract the values based on whether input_pos is set or not @@ -370,12 +371,13 @@ def _rope_embedding( # use num_heads=1 for compatibility with torch ROPE x_rope = torch.rand(input_shape).unsqueeze(2) # infer whether it is a positional or temporal embedding - is_pos_emb = 1 if seq_positions.max() <= 1 else 0 + is_pos_emb = 1 if seq_positions.max() < 1 else 0 # if it is positional, scale seq_positions since these are fractions # in [0,1] and we need int indexes for embedding lookup seq_positions = seq_positions * 100 if is_pos_emb else seq_positions + seq_positions = seq_positions.unsqueeze(0).int() # RoPE module takes in dimension, num_queries as input to calculate rotation matrix - rot_mat = self.rope_instance(x_rope, seq_positions.unsqueeze(0).int()) + rot_mat = self.rope_instance(x_rope, seq_positions) return rot_mat From 1998f6f2d08b1dcee6d3be4dc59651af54558e17 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Wed, 9 Oct 2024 19:25:36 +0000 Subject: [PATCH 62/63] - Started implementation for post processing fixes; no logic changes - added tracker debugging script --- dreem/inference/post_processing.py | 12 +++++++++--- dreem/inference/tracker.py | 6 ++++-- scripts/run_tracker.py | 12 ++++++++++++ scripts/run_trainer.py | 10 ++++++---- tests/test_inference.py | 4 ++++ 5 files changed, 35 insertions(+), 9 deletions(-) create mode 100644 scripts/run_tracker.py diff --git a/dreem/inference/post_processing.py b/dreem/inference/post_processing.py index 09fd8ff..a64739c 100644 --- a/dreem/inference/post_processing.py +++ b/dreem/inference/post_processing.py @@ -126,6 +126,8 @@ def filter_max_center_dist( k_boxes: torch.Tensor | None = None, nonk_boxes: torch.Tensor | None = None, id_inds: torch.Tensor | None = None, + h: int = None, + w: int = None ) -> torch.Tensor: """Filter trajectory score by distances between objects across frames. 
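The new h and w arguments appear intended for converting the normalized (0-1) box coordinates back to pixel units before the center-distance threshold is applied; the hunk below flags that scaling as a TODO rather than implementing it. A rough sketch of what a pixel-space centroid distance could look like, assuming boxes in [ymin, xmin, ymax, xmax] order normalized to [0, 1]; this helper is hypothetical and not the patch's implementation:

import torch

def center_dist_px(
    k_boxes: torch.Tensor, nonk_boxes: torch.Tensor, h: int, w: int
) -> torch.Tensor:
    # k_boxes: (n_k, n_anchors, 4); nonk_boxes: (n_nonk, n_anchors, 4)
    scale = torch.tensor([h, w, h, w], dtype=k_boxes.dtype, device=k_boxes.device)
    k_px = k_boxes * scale
    nonk_px = nonk_boxes * scale
    # per-anchor centroids in pixels
    k_ct = (k_px[:, :, :2] + k_px[:, :, 2:]) / 2
    nonk_ct = (nonk_px[:, :, :2] + nonk_px[:, :, 2:]) / 2
    # pairwise squared centroid distance, averaged over anchors: (n_k, n_nonk)
    dist = ((k_ct[:, None, :, :] - nonk_ct[None, :, :, :]) ** 2).sum(dim=-1)
    return dist.mean(dim=-1)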
@@ -135,6 +137,8 @@ def filter_max_center_dist( k_boxes: The bounding boxes in the current frame nonk_boxes: the boxes not in the current frame id_inds: track ids + h: height of image + w: width of image Returns: An N_t x N association matrix @@ -147,13 +151,15 @@ def filter_max_center_dist( k_s = ((k_boxes[:, :, 2:] - k_boxes[:, :, :2]) ** 2).sum(dim=2) # n_k nonk_ct = (nonk_boxes[:, :, :2] + nonk_boxes[:, :, 2:]) / 2 - + # TODO: nonk_boxes should be only from previous frame rather than entire window dist = ((k_ct[:, None, :, :] - nonk_ct[None, :, :, :]) ** 2).sum( dim=-1 ) # n_k x Np - - norm_dist = dist / (k_s[:, None, :] + 1e-8) + # TODO: note that dist is in units of fraction of the height and width of the image; + # TODO: need to scale it by the original image size so that its in units of pixels + # norm_dist = dist / (k_s[:, None, :] + 1e-8) norm_dist = dist.mean(axis=-1) # n_k x Np + # norm_dist = valid = norm_dist < max_center_dist # n_k x Np valid_assn = ( diff --git a/dreem/inference/tracker.py b/dreem/inference/tracker.py index 58480f4..279a24d 100644 --- a/dreem/inference/tracker.py +++ b/dreem/inference/tracker.py @@ -199,7 +199,7 @@ def sliding_inference( # if no track ids, then assign new ones for i, instance in enumerate(frames[batch_idx].instances): if instance.pred_track_id == -1: - curr_track += 1 + curr_track_id += 1 instance.pred_track_id = curr_track_id else: @@ -351,6 +351,8 @@ def _run_global_tracker( query_frame.add_traj_score("asso_nonquery", asso_nonquery_df) + # need frame height and width to scale boxes during post-processing + _, h, w = query_frame.img_shape.flatten() pred_boxes = model_utils.get_boxes(all_instances) query_boxes = pred_boxes[query_inds] # n_k x 4 nonquery_boxes = pred_boxes[nonquery_inds] # n_nonquery x 4 @@ -435,7 +437,7 @@ def _run_global_tracker( # threshold for continuing a tracking or starting a new track -> they use 1.0 # todo -> should also work without pos_embed traj_score = post_processing.filter_max_center_dist( - traj_score, self.max_center_dist, query_boxes, nonquery_boxes, id_inds + traj_score, self.max_center_dist, query_boxes, nonquery_boxes, id_inds, h, w ) if self.max_center_dist is not None and self.max_center_dist > 0: diff --git a/scripts/run_tracker.py b/scripts/run_tracker.py new file mode 100644 index 0000000..f4aefe9 --- /dev/null +++ b/scripts/run_tracker.py @@ -0,0 +1,12 @@ +from dreem.inference import track +from omegaconf import OmegaConf +import os + +# /Users/mustafashaikh/dreem/dreem/training +# /Users/main/Documents/GitHub/dreem/dreem/training +# os.chdir("/Users/main/Documents/GitHub/dreem/dreem/training") +config = "/root/vast/mustafa/dreem-experiments/run/lysosome-baselines/debug/configs/inference.yaml" + +cfg = OmegaConf.load(config) + +track.run(cfg) \ No newline at end of file diff --git a/scripts/run_trainer.py b/scripts/run_trainer.py index 397fca9..f6829e3 100644 --- a/scripts/run_trainer.py +++ b/scripts/run_trainer.py @@ -4,11 +4,13 @@ # /Users/mustafashaikh/dreem/dreem/training # /Users/main/Documents/GitHub/dreem/dreem/training -os.chdir("/Users/main/Documents/GitHub/dreem/dreem/training") -base_config = "./configs/base.yaml" -# params_config = "./configs/override.yaml" +# os.chdir("/Users/main/Documents/GitHub/dreem/dreem/training") +base_config = "/root/vast/mustafa/dreem-experiments/run/lysosome-baselines/debug/configs/base-updated.yaml" +params_config = "/root/vast/mustafa/dreem-experiments/run/lysosome-baselines/debug/configs/override-updated.yaml" cfg = OmegaConf.load(base_config) -# 
cfg["params_config"] = params_config +# Load and merge override config +override_cfg = OmegaConf.load(params_config) +cfg = OmegaConf.merge(cfg, override_cfg) train.run(cfg) \ No newline at end of file diff --git a/tests/test_inference.py b/tests/test_inference.py index 2b55484..7f580dd 100644 --- a/tests/test_inference.py +++ b/tests/test_inference.py @@ -215,6 +215,8 @@ def test_post_processing(): # set_default_device k_boxes=k_boxes, nonk_boxes=nonk_boxes, id_inds=id_inds, + h=im_size, + w=im_size ) ).all() @@ -226,6 +228,8 @@ def test_post_processing(): # set_default_device k_boxes=k_boxes, nonk_boxes=nonk_boxes, id_inds=id_inds, + h=im_size, + w=im_size ) ).all() From e4ce29c5d2e2302f218ce61896d5821a8890e0a6 Mon Sep 17 00:00:00 2001 From: shaikh58 Date: Fri, 13 Dec 2024 13:45:07 +0000 Subject: [PATCH 63/63] merge bug fixes --- dreem/datasets/sleap_dataset.py | 18 +++++++++--------- dreem/inference/eval.py | 29 +++++++++++------------------ dreem/inference/track.py | 31 ++++++++++++------------------- dreem/models/transformer.py | 1 - 4 files changed, 32 insertions(+), 47 deletions(-) diff --git a/dreem/datasets/sleap_dataset.py b/dreem/datasets/sleap_dataset.py index 7bfc6e3..a0ded6e 100644 --- a/dreem/datasets/sleap_dataset.py +++ b/dreem/datasets/sleap_dataset.py @@ -137,7 +137,7 @@ def __init__( # if self.seed is not None: # np.random.seed(self.seed) self.labels = [sio.load_slp(slp_file) for slp_file in self.slp_files] - self.vid_readers = {} + self.videos = [imageio.get_reader(vid_file) for vid_file in self.vid_files] # do we need this? would need to update with sleap-io # for label in self.labels: @@ -204,12 +204,12 @@ def get_instances(self, label_idx: list[int], frame_idx: list[int]) -> list[Fram lf = video[frame_ind] try: - img = lf.image - except FileNotFoundError as e: - if video_name not in self.vid_readers: - self.vid_readers[video_name] = sio.load_video(video_name) - vid_reader = self.vid_readers[video_name] - img = vid_reader[lf.frame_idx] + img = vid_reader.get_data(int(lf.frame_idx)) + except IndexError as e: + logger.warning( + f"Could not read frame {frame_ind} from {video_name} due to {e}" + ) + continue if len(img.shape) == 2: img = img.expand_dims(-1) @@ -414,5 +414,5 @@ def get_instances(self, label_idx: list[int], frame_idx: list[int]) -> list[Fram def __del__(self): """Handle file closing before garbage collection.""" - for reader in self.vid_readers: - reader.close() + for reader in self.videos: + reader.close() \ No newline at end of file diff --git a/dreem/inference/eval.py b/dreem/inference/eval.py index 44c1956..000a9b8 100644 --- a/dreem/inference/eval.py +++ b/dreem/inference/eval.py @@ -26,33 +26,26 @@ def run(cfg: DictConfig) -> dict[int, sio.Labels]: """ eval_cfg = Config(cfg) - # update with parameters for batch train job - if "batch_config" in cfg.keys(): + if "checkpoints" in cfg.keys(): try: index = int(os.environ["POD_INDEX"]) - except KeyError as e: - index = int( - input(f"{e}. Assuming single run!\nPlease input task index to run:") - ) - - hparams_df = pd.read_csv(cfg.batch_config) - hparams = hparams_df.iloc[index].to_dict() - _ = hparams.pop("Unnamed: 0", None) - - if eval_cfg.set_hparams(hparams): - logger.info("Updated the following hparams to the following values") - logger.info(hparams) + # For testing without deploying a job on runai + except KeyError: + index = input("Pod Index Not found! 
Please choose a pod index: ") + + logger.info(f"Pod Index: {index}") + + checkpoints = pd.read_csv(cfg.checkpoints) + checkpoint = checkpoints.iloc[index] else: - hparams = {} + checkpoint = eval_cfg.cfg.ckpt_path logging.getLogger().setLevel(level=cfg.get("log_level", "INFO").upper()) model = GTRRunner.load_from_checkpoint(checkpoint, strict=False) model.tracker_cfg = eval_cfg.cfg.tracker model.tracker = Tracker(**model.tracker_cfg) - logger.info(f"Using the following tracker:") - print(model.tracker) model.metrics["test"] = eval_cfg.cfg.runner.metrics.test model.persistent_tracking["test"] = eval_cfg.cfg.tracker.get( @@ -84,4 +77,4 @@ def run(cfg: DictConfig) -> dict[int, sio.Labels]: # override with params config, and specific params: # python eval.py --config-dir=./configs --config-name=inference +params_config=configs/params.yaml dataset.train_dataset.padding=10 - run() + run() \ No newline at end of file diff --git a/dreem/inference/track.py b/dreem/inference/track.py index 9115d30..91c289d 100644 --- a/dreem/inference/track.py +++ b/dreem/inference/track.py @@ -108,33 +108,27 @@ def run(cfg: DictConfig) -> dict[int, sio.Labels]: """ pred_cfg = Config(cfg) - # update with parameters for batch train job - if "batch_config" in cfg.keys(): + if "checkpoints" in cfg.keys(): try: index = int(os.environ["POD_INDEX"]) - except KeyError as e: - index = int( - input(f"{e}. Assuming single run!\nPlease input task index to run:") - ) - - hparams_df = pd.read_csv(cfg.batch_config) - hparams = hparams_df.iloc[index].to_dict() - _ = hparams.pop("Unnamed: 0", None) - - if pred_cfg.set_hparams(hparams): - logger.info("Updated the following hparams to the following values") - logger.info(hparams) + # For testing without deploying a job on runai + except KeyError: + index = input("Pod Index Not found! Please choose a pod index: ") + + logger.info(f"Pod Index: {index}") + + checkpoints = pd.read_csv(cfg.checkpoints) + checkpoint = checkpoints.iloc[index] else: - hparams = {} + checkpoint = pred_cfg.cfg.ckpt_path logging.getLogger().setLevel(level=cfg.get("log_level", "INFO").upper()) model = GTRRunner.load_from_checkpoint(checkpoint, strict=False) tracker_cfg = pred_cfg.get_tracker_cfg() - + logger.info("Updating tracker hparams") model.tracker_cfg = tracker_cfg model.tracker = Tracker(**model.tracker_cfg) - logger.info(f"Using the following tracker:") logger.info(model.tracker) @@ -144,7 +138,6 @@ def run(cfg: DictConfig) -> dict[int, sio.Labels]: os.makedirs(outdir, exist_ok=True) for label_file, vid_file in zip(labels_files, vid_files): - logger.info(f"Tracking {label_file} - {vid_file}...") dataset = pred_cfg.get_dataset( label_files=[label_file], vid_files=[vid_file], mode="test" ) @@ -170,4 +163,4 @@ def run(cfg: DictConfig) -> dict[int, sio.Labels]: # override with params config, and specific params: # python train.py --config-dir=./configs --config-name=inference +params_config=configs/params.yaml dataset.train_dataset.padding=10 - run() + run() \ No newline at end of file diff --git a/dreem/models/transformer.py b/dreem/models/transformer.py index e82d92e..ce91b35 100644 --- a/dreem/models/transformer.py +++ b/dreem/models/transformer.py @@ -286,7 +286,6 @@ def forward( self.fourier_norm, ) - decoder_features = self.decoder( decoder_features, pos_emb_traceback, temp_emb_traceback = self.decoder( query_features, encoder_features,
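Across eval.py and track.py, this final patch settles on the same pod-indexed checkpoint selection for batch jobs: when a checkpoints CSV is configured, the row given by the POD_INDEX environment variable is used, with an interactive fallback; otherwise cfg.ckpt_path is used directly. A condensed sketch of that pattern; the column name "ckpt_path" and the int() cast on the fallback are illustrative assumptions, not what the patch itself writes:

import os
import pandas as pd

def select_checkpoint(checkpoints_csv: str) -> str:
    try:
        index = int(os.environ["POD_INDEX"])
    except KeyError:
        # not running as an indexed batch job; ask interactively
        index = int(input("Pod Index not found! Please choose a pod index: "))
    checkpoints = pd.read_csv(checkpoints_csv)
    return checkpoints["ckpt_path"].iloc[index]

In the patches above, the selected checkpoint is then handed to GTRRunner.load_from_checkpoint(checkpoint, strict=False) before the tracker config is applied.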