From 5e379b765d6d7e9c4a41cfa0426b2386d813b47b Mon Sep 17 00:00:00 2001 From: YuanJianhao508 <820010508@qq.com> Date: Wed, 29 May 2024 14:54:50 +0100 Subject: [PATCH] first update --- README.md | 55 +- llava/__init__.py | 1 + llava/constants.py | 18 + llava/conversation.py | 398 + llava/eval/eval_gpt_mmvet.py | 276 + llava/eval/eval_gpt_review.py | 113 + llava/eval/eval_gpt_review_bench.py | 121 + llava/eval/eval_gpt_review_visual.py | 118 + llava/eval/eval_gqa.py | 499 + llava/eval/eval_pope.py | 81 + llava/eval/eval_science_qa.py | 114 + llava/eval/eval_science_qa_gpt4.py | 104 + llava/eval/eval_science_qa_gpt4_requery.py | 149 + llava/eval/eval_textvqa.py | 65 + .../eval/generate_webpage_data_from_table.py | 111 + llava/eval/m4c_evaluator.py | 334 + llava/eval/model_qa.py | 85 + llava/eval/model_vqa.py | 112 + llava/eval/model_vqa_loader.py | 148 + llava/eval/model_vqa_mmbench.py | 175 + llava/eval/model_vqa_science.py | 150 + llava/eval/qa_baseline_gpt35.py | 74 + llava/eval/run_llava.py | 97 + llava/eval/summarize_gpt_review.py | 60 + .../table/caps_boxes_coco2014_val_80.jsonl | 80 + llava/eval/table/model.jsonl | 5 + llava/eval/table/prompt.jsonl | 4 + llava/eval/table/question.jsonl | 80 + llava/eval/table/reviewer.jsonl | 4 + llava/eval/table/rule.json | 11 + .../video/eval_benchmark_1_correctness.py | 191 + .../eval_benchmark_2_detailed_orientation.py | 191 + llava/eval/video/eval_benchmark_3_context.py | 191 + llava/eval/video/eval_benchmark_4_temporal.py | 190 + .../video/eval_benchmark_5_consistency.py | 198 + llava/eval/video/eval_video_qa.py | 206 + .../run_inference_benchmark_consistency.py | 96 + .../video/run_inference_benchmark_general.py | 87 + llava/eval/video/run_inference_video_qa.py | 171 + .../eval/video/run_inference_video_qa_act.py | 171 + llava/eval/webpage/index.html | 162 + llava/eval/webpage/script.js | 245 + llava/eval/webpage/styles.css | 105 + llava/mm_utils.py | 123 + llava/model/__init__.py | 2 + llava/model/apply_delta.py | 48 + llava/model/builder.py | 173 + llava/model/consolidate.py | 29 + llava/model/language_model/llava_llama.py | 145 + llava/model/language_model/llava_llama_v1.py | 155 + llava/model/language_model/llava_mpt.py | 113 + llava/model/llava_arch.py | 346 + llava/model/make_delta.py | 52 + llava/model/multimodal_encoder/builder.py | 69 + .../model/multimodal_encoder/clip_encoder.py | 78 + llava/model/multimodal_encoder/mae_encoder.py | 80 + llava/model/multimodal_projector/builder.py | 257 + llava/model/utils.py | 20 + llava/serve/__init__.py | 0 llava/serve/cli.py | 145 + llava/serve/controller.py | 298 + llava/serve/eval_custom.py | 159 + llava/serve/eval_custom_chunck.py | 184 + llava/serve/eval_custom_predsig.py | 166 + llava/serve/examples/desert.jpg | Bin 0 -> 262144 bytes llava/serve/examples/extreme_ironing.jpg | Bin 0 -> 62587 bytes llava/serve/examples/sample_demo_1.mp4 | Bin 0 -> 262144 bytes llava/serve/examples/sample_demo_13.mp4 | Bin 0 -> 262144 bytes llava/serve/examples/sample_demo_22.mp4 | Bin 0 -> 262144 bytes llava/serve/examples/sample_demo_3.mp4 | Bin 0 -> 262144 bytes llava/serve/examples/sample_demo_8.mp4 | Bin 0 -> 262144 bytes llava/serve/examples/sample_demo_9.mp4 | Bin 0 -> 262144 bytes llava/serve/examples/sample_img_13.png | Bin 0 -> 48079 bytes llava/serve/examples/sample_img_22.png | Bin 0 -> 60836 bytes llava/serve/examples/sample_img_8.png | Bin 0 -> 262144 bytes llava/serve/examples/waterview.jpg | Bin 0 -> 95499 bytes llava/serve/gradio_utils.py | 141 + llava/serve/gradio_web_server.py | 253 + 
llava/serve/model_worker.py | 285 + llava/serve/register_worker.py | 26 + llava/serve/test_message.py | 62 + llava/serve/video_caption.py | 148 + llava/train/llama_flash_attn_monkey_patch.py | 115 + .../train/llama_xformers_attn_monkey_patch.py | 129 + llava/train/llava_trainer.py | 176 + llava/train/train.py | 1095 + llava/train/train_mem.py | 18 + llava/train/train_xformers.py | 13 + llava/utils.py | 126 + pyproject.toml | 36 + retrieval/BDDX_RAG_hybird_vpmatch.json | 22796 +++++++++++++++ retrieval/BDDX_RAG_neg_3.json | 17697 ++++++++++++ retrieval/BDDX_RAG_pos_3.json | 19893 ++++++++++++++ retrieval/BDDX_RAG_tuned_vpmatch_t13.json | 22928 ++++++++++++++++ retrieval/BDDX_RAG_visual_vpmatch.json | 22796 +++++++++++++++ retrieval/bddx_vpath_info_match.json | 1878 ++ retrieval/check_sim.py | 49 + retrieval/embeddings_project.npz | Bin 0 -> 1048576 bytes retrieval/final_match.py | 238 + retrieval/matching.py | 327 + retrieval/project.py | 105 + retrieval/projector/best_model.pth | Bin 0 -> 786432 bytes retrieval/rag.py | 85 + retrieval/train.py | 195 + retrieval/train_projector.sh | 5 + retrieval/vpath_info_match.py | 55 + scripts/finetune.sh | 46 + scripts/pretrain.sh | 43 + scripts/zero2.json | 23 + scripts/zero3.json | 28 + scripts/zero3_offload.json | 56 + video_process/create_bddx_json.py | 170 + 112 files changed, 120523 insertions(+), 1 deletion(-) create mode 100644 llava/__init__.py create mode 100644 llava/constants.py create mode 100644 llava/conversation.py create mode 100644 llava/eval/eval_gpt_mmvet.py create mode 100644 llava/eval/eval_gpt_review.py create mode 100644 llava/eval/eval_gpt_review_bench.py create mode 100644 llava/eval/eval_gpt_review_visual.py create mode 100644 llava/eval/eval_gqa.py create mode 100644 llava/eval/eval_pope.py create mode 100644 llava/eval/eval_science_qa.py create mode 100644 llava/eval/eval_science_qa_gpt4.py create mode 100644 llava/eval/eval_science_qa_gpt4_requery.py create mode 100644 llava/eval/eval_textvqa.py create mode 100644 llava/eval/generate_webpage_data_from_table.py create mode 100644 llava/eval/m4c_evaluator.py create mode 100644 llava/eval/model_qa.py create mode 100644 llava/eval/model_vqa.py create mode 100644 llava/eval/model_vqa_loader.py create mode 100644 llava/eval/model_vqa_mmbench.py create mode 100644 llava/eval/model_vqa_science.py create mode 100644 llava/eval/qa_baseline_gpt35.py create mode 100644 llava/eval/run_llava.py create mode 100644 llava/eval/summarize_gpt_review.py create mode 100644 llava/eval/table/caps_boxes_coco2014_val_80.jsonl create mode 100644 llava/eval/table/model.jsonl create mode 100644 llava/eval/table/prompt.jsonl create mode 100644 llava/eval/table/question.jsonl create mode 100644 llava/eval/table/reviewer.jsonl create mode 100644 llava/eval/table/rule.json create mode 100644 llava/eval/video/eval_benchmark_1_correctness.py create mode 100644 llava/eval/video/eval_benchmark_2_detailed_orientation.py create mode 100644 llava/eval/video/eval_benchmark_3_context.py create mode 100644 llava/eval/video/eval_benchmark_4_temporal.py create mode 100644 llava/eval/video/eval_benchmark_5_consistency.py create mode 100644 llava/eval/video/eval_video_qa.py create mode 100644 llava/eval/video/run_inference_benchmark_consistency.py create mode 100644 llava/eval/video/run_inference_benchmark_general.py create mode 100644 llava/eval/video/run_inference_video_qa.py create mode 100644 llava/eval/video/run_inference_video_qa_act.py create mode 100644 llava/eval/webpage/index.html create mode 100644 
llava/eval/webpage/script.js create mode 100644 llava/eval/webpage/styles.css create mode 100644 llava/mm_utils.py create mode 100644 llava/model/__init__.py create mode 100644 llava/model/apply_delta.py create mode 100644 llava/model/builder.py create mode 100644 llava/model/consolidate.py create mode 100644 llava/model/language_model/llava_llama.py create mode 100644 llava/model/language_model/llava_llama_v1.py create mode 100644 llava/model/language_model/llava_mpt.py create mode 100644 llava/model/llava_arch.py create mode 100644 llava/model/make_delta.py create mode 100644 llava/model/multimodal_encoder/builder.py create mode 100644 llava/model/multimodal_encoder/clip_encoder.py create mode 100644 llava/model/multimodal_encoder/mae_encoder.py create mode 100644 llava/model/multimodal_projector/builder.py create mode 100644 llava/model/utils.py create mode 100644 llava/serve/__init__.py create mode 100644 llava/serve/cli.py create mode 100644 llava/serve/controller.py create mode 100644 llava/serve/eval_custom.py create mode 100644 llava/serve/eval_custom_chunck.py create mode 100644 llava/serve/eval_custom_predsig.py create mode 100644 llava/serve/examples/desert.jpg create mode 100644 llava/serve/examples/extreme_ironing.jpg create mode 100644 llava/serve/examples/sample_demo_1.mp4 create mode 100644 llava/serve/examples/sample_demo_13.mp4 create mode 100644 llava/serve/examples/sample_demo_22.mp4 create mode 100644 llava/serve/examples/sample_demo_3.mp4 create mode 100644 llava/serve/examples/sample_demo_8.mp4 create mode 100644 llava/serve/examples/sample_demo_9.mp4 create mode 100644 llava/serve/examples/sample_img_13.png create mode 100644 llava/serve/examples/sample_img_22.png create mode 100644 llava/serve/examples/sample_img_8.png create mode 100644 llava/serve/examples/waterview.jpg create mode 100644 llava/serve/gradio_utils.py create mode 100644 llava/serve/gradio_web_server.py create mode 100644 llava/serve/model_worker.py create mode 100644 llava/serve/register_worker.py create mode 100644 llava/serve/test_message.py create mode 100644 llava/serve/video_caption.py create mode 100644 llava/train/llama_flash_attn_monkey_patch.py create mode 100644 llava/train/llama_xformers_attn_monkey_patch.py create mode 100644 llava/train/llava_trainer.py create mode 100644 llava/train/train.py create mode 100644 llava/train/train_mem.py create mode 100644 llava/train/train_xformers.py create mode 100644 llava/utils.py create mode 100644 pyproject.toml create mode 100644 retrieval/BDDX_RAG_hybird_vpmatch.json create mode 100644 retrieval/BDDX_RAG_neg_3.json create mode 100644 retrieval/BDDX_RAG_pos_3.json create mode 100644 retrieval/BDDX_RAG_tuned_vpmatch_t13.json create mode 100644 retrieval/BDDX_RAG_visual_vpmatch.json create mode 100644 retrieval/bddx_vpath_info_match.json create mode 100644 retrieval/check_sim.py create mode 100644 retrieval/embeddings_project.npz create mode 100644 retrieval/final_match.py create mode 100644 retrieval/matching.py create mode 100644 retrieval/project.py create mode 100644 retrieval/projector/best_model.pth create mode 100644 retrieval/rag.py create mode 100644 retrieval/train.py create mode 100644 retrieval/train_projector.sh create mode 100644 retrieval/vpath_info_match.py create mode 100644 scripts/finetune.sh create mode 100644 scripts/pretrain.sh create mode 100644 scripts/zero2.json create mode 100644 scripts/zero3.json create mode 100644 scripts/zero3_offload.json create mode 100644 video_process/create_bddx_json.py diff --git a/README.md 
b/README.md
index b1accae..3efd4d0 100644
--- a/README.md
+++ b/README.md
@@ -12,4 +12,55 @@ Official GitHub repository for "RAG-Driver: Generalisable Driving Explanations w
 **RAG-Driver** is a Multi-Modal Large Language Model with Retrieval-augmented In-context Learning capacity designed for generalisable and explainable end-to-end driving with strong zeroshot generalisation capacity.
 
-## News
-Codes and models will be released soon
+## 📰 News
+* **[2024.05.27]** Code update is in progress; this repo is under active maintenance.
+
+
+## TODO List
+- [ ] Uploading the processed version of BDDX.
+- [ ] Uploading the model checkpoint.
+- [ ] Releasing the Spoken-SAX dataset.
+- [ ] Further cleaning of the retrieval engine codebase.
+
+## Usage
+
+### Requirements and Installation
+* Python >= 3.10
+* PyTorch == 2.0.1
+* CUDA Version >= 11.7
+* Install required packages:
+```bash
+git clone https://github.com/YuanJianhao508/RAG-Driver.git
+cd RAG-DRIVER
+conda create -n ragdriver python=3.10 -y
+conda activate ragdriver
+pip install --upgrade pip # enable PEP 660 support
+pip install -e .
+pip install -e ".[train]"
+pip install flash-attn --no-build-isolation
+pip install decord opencv-python git+https://github.com/facebookresearch/pytorchvideo.git@28fe037d212663c6a24f373b94cc5d478c8c1a1d
+```
+
+### Instruction Tuning on the BDD-X Dataset
+
+```bash
+bash ./scripts/finetune.sh
+```
+
+- Download the pre-trained Video-LLaVA LLM and projector checkpoints from [here](https://huggingface.co/LanguageBind/Video-LLaVA-7B) and [here](https://huggingface.co/LanguageBind/Video-LLaVA-Pretrain-7B), and specify their paths in '--model_name_or_path' and '--pretrain_mm_mlp_adapter'.
+- Download the pre-trained LanguageBind encoder from [here](https://huggingface.co/LanguageBind/LanguageBind_Video_merge) and specify its path in '--video_tower'.
+- Adjust the batch size '--per_device_train_batch_size' and gradient accumulation steps '--gradient_accumulation_steps' based on the number of GPUs available; please ensure the effective batch size (i.e. --per_device_train_batch_size * --gradient_accumulation_steps * number of GPUs) equals 128. For example, 8 GPUs with '--per_device_train_batch_size 4' and '--gradient_accumulation_steps 4' give 8 * 4 * 4 = 128.
+
+
+## Citations
+If you find our paper and code useful in your research, please consider citing:
+```BibTeX
+@article{yuan2024rag,
+ title={RAG-Driver: Generalisable Driving Explanations with Retrieval-Augmented In-Context Learning in Multi-Modal Large Language Model},
+ author={Yuan, Jianhao and Sun, Shuyang and Omeiza, Daniel and Zhao, Bo and Newman, Paul and Kunze, Lars and Gadd, Matthew},
+ journal={arXiv preprint arXiv:2402.10828},
+ year={2024}
+}
+```
+
+## Acknowledgement
+This repo is built on [Video-LLaVA](https://github.com/haotian-liu/LLaVA) and [ADAPT](https://github.com/jxbbb/ADAPT). We thank all the authors for their open-source codebases.
\ No newline at end of file
diff --git a/llava/__init__.py b/llava/__init__.py
new file mode 100644
index 0000000..4d1f016
--- /dev/null
+++ b/llava/__init__.py
@@ -0,0 +1 @@
+from .model import LlavaLlamaForCausalLM
diff --git a/llava/constants.py b/llava/constants.py
new file mode 100644
index 0000000..f1bcfae
--- /dev/null
+++ b/llava/constants.py
@@ -0,0 +1,18 @@
+CONTROLLER_HEART_BEAT_EXPIRATION = 30
+WORKER_HEART_BEAT_INTERVAL = 15
+
+LOGDIR = "."
+ +# Model Constants +IGNORE_INDEX = -100 +X_TOKEN_INDEX = {'IMAGE': -200, 'VIDEO': -201, 'AUDIO': -202, 'THERMAL': -203, 'DEPTH': -204} +X_INDEX_TOKEN = {v: k for k, v in X_TOKEN_INDEX.items()} +# IMAGE_TOKEN_INDEX = -200 +DEFAULT_X_TOKEN = {'IMAGE': "", 'VIDEO': "", 'AUDIO': "", 'THERMAL': "", 'DEPTH': ""} +# DEFAULT_IMAGE_TOKEN = "" +DEFAULT_X_PATCH_TOKEN = {'IMAGE': "", 'VIDEO': "", 'AUDIO': "", 'THERMAL': "", 'DEPTH': ""} +# DEFAULT_IMAGE_PATCH_TOKEN = "" +DEFAULT_X_START_TOKEN = {'IMAGE': "", 'VIDEO': "", 'AUDIO': "", 'THERMAL': "", 'DEPTH': ""} +# DEFAULT_IM_START_TOKEN = "" +DEFAULT_X_END_TOKEN = {'IMAGE': "", 'VIDEO': "", 'AUDIO': "", 'THERMAL': "", 'DEPTH': ""} +# DEFAULT_IM_END_TOKEN = "" diff --git a/llava/conversation.py b/llava/conversation.py new file mode 100644 index 0000000..6545ce1 --- /dev/null +++ b/llava/conversation.py @@ -0,0 +1,398 @@ +import dataclasses +from enum import auto, Enum +from typing import List, Tuple + + +class SeparatorStyle(Enum): + """Different separator style.""" + SINGLE = auto() + TWO = auto() + MPT = auto() + PLAIN = auto() + LLAMA_2 = auto() + + +@dataclasses.dataclass +class Conversation: + """A class that keeps all conversation history.""" + system: str + roles: List[str] + messages: List[List[str]] + offset: int + sep_style: SeparatorStyle = SeparatorStyle.SINGLE + sep: str = "###" + sep2: str = None + version: str = "Unknown" + + skip_next: bool = False + + def get_prompt(self): + messages = self.messages + if len(messages) > 0 and type(messages[0][1]) is tuple: + messages = self.messages.copy() + init_role, init_msg = messages[0].copy() + init_msg = init_msg[0].replace("", "").strip() + if 'mmtag' in self.version: + messages[0] = (init_role, init_msg) + messages.insert(0, (self.roles[0], "")) + messages.insert(1, (self.roles[1], "Received.")) + else: + messages[0] = (init_role, "\n" + init_msg) + + if self.sep_style == SeparatorStyle.SINGLE: + ret = self.system + self.sep + for role, message in messages: + if message: + if type(message) is tuple: + message, _, _ = message + ret += role + ": " + message + self.sep + else: + ret += role + ":" + elif self.sep_style == SeparatorStyle.TWO: + seps = [self.sep, self.sep2] + ret = self.system + seps[0] + for i, (role, message) in enumerate(messages): + if message: + if type(message) is tuple: + message, _, _ = message + ret += role + ": " + message + seps[i % 2] + else: + ret += role + ":" + elif self.sep_style == SeparatorStyle.MPT: + ret = self.system + self.sep + for role, message in messages: + if message: + if type(message) is tuple: + message, _, _ = message + ret += role + message + self.sep + else: + ret += role + elif self.sep_style == SeparatorStyle.LLAMA_2: + wrap_sys = lambda msg: f"<>\n{msg}\n<>\n\n" + wrap_inst = lambda msg: f"[INST] {msg} [/INST]" + ret = "" + + for i, (role, message) in enumerate(messages): + if i == 0: + assert message, "first message should not be none" + assert role == self.roles[0], "first message should come from user" + if message: + if type(message) is tuple: + message, _, _ = message + if i == 0: message = wrap_sys(self.system) + message + if i % 2 == 0: + message = wrap_inst(message) + ret += self.sep + message + else: + ret += " " + message + " " + self.sep2 + else: + ret += "" + ret = ret.lstrip(self.sep) + elif self.sep_style == SeparatorStyle.PLAIN: + seps = [self.sep, self.sep2] + ret = self.system + for i, (role, message) in enumerate(messages): + if message: + if type(message) is tuple: + message, _, _ = message + ret += message + seps[i % 2] + 
else: + ret += "" + else: + raise ValueError(f"Invalid style: {self.sep_style}") + + return ret + + def append_message(self, role, message): + self.messages.append([role, message]) + + def get_images(self, return_pil=False): + images = [] + for i, (role, msg) in enumerate(self.messages[self.offset:]): + if i % 2 == 0: + if type(msg) is tuple: + import base64 + from io import BytesIO + from PIL import Image + msg, image, image_process_mode = msg + if image_process_mode == "Pad": + def expand2square(pil_img, background_color=(122, 116, 104)): + width, height = pil_img.size + if width == height: + return pil_img + elif width > height: + result = Image.new(pil_img.mode, (width, width), background_color) + result.paste(pil_img, (0, (width - height) // 2)) + return result + else: + result = Image.new(pil_img.mode, (height, height), background_color) + result.paste(pil_img, ((height - width) // 2, 0)) + return result + image = expand2square(image) + elif image_process_mode in ["Default", "Crop"]: + pass + elif image_process_mode == "Resize": + image = image.resize((336, 336)) + else: + raise ValueError(f"Invalid image_process_mode: {image_process_mode}") + max_hw, min_hw = max(image.size), min(image.size) + aspect_ratio = max_hw / min_hw + max_len, min_len = 800, 400 + shortest_edge = int(min(max_len / aspect_ratio, min_len, min_hw)) + longest_edge = int(shortest_edge * aspect_ratio) + W, H = image.size + if longest_edge != max(image.size): + if H > W: + H, W = longest_edge, shortest_edge + else: + H, W = shortest_edge, longest_edge + image = image.resize((W, H)) + if return_pil: + images.append(image) + else: + buffered = BytesIO() + image.save(buffered, format="PNG") + img_b64_str = base64.b64encode(buffered.getvalue()).decode() + images.append(img_b64_str) + return images + + def to_gradio_chatbot(self): + ret = [] + for i, (role, msg) in enumerate(self.messages[self.offset:]): + if i % 2 == 0: + if type(msg) is tuple: + import base64 + from io import BytesIO + msg, image, image_process_mode = msg + max_hw, min_hw = max(image.size), min(image.size) + aspect_ratio = max_hw / min_hw + max_len, min_len = 800, 400 + shortest_edge = int(min(max_len / aspect_ratio, min_len, min_hw)) + longest_edge = int(shortest_edge * aspect_ratio) + W, H = image.size + if H > W: + H, W = longest_edge, shortest_edge + else: + H, W = shortest_edge, longest_edge + image = image.resize((W, H)) + buffered = BytesIO() + image.save(buffered, format="JPEG") + img_b64_str = base64.b64encode(buffered.getvalue()).decode() + img_str = f'' + msg = img_str + msg.replace('', '').strip() + ret.append([msg, None]) + else: + ret.append([msg, None]) + else: + ret[-1][-1] = msg + return ret + + def copy(self): + return Conversation( + system=self.system, + roles=self.roles, + messages=[[x, y] for x, y in self.messages], + offset=self.offset, + sep_style=self.sep_style, + sep=self.sep, + sep2=self.sep2, + version=self.version) + + def dict(self): + if len(self.get_images()) > 0: + return { + "system": self.system, + "roles": self.roles, + "messages": [[x, y[0] if type(y) is tuple else y] for x, y in self.messages], + "offset": self.offset, + "sep": self.sep, + "sep2": self.sep2, + } + return { + "system": self.system, + "roles": self.roles, + "messages": self.messages, + "offset": self.offset, + "sep": self.sep, + "sep2": self.sep2, + } + + +conv_vicuna_v0 = Conversation( + system="A chat between a curious human and an artificial intelligence assistant. 
" + "The assistant gives helpful, detailed, and polite answers to the human's questions.", + roles=("Human", "Assistant"), + messages=( + ("Human", "What are the key differences between renewable and non-renewable energy sources?"), + ("Assistant", + "Renewable energy sources are those that can be replenished naturally in a relatively " + "short amount of time, such as solar, wind, hydro, geothermal, and biomass. " + "Non-renewable energy sources, on the other hand, are finite and will eventually be " + "depleted, such as coal, oil, and natural gas. Here are some key differences between " + "renewable and non-renewable energy sources:\n" + "1. Availability: Renewable energy sources are virtually inexhaustible, while non-renewable " + "energy sources are finite and will eventually run out.\n" + "2. Environmental impact: Renewable energy sources have a much lower environmental impact " + "than non-renewable sources, which can lead to air and water pollution, greenhouse gas emissions, " + "and other negative effects.\n" + "3. Cost: Renewable energy sources can be more expensive to initially set up, but they typically " + "have lower operational costs than non-renewable sources.\n" + "4. Reliability: Renewable energy sources are often more reliable and can be used in more remote " + "locations than non-renewable sources.\n" + "5. Flexibility: Renewable energy sources are often more flexible and can be adapted to different " + "situations and needs, while non-renewable sources are more rigid and inflexible.\n" + "6. Sustainability: Renewable energy sources are more sustainable over the long term, while " + "non-renewable sources are not, and their depletion can lead to economic and social instability.\n") + ), + offset=2, + sep_style=SeparatorStyle.SINGLE, + sep="###", +) + + + +conv_vicuna_v1 = Conversation( + system="A chat between a curious user and an artificial intelligence assistant. " + "The assistant gives helpful, detailed, and polite answers to the user's questions.", + roles=("USER", "ASSISTANT"), + version="v1", + messages=(), + offset=0, + sep_style=SeparatorStyle.TWO, + sep=" ", + sep2="", +) + +conv_llama_2 = Conversation( + system="""You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. + +If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.""", + roles=("USER", "ASSISTANT"), + version="llama_v2", + messages=(), + offset=0, + sep_style=SeparatorStyle.LLAMA_2, + sep="", + sep2="", +) + +conv_llava_llama_2 = Conversation( + system="You are a helpful language and vision assistant. " + "You are able to understand the visual content that the user provides, " + "and assist the user with a variety of tasks using natural language.", + roles=("USER", "ASSISTANT"), + version="llama_v2", + messages=(), + offset=0, + sep_style=SeparatorStyle.LLAMA_2, + sep="", + sep2="", +) + +conv_mpt = Conversation( + system="""<|im_start|>system +A conversation between a user and an LLM-based AI assistant. 
The assistant gives helpful and honest answers.""", + roles=("<|im_start|>user\n", "<|im_start|>assistant\n"), + version="mpt", + messages=(), + offset=0, + sep_style=SeparatorStyle.MPT, + sep="<|im_end|>", +) + +conv_llava_plain = Conversation( + system="", + roles=("", ""), + messages=( + ), + offset=0, + sep_style=SeparatorStyle.PLAIN, + sep="\n", +) + +conv_llava_v0 = Conversation( + system="A chat between a curious human and an artificial intelligence assistant. " + "The assistant gives helpful, detailed, and polite answers to the human's questions.", + roles=("Human", "Assistant"), + messages=( + ), + offset=0, + sep_style=SeparatorStyle.SINGLE, + sep="###", +) + +conv_llava_v0_mmtag = Conversation( + system="A chat between a curious user and an artificial intelligence assistant. " + "The assistant is able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language." + "The visual content will be provided with the following format: visual content.", + roles=("Human", "Assistant"), + messages=( + ), + offset=0, + sep_style=SeparatorStyle.SINGLE, + sep="###", + version="v0_mmtag", +) + +conv_llava_v1 = Conversation( + system="A chat between a curious human and an artificial intelligence assistant. " + "The assistant gives helpful, detailed, and polite answers to the human's questions.", + roles=("USER", "ASSISTANT"), + version="v1", + messages=(), + offset=0, + sep_style=SeparatorStyle.TWO, + sep=" ", + sep2="", +) + +conv_llava_v1_mmtag = Conversation( + system="A chat between a curious user and an artificial intelligence assistant. " + "The assistant is able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language." + "The visual content will be provided with the following format: visual content.", + roles=("USER", "ASSISTANT"), + messages=(), + offset=0, + sep_style=SeparatorStyle.TWO, + sep=" ", + sep2="", + version="v1_mmtag", +) + + +################################## For Driving +conv_driving = Conversation( + system="A chat between a curious user and an artificial intelligence assistant designed for autonomous driving. 
" + "The assistant gives helpful, detailed, and polite answers to the user's questions including driving action justification and control signal.", + roles=("USER", "ASSISTANT"), + version="v1", + messages=(), + offset=0, + sep_style=SeparatorStyle.TWO, + sep=" ", + sep2="", +) + +default_conversation = conv_vicuna_v0 +conv_templates = { + "default": conv_vicuna_v0, + "v0": conv_vicuna_v0, + "v1": conv_vicuna_v1, + "vicuna_v1": conv_vicuna_v1, + "llama_2": conv_llama_2, + + "plain": conv_llava_plain, + "v0_plain": conv_llava_plain, + "llava_v0": conv_llava_v0, + "v0_mmtag": conv_llava_v0_mmtag, + "llava_v1": conv_llava_v1, + "v1_mmtag": conv_llava_v1_mmtag, + "llava_llama_2": conv_llava_llama_2, + + "mpt": conv_mpt, + "driving": conv_driving, +} + + +if __name__ == "__main__": + print(default_conversation.get_prompt()) diff --git a/llava/eval/eval_gpt_mmvet.py b/llava/eval/eval_gpt_mmvet.py new file mode 100644 index 0000000..9d306e9 --- /dev/null +++ b/llava/eval/eval_gpt_mmvet.py @@ -0,0 +1,276 @@ +import argparse + +import openai +import json +import os +from tqdm import tqdm +import pandas as pd +import numpy as np +from collections import Counter +import time + + + +parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.') +parser.add_argument('--mmvet_path') +parser.add_argument('--ckpt_name') +parser.add_argument('--result_path') +args = parser.parse_args() + + +openai.api_base = "" +openai.api_key = '' + +gpt_model = "gpt-4-0613" + + +prompt = """Compare the ground truth and prediction from AI models, to give a correctness score for the prediction. in the ground truth means it is totally right only when all elements in the ground truth are present in the prediction, and means it is totally right when any one element in the ground truth is present in the prediction. The correctness score is 0.0 (totally wrong), 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, or 1.0 (totally right). Just complete the last space of the correctness score. + +Question | Ground truth | Prediction | Correctness +--- | --- | --- | --- +What is x in the equation? | -1 -5 | x = 3 | 0.0 +What is x in the equation? | -1 -5 | x = -1 | 0.5 +What is x in the equation? | -1 -5 | x = -5 | 0.5 +What is x in the equation? | -1 -5 | x = -5 or 5 | 0.5 +What is x in the equation? | -1 -5 | x = -1 or x = -5 | 1.0 +Can you explain this meme? | This meme is poking fun at the fact that the names of the countries Iceland and Greenland are misleading. Despite its name, Iceland is known for its beautiful green landscapes, while Greenland is mostly covered in ice and snow. The meme is saying that the person has trust issues because the names of these countries do not accurately represent their landscapes. | The meme talks about Iceland and Greenland. It's pointing out that despite their names, Iceland is not very icy and Greenland isn't very green. | 0.4 +Can you explain this meme? | This meme is poking fun at the fact that the names of the countries Iceland and Greenland are misleading. Despite its name, Iceland is known for its beautiful green landscapes, while Greenland is mostly covered in ice and snow. The meme is saying that the person has trust issues because the names of these countries do not accurately represent their landscapes. | The meme is using humor to point out the misleading nature of Iceland's and Greenland's names. Iceland, despite its name, has lush green landscapes while Greenland is mostly covered in ice and snow. 
The text 'This is why I have trust issues' is a playful way to suggest that these contradictions can lead to distrust or confusion. The humor in this meme is derived from the unexpected contrast between the names of the countries and their actual physical characteristics. | 1.0 +""" + +# load metadata +# Download mm-vet.zip and `unzip mm-vet.zip` and change the path below +mmvet_path = args.mmvet_path +use_sub_set = False +decimal_places = 1 # number of decimal places to round to + +if use_sub_set: + bard_set_file = os.path.join(mmvet_path, "bard_set.json") + with open(bard_set_file, 'r') as f: + sub_set = json.load(f) + sub_set_name = 'bardset' + sub_set_name = sub_set_name + '_' +else: + sub_set = None + sub_set_name = '' + +mmvet_metadata = os.path.join(mmvet_path, "mm-vet.json") +with open(mmvet_metadata, 'r') as f: + data = json.load(f) + +counter = Counter() +cap_set_list = [] +cap_set_counter = [] +len_data = 0 +for id, value in data.items(): + if sub_set is not None and id not in sub_set: + continue + question = value["question"] + answer = value["answer"] + cap = value["capability"] + cap = set(cap) + counter.update(cap) + if cap not in cap_set_list: + cap_set_list.append(cap) + cap_set_counter.append(1) + else: + cap_set_counter[cap_set_list.index(cap)] += 1 + + len_data += 1 + +sorted_list = counter.most_common() +columns = [k for k, v in sorted_list] +columns.append("total") +columns.append("std") +columns.append('runs') +df = pd.DataFrame(columns=columns) + +cap_set_sorted_indices = np.argsort(-np.array(cap_set_counter)) +new_cap_set_list = [] +new_cap_set_counter = [] +for index in cap_set_sorted_indices: + new_cap_set_list.append(cap_set_list[index]) + new_cap_set_counter.append(cap_set_counter[index]) + +cap_set_list = new_cap_set_list +cap_set_counter = new_cap_set_counter +cap_set_names = ["_".join(list(cap_set)) for cap_set in cap_set_list] + +columns2 = cap_set_names +columns2.append("total") +columns2.append("std") +columns2.append('runs') +df2 = pd.DataFrame(columns=columns2) + + + + + + + + +###### change your model name ###### +model = args.ckpt_name +result_path = args.result_path +num_run = 1 # we set it as 5 in the paper +model_results_file = os.path.join(result_path, f"{model}.json") + +# grade results for each sample to svae +grade_file = f'{model}_{gpt_model}-grade-{num_run}runs.json' +grade_file = os.path.join(result_path, grade_file) + +# score results regarding capabilities/capability integration to save +cap_score_file = f'{model}_{sub_set_name}{gpt_model}-cap-score-{num_run}runs.csv' +cap_score_file = os.path.join(result_path, cap_score_file) +cap_int_score_file = f'{model}_{sub_set_name}{gpt_model}-cap-int-score-{num_run}runs.csv' +cap_int_score_file = os.path.join(result_path, cap_int_score_file) + +with open(model_results_file) as f: + results = json.load(f) +if os.path.exists(grade_file): + with open(grade_file, 'r') as f: + grade_results = json.load(f) +else: + grade_results = {} + + +def need_more_runs(): + need_more_runs = False + if len(grade_results) > 0: + for k, v in grade_results.items(): + if len(v['score']) < num_run: + need_more_runs = True + break + return need_more_runs or len(grade_results) < len_data + + +while need_more_runs(): + for j in range(num_run): + print(f'eval run {j}') + for id, line in tqdm(data.items()): + if sub_set is not None and id not in sub_set: + continue + if id in grade_results and len(grade_results[id]['score']) >= (j + 1): + continue + + model_pred = results[id] + + question = prompt + '\n' + ' | '.join( + 
[line['question'], line['answer'].replace("", " ").replace("", " "), model_pred, + ""]) + messages = [ + {"role": "user", "content": question}, + ] + + if id not in grade_results: + sample_grade = {'model': [], 'content': [], 'score': []} + else: + sample_grade = grade_results[id] + + grade_sample_run_complete = False + temperature = 0.0 + + while not grade_sample_run_complete: + try: + response = openai.ChatCompletion.create( + model=gpt_model, + max_tokens=3, + temperature=temperature, + messages=messages) + # print(response['model']) + content = response['choices'][0]['message']['content'] + flag = True + try_time = 1 + while flag: + try: + content = content.split(' ')[0].strip() + score = float(content) + if score > 1.0 or score < 0.0: + assert False + flag = False + except: + question = prompt + '\n' + ' | '.join( + [line['question'], line['answer'].replace("", " ").replace("", " "), + model_pred, ""]) + "\nPredict the correctness of the answer (digit): " + messages = [ + {"role": "user", "content": question}, + ] + response = openai.ChatCompletion.create( + model=gpt_model, + max_tokens=3, + temperature=temperature, + messages=messages) + # print(response) + content = response['choices'][0]['message']['content'] + try_time += 1 + temperature += 0.5 + print(f"{id} try {try_time} times") + print(content) + if try_time > 5: + score = 0.0 + flag = False + grade_sample_run_complete = True + except: + # gpt4 may have token rate limit + print("sleep 1s") + time.sleep(1) + + if len(sample_grade['model']) >= j + 1: + sample_grade['model'][j] = response['model'] + sample_grade['content'][j] = content + sample_grade['score'][j] = score + else: + sample_grade['model'].append(response['model']) + sample_grade['content'].append(content) + sample_grade['score'].append(score) + grade_results[id] = sample_grade + + with open(grade_file, 'w') as f: + json.dump(grade_results, f, indent=4) + +assert not need_more_runs() +cap_socres = {k: [0.0] * num_run for k in columns[:-2]} +counter['total'] = len_data + +cap_socres2 = {k: [0.0] * num_run for k in columns2[:-2]} +counter2 = {columns2[i]: cap_set_counter[i] for i in range(len(cap_set_counter))} +counter2['total'] = len_data + +for k, v in grade_results.items(): + if sub_set is not None and k not in sub_set: + continue + for i in range(num_run): + score = v['score'][i] + caps = set(data[k]['capability']) + for c in caps: + cap_socres[c][i] += score + + cap_socres['total'][i] += score + + index = cap_set_list.index(caps) + cap_socres2[cap_set_names[index]][i] += score + cap_socres2['total'][i] += score + +for k, v in cap_socres.items(): + cap_socres[k] = np.array(v) / counter[k] * 100 + +std = round(cap_socres['total'].std(), decimal_places) +total_copy = cap_socres['total'].copy() +runs = str(list(np.round(total_copy, decimal_places))) + +for k, v in cap_socres.items(): + cap_socres[k] = round(v.mean(), decimal_places) + +cap_socres['std'] = std +cap_socres['runs'] = runs +df.loc[model] = cap_socres + +for k, v in cap_socres2.items(): + cap_socres2[k] = round(np.mean(np.array(v) / counter2[k] * 100), decimal_places) +cap_socres2['std'] = std +cap_socres2['runs'] = runs +df2.loc[model] = cap_socres2 + +df.to_csv(cap_score_file) +df2.to_csv(cap_int_score_file) +print(df) +print(df2) \ No newline at end of file diff --git a/llava/eval/eval_gpt_review.py b/llava/eval/eval_gpt_review.py new file mode 100644 index 0000000..8af4559 --- /dev/null +++ b/llava/eval/eval_gpt_review.py @@ -0,0 +1,113 @@ +import argparse +import json +import os + +import openai 
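+# NOTE: OpenAI credentials are assumed to be supplied externally (e.g. via the OPENAI_API_KEY environment variable); openai.api_key is never set in this script.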
+import tqdm +import ray +import time + +NUM_SECONDS_TO_SLEEP = 3 + +@ray.remote(num_cpus=4) +def get_eval(content: str, max_tokens: int): + while True: + try: + response = openai.ChatCompletion.create( + model='gpt-4', + messages=[{ + 'role': 'system', + 'content': 'You are a helpful and precise assistant for checking the quality of the answer.' + }, { + 'role': 'user', + 'content': content, + }], + temperature=0.2, # TODO: figure out which temperature is best for evaluation + max_tokens=max_tokens, + ) + break + except openai.error.RateLimitError: + pass + except Exception as e: + print(e) + time.sleep(NUM_SECONDS_TO_SLEEP) + + print('success!') + return response['choices'][0]['message']['content'] + + +def parse_score(review): + try: + score_pair = review.split('\n')[0] + score_pair = score_pair.replace(',', ' ') + sp = score_pair.split(' ') + if len(sp) == 2: + return [float(sp[0]), float(sp[1])] + else: + print('error', review) + return [-1, -1] + except Exception as e: + print(e) + print('error', review) + return [-1, -1] + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.') + parser.add_argument('-q', '--question') + # parser.add_argument('-a', '--answer') + parser.add_argument('-a', '--answer-list', nargs='+', default=[]) + parser.add_argument('-r', '--rule') + parser.add_argument('-o', '--output') + parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output') + args = parser.parse_args() + + ray.init() + + f_q = open(os.path.expanduser(args.question)) + f_ans1 = open(os.path.expanduser(args.answer_list[0])) + f_ans2 = open(os.path.expanduser(args.answer_list[1])) + rule_dict = json.load(open(os.path.expanduser(args.rule), 'r')) + + review_file = open(f'{args.output}', 'w') + + js_list = [] + handles = [] + idx = 0 + for ques_js, ans1_js, ans2_js in zip(f_q, f_ans1, f_ans2): + # if idx == 1: + # break + + ques = json.loads(ques_js) + ans1 = json.loads(ans1_js) + ans2 = json.loads(ans2_js) + + category = json.loads(ques_js)['category'] + if category in rule_dict: + rule = rule_dict[category] + else: + rule = rule_dict['default'] + prompt = rule['prompt'] + role = rule['role'] + content = (f'[Question]\n{ques["text"]}\n\n' + f'[{role} 1]\n{ans1["text"]}\n\n[End of {role} 1]\n\n' + f'[{role} 2]\n{ans2["text"]}\n\n[End of {role} 2]\n\n' + f'[System]\n{prompt}\n\n') + js_list.append({ + 'id': idx+1, + 'question_id': ques['question_id'], + 'answer1_id': ans1['answer_id'], + 'answer2_id': ans2['answer_id'], + 'category': category}) + idx += 1 + handles.append(get_eval.remote(content, args.max_tokens)) + # To avoid the rate limit set by OpenAI + time.sleep(NUM_SECONDS_TO_SLEEP) + + reviews = ray.get(handles) + for idx, review in enumerate(reviews): + scores = parse_score(review) + js_list[idx]['content'] = review + js_list[idx]['tuple'] = scores + review_file.write(json.dumps(js_list[idx]) + '\n') + review_file.close() diff --git a/llava/eval/eval_gpt_review_bench.py b/llava/eval/eval_gpt_review_bench.py new file mode 100644 index 0000000..06160f2 --- /dev/null +++ b/llava/eval/eval_gpt_review_bench.py @@ -0,0 +1,121 @@ +import argparse +import json +import os + +import openai +import time + +NUM_SECONDS_TO_SLEEP = 0.5 + + +def get_eval(content: str, max_tokens: int): + while True: + try: + response = openai.ChatCompletion.create( + model='gpt-4-0314', + messages=[{ + 'role': 'system', + 'content': 'You are a helpful and precise assistant for checking the quality of the answer.' 
+ }, { + 'role': 'user', + 'content': content, + }], + temperature=0.2, # TODO: figure out which temperature is best for evaluation + max_tokens=max_tokens, + ) + break + except openai.error.RateLimitError: + pass + except Exception as e: + print(e) + time.sleep(NUM_SECONDS_TO_SLEEP) + + return response['choices'][0]['message']['content'] + + +def parse_score(review): + try: + score_pair = review.split('\n')[0] + score_pair = score_pair.replace(',', ' ') + sp = score_pair.split(' ') + if len(sp) == 2: + return [float(sp[0]), float(sp[1])] + else: + print('error', review) + return [-1, -1] + except Exception as e: + print(e) + print('error', review) + return [-1, -1] + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.') + parser.add_argument('-q', '--question') + parser.add_argument('-c', '--context') + parser.add_argument('-a', '--answer-list', nargs='+', default=[]) + parser.add_argument('-r', '--rule') + parser.add_argument('-o', '--output') + parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output') + args = parser.parse_args() + + f_q = open(os.path.expanduser(args.question)) + f_ans1 = open(os.path.expanduser(args.answer_list[0])) + f_ans2 = open(os.path.expanduser(args.answer_list[1])) + rule_dict = json.load(open(os.path.expanduser(args.rule), 'r')) + + if os.path.isfile(os.path.expanduser(args.output)): + cur_reviews = [json.loads(line) for line in open(os.path.expanduser(args.output))] + else: + cur_reviews = [] + + review_file = open(f'{args.output}', 'a') + + context_list = [json.loads(line) for line in open(os.path.expanduser(args.context))] + image_to_context = {context['image']: context for context in context_list} + + handles = [] + idx = 0 + for ques_js, ans1_js, ans2_js in zip(f_q, f_ans1, f_ans2): + ques = json.loads(ques_js) + ans1 = json.loads(ans1_js) + ans2 = json.loads(ans2_js) + + inst = image_to_context[ques['image']] + + if isinstance(inst['caption'], list): + cap_str = '\n'.join(inst['caption']) + else: + cap_str = inst['caption'] + + category = 'llava_bench_' + json.loads(ques_js)['category'] + if category in rule_dict: + rule = rule_dict[category] + else: + assert False, f"Visual QA category not found in rule file: {category}." 
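+            # Build the GPT-4 review prompt from the matched rule: the image captions go in [Context], followed by the question and the two candidate answers.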
+ prompt = rule['prompt'] + role = rule['role'] + content = (f'[Context]\n{cap_str}\n\n' + f'[Question]\n{ques["text"]}\n\n' + f'[{role} 1]\n{ans1["text"]}\n\n[End of {role} 1]\n\n' + f'[{role} 2]\n{ans2["text"]}\n\n[End of {role} 2]\n\n' + f'[System]\n{prompt}\n\n') + cur_js = { + 'id': idx+1, + 'question_id': ques['question_id'], + 'answer1_id': ans1.get('answer_id', ans1['question_id']), + 'answer2_id': ans2.get('answer_id', ans2['answer_id']), + 'category': category + } + if idx >= len(cur_reviews): + review = get_eval(content, args.max_tokens) + scores = parse_score(review) + cur_js['content'] = review + cur_js['tuple'] = scores + review_file.write(json.dumps(cur_js) + '\n') + review_file.flush() + else: + print(f'Skipping {idx} as we already have it.') + idx += 1 + print(idx) + review_file.close() diff --git a/llava/eval/eval_gpt_review_visual.py b/llava/eval/eval_gpt_review_visual.py new file mode 100644 index 0000000..d6e407a --- /dev/null +++ b/llava/eval/eval_gpt_review_visual.py @@ -0,0 +1,118 @@ +import argparse +import json +import os + +import openai +import time + +NUM_SECONDS_TO_SLEEP = 0.5 + + +def get_eval(content: str, max_tokens: int): + while True: + try: + response = openai.ChatCompletion.create( + model='gpt-4-0314', + messages=[{ + 'role': 'system', + 'content': 'You are a helpful and precise assistant for checking the quality of the answer.' + }, { + 'role': 'user', + 'content': content, + }], + temperature=0.2, # TODO: figure out which temperature is best for evaluation + max_tokens=max_tokens, + ) + break + except openai.error.RateLimitError: + pass + except Exception as e: + print(e) + time.sleep(NUM_SECONDS_TO_SLEEP) + + return response['choices'][0]['message']['content'] + + +def parse_score(review): + try: + score_pair = review.split('\n')[0] + score_pair = score_pair.replace(',', ' ') + sp = score_pair.split(' ') + if len(sp) == 2: + return [float(sp[0]), float(sp[1])] + else: + print('error', review) + return [-1, -1] + except Exception as e: + print(e) + print('error', review) + return [-1, -1] + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.') + parser.add_argument('-q', '--question') + parser.add_argument('-c', '--context') + parser.add_argument('-a', '--answer-list', nargs='+', default=[]) + parser.add_argument('-r', '--rule') + parser.add_argument('-o', '--output') + parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output') + args = parser.parse_args() + + f_q = open(os.path.expanduser(args.question)) + f_ans1 = open(os.path.expanduser(args.answer_list[0])) + f_ans2 = open(os.path.expanduser(args.answer_list[1])) + rule_dict = json.load(open(os.path.expanduser(args.rule), 'r')) + + if os.path.isfile(os.path.expanduser(args.output)): + cur_reviews = [json.loads(line) for line in open(os.path.expanduser(args.output))] + else: + cur_reviews = [] + + review_file = open(f'{args.output}', 'a') + + context_list = [json.loads(line) for line in open(os.path.expanduser(args.context))] + image_to_context = {context['image']: context for context in context_list} + + handles = [] + idx = 0 + for ques_js, ans1_js, ans2_js in zip(f_q, f_ans1, f_ans2): + ques = json.loads(ques_js) + ans1 = json.loads(ans1_js) + ans2 = json.loads(ans2_js) + + inst = image_to_context[ques['image']] + cap_str = '\n'.join(inst['captions']) + box_str = '\n'.join([f'{instance["category"]}: {instance["bbox"]}' for instance in inst['instances']]) + + category = 
json.loads(ques_js)['category'] + if category in rule_dict: + rule = rule_dict[category] + else: + assert False, f"Visual QA category not found in rule file: {category}." + prompt = rule['prompt'] + role = rule['role'] + content = (f'[Context]\n{cap_str}\n\n{box_str}\n\n' + f'[Question]\n{ques["text"]}\n\n' + f'[{role} 1]\n{ans1["text"]}\n\n[End of {role} 1]\n\n' + f'[{role} 2]\n{ans2["text"]}\n\n[End of {role} 2]\n\n' + f'[System]\n{prompt}\n\n') + cur_js = { + 'id': idx+1, + 'question_id': ques['question_id'], + 'answer1_id': ans1.get('answer_id', ans1['question_id']), + 'answer2_id': ans2.get('answer_id', ans2['answer_id']), + 'category': category + } + if idx >= len(cur_reviews): + review = get_eval(content, args.max_tokens) + scores = parse_score(review) + cur_js['content'] = review + cur_js['tuple'] = scores + review_file.write(json.dumps(cur_js) + '\n') + review_file.flush() + else: + print(f'Skipping {idx} as we already have it.') + idx += 1 + print(idx) + review_file.close() diff --git a/llava/eval/eval_gqa.py b/llava/eval/eval_gqa.py new file mode 100644 index 0000000..9b66f86 --- /dev/null +++ b/llava/eval/eval_gqa.py @@ -0,0 +1,499 @@ +# Evaluation code for GQA. +# Computes a suite of metrics such as accuracy, consistency, plausibility and scores per question type and length. +# Visit https://gqadataset.org/ for all information about the dataset, including examples, visualizations, paper and slides. +# +# +# Metrics: +# - Accuracy: Standard accuracy, computed over the balanced version of the dataset, which is more robust against +# cheating by making educated guesses. For each question-answer pair (q,a), we give 1 point if the +# predicted answer p matches a and 0 otherwise, and average over all questions in the dataset. +# +# - Consistency: A metric for the level of model's consistency across different questions. For each question-answer +# pair (q,a), we define a set Eq={q1, q2, ..., qn} of entailed questions, the answers to which can +# be unambiguously inferred given (q,a). +# Denote Q the set of all questions the model answered correctly. For each question q in Q, we +# measure the model's accuracy over the entailed questions Eq to get the score sq and finally +# average these results across all questions in Q. +# +# - Validity: Measures whether the model gives a "valid" answer - one that can theoretically be an answer +# to the question (e.g. a color to a color question, yes/no to a binary question etc.). +# We provide a set of valid answers to each questions over the final answer vocabulary, in +# the choices file, and use it to compute average validity across the dataset. +# +# - Plausibility: Measures whether the model answers are plausible, e.g. one that make sense in the real world, +# e.g. not answering "purple" to a question about apple color (unless it's really purple). +# We provide a set of all plausible answers to each questions, computed by looking at all +# attributes and relations hold for various objects throughout the whole dataset scene graphs, +# and use it to compute average model plausibility across the data. +# +# - Grounding: Only for attention models. Measures whether the model looks at the relevant regions in the +# image when answering a question. Each question in the dataset is annotated with the visual regions +# they refer to, which are then used to compute the level to which the model has a correct visual attention, +# which will allow to identify whether it really answers based on the image of by language-based guesses. 
+# Supports both spatial features and object-based features. +# +# - Distribution: Measures the overall match between the true answer distribution for different questions, +# vs the overall distribution predicted by the model through its answers for all the data. +# We use chi-square statistic to measure the degree of similarity between the distributions, +# giving indication to the level of overall world-knowledge of the model +# +# - Accuracy per type: accuracy per question structural types (logic, compare, choose), and semantic type +# (questions about attributes, relations, categories, objects or the whole scene). +# +# - Accuracy for length: accuracy as a function of the question length, in terms of (1) words number, and semantic +# complexity - number of reasoning steps. +# +# We may support additional metrics (e.g. coverage) in the future. +# +# +# Files format: +# - predictions file format: JSON array: [{"questionId": str, "prediction": str}] +# - attentions file format: JSON array: +# Spatial attention: [{"questionId": str, "attention": [mapSize x mapSize: float] }]. +# Object-based attention:[{"questionId": str, "attention": [[x0, y0, x1, y1, float] x #regions] }]. 0 < x,y < 1. +# - questions and choices files are provided as part of the dataset. +# see https://gqadataset.org/download.html for information about their format. +# +# +# If you have any questions or comments, please feel free to send an email, +# at dorarad@cs.stanford.edu. We hope you'll enjoy using the GQA dataset! :) +# +# + +from collections import defaultdict +from tqdm import tqdm +import argparse +import os.path +import glob +import json +import math + +##### Arguments +########################################################################################## + +parser = argparse.ArgumentParser() +parser.add_argument('--tier', default="val", type=str, help="Tier, e.g. train, val") +parser.add_argument('--scenes', default="{tier}_sceneGraphs.json", type=str, help="Scene graphs file name format.") +parser.add_argument('--questions', default="{tier}_all_questions.json", type=str, help="Questions file name format.") +parser.add_argument('--choices', default="{tier}_choices.json", type=str, help="Choices file name format.") +parser.add_argument('--predictions', default="{tier}_predictions.json", type=str, help="Answers file name format.") +parser.add_argument('--attentions', default="{tier}_attentions.json", type=str, help="Attentions file name format.") +parser.add_argument('--consistency', action="store_true", + help="True to compute consistency score (Need to provide answers to questions in val_all_questions.json).") +parser.add_argument('--grounding', action="store_true", + help="True to compute grounding score (If model uses attention).") +parser.add_argument('--objectFeatures', action="store_true", + help="True for object-based attention (False for spatial).") +parser.add_argument('--mapSize', default=7, type=int, + help="Optional, only to get attention score. Images features map size, mapSize * mapSize") +args = parser.parse_args() + +print( + "Please make sure to use our provided visual features as gqadataset.org for better comparability. 
We provide both spatial and object-based features trained on GQA train set.") +print( + "In particular please avoid using features from https://github.com/peteanderson80/bottom-up-attention since they were trained on images contained in the GQA validation set and thus may give false scores improvement.\n") + +if not args.consistency: + print("Please consider using --consistency to compute consistency scores for entailed questions.") + print("If you do so, please provide answers to all questions in val_all_questions.json.\n") + +if not args.grounding: + print("Please consider using --grounding to compute attention scores.") + print("If you do so, please provide attention maps through --attentions.\n") + + +##### Files Loading +########################################################################################## + +def loadFile(name): + # load standard json file + if os.path.isfile(name): + with open(name) as file: + data = json.load(file) + # load file chunks if too big + elif os.path.isdir(name.split(".")[0]): + data = {} + chunks = glob.glob('{dir}/{dir}_*.{ext}'.format(dir=name.split(".")[0], ext=name.split(".")[1])) + for chunk in chunks: + with open(chunk) as file: + data.update(json.load(file)) + else: + raise Exception("Can't find {}".format(name)) + return data + + +# Load scene graphs +print("Loading scene graphs...") +try: + scenes = loadFile(args.scenes.format(tier=args.tier)) +except: + print('Failed to load scene graphs -- cannot evaluate grounding') + scenes = None # for testdev + +# Load questions +print("Loading questions...") +questions = loadFile(args.questions) + +# Load choices +print("Loading choices...") +try: + choices = loadFile(args.choices.format(tier=args.tier)) +except: + print('Failed to load choices -- cannot evaluate validity or plausibility') + choices = None # for testdev + +# Load predictions and turn them into a dictionary +print("Loading predictions...") +predictions = loadFile(args.predictions.format(tier=args.tier)) +predictions = {p["questionId"]: p["prediction"] for p in predictions} + +# Make sure all question have predictions +for qid in questions: + if (qid not in predictions) and (args.consistency or questions[qid]["isBalanced"]): + print("no prediction for question {}. Please add prediction for all questions.".format(qid)) + raise Exception("missing predictions") + +# Load attentions and turn them into a dictionary +attentions = None +if args.grounding: + with open(args.attentions.format(tier=args.tier)) as attentionsFile: + attentions = json.load(attentionsFile) + attentions = {a["questionId"]: a["attention"] for a in attentions} + + +##### Scores data structures initialization +########################################################################################## + +# book to float +def toScore(b): + return float(1 if b else 0) + + +# Compute average of a list +def avg(l): + if len(l) == 0: + return 0 + return float(sum(l)) / len(l) + + +def wavg(l, w): + if sum(w) == 0: + return None + return float(sum(l[i] * w[i] for i in range(len(l)))) / sum(w) + + +# Initialize data structure to track all metrics: e.g. accuracy, validity and plausibility, as well as +# accuracy per question type, length and number of reasoning steps. +scores = { + "accuracy": [], # list of accuracies per question (1 if correct else 0). Will be averaged ultimately. + "binary": [], # list of accuracies per a binary question (1 if correct else 0). Will be averaged ultimately. + "open": [], # list of accuracies per an open question (1 if correct else 0). 
Will be averaged ultimately. + "validity": [], # list of validity per question (1 if valid else 0). + "plausibility": [], # list of plausibility per question (1 if plausible else 0). + "consistency": [], # list of consistency scores for entailed questions. + "accuracyPerStructuralType": defaultdict(list), + # list of question accuracies for each structural type (e.g. compare, logic questions). + "accuracyPerSemanticType": defaultdict(list), + # list of question accuracies for each semantic type (e.g. questions about an object, an attribute, a relation). + "accuracyPerLength": defaultdict(list), # list of question accuracies per question's word number. + "accuracyPerSteps": defaultdict(list), + # list of question accuracies per question's reasoning length (steps number). + "grounding": [] # list of grounding scores for each question. +} + +# Initialize golden and predicted histograms per each question group. Used to compute the distribution metric. +dist = { + "gold": defaultdict(lambda: defaultdict(int)), + "predicted": defaultdict(lambda: defaultdict(int)) +} + + +##### Question lengths - words numbers and reasoning steps number +########################################################################################## + +# Compute question length (words number) +def getWordsNum(question): + return len(question["question"].split()) + + +# Compute number of reasoning steps (excluding the final "querying" step which doesn't increase effective reasoning length) +def getStepsNum(question): + return len([c for c in question["semantic"] if not (any([o in "{}: {}".format(c["operation"], c["argument"]) + for o in ["exist", "query: name", "choose name"]]))]) + + +##### Functions for question annotations +########################################################################################## + +# Utility function for converting question annotations string keys to slices +def toSlice(strSlice): + sliceLims = (int(n) for n in strSlice.split(':')) + return apply(slice, sliceLims) + + +# Utility function for converting question annotations string keys to indexes list: +# "1" => [0] +# "1:3" => [1, 2] +# "4:9:2" => [4, 6, 8] +def intsFromSlice(strSlice): + slice_obj = get_slice_obj(slicearg) + return (range(slice_obj.start or 0, slice_obj.stop or -1, slice_obj.step or 1)) + + +##### Functions for validity and plausibility +########################################################################################## + +def belongs(element, group, question): + # normalization () + if "Common" in question["types"]["detailed"]: + group = ["color", "material", "shape"] + + return element in group + + +##### Functions for consistency scores (for entailed questions ("inferred")) +########################################################################################## + +def updateConsistency(questionId, question, questions): + inferredQuestions = [eid for eid in question["entailed"] if eid != questionId] + + if correct and len(inferredQuestions) > 0: + + cosnsitencyScores = [] + for eid in inferredQuestions: + gold = questions[eid]["answer"] + predicted = predictions[eid] + score = toScore(predicted == gold) + cosnsitencyScores.append(score) + + scores["consistency"].append(avg(cosnsitencyScores)) + + +##### Functions for grounding score (optional, only for attention models) +########################################################################################## + +# Utility functions for working with bounding boxes. 
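+# All boxes are expressed in normalized image coordinates (values in [0, 1]).
+# intersectionRate(c1, c2) returns the fraction of c1's area that overlaps c2; the grounding score
+# below uses it to weight each attention cell by its overlap with the gold question/answer regions.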
+# c = (x0, y0, x1, y1), r = (r0, r1) + +def yrange(c): + return (c[1], c[3]) + + +def xrange(c): + return (c[0], c[2]) + + +def length(r): + if r is None: + return 0 + return float(r[1] - r[0]) + + +def size(c): + return length(xrange(c)) * length(yrange(c)) + + +def intersection(r1, r2): + ir = (max(r1[0], r2[0]), min(r1[1], r2[1])) + if ir[1] > ir[0]: + return ir + return None + + +def intersectionSize(c1, c2): + return length(intersection(xrange(c1), xrange(c2))) * length(intersection(yrange(c1), yrange(c2))) + + +def intersectionRate(c1, c2): + return float(intersectionSize(c1, c2)) / size(c1) + + +# Get spatial cell +def getCell(i, j): + edge = float(1) / args.mapSize + return (edge * i, edge * j, edge * (i + 1), edge * (j + 1)) + + +# Get bounding box of objectId in sceneGraph +def getRegion(sceneGraph, objectId): + obj = sceneGraph["objects"][objectId] + x0 = float(obj["x"]) / sceneGraph["width"] + y0 = float(obj["y"]) / sceneGraph["height"] + x1 = float(obj["x"] + obj["w"]) / sceneGraph["width"] + y1 = float(obj["y"] + obj["h"]) / sceneGraph["height"] + return (x0, y0, x1, y1) + + +# Compute grounding score. Computer amount of attention (probability) given to each of the regions +# the question and answers refer to. +def computeGroundingScore(question, sceneGraph, attentionMap): + ## prepare gold regions + regions = [] + # add question regions + regions += [getRegion(sceneGraph, pointer) for pointer in question["annotations"]["question"].values()] + # add answer regions + regions += [getRegion(sceneGraph, pointer) for pointer in question["annotations"]["fullAnswer"].values()] + # add all the image if the question refers to the whole scene + if any(("scene" in c) for c in question["semantic"]): + regions.append((0, 0, 1, 1)) + + # prepare attention map + if args.objectFeatures: + cells = [((x0, y0, x1, y1), attention) for x0, y0, x1, y1, attention in cells] + else: + cells = [(getCell(i, j), attentionMap[i][j]) for i in range(args.mapSize) for j in range(args.mapSize)] + + # compare attention map to gold regions + scores = [] + for region in regions: + for cell, attention in cells: + scores.append(attention * intersectionRate(cell, region)) + return sum(scores) + + +##### Functions for distribution score +########################################################################################## + +# Compute chi square statistic of gold distribution vs predicted distribution, +# averaged over all question groups +def chiSquare(goldDist, predictedDist): + sumScore, sumOverall = 0, 0 + + for group in goldDist: + score, overall = 0, 0 + + for ans in goldDist[group]: + e = goldDist[group][ans] + o = predictedDist[group].get(ans, 0) + score += ((float(o - e) ** 2) / e) + overall += goldDist[group][ans] + + sumScore += score * overall + sumOverall += overall + + avgScore = float(sumScore) / sumOverall + + return avgScore + + +##### Main score computation +########################################################################################## + +# Loop over the questions and compute mterics +for qid, question in tqdm(questions.items()): + + # Compute scores over the balanced dataset (more robust against cheating by making educated guesses) + if question["isBalanced"]: + gold = question["answer"] + predicted = predictions[qid] + + correct = (predicted == gold) + score = toScore(correct) + + wordsNum = getWordsNum(question) + stepsNum = getStepsNum(question) + + # Update accuracy + scores["accuracy"].append(score) + scores["accuracyPerLength"][wordsNum].append(score) + 
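+        # Also bucket the same score by reasoning-step count and by structural/semantic question type.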
scores["accuracyPerSteps"][stepsNum].append(score) + scores["accuracyPerStructuralType"][question["types"]["structural"]].append(score) + scores["accuracyPerSemanticType"][question["types"]["semantic"]].append(score) + answerType = "open" if question["types"]["structural"] == "query" else "binary" + scores[answerType].append(score) + + # Update validity score + valid = ( + belongs(predicted, choices[qid]["valid"], question) if choices + else False) + scores["validity"].append(toScore(valid)) + + # Update plausibility score + plausible = ( + belongs(predicted, choices[qid]["plausible"], question) if choices + else False) + scores["plausibility"].append(toScore(plausible)) + + # Optionally compute grounding (attention) score + if attentions is not None: + groundingScore = computeGroundingScore(question, scenes[question["imageId"]], attentions[qid]) + if groundingScore is not None: + scores["grounding"].append(groundingScore) + + # Update histograms for gold and predicted answers + globalGroup = question["groups"]["global"] + if globalGroup is not None: + dist["gold"][globalGroup][gold] += 1 + dist["predicted"][globalGroup][predicted] += 1 + + if args.consistency: + # Compute consistency (for entailed questions) + updateConsistency(qid, question, questions) + +# Compute distribution score +scores["distribution"] = chiSquare(dist["gold"], dist["predicted"]) / 100 + +# Average scores over all questions (in the balanced dataset) and print scores + +metrics = [ + "binary", + "open", + "accuracy", + "consistency", + "validity", + "plausibility", + "grounding", + "distribution" +] + +detailedMetrics = [ + ("accuracyPerStructuralType", "Accuracy / structural type"), + ("accuracyPerSemanticType", "Accuracy / semantic type"), + ("accuracyPerSteps", "Accuracy / steps number"), + ("accuracyPerLength", "Accuracy / words number") +] + +subMetrics = { + "attr": "attribute", + "cat": "category", + "global": "scene", + "obj": "object", + "rel": "relation" +} +# average +for k in metrics: + if isinstance(scores[k], list): + scores[k] = avg(scores[k]) * 100 + +for k, _ in detailedMetrics: + for t in scores[k]: + scores[k][t] = avg(scores[k][t]) * 100, len(scores[k][t]) + +# print +print("") +for m in metrics: + # skip grounding and consistency scores if not requested + if m == "grounding" and not args.grounding: + continue + if m == "consistency" and not args.consistency: + continue + + # print score + print("{title}: {score:.2f}{suffix}".format(title=m.capitalize(), score=scores[m], + suffix=" (lower is better)" if m == "distribution" else "%")) + +for m, mPrintName in detailedMetrics: + print("") + # print metric title + print("{}:".format(mPrintName)) + + for t in sorted(list(scores[m].keys())): + # set sub-metric title + tName = t + if isinstance(scores[k], list): + tName = subMetrics.get(t, t).capitalize() + + # print score + print(" {title}: {score:.2f}{suffix} ({amount} questions)".format(title=tName, + score=scores[m][t][0], suffix="%", + amount=scores[m][t][1])) \ No newline at end of file diff --git a/llava/eval/eval_pope.py b/llava/eval/eval_pope.py new file mode 100644 index 0000000..b115b8f --- /dev/null +++ b/llava/eval/eval_pope.py @@ -0,0 +1,81 @@ +import os +import json +import argparse + +def eval_pope(answers, label_file): + label_list = [json.loads(q)['label'] for q in open(label_file, 'r')] + + for answer in answers: + text = answer['text'] + + # Only keep the first sentence + if text.find('.') != -1: + text = text.split('.')[0] + + text = text.replace(',', '') + words = text.split(' ') + 
if 'No' in words or 'not' in words or 'no' in words: + answer['text'] = 'no' + else: + answer['text'] = 'yes' + + for i in range(len(label_list)): + if label_list[i] == 'no': + label_list[i] = 0 + else: + label_list[i] = 1 + + pred_list = [] + for answer in answers: + if answer['text'] == 'no': + pred_list.append(0) + else: + pred_list.append(1) + + pos = 1 + neg = 0 + yes_ratio = pred_list.count(1) / len(pred_list) + + TP, TN, FP, FN = 0, 0, 0, 0 + for pred, label in zip(pred_list, label_list): + if pred == pos and label == pos: + TP += 1 + elif pred == pos and label == neg: + FP += 1 + elif pred == neg and label == neg: + TN += 1 + elif pred == neg and label == pos: + FN += 1 + + print('TP\tFP\tTN\tFN\t') + print('{}\t{}\t{}\t{}'.format(TP, FP, TN, FN)) + + precision = float(TP) / float(TP + FP) + recall = float(TP) / float(TP + FN) + f1 = 2*precision*recall / (precision + recall) + acc = (TP + TN) / (TP + TN + FP + FN) + print('Accuracy: {}'.format(acc)) + print('Precision: {}'.format(precision)) + print('Recall: {}'.format(recall)) + print('F1 score: {}'.format(f1)) + print('Yes ratio: {}'.format(yes_ratio)) + print('%.3f, %.3f, %.3f, %.3f, %.3f' % (f1, acc, precision, recall, yes_ratio) ) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--annotation-dir", type=str) + parser.add_argument("--question-file", type=str) + parser.add_argument("--result-file", type=str) + args = parser.parse_args() + + questions = [json.loads(line) for line in open(args.question_file)] + questions = {question['question_id']: question for question in questions} + answers = [json.loads(q) for q in open(args.result_file)] + for file in os.listdir(args.annotation_dir): + assert file.startswith('coco_pope_') + assert file.endswith('.json') + category = file[10:-5] + cur_answers = [x for x in answers if questions[x['question_id']]['category'] == category] + print('Category: {}, # samples: {}'.format(category, len(cur_answers))) + eval_pope(cur_answers, os.path.join(args.annotation_dir, file)) + print("====================================") diff --git a/llava/eval/eval_science_qa.py b/llava/eval/eval_science_qa.py new file mode 100644 index 0000000..ccf206b --- /dev/null +++ b/llava/eval/eval_science_qa.py @@ -0,0 +1,114 @@ +import argparse +import json +import os +import re +import random + + +def get_args(): + parser = argparse.ArgumentParser() + parser.add_argument('--base-dir', type=str) + parser.add_argument('--result-file', type=str) + parser.add_argument('--output-file', type=str) + parser.add_argument('--output-result', type=str) + parser.add_argument('--split', type=str, default='test') + parser.add_argument('--options', type=list, default=["A", "B", "C", "D", "E"]) + return parser.parse_args() + + +def convert_caps(results): + fakecaps = [] + for result in results: + image_id = result['question_id'] + caption = result['text'] + fakecaps.append({"image_id": int(image_id), "caption": caption}) + return fakecaps + + +def get_pred_idx(prediction, choices, options): + """ + Get the index (e.g. 2) from the prediction (e.g. 
'C') + """ + if prediction in options[:len(choices)]: + return options.index(prediction) + else: + return -1 + return random.choice(range(len(choices))) + + +if __name__ == "__main__": + args = get_args() + + base_dir = args.base_dir + split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[args.split] + problems = json.load(open(os.path.join(base_dir, "problems.json"))) + predictions = [json.loads(line) for line in open(args.result_file)] + predictions = {pred['question_id']: pred for pred in predictions} + split_problems = {idx: problems[idx] for idx in split_indices} + + results = {'correct': [], 'incorrect': []} + sqa_results = {} + sqa_results['acc'] = None + sqa_results['correct'] = None + sqa_results['count'] = None + sqa_results['results'] = {} + sqa_results['outputs'] = {} + + for prob_id, prob in split_problems.items(): + if prob_id not in predictions: + pred = {'text': 'FAILED', 'prompt': 'Unknown'} + pred_text = 'FAILED' + else: + pred = predictions[prob_id] + pred_text = pred['text'] + + if pred_text in args.options: + answer = pred_text + elif len(pred_text) >= 3 and pred_text[0] in args.options and pred_text[1:3] == ". ": + answer = pred_text[0] + else: + pattern = re.compile(r'The answer is ([A-Z]).') + res = pattern.findall(pred_text) + if len(res) == 1: + answer = res[0] # 'A', 'B', ... + else: + answer = "FAILED" + + pred_idx = get_pred_idx(answer, prob['choices'], args.options) + + analysis = { + 'question_id': prob_id, + 'parsed_ans': answer, + 'ground_truth': args.options[prob['answer']], + 'question': pred['prompt'], + 'pred': pred_text, + 'is_multimodal': '' in pred['prompt'], + } + + sqa_results['results'][prob_id] = get_pred_idx(answer, prob['choices'], args.options) + sqa_results['outputs'][prob_id] = pred_text + + if pred_idx == prob['answer']: + results['correct'].append(analysis) + else: + results['incorrect'].append(analysis) + + correct = len(results['correct']) + total = len(results['correct']) + len(results['incorrect']) + + ###### IMG ###### + multimodal_correct = len([x for x in results['correct'] if x['is_multimodal']]) + multimodal_incorrect = len([x for x in results['incorrect'] if x['is_multimodal']]) + multimodal_total = multimodal_correct + multimodal_incorrect + ###### IMG ###### + + print(f'Total: {total}, Correct: {correct}, Accuracy: {correct / total * 100:.2f}%, IMG-Accuracy: {multimodal_correct / multimodal_total * 100:.2f}%') + + sqa_results['acc'] = correct / total * 100 + sqa_results['correct'] = correct + sqa_results['count'] = total + + with open(args.output_file, 'w') as f: + json.dump(results, f, indent=2) + with open(args.output_result, 'w') as f: + json.dump(sqa_results, f, indent=2) diff --git a/llava/eval/eval_science_qa_gpt4.py b/llava/eval/eval_science_qa_gpt4.py new file mode 100644 index 0000000..c2ff17c --- /dev/null +++ b/llava/eval/eval_science_qa_gpt4.py @@ -0,0 +1,104 @@ +import argparse +import json +import os +import re +import random +from collections import defaultdict + + +def get_args(): + parser = argparse.ArgumentParser() + parser.add_argument('--base-dir', type=str) + parser.add_argument('--gpt4-result', type=str) + parser.add_argument('--our-result', type=str) + parser.add_argument('--split', type=str, default='test') + parser.add_argument('--options', type=list, default=["A", "B", "C", "D", "E"]) + return parser.parse_args() + + +def convert_caps(results): + fakecaps = [] + for result in results: + image_id = result['question_id'] + caption = result['text'] + fakecaps.append({"image_id": 
int(image_id), "caption": caption}) + return fakecaps + + +def get_pred_idx(prediction, choices, options): + """ + Get the index (e.g. 2) from the prediction (e.g. 'C') + """ + if prediction in options[:len(choices)]: + return options.index(prediction) + else: + return random.choice(range(len(choices))) + + +if __name__ == "__main__": + args = get_args() + + base_dir = args.base_dir + split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[args.split] + problems = json.load(open(os.path.join(base_dir, "problems.json"))) + our_predictions = [json.loads(line) for line in open(args.our_result)] + our_predictions = {pred['question_id']: pred for pred in our_predictions} + split_problems = {idx: problems[idx] for idx in split_indices} + + gpt4_predictions = json.load(open(args.gpt4_result))['outputs'] + + results = defaultdict(lambda: 0) + + for prob_id, prob in split_problems.items(): + if prob_id not in our_predictions: + continue + if prob_id not in gpt4_predictions: + continue + our_pred = our_predictions[prob_id]['text'] + gpt4_pred = gpt4_predictions[prob_id] + + pattern = re.compile(r'The answer is ([A-Z]).') + our_res = pattern.findall(our_pred) + if len(our_res) == 1: + our_answer = our_res[0] # 'A', 'B', ... + else: + our_answer = "FAILED" + gpt4_res = pattern.findall(gpt4_pred) + if len(gpt4_res) == 1: + gpt4_answer = gpt4_res[0] # 'A', 'B', ... + else: + gpt4_answer = "FAILED" + + our_pred_idx = get_pred_idx(our_answer, prob['choices'], args.options) + gpt4_pred_idx = get_pred_idx(gpt4_answer, prob['choices'], args.options) + + if gpt4_answer == 'FAILED': + results['gpt4_failed'] += 1 + # continue + gpt4_pred_idx = our_pred_idx + # if our_pred_idx != prob['answer']: + # print(our_predictions[prob_id]['prompt']) + # print('-----------------') + # print(f'LECTURE: {prob["lecture"]}') + # print(f'SOLUTION: {prob["solution"]}') + # print('=====================') + else: + # continue + pass + # gpt4_pred_idx = our_pred_idx + + if gpt4_pred_idx == prob['answer']: + results['correct'] += 1 + else: + results['incorrect'] += 1 + + + if gpt4_pred_idx == prob['answer'] or our_pred_idx == prob['answer']: + results['correct_upperbound'] += 1 + + correct = results['correct'] + total = results['correct'] + results['incorrect'] + print(f'Total: {total}, Correct: {correct}, Accuracy: {correct / total * 100:.2f}%') + print(f'Total: {total}, Correct (upper): {results["correct_upperbound"]}, Accuracy: {results["correct_upperbound"] / total * 100:.2f}%') + print(f'Total: {total}, GPT-4 NO-ANS (RANDOM): {results["gpt4_failed"]}, Percentage: {results["gpt4_failed"] / total * 100:.2f}%') + diff --git a/llava/eval/eval_science_qa_gpt4_requery.py b/llava/eval/eval_science_qa_gpt4_requery.py new file mode 100644 index 0000000..698546e --- /dev/null +++ b/llava/eval/eval_science_qa_gpt4_requery.py @@ -0,0 +1,149 @@ +import argparse +import json +import os +import re +import random +from collections import defaultdict + + +def get_args(): + parser = argparse.ArgumentParser() + parser.add_argument('--base-dir', type=str) + parser.add_argument('--gpt4-result', type=str) + parser.add_argument('--requery-result', type=str) + parser.add_argument('--our-result', type=str) + parser.add_argument('--output-result', type=str) + parser.add_argument('--split', type=str, default='test') + parser.add_argument('--options', type=list, default=["A", "B", "C", "D", "E"]) + return parser.parse_args() + + +def convert_caps(results): + fakecaps = [] + for result in results: + image_id = result['question_id'] + 
caption = result['text'] + fakecaps.append({"image_id": int(image_id), "caption": caption}) + return fakecaps + + +def get_pred_idx(prediction, choices, options): + """ + Get the index (e.g. 2) from the prediction (e.g. 'C') + """ + if prediction in options[:len(choices)]: + return options.index(prediction) + else: + return random.choice(range(len(choices))) + + +if __name__ == "__main__": + args = get_args() + + base_dir = args.base_dir + split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[args.split] + problems = json.load(open(os.path.join(base_dir, "problems.json"))) + our_predictions = [json.loads(line) for line in open(args.our_result)] + our_predictions = {pred['question_id']: pred for pred in our_predictions} + split_problems = {idx: problems[idx] for idx in split_indices} + + requery_predictions = [json.loads(line) for line in open(args.requery_result)] + requery_predictions = {pred['question_id']: pred for pred in requery_predictions} + + gpt4_predictions = json.load(open(args.gpt4_result))['outputs'] + + results = defaultdict(lambda: 0) + + sqa_results = {} + sqa_results['acc'] = None + sqa_results['correct'] = None + sqa_results['count'] = None + sqa_results['results'] = {} + sqa_results['outputs'] = {} + + for prob_id, prob in split_problems.items(): + if prob_id not in our_predictions: + assert False + if prob_id not in gpt4_predictions: + assert False + our_pred = our_predictions[prob_id]['text'] + gpt4_pred = gpt4_predictions[prob_id] + if prob_id not in requery_predictions: + results['missing_requery'] += 1 + requery_pred = "MISSING" + else: + requery_pred = requery_predictions[prob_id]['text'] + + pattern = re.compile(r'The answer is ([A-Z]).') + our_res = pattern.findall(our_pred) + if len(our_res) == 1: + our_answer = our_res[0] # 'A', 'B', ... + else: + our_answer = "FAILED" + + requery_res = pattern.findall(requery_pred) + if len(requery_res) == 1: + requery_answer = requery_res[0] # 'A', 'B', ... + else: + requery_answer = "FAILED" + + gpt4_res = pattern.findall(gpt4_pred) + if len(gpt4_res) == 1: + gpt4_answer = gpt4_res[0] # 'A', 'B', ... 
+ else: + gpt4_answer = "FAILED" + + our_pred_idx = get_pred_idx(our_answer, prob['choices'], args.options) + gpt4_pred_idx = get_pred_idx(gpt4_answer, prob['choices'], args.options) + requery_pred_idx = get_pred_idx(requery_answer, prob['choices'], args.options) + + results['total'] += 1 + + if gpt4_answer == 'FAILED': + results['gpt4_failed'] += 1 + if gpt4_pred_idx == prob['answer']: + results['gpt4_correct'] += 1 + if our_pred_idx == prob['answer']: + results['gpt4_ourvisual_correct'] += 1 + elif gpt4_pred_idx == prob['answer']: + results['gpt4_correct'] += 1 + results['gpt4_ourvisual_correct'] += 1 + + if our_pred_idx == prob['answer']: + results['our_correct'] += 1 + + if requery_answer == 'FAILED': + sqa_results['results'][prob_id] = our_pred_idx + if our_pred_idx == prob['answer']: + results['requery_correct'] += 1 + else: + sqa_results['results'][prob_id] = requery_pred_idx + if requery_pred_idx == prob['answer']: + results['requery_correct'] += 1 + else: + print(f""" +Question ({args.options[prob['answer']]}): {our_predictions[prob_id]['prompt']} +Our ({our_answer}): {our_pred} +GPT-4 ({gpt4_answer}): {gpt4_pred} +Requery ({requery_answer}): {requery_pred} +print("=====================================") +""") + + if gpt4_pred_idx == prob['answer'] or our_pred_idx == prob['answer']: + results['correct_upperbound'] += 1 + + total = results['total'] + print(f'Total: {total}, Our-Correct: {results["our_correct"]}, Accuracy: {results["our_correct"] / total * 100:.2f}%') + print(f'Total: {total}, GPT-4-Correct: {results["gpt4_correct"]}, Accuracy: {results["gpt4_correct"] / total * 100:.2f}%') + print(f'Total: {total}, GPT-4 NO-ANS (RANDOM): {results["gpt4_failed"]}, Percentage: {results["gpt4_failed"] / total * 100:.2f}%') + print(f'Total: {total}, GPT-4-OursVisual-Correct: {results["gpt4_ourvisual_correct"]}, Accuracy: {results["gpt4_ourvisual_correct"] / total * 100:.2f}%') + print(f'Total: {total}, Requery-Correct: {results["requery_correct"]}, Accuracy: {results["requery_correct"] / total * 100:.2f}%') + print(f'Total: {total}, Correct upper: {results["correct_upperbound"]}, Accuracy: {results["correct_upperbound"] / total * 100:.2f}%') + + sqa_results['acc'] = results["requery_correct"] / total * 100 + sqa_results['correct'] = results["requery_correct"] + sqa_results['count'] = total + + with open(args.output_result, 'w') as f: + json.dump(sqa_results, f, indent=2) + diff --git a/llava/eval/eval_textvqa.py b/llava/eval/eval_textvqa.py new file mode 100644 index 0000000..468f4bb --- /dev/null +++ b/llava/eval/eval_textvqa.py @@ -0,0 +1,65 @@ +import os +import argparse +import json +import re + +from llava.eval.m4c_evaluator import TextVQAAccuracyEvaluator + + +def get_args(): + parser = argparse.ArgumentParser() + parser.add_argument('--annotation-file', type=str) + parser.add_argument('--result-file', type=str) + parser.add_argument('--result-dir', type=str) + return parser.parse_args() + + +def prompt_processor(prompt): + if prompt.startswith('OCR tokens: '): + pattern = r"Question: (.*?) 
Short answer:" + match = re.search(pattern, prompt, re.DOTALL) + question = match.group(1) + elif 'Reference OCR token: ' in prompt and len(prompt.split('\n')) == 3: + if prompt.startswith('Reference OCR token:'): + question = prompt.split('\n')[1] + else: + question = prompt.split('\n')[0] + elif len(prompt.split('\n')) == 2: + question = prompt.split('\n')[0] + else: + assert False + + return question.lower() + + +def eval_single(annotation_file, result_file): + experiment_name = os.path.splitext(os.path.basename(result_file))[0] + print(experiment_name) + annotations = json.load(open(annotation_file))['data'] + annotations = {(annotation['image_id'], annotation['question'].lower()): annotation for annotation in annotations} + results = [json.loads(line) for line in open(result_file)] + + pred_list = [] + for result in results: + annotation = annotations[(result['question_id'], prompt_processor(result['prompt']))] + pred_list.append({ + "pred_answer": result['text'], + "gt_answers": annotation['answers'], + }) + + evaluator = TextVQAAccuracyEvaluator() + print('Samples: {}\nAccuracy: {:.2f}%\n'.format(len(pred_list), 100. * evaluator.eval_pred_list(pred_list))) + + +if __name__ == "__main__": + args = get_args() + + if args.result_file is not None: + eval_single(args.annotation_file, args.result_file) + + if args.result_dir is not None: + for result_file in sorted(os.listdir(args.result_dir)): + if not result_file.endswith('.jsonl'): + print(f'Skipping {result_file}') + continue + eval_single(args.annotation_file, os.path.join(args.result_dir, result_file)) diff --git a/llava/eval/generate_webpage_data_from_table.py b/llava/eval/generate_webpage_data_from_table.py new file mode 100644 index 0000000..9260225 --- /dev/null +++ b/llava/eval/generate_webpage_data_from_table.py @@ -0,0 +1,111 @@ +"""Generate json file for webpage.""" +import json +import os +import re + +# models = ['llama', 'alpaca', 'gpt35', 'bard'] +models = ['vicuna'] + + +def read_jsonl(path: str, key: str=None): + data = [] + with open(os.path.expanduser(path)) as f: + for line in f: + if not line: + continue + data.append(json.loads(line)) + if key is not None: + data.sort(key=lambda x: x[key]) + data = {item[key]: item for item in data} + return data + + +def trim_hanging_lines(s: str, n: int) -> str: + s = s.strip() + for _ in range(n): + s = s.split('\n', 1)[1].strip() + return s + + +if __name__ == '__main__': + questions = read_jsonl('table/question.jsonl', key='question_id') + + # alpaca_answers = read_jsonl('table/answer/answer_alpaca-13b.jsonl', key='question_id') + # bard_answers = read_jsonl('table/answer/answer_bard.jsonl', key='question_id') + # gpt35_answers = read_jsonl('table/answer/answer_gpt35.jsonl', key='question_id') + # llama_answers = read_jsonl('table/answer/answer_llama-13b.jsonl', key='question_id') + vicuna_answers = read_jsonl('table/answer/answer_vicuna-13b.jsonl', key='question_id') + ours_answers = read_jsonl('table/results/llama-13b-hf-alpaca.jsonl', key='question_id') + + review_vicuna = read_jsonl('table/review/review_vicuna-13b_llama-13b-hf-alpaca.jsonl', key='question_id') + # review_alpaca = read_jsonl('table/review/review_alpaca-13b_vicuna-13b.jsonl', key='question_id') + # review_bard = read_jsonl('table/review/review_bard_vicuna-13b.jsonl', key='question_id') + # review_gpt35 = read_jsonl('table/review/review_gpt35_vicuna-13b.jsonl', key='question_id') + # review_llama = read_jsonl('table/review/review_llama-13b_vicuna-13b.jsonl', key='question_id') + + records = [] + for qid in 
questions.keys(): + r = { + 'id': qid, + 'category': questions[qid]['category'], + 'question': questions[qid]['text'], + 'answers': { + # 'alpaca': alpaca_answers[qid]['text'], + # 'llama': llama_answers[qid]['text'], + # 'bard': bard_answers[qid]['text'], + # 'gpt35': gpt35_answers[qid]['text'], + 'vicuna': vicuna_answers[qid]['text'], + 'ours': ours_answers[qid]['text'], + }, + 'evaluations': { + # 'alpaca': review_alpaca[qid]['text'], + # 'llama': review_llama[qid]['text'], + # 'bard': review_bard[qid]['text'], + 'vicuna': review_vicuna[qid]['content'], + # 'gpt35': review_gpt35[qid]['text'], + }, + 'scores': { + 'vicuna': review_vicuna[qid]['tuple'], + # 'alpaca': review_alpaca[qid]['score'], + # 'llama': review_llama[qid]['score'], + # 'bard': review_bard[qid]['score'], + # 'gpt35': review_gpt35[qid]['score'], + }, + } + + # cleanup data + cleaned_evals = {} + for k, v in r['evaluations'].items(): + v = v.strip() + lines = v.split('\n') + # trim the first line if it's a pair of numbers + if re.match(r'\d+[, ]+\d+', lines[0]): + lines = lines[1:] + v = '\n'.join(lines) + cleaned_evals[k] = v.replace('Assistant 1', "**Assistant 1**").replace('Assistant 2', '**Assistant 2**') + + r['evaluations'] = cleaned_evals + records.append(r) + + # Reorder the records, this is optional + for r in records: + if r['id'] <= 20: + r['id'] += 60 + else: + r['id'] -= 20 + for r in records: + if r['id'] <= 50: + r['id'] += 10 + elif 50 < r['id'] <= 60: + r['id'] -= 50 + for r in records: + if r['id'] == 7: + r['id'] = 1 + elif r['id'] < 7: + r['id'] += 1 + + records.sort(key=lambda x: x['id']) + + # Write to file + with open('webpage/data.json', 'w') as f: + json.dump({'questions': records, 'models': models}, f, indent=2) diff --git a/llava/eval/m4c_evaluator.py b/llava/eval/m4c_evaluator.py new file mode 100644 index 0000000..e30e958 --- /dev/null +++ b/llava/eval/m4c_evaluator.py @@ -0,0 +1,334 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
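For reference, the soft-accuracy evaluator added in this file is consumed by eval_textvqa.py roughly as sketched below; the answer strings are made up purely to illustrate the metric (each prediction scores min(1, matching references / 3), averaged leave-one-out over the 10 human answers).

from llava.eval.m4c_evaluator import TextVQAAccuracyEvaluator

# One entry per question: the model's answer plus the 10 human reference answers.
pred_list = [{"pred_answer": "red", "gt_answers": ["red"] * 2 + ["maroon"] * 8}]

evaluator = TextVQAAccuracyEvaluator()
# "red" matches only 2 of the 10 references, so the soft score comes out to 0.6.
print(evaluator.eval_pred_list(pred_list))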
+import re + +from tqdm import tqdm + + +class EvalAIAnswerProcessor: + """ + Processes an answer similar to Eval AI + copied from + https://github.com/facebookresearch/mmf/blob/c46b3b3391275b4181567db80943473a89ab98ab/pythia/tasks/processors.py#L897 + """ + + CONTRACTIONS = { + "aint": "ain't", + "arent": "aren't", + "cant": "can't", + "couldve": "could've", + "couldnt": "couldn't", + "couldn'tve": "couldn't've", + "couldnt've": "couldn't've", + "didnt": "didn't", + "doesnt": "doesn't", + "dont": "don't", + "hadnt": "hadn't", + "hadnt've": "hadn't've", + "hadn'tve": "hadn't've", + "hasnt": "hasn't", + "havent": "haven't", + "hed": "he'd", + "hed've": "he'd've", + "he'dve": "he'd've", + "hes": "he's", + "howd": "how'd", + "howll": "how'll", + "hows": "how's", + "Id've": "I'd've", + "I'dve": "I'd've", + "Im": "I'm", + "Ive": "I've", + "isnt": "isn't", + "itd": "it'd", + "itd've": "it'd've", + "it'dve": "it'd've", + "itll": "it'll", + "let's": "let's", + "maam": "ma'am", + "mightnt": "mightn't", + "mightnt've": "mightn't've", + "mightn'tve": "mightn't've", + "mightve": "might've", + "mustnt": "mustn't", + "mustve": "must've", + "neednt": "needn't", + "notve": "not've", + "oclock": "o'clock", + "oughtnt": "oughtn't", + "ow's'at": "'ow's'at", + "'ows'at": "'ow's'at", + "'ow'sat": "'ow's'at", + "shant": "shan't", + "shed've": "she'd've", + "she'dve": "she'd've", + "she's": "she's", + "shouldve": "should've", + "shouldnt": "shouldn't", + "shouldnt've": "shouldn't've", + "shouldn'tve": "shouldn't've", + "somebody'd": "somebodyd", + "somebodyd've": "somebody'd've", + "somebody'dve": "somebody'd've", + "somebodyll": "somebody'll", + "somebodys": "somebody's", + "someoned": "someone'd", + "someoned've": "someone'd've", + "someone'dve": "someone'd've", + "someonell": "someone'll", + "someones": "someone's", + "somethingd": "something'd", + "somethingd've": "something'd've", + "something'dve": "something'd've", + "somethingll": "something'll", + "thats": "that's", + "thered": "there'd", + "thered've": "there'd've", + "there'dve": "there'd've", + "therere": "there're", + "theres": "there's", + "theyd": "they'd", + "theyd've": "they'd've", + "they'dve": "they'd've", + "theyll": "they'll", + "theyre": "they're", + "theyve": "they've", + "twas": "'twas", + "wasnt": "wasn't", + "wed've": "we'd've", + "we'dve": "we'd've", + "weve": "we've", + "werent": "weren't", + "whatll": "what'll", + "whatre": "what're", + "whats": "what's", + "whatve": "what've", + "whens": "when's", + "whered": "where'd", + "wheres": "where's", + "whereve": "where've", + "whod": "who'd", + "whod've": "who'd've", + "who'dve": "who'd've", + "wholl": "who'll", + "whos": "who's", + "whove": "who've", + "whyll": "why'll", + "whyre": "why're", + "whys": "why's", + "wont": "won't", + "wouldve": "would've", + "wouldnt": "wouldn't", + "wouldnt've": "wouldn't've", + "wouldn'tve": "wouldn't've", + "yall": "y'all", + "yall'll": "y'all'll", + "y'allll": "y'all'll", + "yall'd've": "y'all'd've", + "y'alld've": "y'all'd've", + "y'all'dve": "y'all'd've", + "youd": "you'd", + "youd've": "you'd've", + "you'dve": "you'd've", + "youll": "you'll", + "youre": "you're", + "youve": "you've", + } + + NUMBER_MAP = { + "none": "0", + "zero": "0", + "one": "1", + "two": "2", + "three": "3", + "four": "4", + "five": "5", + "six": "6", + "seven": "7", + "eight": "8", + "nine": "9", + "ten": "10", + } + ARTICLES = ["a", "an", "the"] + PERIOD_STRIP = re.compile(r"(?!<=\d)(\.)(?!\d)") + COMMA_STRIP = re.compile(r"(?<=\d)(\,)+(?=\d)") + PUNCTUATIONS = [ + ";", + r"/", 
+ "[", + "]", + '"', + "{", + "}", + "(", + ")", + "=", + "+", + "\\", + "_", + "-", + ">", + "<", + "@", + "`", + ",", + "?", + "!", + ] + + def __init__(self, *args, **kwargs): + pass + + def word_tokenize(self, word): + word = word.lower() + word = word.replace(",", "").replace("?", "").replace("'s", " 's") + return word.strip() + + def process_punctuation(self, in_text): + out_text = in_text + for p in self.PUNCTUATIONS: + if (p + " " in in_text or " " + p in in_text) or ( + re.search(self.COMMA_STRIP, in_text) is not None + ): + out_text = out_text.replace(p, "") + else: + out_text = out_text.replace(p, " ") + out_text = self.PERIOD_STRIP.sub("", out_text, re.UNICODE) + return out_text + + def process_digit_article(self, in_text): + out_text = [] + temp_text = in_text.lower().split() + for word in temp_text: + word = self.NUMBER_MAP.setdefault(word, word) + if word not in self.ARTICLES: + out_text.append(word) + else: + pass + for word_id, word in enumerate(out_text): + if word in self.CONTRACTIONS: + out_text[word_id] = self.CONTRACTIONS[word] + out_text = " ".join(out_text) + return out_text + + def __call__(self, item): + item = self.word_tokenize(item) + item = item.replace("\n", " ").replace("\t", " ").strip() + item = self.process_punctuation(item) + item = self.process_digit_article(item) + return item + + +class TextVQAAccuracyEvaluator: + def __init__(self): + self.answer_processor = EvalAIAnswerProcessor() + + def _compute_answer_scores(self, raw_answers): + """ + compute the accuracy (soft score) of human answers + """ + answers = [self.answer_processor(a) for a in raw_answers] + assert len(answers) == 10 + gt_answers = list(enumerate(answers)) + unique_answers = set(answers) + unique_answer_scores = {} + + for unique_answer in unique_answers: + accs = [] + for gt_answer in gt_answers: + other_answers = [item for item in gt_answers if item != gt_answer] + matching_answers = [ + item for item in other_answers if item[1] == unique_answer + ] + acc = min(1, float(len(matching_answers)) / 3) + accs.append(acc) + unique_answer_scores[unique_answer] = sum(accs) / len(accs) + + return unique_answer_scores + + def eval_pred_list(self, pred_list): + pred_scores = [] + for entry in tqdm(pred_list): + pred_answer = self.answer_processor(entry["pred_answer"]) + unique_answer_scores = self._compute_answer_scores(entry["gt_answers"]) + score = unique_answer_scores.get(pred_answer, 0.0) + pred_scores.append(score) + + accuracy = sum(pred_scores) / len(pred_scores) + return accuracy + + +class STVQAAccuracyEvaluator: + def __init__(self): + self.answer_processor = EvalAIAnswerProcessor() + + def eval_pred_list(self, pred_list): + pred_scores = [] + for entry in pred_list: + pred_answer = self.answer_processor(entry["pred_answer"]) + gts = [self.answer_processor(a) for a in entry["gt_answers"]] + score = 1.0 if pred_answer in gts else 0.0 + pred_scores.append(score) + + accuracy = sum(pred_scores) / len(pred_scores) + return accuracy + + +class STVQAANLSEvaluator: + def __init__(self): + import editdistance # install with `pip install editdistance` + + self.get_edit_distance = editdistance.eval + + def get_anls(self, s1, s2): + s1 = s1.lower().strip() + s2 = s2.lower().strip() + iou = 1 - self.get_edit_distance(s1, s2) / max(len(s1), len(s2)) + anls = iou if iou >= 0.5 else 0.0 + return anls + + def eval_pred_list(self, pred_list): + pred_scores = [] + for entry in pred_list: + anls = max( + self.get_anls(entry["pred_answer"], gt) for gt in entry["gt_answers"] + ) + pred_scores.append(anls) 
+ + accuracy = sum(pred_scores) / len(pred_scores) + return accuracy + + +class TextCapsBleu4Evaluator: + def __init__(self): + # The following script requires Java 1.8.0 and pycocotools installed. + # The pycocoevalcap can be installed with pip as + # pip install git+https://github.com/ronghanghu/coco-caption.git@python23 + # Original pycocoevalcap code is at https://github.com/tylin/coco-caption + # but has no python3 support yet. + try: + from pycocoevalcap.bleu.bleu import Bleu + from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer + except ModuleNotFoundError: + print( + "Please install pycocoevalcap module using " + "pip install git+https://github.com/ronghanghu/coco-caption.git@python23" # noqa + ) + raise + + self.tokenizer = PTBTokenizer() + self.scorer = Bleu(4) + + def eval_pred_list(self, pred_list): + # Create reference and hypotheses captions. + gts = {} + res = {} + for idx, entry in enumerate(pred_list): + gts[idx] = [{"caption": a} for a in entry["gt_answers"]] + res[idx] = [{"caption": entry["pred_answer"]}] + + gts = self.tokenizer.tokenize(gts) + res = self.tokenizer.tokenize(res) + score, _ = self.scorer.compute_score(gts, res) + + bleu4 = score[3] # score is (Bleu-1, Bleu-2, Bleu-3, Bleu-4) + return bleu4 diff --git a/llava/eval/model_qa.py b/llava/eval/model_qa.py new file mode 100644 index 0000000..6c8c113 --- /dev/null +++ b/llava/eval/model_qa.py @@ -0,0 +1,85 @@ +import argparse +from transformers import AutoTokenizer, AutoModelForCausalLM, StoppingCriteria +import torch +import os +import json +from tqdm import tqdm +import shortuuid + +from llava.conversation import default_conversation +from llava.utils import disable_torch_init + + +# new stopping implementation +class KeywordsStoppingCriteria(StoppingCriteria): + def __init__(self, keywords, tokenizer, input_ids): + self.keywords = keywords + self.tokenizer = tokenizer + self.start_len = None + self.input_ids = input_ids + + def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: + if self.start_len is None: + self.start_len = self.input_ids.shape[1] + else: + outputs = self.tokenizer.batch_decode(output_ids[:, self.start_len:], skip_special_tokens=True)[0] + for keyword in self.keywords: + if keyword in outputs: + return True + return False + + +@torch.inference_mode() +def eval_model(model_name, questions_file, answers_file): + # Model + disable_torch_init() + model_name = os.path.expanduser(model_name) + tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False) + model = AutoModelForCausalLM.from_pretrained(model_name, + torch_dtype=torch.float16).cuda() + + + ques_file = open(os.path.expanduser(questions_file), "r") + ans_file = open(os.path.expanduser(answers_file), "w") + for i, line in enumerate(tqdm(ques_file)): + idx = json.loads(line)["question_id"] + qs = json.loads(line)["text"] + cat = json.loads(line)["category"] + conv = default_conversation.copy() + conv.append_message(conv.roles[0], qs) + prompt = conv.get_prompt() + inputs = tokenizer([prompt]) + input_ids = torch.as_tensor(inputs.input_ids).cuda() + stopping_criteria = KeywordsStoppingCriteria([conv.sep], tokenizer, input_ids) + output_ids = model.generate( + input_ids, + do_sample=True, + use_cache=True, + temperature=0.7, + max_new_tokens=1024, + stopping_criteria=[stopping_criteria]) + outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0] + try: + index = outputs.index(conv.sep, len(prompt)) + except ValueError: + outputs += conv.sep + index = 
outputs.index(conv.sep, len(prompt)) + + outputs = outputs[len(prompt) + len(conv.roles[1]) + 2:index].strip() + ans_id = shortuuid.uuid() + ans_file.write(json.dumps({"question_id": idx, + "text": outputs, + "answer_id": ans_id, + "model_id": model_name, + "metadata": {}}) + "\n") + ans_file.flush() + ans_file.close() + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--model-name", type=str, default="facebook/opt-350m") + parser.add_argument("--question-file", type=str, default="tables/question.jsonl") + parser.add_argument("--answers-file", type=str, default="answer.jsonl") + args = parser.parse_args() + + eval_model(args.model_name, args.question_file, args.answers_file) diff --git a/llava/eval/model_vqa.py b/llava/eval/model_vqa.py new file mode 100644 index 0000000..ff9a122 --- /dev/null +++ b/llava/eval/model_vqa.py @@ -0,0 +1,112 @@ +import argparse +import torch +import os +import json +from tqdm import tqdm +import shortuuid + +from llava.constants import X_TOKEN_INDEX, DEFAULT_X_TOKEN, DEFAULT_X_START_TOKEN, DEFAULT_X_END_TOKEN +from llava.conversation import conv_templates, SeparatorStyle +from llava.model.builder import load_pretrained_model +from llava.utils import disable_torch_init +from llava.mm_utils import tokenizer_X_token, get_model_name_from_path, KeywordsStoppingCriteria + +from PIL import Image +import math + + +def split_list(lst, n): + """Split a list into n (roughly) equal-sized chunks""" + chunk_size = math.ceil(len(lst) / n) # integer division + return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)] + + +def get_chunk(lst, n, k): + chunks = split_list(lst, n) + return chunks[k] + + +def eval_model(args): + # Model + disable_torch_init() + model_path = os.path.expanduser(args.model_path) + model_name = get_model_name_from_path(model_path) + tokenizer, model, processor, context_len = load_pretrained_model(model_path, args.model_base, model_name) + + questions = [json.loads(q) for q in open(os.path.expanduser(args.question_file), "r")] + questions = get_chunk(questions, args.num_chunks, args.chunk_idx) + answers_file = os.path.expanduser(args.answers_file) + os.makedirs(os.path.dirname(answers_file), exist_ok=True) + ans_file = open(answers_file, "w") + for line in tqdm(questions): + idx = line["question_id"] + image_file = line["image"] + qs = line["text"] + cur_prompt = qs + if model.config.mm_use_x_start_end: + qs = DEFAULT_X_START_TOKEN['IMAGE'] + DEFAULT_X_TOKEN['IMAGE'] + DEFAULT_X_END_TOKEN['IMAGE'] + '\n' + qs + else: + qs = DEFAULT_X_TOKEN['IMAGE'] + '\n' + qs + + conv = conv_templates[args.conv_mode].copy() + conv.append_message(conv.roles[0], qs) + conv.append_message(conv.roles[1], None) + prompt = conv.get_prompt() + + input_ids = tokenizer_X_token(prompt, tokenizer, X_TOKEN_INDEX['IMAGE'], return_tensors='pt').unsqueeze(0).cuda() + + image = Image.open(os.path.join(args.image_folder, image_file)).convert('RGB') + image_tensor = processor['image'].preprocess(image, return_tensors='pt')['pixel_values'][0] + + stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2 + keywords = [stop_str] + stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids) + + with torch.inference_mode(): + output_ids = model.generate( + input_ids, + images=[[image_tensor.half().cuda()], ['image']], + do_sample=True if args.temperature > 0 else False, + temperature=args.temperature, + top_p=args.top_p, + num_beams=args.num_beams, + # no_repeat_ngram_size=3, + max_new_tokens=1024, + 
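+                # use_cache=True reuses cached key/value states across decoding steps.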
use_cache=True) + + input_token_len = input_ids.shape[1] + n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item() + if n_diff_input_output > 0: + print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids') + outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0] + outputs = outputs.strip() + if outputs.endswith(stop_str): + outputs = outputs[:-len(stop_str)] + outputs = outputs.strip() + + ans_id = shortuuid.uuid() + ans_file.write(json.dumps({"question_id": idx, + "prompt": cur_prompt, + "text": outputs, + "answer_id": ans_id, + "model_id": model_name, + "metadata": {}}) + "\n") + ans_file.flush() + ans_file.close() + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--model-path", type=str, default="facebook/opt-350m") + parser.add_argument("--model-base", type=str, default=None) + parser.add_argument("--image-folder", type=str, default="") + parser.add_argument("--question-file", type=str, default="tables/question.jsonl") + parser.add_argument("--answers-file", type=str, default="answer.jsonl") + parser.add_argument("--conv-mode", type=str, default="llava_v1") + parser.add_argument("--num-chunks", type=int, default=1) + parser.add_argument("--chunk-idx", type=int, default=0) + parser.add_argument("--temperature", type=float, default=0.2) + parser.add_argument("--top_p", type=float, default=None) + parser.add_argument("--num_beams", type=int, default=1) + args = parser.parse_args() + + eval_model(args) diff --git a/llava/eval/model_vqa_loader.py b/llava/eval/model_vqa_loader.py new file mode 100644 index 0000000..e767097 --- /dev/null +++ b/llava/eval/model_vqa_loader.py @@ -0,0 +1,148 @@ +import argparse +import torch +import os +import json +from tqdm import tqdm +import shortuuid + +from llava.constants import X_TOKEN_INDEX, DEFAULT_X_TOKEN, DEFAULT_X_START_TOKEN, DEFAULT_X_END_TOKEN +from llava.conversation import conv_templates, SeparatorStyle +from llava.model.builder import load_pretrained_model +from llava.utils import disable_torch_init +from llava.mm_utils import tokenizer_X_token, process_images, get_model_name_from_path +from torch.utils.data import Dataset, DataLoader + +from PIL import Image +import math + + +def split_list(lst, n): + """Split a list into n (roughly) equal-sized chunks""" + chunk_size = math.ceil(len(lst) / n) # integer division + return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)] + + +def get_chunk(lst, n, k): + chunks = split_list(lst, n) + return chunks[k] + + +# Custom dataset class +class CustomDataset(Dataset): + def __init__(self, questions, image_folder, tokenizer, image_processor, model_config): + self.questions = questions + self.image_folder = image_folder + self.tokenizer = tokenizer + self.image_processor = image_processor + self.model_config = model_config + + def __getitem__(self, index): + line = self.questions[index] + image_file = line["image"] + qs = line["text"] + # if self.model_config.mm_use_im_start_end: + # qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs + # else: + # qs = DEFAULT_IMAGE_TOKEN + '\n' + qs + + if self.model_config.mm_use_x_start_end: + qs = DEFAULT_X_START_TOKEN['IMAGE'] + DEFAULT_X_TOKEN['IMAGE'] + DEFAULT_X_END_TOKEN['IMAGE'] + '\n' + qs + else: + qs = DEFAULT_X_TOKEN['IMAGE'] + '\n' + qs + conv = conv_templates[args.conv_mode].copy() + conv.append_message(conv.roles[0], qs) + conv.append_message(conv.roles[1], None) + prompt = 
conv.get_prompt() + + image = Image.open(os.path.join(self.image_folder, image_file)).convert('RGB') + image_tensor = process_images([image], self.image_processor, self.model_config)[0] + + input_ids = tokenizer_X_token(prompt, self.tokenizer, X_TOKEN_INDEX['IMAGE'], return_tensors='pt') + + return input_ids, image_tensor + + def __len__(self): + return len(self.questions) + + +# DataLoader +def create_data_loader(questions, image_folder, tokenizer, image_processor, model_config, batch_size=1, num_workers=4): + assert batch_size == 1, "batch_size must be 1" + dataset = CustomDataset(questions, image_folder, tokenizer, image_processor, model_config) + data_loader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, shuffle=False) + return data_loader + + +def eval_model(args): + # Model + disable_torch_init() + model_path = os.path.expanduser(args.model_path) + model_name = get_model_name_from_path(model_path) + tokenizer, model, processor, context_len = load_pretrained_model(model_path, args.model_base, model_name) + + questions = [json.loads(q) for q in open(os.path.expanduser(args.question_file), "r")] + questions = get_chunk(questions, args.num_chunks, args.chunk_idx) + answers_file = os.path.expanduser(args.answers_file) + os.makedirs(os.path.dirname(answers_file), exist_ok=True) + ans_file = open(answers_file, "w") + + if 'plain' in model_name and 'finetune' not in model_name.lower() and 'mmtag' not in args.conv_mode: + args.conv_mode = args.conv_mode + '_mmtag' + print(f'It seems that this is a plain model, but it is not using a mmtag prompt, auto switching to {args.conv_mode}.') + + data_loader = create_data_loader(questions, args.image_folder, tokenizer, processor['image'], model.config) + + for (input_ids, image_tensor), line in tqdm(zip(data_loader, questions), total=len(questions)): + idx = line["question_id"] + cur_prompt = line["text"] + + stop_str = conv_templates[args.conv_mode].sep if conv_templates[args.conv_mode].sep_style != SeparatorStyle.TWO else conv_templates[args.conv_mode].sep2 + input_ids = input_ids.to(device='cuda', non_blocking=True) + + with torch.inference_mode(): + output_ids = model.generate( + input_ids, + images=[[image_tensor[0].to(dtype=torch.float16, device='cuda', non_blocking=True)], ['image']], + do_sample=True if args.temperature > 0 else False, + temperature=args.temperature, + top_p=args.top_p, + num_beams=args.num_beams, + max_new_tokens=128, + use_cache=True) + + input_token_len = input_ids.shape[1] + n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item() + if n_diff_input_output > 0: + print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids') + outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0] + outputs = outputs.strip() + if outputs.endswith(stop_str): + outputs = outputs[:-len(stop_str)] + outputs = outputs.strip() + + ans_id = shortuuid.uuid() + ans_file.write(json.dumps({"question_id": idx, + "prompt": cur_prompt, + "text": outputs, + "answer_id": ans_id, + "model_id": model_name, + "metadata": {}}) + "\n") + # ans_file.flush() + ans_file.close() + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--model-path", type=str, default="facebook/opt-350m") + parser.add_argument("--model-base", type=str, default=None) + parser.add_argument("--image-folder", type=str, default="") + parser.add_argument("--question-file", type=str, default="tables/question.jsonl") + 
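+    # Answers are written as JSONL; eval_model() creates the parent directory if needed.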
parser.add_argument("--answers-file", type=str, default="answer.jsonl") + parser.add_argument("--conv-mode", type=str, default="llava_v1") + parser.add_argument("--num-chunks", type=int, default=1) + parser.add_argument("--chunk-idx", type=int, default=0) + parser.add_argument("--temperature", type=float, default=0.2) + parser.add_argument("--top_p", type=float, default=None) + parser.add_argument("--num_beams", type=int, default=1) + args = parser.parse_args() + + eval_model(args) diff --git a/llava/eval/model_vqa_mmbench.py b/llava/eval/model_vqa_mmbench.py new file mode 100644 index 0000000..4e7e753 --- /dev/null +++ b/llava/eval/model_vqa_mmbench.py @@ -0,0 +1,175 @@ +import argparse +import torch +import os +import json +import pandas as pd +from tqdm import tqdm +import shortuuid + +from llava.constants import X_TOKEN_INDEX, DEFAULT_X_TOKEN, DEFAULT_X_START_TOKEN, DEFAULT_X_END_TOKEN +from llava.conversation import conv_templates, SeparatorStyle +from llava.model.builder import load_pretrained_model +from llava.utils import disable_torch_init +from llava.mm_utils import tokenizer_X_token, process_images, load_image_from_base64, get_model_name_from_path + +from PIL import Image +import math + + +all_options = ['A', 'B', 'C', 'D'] + + +def split_list(lst, n): + """Split a list into n (roughly) equal-sized chunks""" + chunk_size = math.ceil(len(lst) / n) # integer division + return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)] + + +def get_chunk(lst, n, k): + chunks = split_list(lst, n) + return chunks[k] + + +def is_none(value): + if value is None: + return True + if type(value) is float and math.isnan(value): + return True + if type(value) is str and value.lower() == 'nan': + return True + if type(value) is str and value.lower() == 'none': + return True + return False + +def get_options(row, options): + parsed_options = [] + for option in options: + option_value = row[option] + if is_none(option_value): + break + parsed_options.append(option_value) + return parsed_options + + +def eval_model(args): + # Model + disable_torch_init() + model_path = os.path.expanduser(args.model_path) + model_name = get_model_name_from_path(model_path) + tokenizer, model, processor, context_len = load_pretrained_model(model_path, args.model_base, model_name) + + questions = pd.read_table(os.path.expanduser(args.question_file)) + questions = get_chunk(questions, args.num_chunks, args.chunk_idx) + answers_file = os.path.expanduser(args.answers_file) + os.makedirs(os.path.dirname(answers_file), exist_ok=True) + ans_file = open(answers_file, "w") + + if 'plain' in model_name and 'finetune' not in model_name.lower() and 'mmtag' not in args.conv_mode: + args.conv_mode = args.conv_mode + '_mmtag' + print(f'It seems that this is a plain model, but it is not using a mmtag prompt, auto switching to {args.conv_mode}.') + + for index, row in tqdm(questions.iterrows(), total=len(questions)): + options = get_options(row, all_options) + cur_option_char = all_options[:len(options)] + + if args.all_rounds: + num_rounds = len(options) + else: + num_rounds = 1 + + for round_idx in range(num_rounds): + idx = row['index'] + question = row['question'] + hint = row['hint'] + image = load_image_from_base64(row['image']) + if not is_none(hint): + question = hint + '\n' + question + for option_char, option in zip(all_options[:len(options)], options): + question = question + '\n' + option_char + '. 
' + option + qs = cur_prompt = question + # if model.config.mm_use_im_start_end: + # qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs + # else: + # qs = DEFAULT_IMAGE_TOKEN + '\n' + qs + if model.config.mm_use_x_start_end: + qs = DEFAULT_X_START_TOKEN['IMAGE'] + DEFAULT_X_TOKEN['IMAGE'] + DEFAULT_X_END_TOKEN[ + 'IMAGE'] + '\n' + qs + else: + qs = DEFAULT_X_TOKEN['IMAGE'] + '\n' + qs + + if args.single_pred_prompt: + if args.lang == 'cn': + qs = qs + '\n' + "请直接回答选项字母。" + else: + qs = qs + '\n' + "Answer with the option's letter from the given choices directly." + + conv = conv_templates[args.conv_mode].copy() + conv.append_message(conv.roles[0], qs) + conv.append_message(conv.roles[1], None) + prompt = conv.get_prompt() + + input_ids = tokenizer_X_token(prompt, tokenizer, X_TOKEN_INDEX['IMAGE'], return_tensors='pt').unsqueeze(0).cuda() + + image_tensor = process_images([image], processor['image'], model.config)[0] + # image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0] + + stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2 + + with torch.inference_mode(): + output_ids = model.generate( + input_ids, + images=[[image_tensor.half().cuda()], ['image']], + do_sample=True if args.temperature > 0 else False, + temperature=args.temperature, + top_p=args.top_p, + num_beams=args.num_beams, + # no_repeat_ngram_size=3, + max_new_tokens=1024, + use_cache=True) + + input_token_len = input_ids.shape[1] + n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item() + if n_diff_input_output > 0: + print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids') + outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0] + outputs = outputs.strip() + if outputs.endswith(stop_str): + outputs = outputs[:-len(stop_str)] + outputs = outputs.strip() + + ans_id = shortuuid.uuid() + ans_file.write(json.dumps({"question_id": idx, + "round_id": round_idx, + "prompt": cur_prompt, + "text": outputs, + "options": options, + "option_char": cur_option_char, + "answer_id": ans_id, + "model_id": model_name, + "metadata": {}}) + "\n") + ans_file.flush() + + # rotate options + options = options[1:] + options[:1] + cur_option_char = cur_option_char[1:] + cur_option_char[:1] + ans_file.close() + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--model-path", type=str, default="facebook/opt-350m") + parser.add_argument("--model-base", type=str, default=None) + parser.add_argument("--image-folder", type=str, default="") + parser.add_argument("--question-file", type=str, default="tables/question.jsonl") + parser.add_argument("--answers-file", type=str, default="answer.jsonl") + parser.add_argument("--conv-mode", type=str, default="llava_v1") + parser.add_argument("--num-chunks", type=int, default=1) + parser.add_argument("--chunk-idx", type=int, default=0) + parser.add_argument("--temperature", type=float, default=0.2) + parser.add_argument("--top_p", type=float, default=None) + parser.add_argument("--num_beams", type=int, default=1) + parser.add_argument("--all-rounds", action="store_true") + parser.add_argument("--single-pred-prompt", action="store_true") + parser.add_argument("--lang", type=str, default="en") + args = parser.parse_args() + + eval_model(args) diff --git a/llava/eval/model_vqa_science.py b/llava/eval/model_vqa_science.py new file mode 100644 index 0000000..3815ffe --- /dev/null +++ 
b/llava/eval/model_vqa_science.py @@ -0,0 +1,150 @@ +import argparse +import torch +import os +import json +from tqdm import tqdm +import shortuuid + +from llava.constants import X_TOKEN_INDEX, DEFAULT_X_TOKEN, DEFAULT_X_START_TOKEN, DEFAULT_X_END_TOKEN +from llava.conversation import conv_templates, SeparatorStyle +from llava.model.builder import load_pretrained_model +from llava.utils import disable_torch_init +from llava.mm_utils import tokenizer_X_token, get_model_name_from_path, KeywordsStoppingCriteria + +from PIL import Image +import math + + +def split_list(lst, n): + """Split a list into n (roughly) equal-sized chunks""" + chunk_size = math.ceil(len(lst) / n) # integer division + return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)] + + +def get_chunk(lst, n, k): + chunks = split_list(lst, n) + return chunks[k] + + +def eval_model(args): + # Model + disable_torch_init() + model_path = os.path.expanduser(args.model_path) + model_name = get_model_name_from_path(model_path) + tokenizer, model, processor, context_len = load_pretrained_model(model_path, args.model_base, model_name) + print(model) + print(processor) + print(model_path, model_name) + questions = json.load(open(os.path.expanduser(args.question_file), "r")) + questions = get_chunk(questions, args.num_chunks, args.chunk_idx) + answers_file = os.path.expanduser(args.answers_file) + os.makedirs(os.path.dirname(answers_file), exist_ok=True) + ans_file = open(answers_file, "w") + for i, line in enumerate(tqdm(questions)): + idx = line["id"] + question = line['conversations'][0] + qs = question['value'].replace('', '').strip() + cur_prompt = qs + + if 'image' in line: + image_file = line["image"] + image = Image.open(os.path.join(args.image_folder, image_file)) + image_tensor = processor['image'].preprocess(image, return_tensors='pt')['pixel_values'][0] + # images = image_tensor.unsqueeze(0).half().cuda() ########## + images = image_tensor.half().cuda() + if getattr(model.config, 'mm_use_x_start_end', False): + qs = DEFAULT_X_START_TOKEN['IMAGE'] + DEFAULT_X_TOKEN['IMAGE'] + DEFAULT_X_END_TOKEN['IMAGE'] + '\n' + qs + else: + qs = DEFAULT_X_TOKEN['IMAGE'] + '\n' + qs + cur_prompt = '' + '\n' + cur_prompt + else: + images = None + + if args.single_pred_prompt: + qs = qs + '\n' + "Answer with the option's letter from the given choices directly." + cur_prompt = cur_prompt + '\n' + "Answer with the option's letter from the given choices directly." 
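+        # Build the conversation prompt and tokenize it, mapping the IMAGE placeholder to X_TOKEN_INDEX['IMAGE'].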
+ + conv = conv_templates[args.conv_mode].copy() + conv.append_message(conv.roles[0], qs) + conv.append_message(conv.roles[1], None) + prompt = conv.get_prompt() + + input_ids = tokenizer_X_token(prompt, tokenizer, X_TOKEN_INDEX['IMAGE'], return_tensors='pt').unsqueeze(0).cuda() + + stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2 + keywords = [stop_str] + stopping_criteria = [KeywordsStoppingCriteria(keywords, tokenizer, input_ids)] if conv.version == "v0" else None + + with torch.inference_mode(): + output_ids = model.generate( + input_ids, + images=[[images], ['image']], + do_sample=True if args.temperature > 0 else False, + temperature=args.temperature, + max_new_tokens=1024, + use_cache=True, + stopping_criteria=stopping_criteria, + ) + + input_token_len = input_ids.shape[1] + n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item() + if n_diff_input_output > 0: + print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids') + outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0] + outputs = outputs.strip() + if outputs.endswith(stop_str): + outputs = outputs[:-len(stop_str)] + outputs = outputs.strip() + + # prompt for answer + if args.answer_prompter: + outputs_reasoning = outputs + input_ids = tokenizer_X_token(prompt + outputs_reasoning + ' ###\nANSWER:', tokenizer, X_TOKEN_INDEX['IMAGE'], return_tensors='pt').unsqueeze(0).cuda() + + with torch.inference_mode(): + output_ids = model.generate( + input_ids, + images=[[images], ['image']], + do_sample=True if args.temperature > 0 else False, + temperature=args.temperature, + max_new_tokens=64, + use_cache=True, + stopping_criteria=stopping_criteria) + + input_token_len = input_ids.shape[1] + n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item() + if n_diff_input_output > 0: + print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids') + outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0] + outputs = outputs.strip() + if outputs.endswith(stop_str): + outputs = outputs[:-len(stop_str)] + outputs = outputs.strip() + outputs = outputs_reasoning + '\n The answer is ' + outputs + + ans_id = shortuuid.uuid() + ans_file.write(json.dumps({"question_id": idx, + "prompt": cur_prompt, + "text": outputs, + "answer_id": ans_id, + "model_id": model_name, + "metadata": {}}) + "\n") + ans_file.flush() + ans_file.close() + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--model-path", type=str, default="facebook/opt-350m") + parser.add_argument("--model-base", type=str, default=None) + parser.add_argument("--image-folder", type=str, default="") + parser.add_argument("--question-file", type=str, default="tables/question.json") + parser.add_argument("--answers-file", type=str, default="answer.jsonl") + parser.add_argument("--conv-mode", type=str, default="llava_v0") + parser.add_argument("--num-chunks", type=int, default=1) + parser.add_argument("--chunk-idx", type=int, default=0) + parser.add_argument("--temperature", type=float, default=0.2) + parser.add_argument("--answer-prompter", action="store_true") + parser.add_argument("--single-pred-prompt", action="store_true") + args = parser.parse_args() + + eval_model(args) diff --git a/llava/eval/qa_baseline_gpt35.py b/llava/eval/qa_baseline_gpt35.py new file mode 100644 index 0000000..babab6e --- /dev/null +++ b/llava/eval/qa_baseline_gpt35.py @@ -0,0
+1,74 @@ +"""Generate answers with GPT-3.5""" +# Note: you need to be using OpenAI Python v0.27.0 for the code below to work +import argparse +import json +import os +import time +import concurrent.futures + +import openai +import tqdm +import shortuuid + +MODEL = 'gpt-3.5-turbo' +MODEL_ID = 'gpt-3.5-turbo:20230327' + +def get_answer(question_id: int, question: str, max_tokens: int): + ans = { + 'answer_id': shortuuid.uuid(), + 'question_id': question_id, + 'model_id': MODEL_ID, + } + for _ in range(3): + try: + response = openai.ChatCompletion.create( + model=MODEL, + messages=[{ + 'role': 'system', + 'content': 'You are a helpful assistant.' + }, { + 'role': 'user', + 'content': question, + }], + max_tokens=max_tokens, + ) + ans['text'] = response['choices'][0]['message']['content'] + return ans + except Exception as e: + print('[ERROR]', e) + ans['text'] = '#ERROR#' + time.sleep(1) + return ans + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='ChatGPT answer generation.') + parser.add_argument('-q', '--question') + parser.add_argument('-o', '--output') + parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output') + args = parser.parse_args() + + questions_dict = {} + with open(os.path.expanduser(args.question)) as f: + for line in f: + if not line: + continue + q = json.loads(line) + questions_dict[q['question_id']] = q['text'] + + answers = [] + + with concurrent.futures.ThreadPoolExecutor(max_workers=32) as executor: + futures = [] + for qid, question in questions_dict.items(): + future = executor.submit(get_answer, qid, question, args.max_tokens) + futures.append(future) + + for future in tqdm.tqdm(concurrent.futures.as_completed(futures), total=len(futures)): + answers.append(future.result()) + + answers.sort(key=lambda x: x['question_id']) + + with open(os.path.expanduser(args.output), 'w') as f: + table = [json.dumps(ans) for ans in answers] + f.write('\n'.join(table)) diff --git a/llava/eval/run_llava.py b/llava/eval/run_llava.py new file mode 100644 index 0000000..b54f0d6 --- /dev/null +++ b/llava/eval/run_llava.py @@ -0,0 +1,97 @@ +import argparse +import torch + +from llava.constants import X_TOKEN_INDEX, DEFAULT_X_TOKEN, DEFAULT_X_START_TOKEN, DEFAULT_X_END_TOKEN +from llava.conversation import conv_templates, SeparatorStyle +from llava.model.builder import load_pretrained_model +from llava.utils import disable_torch_init +from llava.mm_utils import tokenizer_X_token, get_model_name_from_path, KeywordsStoppingCriteria + +from PIL import Image + +import requests +from PIL import Image +from io import BytesIO + + +def load_image(image_file): + if image_file.startswith('http') or image_file.startswith('https'): + response = requests.get(image_file) + image = Image.open(BytesIO(response.content)).convert('RGB') + else: + image = Image.open(image_file).convert('RGB') + return image + + +def eval_model(args): + # Model + disable_torch_init() + + model_name = get_model_name_from_path(args.model_path) + tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name) + + qs = args.query + if model.config.mm_use_im_start_end: + qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs + else: + qs = DEFAULT_IMAGE_TOKEN + '\n' + qs + + if 'llama-2' in model_name.lower(): + conv_mode = "llava_llama_2" + elif "v1" in model_name.lower(): + conv_mode = "llava_v1" + elif "mpt" in model_name.lower(): + conv_mode = "mpt" + else: + 
conv_mode = "llava_v0" + + if args.conv_mode is not None and conv_mode != args.conv_mode: + print('[WARNING] the auto inferred conversation mode is {}, while `--conv-mode` is {}, using {}'.format(conv_mode, args.conv_mode, args.conv_mode)) + else: + args.conv_mode = conv_mode + + conv = conv_templates[args.conv_mode].copy() + conv.append_message(conv.roles[0], qs) + conv.append_message(conv.roles[1], None) + prompt = conv.get_prompt() + + image = load_image(args.image_file) + image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'].half().cuda() + + input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda() + + stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2 + keywords = [stop_str] + stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids) + + with torch.inference_mode(): + output_ids = model.generate( + input_ids, + images=image_tensor, + do_sample=True, + temperature=0.2, + max_new_tokens=1024, + use_cache=True, + stopping_criteria=[stopping_criteria]) + + input_token_len = input_ids.shape[1] + n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item() + if n_diff_input_output > 0: + print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids') + outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0] + outputs = outputs.strip() + if outputs.endswith(stop_str): + outputs = outputs[:-len(stop_str)] + outputs = outputs.strip() + print(outputs) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--model-path", type=str, default="facebook/opt-350m") + parser.add_argument("--model-base", type=str, default=None) + parser.add_argument("--image-file", type=str, required=True) + parser.add_argument("--query", type=str, required=True) + parser.add_argument("--conv-mode", type=str, default=None) + args = parser.parse_args() + + eval_model(args) diff --git a/llava/eval/summarize_gpt_review.py b/llava/eval/summarize_gpt_review.py new file mode 100644 index 0000000..0f796a3 --- /dev/null +++ b/llava/eval/summarize_gpt_review.py @@ -0,0 +1,60 @@ +import json +import os +from collections import defaultdict + +import numpy as np + +import argparse + +def parse_args(): + parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.') + parser.add_argument('-d', '--dir', default=None) + parser.add_argument('-v', '--version', default=None) + parser.add_argument('-s', '--select', nargs='*', default=None) + parser.add_argument('-f', '--files', nargs='*', default=[]) + parser.add_argument('-i', '--ignore', nargs='*', default=[]) + return parser.parse_args() + + +if __name__ == '__main__': + args = parse_args() + + if args.ignore is not None: + args.ignore = [int(x) for x in args.ignore] + + if len(args.files) > 0: + review_files = args.files + else: + review_files = [x for x in os.listdir(args.dir) if x.endswith('.jsonl') and (x.startswith('gpt4_text') or x.startswith('reviews_') or x.startswith('review_') or 'review' in args.dir)] + + for review_file in sorted(review_files): + config = os.path.basename(review_file).replace('gpt4_text_', '').replace('.jsonl', '') + if args.select is not None and any(x not in config for x in args.select): + continue + if '0613' in config: + version = '0613' + else: + version = '0314' + if args.version is not None and args.version != version: + continue + scores = defaultdict(list) + print(config) + with 
open(os.path.join(args.dir, review_file) if args.dir is not None else review_file) as f: + for review_str in f: + review = json.loads(review_str) + if review['question_id'] in args.ignore: + continue + if 'category' in review: + scores[review['category']].append(review['tuple']) + scores['all'].append(review['tuple']) + else: + if 'tuple' in review: + scores['all'].append(review['tuple']) + else: + scores['all'].append(review['score']) + for k, v in sorted(scores.items()): + stats = np.asarray(v).mean(0).tolist() + stats = [round(x, 3) for x in stats] + # print(k, stats, round(stats[1]/stats[0]*100, 1)) + print(k, round(stats[1]/stats[0]*100, 1), round(stats[0] * 10, 1), round(stats[1] * 10, 1)) + print('=================================') diff --git a/llava/eval/table/caps_boxes_coco2014_val_80.jsonl b/llava/eval/table/caps_boxes_coco2014_val_80.jsonl new file mode 100644 index 0000000..a0130f3 --- /dev/null +++ b/llava/eval/table/caps_boxes_coco2014_val_80.jsonl @@ -0,0 +1,80 @@ +{"id": "000000296284", "image": "000000296284.jpg", "captions": ["A donut shop is full of different flavors of donuts.", "Fruit flavored donuts lined up in a glass fronted cabinet", "A rack with some doughnuts in a glass case.", "A display case in a bakery filled with donuts.", "An assortment of doughnuts are arranged in a display case."], "instances": [{"category": "donut", "bbox": [0.37, 0.584, 0.504, 0.709]}, {"category": "donut", "bbox": [0.369, 0.22, 0.492, 0.317]}, {"category": "donut", "bbox": [0.471, 0.587, 0.639, 0.706]}, {"category": "donut", "bbox": [0.544, 0.213, 0.679, 0.316]}, {"category": "donut", "bbox": [0.035, 0.22, 0.196, 0.328]}, {"category": "donut", "bbox": [0.054, 0.608, 0.221, 0.711]}, {"category": "donut", "bbox": [0.283, 0.586, 0.429, 0.708]}, {"category": "donut", "bbox": [0.466, 0.226, 0.585, 0.32]}, {"category": "donut", "bbox": [0.28, 0.232, 0.393, 0.322]}, {"category": "donut", "bbox": [0.0, 0.609, 0.097, 0.722]}]} +{"id": "000000151358", "image": "000000151358.jpg", "captions": ["A newspaper that has sunglasses on top of it sitting in front of books.", "an apple sunglasses books and a teddy bear", "A folded newspaper and sunglasses are on a table with an apple, books, and teddy bear behind.", "An apple sitting on a table next to sunglasses and a news paper.", "There are sunglasses laying on the folded newspaper."], "instances": [{"category": "tie", "bbox": [0.258, 0.074, 0.527, 0.589]}, {"category": "apple", "bbox": [0.621, 0.482, 0.853, 0.645]}, {"category": "book", "bbox": [0.154, 0.107, 0.275, 0.59]}, {"category": "book", "bbox": [0.535, 0.09, 0.735, 0.583]}, {"category": "book", "bbox": [0.051, 0.112, 0.159, 0.6]}, {"category": "teddy bear", "bbox": [0.753, 0.084, 1.0, 0.517]}, {"category": "book", "bbox": [0.681, 0.097, 0.796, 0.483]}, {"category": "book", "bbox": [0.443, 0.099, 0.574, 0.588]}, {"category": "book", "bbox": [0.267, 0.337, 0.386, 0.579]}]} +{"id": "000000052312", "image": "000000052312.jpg", "captions": ["The old man literally has a toothbrush mustache.", "An old man with a tooth brush head under his nose, mimicking Hitler", "A man wearing a toothbrush for a moustache.", "A man with the head of a toothbrush under his nose like a mustache", "An elderly man wearing the head of a toothbrush as a moustache."], "instances": [{"category": "toothbrush", "bbox": [0.345, 0.59, 0.594, 0.679]}, {"category": "person", "bbox": [0.0, 0.03, 1.0, 0.99]}]} +{"id": "000000473210", "image": "000000473210.jpg", "captions": ["two people taking apart their wii controllers to replace 
batteries", "People taking apart video game remote controls on a table", "People handling a couple of remotes taking them apart.", "two sets of hands a wooden table and two controllers", "Two people who are taking apart a video game controller."], "instances": [{"category": "person", "bbox": [0.002, 0.334, 0.453, 0.986]}, {"category": "remote", "bbox": [0.407, 0.207, 0.727, 0.604]}, {"category": "remote", "bbox": [0.088, 0.344, 0.313, 0.547]}, {"category": "laptop", "bbox": [0.001, 0.049, 0.1, 0.197]}, {"category": "person", "bbox": [0.484, 0.254, 0.998, 0.985]}, {"category": "dining table", "bbox": [0.0, 0.003, 1.0, 0.956]}]} +{"id": "000000097131", "image": "000000097131.jpg", "captions": ["A car parked by a parking meter in front of a building.", "A car is sitting parked at a curb in front of a parking meter.", "A black car on the street next to a parking meter.", "A gray car parked in front of two parking meters.", "A black car parked on the side of the road."], "instances": [{"category": "car", "bbox": [0.227, 0.362, 0.946, 0.761]}, {"category": "car", "bbox": [0.793, 0.322, 0.88, 0.4]}, {"category": "car", "bbox": [0.0, 0.447, 0.028, 0.726]}, {"category": "parking meter", "bbox": [0.156, 0.35, 0.186, 0.453]}, {"category": "truck", "bbox": [0.907, 0.331, 1.0, 0.408]}, {"category": "parking meter", "bbox": [0.188, 0.349, 0.218, 0.448]}]} +{"id": "000000543364", "image": "000000543364.jpg", "captions": ["There is a table in the middle of the room.", "A room with a couch, table, lamp and a chaise.", "A living room with couch, chaise, track lighting, and a large window.", "A room with large windows, a couch and a table.", "A living room with lots of furniture and a large window."], "instances": [{"category": "dining table", "bbox": [0.388, 0.644, 0.636, 0.879]}, {"category": "couch", "bbox": [0.194, 0.531, 0.552, 0.777]}, {"category": "couch", "bbox": [0.568, 0.488, 0.907, 0.783]}, {"category": "remote", "bbox": [0.524, 0.651, 0.556, 0.675]}, {"category": "chair", "bbox": [0.661, 0.478, 0.802, 0.604]}]} +{"id": "000000217181", "image": "000000217181.jpg", "captions": ["They are standing next to some stylish motorcycles.", "Three men are standing around looking at sports motorcycles.", "A small group of men are standing around a motorcycle.", "Two men surrounding a blue motorcycle and others", "A few blue motorcycles are parked in a lot."], "instances": [{"category": "car", "bbox": [0.011, 0.177, 0.2, 0.336]}, {"category": "motorcycle", "bbox": [0.032, 0.139, 0.907, 0.982]}, {"category": "motorcycle", "bbox": [0.0, 0.239, 0.148, 0.613]}, {"category": "motorcycle", "bbox": [0.0, 0.301, 0.106, 0.45]}, {"category": "person", "bbox": [0.775, 0.043, 0.93, 0.463]}, {"category": "person", "bbox": [0.717, 0.116, 0.81, 0.509]}, {"category": "person", "bbox": [0.296, 0.008, 0.472, 0.325]}, {"category": "person", "bbox": [0.115, 0.19, 0.164, 0.269]}, {"category": "truck", "bbox": [0.63, 0.227, 0.731, 0.335]}]} +{"id": "000000140289", "image": "000000140289.jpg", "captions": ["Two born bears walking though a forest surrounded by trees.", "Two full grown brown bears in a habitat.", "Two bears are roaming around in the woods.", "Two bears around logs in front of a large rock.", "Two big bears wandering through the woods together"], "instances": [{"category": "bear", "bbox": [0.131, 0.269, 0.375, 0.65]}, {"category": "bear", "bbox": [0.568, 0.193, 0.809, 0.827]}]} +{"id": "000000460149", "image": "000000460149.jpg", "captions": ["A clock hosted on a pole on a pavement next to a building", "Street clock 
on quiet street with trees and bicycles.", "A tall clock stands on an empty sidewalk.", "A pole that has a clock on the top of it.", "a clock on a short tower and potted plants along the sidewalk"], "instances": [{"category": "potted plant", "bbox": [0.14, 0.71, 0.338, 0.856]}, {"category": "bicycle", "bbox": [0.65, 0.671, 0.766, 0.733]}, {"category": "car", "bbox": [0.38, 0.608, 0.488, 0.656]}, {"category": "clock", "bbox": [0.468, 0.048, 0.699, 0.216]}, {"category": "bicycle", "bbox": [0.669, 0.662, 0.719, 0.67]}, {"category": "car", "bbox": [0.786, 0.625, 0.86, 0.668]}, {"category": "potted plant", "bbox": [0.756, 0.637, 0.819, 0.682]}, {"category": "person", "bbox": [0.942, 0.615, 0.954, 0.641]}, {"category": "bicycle", "bbox": [0.648, 0.68, 0.714, 0.747]}, {"category": "car", "bbox": [0.837, 0.619, 0.88, 0.659]}, {"category": "potted plant", "bbox": [0.017, 0.197, 0.443, 0.686]}]} +{"id": "000000225738", "image": "000000225738.jpg", "captions": ["A group of giraffes standing up in their natural habitat.", "A group of giraffe standing in a grass field.", "A group of four giraffes near the same tree.", "there are four giraffes standing among some dry brush", "A herd of giraffe standing on top of a grass field."], "instances": [{"category": "giraffe", "bbox": [0.648, 0.231, 0.855, 0.915]}, {"category": "giraffe", "bbox": [0.33, 0.136, 0.521, 0.93]}, {"category": "giraffe", "bbox": [0.406, 0.261, 0.515, 1.0]}, {"category": "giraffe", "bbox": [0.347, 0.194, 0.583, 0.922]}]} +{"id": "000000109532", "image": "000000109532.jpg", "captions": ["An adorable husky dog sleeping in a dog bed next to a fan.", "A dark room with a dog sleeping on a dog bed.", "A dog is sleeping in a dark room.", "a large dog laying in a dog bed in a living room", "A dog sleeping on a dog bed in a room."], "instances": [{"category": "dog", "bbox": [0.426, 0.661, 0.582, 0.925]}, {"category": "potted plant", "bbox": [0.603, 0.261, 0.781, 0.613]}, {"category": "chair", "bbox": [0.67, 0.515, 0.899, 0.801]}, {"category": "potted plant", "bbox": [0.671, 0.439, 0.763, 0.612]}, {"category": "chair", "bbox": [0.852, 0.653, 0.948, 0.818]}]} +{"id": "000000118606", "image": "000000118606.jpg", "captions": ["A man riding skis on top of a rail.", "a person riding a pair of skis on a rail", "Someone on a pair of skis on a ramp at the ski slope", "Person with skis in the air above the snow.", "A man performing a trick on a rail while skiing."], "instances": [{"category": "person", "bbox": [0.444, 0.361, 0.537, 0.633]}, {"category": "skis", "bbox": [0.413, 0.554, 0.539, 0.664]}, {"category": "person", "bbox": [0.342, 0.585, 0.352, 0.62]}, {"category": "person", "bbox": [0.439, 0.565, 0.446, 0.58]}]} +{"id": "000000385873", "image": "000000385873.jpg", "captions": ["Three pizzas sitting next to each other in boxes.", "Two smaller pizzas sit beside a large pizza topped with tortilla chips.", "Three pizzas inside their delivery boxes, one with two side orders of sauce.", "One pizza is larger than two other pizzas.", "Three pizza boxes with pizza in them are open."], "instances": [{"category": "bowl", "bbox": [0.634, 0.624, 0.736, 0.752]}, {"category": "pizza", "bbox": [0.3, 0.382, 0.615, 0.733]}, {"category": "pizza", "bbox": [0.0, 0.4, 0.287, 0.745]}, {"category": "pizza", "bbox": [0.624, 0.279, 0.999, 0.753]}, {"category": "bowl", "bbox": [0.94, 0.247, 1.0, 0.352]}]} +{"id": "000000092109", "image": "000000092109.jpg", "captions": ["A giraffe's head is pictured in this clear, colorful photo.", "A giraffe is standing tall in the middle 
of several bright green trees", "The face of a giraffe looking to the side.", "the close up head shot of a giraffe", "this is a giraffe chewing on some leaves"], "instances": [{"category": "giraffe", "bbox": [0.236, 0.122, 1.0, 0.987]}]} +{"id": "000000163076", "image": "000000163076.jpg", "captions": ["There's an outdoor dining area featuring a fountain.", "A table sitting next to a water fountain covered by an umbrella.", "An empty restaurant patio with tables and umbrellas.", "An outdoor restaurant with a fountain at night", "A fountain bubbles in the plaza of an outdoor cafe."], "instances": [{"category": "umbrella", "bbox": [0.064, 0.069, 0.95, 0.844]}, {"category": "chair", "bbox": [0.198, 0.574, 0.355, 0.704]}, {"category": "chair", "bbox": [0.42, 0.571, 0.55, 0.738]}, {"category": "dining table", "bbox": [0.066, 0.741, 0.766, 0.925]}, {"category": "dining table", "bbox": [0.059, 0.584, 0.27, 0.659]}, {"category": "chair", "bbox": [0.432, 0.567, 0.52, 0.624]}, {"category": "chair", "bbox": [0.433, 0.555, 0.504, 0.6]}, {"category": "chair", "bbox": [0.109, 0.673, 0.374, 0.796]}]} +{"id": "000000560371", "image": "000000560371.jpg", "captions": ["Street signs from the corner of 8th ave. and 22 3/4 st.", "A two way street sign with one sign that changes from one name to another.", "A street sign is pointing towards 8th avenue and the other is pointing towards 22 3/4 street in the middle of the forest.", "A street sign standing in front of some trees.", "Peculiar street sign showing intersection of 23 3/4 St and 8th Ave/CTH D."], "instances": []} +{"id": "000000367571", "image": "000000367571.jpg", "captions": ["A couple of different doughnuts in a box", "There are four donuts in a box, and some are cake donuts and a doughnut with nuts and coconut on top.", "A box of glazed doughnuts on a table.", "Three donuts with toppings on them sitting inside a box.", "A box that is filled with different kinds of doughnuts."], "instances": [{"category": "donut", "bbox": [0.412, 0.335, 0.711, 0.681]}, {"category": "donut", "bbox": [0.093, 0.493, 0.486, 0.922]}, {"category": "donut", "bbox": [0.713, 0.423, 0.957, 0.874]}, {"category": "donut", "bbox": [0.13, 0.331, 0.397, 0.55]}]} +{"id": "000000580197", "image": "000000580197.jpg", "captions": ["Two men in bow ties standing next to steel rafter.", "Several men in suits talking together in a room.", "An older man in a tuxedo standing next to a younger man in a tuxedo wearing glasses.", "Two men wearing tuxedos glance at each other.", "Older man in tuxedo sitting next to another younger man in tuxedo."], "instances": [{"category": "tie", "bbox": [0.914, 0.46, 0.984, 0.512]}, {"category": "person", "bbox": [0.297, 0.638, 0.71, 0.989]}, {"category": "person", "bbox": [0.77, 0.177, 1.0, 0.971]}, {"category": "tie", "bbox": [0.281, 0.481, 0.368, 0.519]}, {"category": "person", "bbox": [0.103, 0.204, 0.497, 1.0]}]} +{"id": "000000506095", "image": "000000506095.jpg", "captions": ["A cat is staring at a laptop computer.", "a cat on a desk with a laptop and a mouse", "A cat that is sitting at a desk next to a laptop.", "A kitten sitting on a laptop computer sitting on top of a wooden desk.", "A kitten sits facing an open black laptop."], "instances": [{"category": "cat", "bbox": [0.658, 0.207, 1.0, 0.754]}, {"category": "laptop", "bbox": [0.108, 0.135, 0.766, 0.69]}, {"category": "book", "bbox": [0.836, 0.239, 0.954, 0.273]}, {"category": "book", "bbox": [0.0, 0.556, 0.128, 0.685]}, {"category": "book", "bbox": [0.039, 0.574, 0.257, 0.691]}, {"category": 
"book", "bbox": [0.825, 0.214, 0.962, 0.254]}, {"category": "book", "bbox": [0.892, 0.275, 0.958, 0.308]}, {"category": "book", "bbox": [0.922, 0.318, 0.986, 0.353]}, {"category": "book", "bbox": [0.87, 0.267, 0.951, 0.291]}, {"category": "book", "bbox": [0.949, 0.102, 0.976, 0.114]}, {"category": "book", "bbox": [0.936, 0.161, 0.958, 0.168]}]} +{"id": "000000024996", "image": "000000024996.jpg", "captions": ["A bathroom with a glass door and a sink.", "A blue lined bathroom with an open glass door.", "A nice bathroom with a sink, toilet, and tiled shower.", "A bathroom that is clean and shiny in the day.", "a bathroom with a sink and a mirror and a window"], "instances": [{"category": "toilet", "bbox": [0.842, 0.934, 0.95, 1.0]}, {"category": "sink", "bbox": [0.506, 0.724, 0.683, 0.834]}]} +{"id": "000000457882", "image": "000000457882.jpg", "captions": ["a girl in a bikini and a brown and white dog and a few other people", "A woman with a swimsuit on sitting with a dog.", "A woman is sitting with a dog on her lap.", "A dog sitting next to a woman in her swimsuit.", "WOMAN SITTING WITH HER DOG, AND OTHER WOMEN ARE AROUND"], "instances": [{"category": "dog", "bbox": [0.202, 0.409, 0.54, 0.81]}, {"category": "dog", "bbox": [0.61, 0.428, 0.729, 0.723]}, {"category": "boat", "bbox": [0.003, 0.705, 0.939, 0.974]}, {"category": "person", "bbox": [0.236, 0.001, 0.558, 0.784]}, {"category": "person", "bbox": [0.681, 0.001, 0.957, 0.798]}, {"category": "person", "bbox": [0.849, 0.478, 1.0, 0.946]}, {"category": "person", "bbox": [0.345, 0.187, 0.634, 0.828]}, {"category": "person", "bbox": [0.033, 0.345, 0.109, 0.434]}]} +{"id": "000000081552", "image": "000000081552.jpg", "captions": ["A cat sitting and curled up on a red couch", "A cat laying on a red couch sleeping.", "a tan and black cat curled up asleep on a red velvet seat", "A cat is curled up on a red sofa.", "Cat curled up, sleeping on a red plush couch."], "instances": [{"category": "cat", "bbox": [0.412, 0.237, 0.634, 0.482]}, {"category": "couch", "bbox": [0.003, 0.005, 1.0, 0.99]}]} +{"id": "000000273450", "image": "000000273450.jpg", "captions": ["A person flipping of a parking meter on the side of a road.", "A man holds up his middle finger to a parking meter.", "Person giving the middle finger to a parking meter.", "a black silver white blue red an orange parking meter and a hand flipping it off", "A person is flipping off a parking meter."], "instances": [{"category": "person", "bbox": [0.0, 0.475, 0.565, 0.987]}, {"category": "car", "bbox": [0.0, 0.0, 0.531, 0.734]}, {"category": "parking meter", "bbox": [0.0, 0.0, 1.0, 0.987]}]} +{"id": "000000203879", "image": "000000203879.jpg", "captions": ["There is a small cellphone displayed between a set of ear buds and two paper weights.", "a cell phone lays next to some diamonds", "a close up of a cell phone on a table near earbuds", "A cell phone sits on a table next to some jewels.", "A cell phone, ear buds, and two jewels laying near each other."], "instances": [{"category": "cell phone", "bbox": [0.322, 0.233, 0.62, 0.79]}]} +{"id": "000000346875", "image": "000000346875.jpg", "captions": ["two zebras in a field near one another", "A couple of zebra walking across a green field.", "Two zebra are walking near a gravel road.", "two zebras in a green field of grass and some trees", "A zebra follows another zebra through a park."], "instances": [{"category": "zebra", "bbox": [0.591, 0.263, 0.82, 0.466]}, {"category": "zebra", "bbox": [0.293, 0.243, 0.561, 0.45]}]} +{"id": 
"000000525439", "image": "000000525439.jpg", "captions": ["a man stands in front of a flipped skate boarder", "A man standing next to a skateboard that is laying on the ground wheels pointed up.", "Skateboard laying upside down on cement with someone standing next to it.", "A boy in camo shorts stands before an overturned skateboard.", "a person with an upside down skate board"], "instances": [{"category": "person", "bbox": [0.307, 0.001, 0.63, 0.739]}, {"category": "skateboard", "bbox": [0.0, 0.592, 0.626, 0.969]}]} +{"id": "000000304749", "image": "000000304749.jpg", "captions": ["The woman is taking a picture in the bathroom mirror.", "A picture of a woman in a mirror.", "A woman's midsection reflected in a round mirror.", "A circular mirror reflecting a woman's stomach in turquoise shirt.", "A selfie taken of a person from the neck down."], "instances": [{"category": "person", "bbox": [0.092, 0.001, 0.646, 0.496]}]} +{"id": "000000323760", "image": "000000323760.jpg", "captions": ["A toilet is shown in a bare room.", "A ugly bathroom with a section of the wall missing.", "A toilet in a stripped bathroom with studs, bricks and plaster showing", "A bathroom with no walls and a toilet bowl", "A white toilet next to some torn out walls."], "instances": [{"category": "toilet", "bbox": [0.167, 0.585, 0.714, 1.0]}]} +{"id": "000000066144", "image": "000000066144.jpg", "captions": ["A woman standing in front of window next to a bug and a stop sign.", "A car parked on the street next to a tree and stop sign.", "A lone Volkswagen is parked by a stop sign.", "A window view of a small car near a street stop sign.", "An old VW Bug standing at a stop sign."], "instances": [{"category": "stop sign", "bbox": [0.501, 0.328, 0.569, 0.428]}, {"category": "car", "bbox": [0.242, 0.488, 0.56, 0.726]}, {"category": "car", "bbox": [0.279, 0.325, 0.33, 0.363]}, {"category": "car", "bbox": [0.153, 0.333, 0.29, 0.405]}, {"category": "car", "bbox": [0.11, 0.339, 0.177, 0.373]}, {"category": "car", "bbox": [0.0, 0.654, 0.082, 0.826]}, {"category": "car", "bbox": [0.0, 0.322, 0.064, 0.364]}, {"category": "car", "bbox": [0.451, 0.333, 0.51, 0.392]}]} +{"id": "000000455772", "image": "000000455772.jpg", "captions": ["A person in a field jumping to catch a Frisbee.", "A guy jumping to catch a frisbee in mid-air.", "A person that is trying to get a frisbee.", "Nice reach, but the Frisbee flies on, victorious.", "A man playing frisbee in a grassy yard."], "instances": [{"category": "car", "bbox": [0.148, 0.339, 0.201, 0.476]}, {"category": "car", "bbox": [0.376, 0.396, 0.424, 0.476]}, {"category": "person", "bbox": [0.547, 0.122, 0.698, 0.904]}, {"category": "frisbee", "bbox": [0.479, 0.154, 0.555, 0.231]}, {"category": "car", "bbox": [0.001, 0.299, 0.085, 0.394]}]} +{"id": "000000511117", "image": "000000511117.jpg", "captions": ["A couple of kids standing on top of a grass covered field.", "A little boy wearing a baseball uniform stands by a little girl.", "A young boy in a baseball uniform and a young girl are standing in front of a chain link fence.", "A little boy and girl standing on a baseball field. 
The boy has a uniform on.", "A young baseball player is standing next to a young girl."], "instances": [{"category": "person", "bbox": [0.514, 0.178, 0.776, 0.774]}, {"category": "baseball glove", "bbox": [0.468, 0.462, 0.593, 0.609]}, {"category": "person", "bbox": [0.174, 0.051, 0.598, 0.839]}, {"category": "bench", "bbox": [0.558, 0.125, 1.0, 0.315]}]} +{"id": "000000207151", "image": "000000207151.jpg", "captions": ["A vegetarian pizza is half eaten on a pizza holder.", "A couple of pieces of pizza with vegetable slices on them.", "A wooden pan serving tray with a pizza on it.", "A pizza on a cutting board is half gone.", "A Pizza is nearly finished with only three pieces left."], "instances": [{"category": "bottle", "bbox": [0.001, 0.001, 0.121, 0.231]}, {"category": "cup", "bbox": [0.0, 0.002, 0.121, 0.238]}, {"category": "pizza", "bbox": [0.17, 0.472, 0.526, 0.82]}, {"category": "pizza", "bbox": [0.398, 0.106, 0.962, 0.679]}, {"category": "dining table", "bbox": [0.0, 0.001, 1.0, 0.988]}]} +{"id": "000000431165", "image": "000000431165.jpg", "captions": ["A baby elephant standing in front of a brick building.", "An elephant is standing near a dirt mount in an exhibit.", "Grey elephant standing next to a large sand dune in a pen.", "An elephant standing alone inside of an enclosure.", "The baby elephant is alone in the pen."], "instances": [{"category": "elephant", "bbox": [0.303, 0.399, 0.638, 0.78]}]} +{"id": "000000378545", "image": "000000378545.jpg", "captions": ["A pole that has a clock on top of it.", "A clock mounted on an outdoor post with Roman numerals.", "a clock on a pole saying it is 12:45", "An ornamental standing clock is at the foreground of a row of houses.", "A black and gold clock on a pole in front of a building."], "instances": [{"category": "clock", "bbox": [0.216, 0.249, 0.749, 0.658]}]} +{"id": "000000555904", "image": "000000555904.jpg", "captions": ["A man sitting at a bar filled with liquor.", "People sitting a a take near several bottles of wine on shelves.", "Several people are sitting at a table drinking.", "Several people in a bar sitting at a long table.", "People eating in a restaurant near wine bottles."], "instances": [{"category": "dining table", "bbox": [0.123, 0.663, 0.317, 0.811]}, {"category": "person", "bbox": [0.715, 0.239, 1.0, 0.998]}, {"category": "person", "bbox": [0.142, 0.528, 0.281, 0.742]}, {"category": "person", "bbox": [0.529, 0.53, 0.606, 0.69]}, {"category": "person", "bbox": [0.705, 0.518, 0.796, 0.673]}, {"category": "wine glass", "bbox": [0.247, 0.669, 0.27, 0.718]}, {"category": "person", "bbox": [0.281, 0.524, 0.534, 1.0]}, {"category": "bottle", "bbox": [0.168, 0.346, 0.189, 0.425]}, {"category": "bottle", "bbox": [0.379, 0.264, 0.431, 0.433]}, {"category": "bottle", "bbox": [0.252, 0.313, 0.277, 0.429]}, {"category": "bottle", "bbox": [0.294, 0.295, 0.326, 0.43]}, {"category": "bottle", "bbox": [0.589, 0.35, 0.613, 0.444]}, {"category": "bottle", "bbox": [0.433, 0.281, 0.473, 0.437]}, {"category": "bottle", "bbox": [0.478, 0.289, 0.513, 0.44]}, {"category": "wine glass", "bbox": [0.688, 0.615, 0.709, 0.69]}, {"category": "cup", "bbox": [0.589, 0.647, 0.612, 0.693]}, {"category": "person", "bbox": [0.732, 0.356, 0.953, 0.806]}, {"category": "bottle", "bbox": [0.555, 0.337, 0.585, 0.438]}, {"category": "bottle", "bbox": [0.337, 0.29, 0.378, 0.432]}, {"category": "bottle", "bbox": [0.21, 0.333, 0.232, 0.426]}, {"category": "bottle", "bbox": [0.134, 0.36, 0.148, 0.422]}, {"category": "bottle", "bbox": [0.516, 0.312, 0.557, 
0.439]}, {"category": "cup", "bbox": [0.231, 0.718, 0.26, 0.763]}, {"category": "chair", "bbox": [0.517, 0.828, 0.65, 0.999]}, {"category": "chair", "bbox": [0.643, 0.804, 0.738, 0.841]}, {"category": "chair", "bbox": [0.347, 0.908, 0.519, 1.0]}, {"category": "chair", "bbox": [0.64, 0.806, 0.74, 0.998]}, {"category": "cup", "bbox": [0.205, 0.692, 0.232, 0.767]}, {"category": "dining table", "bbox": [0.536, 0.676, 0.743, 0.838]}, {"category": "person", "bbox": [0.002, 0.501, 0.263, 0.987]}, {"category": "bottle", "bbox": [0.531, 0.461, 0.542, 0.526]}, {"category": "bottle", "bbox": [0.237, 0.354, 0.702, 0.629]}]} +{"id": "000000415393", "image": "000000415393.jpg", "captions": ["a man on a skate board looks like he is falling", "A man does a skateboard trick on a skateboard ramp", "Guy falling off a skateboard in a room.", "A man riding a skateboard on top of a table.", "a man skating on part of a ramp with his skateboard"], "instances": [{"category": "person", "bbox": [0.361, 0.016, 0.809, 0.888]}, {"category": "skateboard", "bbox": [0.606, 0.809, 0.889, 0.901]}, {"category": "person", "bbox": [0.479, 0.091, 0.576, 0.386]}, {"category": "person", "bbox": [0.047, 0.441, 0.197, 0.759]}, {"category": "person", "bbox": [0.038, 0.453, 0.076, 0.545]}, {"category": "person", "bbox": [0.249, 0.307, 0.311, 0.591]}]} +{"id": "000000161011", "image": "000000161011.jpg", "captions": ["Three skiers posing for a picture on the slope.", "Three skiers pause for a photo at the top of a mountain.", "Three people standing on a mountain taking a picture as they ski.", "A woman and two men on skis on a snowy hillside surrounded by trees", "Three skiers have stopped to pose for a picture."], "instances": [{"category": "person", "bbox": [0.36, 0.321, 0.509, 0.82]}, {"category": "person", "bbox": [0.179, 0.281, 0.349, 0.795]}, {"category": "person", "bbox": [0.611, 0.292, 0.751, 0.809]}, {"category": "skis", "bbox": [0.595, 0.743, 0.732, 0.961]}, {"category": "skis", "bbox": [0.341, 0.724, 0.621, 0.907]}, {"category": "skis", "bbox": [0.212, 0.705, 0.398, 0.905]}]} +{"id": "000000284296", "image": "000000284296.jpg", "captions": ["Three giraffe's leaning over to get a sip of water.", "an image of a herd of giraffes in the water", "three giraffes banding down to drink water with trees in the background", "Three giraffe drinking from a pond with brush in back.", "Giraffes leaning down to drink at a watering hole"], "instances": [{"category": "giraffe", "bbox": [0.624, 0.387, 0.822, 0.635]}, {"category": "giraffe", "bbox": [0.4, 0.326, 0.561, 0.58]}, {"category": "giraffe", "bbox": [0.152, 0.291, 0.343, 0.551]}]} +{"id": "000000056013", "image": "000000056013.jpg", "captions": ["a number of luggage bags on a cart in a lobby", "Wheeled cart with luggage at lobby of commercial business.", "Trolley used for transporting personal luggage to guests rooms.", "A luggage cart topped with lots of luggage.", "a cart filled with suitcases and bags"], "instances": [{"category": "backpack", "bbox": [0.276, 0.52, 0.456, 0.678]}, {"category": "suitcase", "bbox": [0.41, 0.58, 0.597, 0.827]}, {"category": "suitcase", "bbox": [0.173, 0.645, 0.363, 0.836]}, {"category": "person", "bbox": [0.959, 0.297, 1.0, 0.478]}, {"category": "suitcase", "bbox": [0.526, 0.519, 0.712, 0.706]}, {"category": "person", "bbox": [0.762, 0.253, 0.871, 0.46]}, {"category": "backpack", "bbox": [0.517, 0.514, 0.694, 0.698]}, {"category": "handbag", "bbox": [0.316, 0.181, 0.431, 0.426]}, {"category": "suitcase", "bbox": [0.747, 0.453, 0.858, 0.557]}]} +{"id": 
"000000293505", "image": "000000293505.jpg", "captions": ["A person on a motor bike next to a cow.", "A woman riding a motorcycle down a dirt road.", "there is a woman riding a scooter down a dirt road", "A woman on a moped, two men and animals walking down the road.", "A woman on a motorcycle is next to a man walking a dog along with other people going down a dirt road."], "instances": [{"category": "cow", "bbox": [0.602, 0.472, 0.721, 0.816]}, {"category": "motorcycle", "bbox": [0.402, 0.512, 0.516, 0.788]}, {"category": "person", "bbox": [0.408, 0.4, 0.514, 0.639]}, {"category": "person", "bbox": [0.754, 0.301, 1.0, 1.0]}, {"category": "person", "bbox": [0.705, 0.415, 0.789, 0.714]}, {"category": "cow", "bbox": [0.347, 0.44, 0.373, 0.509]}, {"category": "cow", "bbox": [0.361, 0.436, 0.381, 0.501]}]} +{"id": "000000305873", "image": "000000305873.jpg", "captions": ["A little girl holding a red black dotted umbrella.", "A little girl with rain boots and a rain jacket on and an open umbrella to match her jacket.", "a little girl holding onto a lady bug pattern umbrella", "The child wears a labybug rain coat with a matching umbrella.", "A little girl wearing a ladybug raincoat and green rubber boots holding a ladybug umbrella"], "instances": [{"category": "umbrella", "bbox": [0.246, 0.002, 0.992, 0.415]}, {"category": "person", "bbox": [0.35, 0.132, 0.699, 0.791]}, {"category": "car", "bbox": [0.614, 0.0, 1.0, 0.465]}]} +{"id": "000000034096", "image": "000000034096.jpg", "captions": ["A house being built with lots of wood.", "A big pile of building material is placed on the floor in the wooden structure.", "A partially-built house with wooden studs and staircase in view.", "A house full of wood getting built at the moment.", "The beginning stages of a home still being made."], "instances": [{"category": "bed", "bbox": [0.505, 0.42, 0.721, 0.59]}, {"category": "tv", "bbox": [0.192, 0.441, 0.335, 0.606]}]} +{"id": "000000165257", "image": "000000165257.jpg", "captions": ["A large black counter top sitting next to a sink.", "a clean kitchen counter with a clean sink", "A kitchen with a sink, dishwasher and some boxes on the counter.", "A kitchen with a sink, dishwasher and boxes on the counter.", "a black counter on a wood cabinet in a kitchen", "a new kitchen cabinet with a sink being installed"], "instances": [{"category": "sink", "bbox": [0.513, 0.243, 0.718, 0.314]}]} +{"id": "000000431026", "image": "000000431026.jpg", "captions": ["a street sign on a city street near some tall bushes", "street signs on a metal pole lining a sidewalk lined with shrubbery.", "a large hedge of bushes on a corner near a street sign.", "Two street signs on sidewalk next to bushes and trees.", "Street signs along a well manicured street with large houses."], "instances": []} +{"id": "000000524575", "image": "000000524575.jpg", "captions": ["Three giraffe and a wildebeest in a field.", "A moose and several giraffes are grazing in the field.", "Zebras in the wild with a wildebeest behind them", "Two giraffe and a ox standing in a field eating grass.", "Giraffes and other safari animals graze in a sunlit field."], "instances": [{"category": "cow", "bbox": [0.46, 0.716, 0.643, 0.999]}, {"category": "giraffe", "bbox": [0.285, 0.5, 0.401, 0.826]}, {"category": "giraffe", "bbox": [0.083, 0.554, 0.179, 0.821]}, {"category": "giraffe", "bbox": [0.887, 0.481, 0.968, 0.715]}]} +{"id": "000000326550", "image": "000000326550.jpg", "captions": ["Black and white photograph of a person holding a surfboard by water.", "A 
person with a surfboard standing next to the water.", "A surfer stands on the rocks watching a wave crash.", "A man standing on a beach holding a surfboard.", "a person looking at the waves ready to surf"], "instances": [{"category": "person", "bbox": [0.327, 0.461, 0.492, 0.897]}, {"category": "surfboard", "bbox": [0.282, 0.56, 0.606, 0.741]}, {"category": "person", "bbox": [0.924, 0.352, 0.933, 0.362]}, {"category": "person", "bbox": [0.912, 0.348, 0.919, 0.36]}]} +{"id": "000000018476", "image": "000000018476.jpg", "captions": ["A tie that is sitting on top of a shirt.", "This photograph appears to be looking truly wonderful.", "a uniform complete with shoes laying on a bed", "Suit laid out with a red tie, white shirt and black shoes.", "a white shirt a red tie and some black shoes"], "instances": [{"category": "tie", "bbox": [0.457, 0.09, 0.853, 0.984]}, {"category": "bed", "bbox": [0.005, 0.005, 1.0, 0.379]}]} +{"id": "000000480652", "image": "000000480652.jpg", "captions": ["These suitcases are sitting next to a chair.", "An assortment of luggage bags stacked by a kitchen chair.", "A stack of luggage by a chair and table.", "a table and chair with several pieces of luggage nearby", "A pile of luggage sitting on the floor."], "instances": [{"category": "chair", "bbox": [0.483, 0.192, 1.0, 0.769]}, {"category": "backpack", "bbox": [0.433, 0.429, 0.742, 0.856]}, {"category": "suitcase", "bbox": [0.059, 0.414, 0.453, 0.841]}, {"category": "handbag", "bbox": [0.19, 0.184, 0.779, 0.475]}, {"category": "suitcase", "bbox": [0.175, 0.204, 0.583, 0.462]}]} +{"id": "000000012748", "image": "000000012748.jpg", "captions": ["A man and child next to a horse.", "a little boy touching the nose of a brown horse", "A man holding a baby whose petting a horse.", "a man letting his baby pet a horse", "man holding a baby and petting a horse"], "instances": [{"category": "horse", "bbox": [0.003, 0.079, 0.504, 0.868]}, {"category": "person", "bbox": [0.452, 0.294, 1.0, 0.989]}, {"category": "person", "bbox": [0.46, 0.217, 1.0, 0.988]}]} +{"id": "000000247840", "image": "000000247840.jpg", "captions": ["Large group of people standing outside a restaurant together.", "A dairy queen has people standing outside waiting", "an image of people standing outside and ice cream store", "Several people are lined up outside of a store.", "The front of a Dairy Queen restaurant with people entering the side."], "instances": [{"category": "fire hydrant", "bbox": [0.774, 0.674, 0.83, 0.807]}, {"category": "person", "bbox": [0.741, 0.465, 0.824, 0.755]}, {"category": "person", "bbox": [0.806, 0.471, 0.839, 0.722]}, {"category": "person", "bbox": [0.831, 0.499, 0.866, 0.726]}, {"category": "bench", "bbox": [0.061, 0.69, 0.219, 0.768]}, {"category": "handbag", "bbox": [0.859, 0.558, 0.877, 0.603]}, {"category": "person", "bbox": [0.719, 0.504, 0.75, 0.626]}, {"category": "potted plant", "bbox": [0.7, 0.648, 0.764, 0.743]}, {"category": "handbag", "bbox": [0.827, 0.548, 0.837, 0.577]}, {"category": "sandwich", "bbox": [0.359, 0.618, 0.417, 0.694]}]} +{"id": "000000399452", "image": "000000399452.jpg", "captions": ["a sandwhich sitting on a plate next to a glass of tea, bowl of soup", "a sandwich on a white plate a drink on a brown table", "A sandwich and chips sit on a white plate.", "a large plate of food with a glass of soda by it", "A sandwich sitting on top of a white plate next to a cup of coffee."], "instances": [{"category": "sandwich", "bbox": [0.175, 0.326, 0.605, 0.71]}, {"category": "cup", "bbox": [0.504, 0.024, 
0.687, 0.419]}, {"category": "knife", "bbox": [0.742, 0.283, 0.857, 0.376]}, {"category": "spoon", "bbox": [0.618, 0.46, 0.797, 0.809]}, {"category": "fork", "bbox": [0.684, 0.254, 0.805, 0.395]}, {"category": "bowl", "bbox": [0.782, 0.366, 1.0, 0.62]}, {"category": "chair", "bbox": [0.202, 0.0, 0.671, 0.148]}, {"category": "dining table", "bbox": [0.002, 0.126, 0.996, 0.987]}]} +{"id": "000000515716", "image": "000000515716.jpg", "captions": ["A couple of women standing on either side of a man wearing glasses.", "Two women and a man are holding glasses up at a wine tasting.", "Three young adults holding wine glasses while standing at a bar.", "A group of people sit holding glasses and smiling at a table with several bottles.", "A group of people at a celebration having a taste of wine."], "instances": [{"category": "bottle", "bbox": [0.529, 0.604, 0.637, 0.908]}, {"category": "bottle", "bbox": [0.379, 0.398, 0.481, 0.892]}, {"category": "bottle", "bbox": [0.942, 0.464, 0.988, 0.653]}, {"category": "person", "bbox": [0.0, 0.126, 0.136, 0.811]}, {"category": "person", "bbox": [0.05, 0.093, 0.211, 0.471]}, {"category": "person", "bbox": [0.401, 0.031, 0.678, 0.683]}, {"category": "person", "bbox": [0.617, 0.191, 0.94, 0.858]}, {"category": "person", "bbox": [0.723, 0.098, 0.947, 0.564]}, {"category": "wine glass", "bbox": [0.634, 0.434, 0.697, 0.628]}, {"category": "wine glass", "bbox": [0.285, 0.346, 0.372, 0.558]}, {"category": "wine glass", "bbox": [0.522, 0.422, 0.583, 0.544]}, {"category": "handbag", "bbox": [0.704, 0.601, 1.0, 0.916]}, {"category": "person", "bbox": [0.944, 0.319, 0.999, 0.604]}, {"category": "bottle", "bbox": [0.921, 0.46, 0.953, 0.636]}, {"category": "person", "bbox": [0.116, 0.171, 0.41, 0.829]}]} +{"id": "000000116173", "image": "000000116173.jpg", "captions": ["The boy is on his surfboard in the water riding it.", "a young boy riding a boogie board in the water", "A boy riding surf board in the ocean.", "A young boy is riding a surfboard on a small wave.", "A young boy is surfing in the ocean."], "instances": [{"category": "person", "bbox": [0.485, 0.238, 0.702, 0.821]}, {"category": "person", "bbox": [0.866, 0.223, 0.921, 0.29]}, {"category": "person", "bbox": [0.752, 0.146, 0.775, 0.188]}, {"category": "surfboard", "bbox": [0.239, 0.758, 0.782, 0.846]}, {"category": "surfboard", "bbox": [0.853, 0.277, 0.981, 0.29]}, {"category": "surfboard", "bbox": [0.727, 0.169, 0.801, 0.198]}, {"category": "person", "bbox": [0.637, 0.194, 0.677, 0.261]}]} +{"id": "000000186013", "image": "000000186013.jpg", "captions": ["A beach scene includes many different kites flying in a cloudy sky.", "Kites being flown at the beach at twilight.", "A beach with flags in the ground and kites overhead in the sky.", "A beach with rows of flags in the sand and kites flying overhead.", "A beach filled with kites and wind sails next to the ocean."], "instances": [{"category": "kite", "bbox": [0.174, 0.4, 0.351, 0.483]}, {"category": "kite", "bbox": [0.144, 0.13, 0.273, 0.17]}, {"category": "kite", "bbox": [0.236, 0.269, 0.268, 0.294]}, {"category": "kite", "bbox": [0.464, 0.204, 0.598, 0.271]}, {"category": "kite", "bbox": [0.61, 0.304, 0.659, 0.342]}, {"category": "kite", "bbox": [0.545, 0.435, 0.565, 0.452]}, {"category": "kite", "bbox": [0.027, 0.558, 0.151, 0.59]}, {"category": "kite", "bbox": [0.93, 0.429, 0.973, 0.536]}, {"category": "kite", "bbox": [0.684, 0.36, 0.697, 0.374]}, {"category": "surfboard", "bbox": [0.393, 0.627, 0.446, 0.934]}, {"category": "person", "bbox": [0.959, 0.685, 
0.984, 0.713]}, {"category": "person", "bbox": [0.919, 0.681, 0.94, 0.725]}, {"category": "person", "bbox": [0.8, 0.597, 0.805, 0.61]}, {"category": "person", "bbox": [0.079, 0.928, 0.116, 0.975]}, {"category": "kite", "bbox": [0.743, 0.307, 0.755, 0.319]}, {"category": "kite", "bbox": [0.78, 0.322, 0.795, 0.335]}, {"category": "kite", "bbox": [0.536, 0.526, 0.597, 0.617]}, {"category": "person", "bbox": [0.941, 0.694, 0.961, 0.726]}, {"category": "kite", "bbox": [0.575, 0.446, 0.594, 0.471]}]} +{"id": "000000015029", "image": "000000015029.jpg", "captions": ["A man holding a white frisbee standing on top of a field.", "A man is playing frisbee next to a tent.", "Guy at the park holding a frisbee with people in the back under a tent", "A man is holding a Frisbee standing in the grass.", "Young adult male holding a frisbee at an event."], "instances": [{"category": "frisbee", "bbox": [0.138, 0.359, 0.215, 0.587]}, {"category": "person", "bbox": [0.16, 0.002, 0.726, 0.995]}, {"category": "person", "bbox": [0.81, 0.73, 0.852, 0.825]}, {"category": "person", "bbox": [0.786, 0.749, 0.833, 0.814]}, {"category": "person", "bbox": [0.847, 0.743, 0.89, 0.804]}, {"category": "person", "bbox": [0.614, 0.749, 0.706, 0.936]}]} +{"id": "000000500565", "image": "000000500565.jpg", "captions": ["A woman holding a child wrapped in a towel brushing her teeth.", "A woman is holding a baby who is wrapped in a towel and holding a toothbrush", "A woman holding a little boy who is brushing his teeth.", "A baby with a toothbrush in his mouth while being held by a woman", "a close up of an adult holding a child brushing their teeth"], "instances": [{"category": "toothbrush", "bbox": [0.586, 0.66, 0.754, 0.821]}, {"category": "person", "bbox": [0.002, 0.007, 0.637, 0.991]}, {"category": "person", "bbox": [0.357, 0.196, 0.998, 0.984]}]} +{"id": "000000297323", "image": "000000297323.jpg", "captions": ["Two buses are parked against a curb in front of a building.", "Two automobiles parked on the side of a building.", "two tourist buses parked on street in front of old industrial building", "Two unique city buses stopped at a stop sign.", "Buses parked outside by a building and stop sign."], "instances": [{"category": "bus", "bbox": [0.7, 0.711, 0.92, 0.881]}, {"category": "person", "bbox": [0.936, 0.771, 0.972, 0.833]}, {"category": "stop sign", "bbox": [0.237, 0.666, 0.285, 0.728]}, {"category": "bus", "bbox": [0.334, 0.71, 0.678, 0.935]}, {"category": "truck", "bbox": [0.335, 0.72, 0.683, 0.934]}, {"category": "person", "bbox": [0.34, 0.791, 0.367, 0.834]}]} +{"id": "000000441147", "image": "000000441147.jpg", "captions": ["Two antique suitcases sit stacked one on top of the other.", "Two suitcases are stacked on each other and one is black while the other is brown and yellow.", "a close up of two luggage suit cases stacked on each other", "A stack of antique luggage is displayed with price tags.", "two suitcases made of leather and stacked on top of each other"], "instances": [{"category": "suitcase", "bbox": [0.167, 0.025, 0.989, 0.445]}, {"category": "suitcase", "bbox": [0.002, 0.31, 0.994, 0.996]}]} +{"id": "000000353536", "image": "000000353536.jpg", "captions": ["A table topped with plates and glasses with eating utensils..", "a fork is laying on a small white plate", "dirty dishes on a table, and a bottle of something.", "a table top with some dishes on top of it", "A table full of dirty dishes is pictured in this image."], "instances": [{"category": "dining table", "bbox": [0.0, 0.007, 0.998, 0.988]}, 
{"category": "bottle", "bbox": [0.554, 0.002, 0.768, 0.411]}, {"category": "cup", "bbox": [0.372, 0.011, 0.544, 0.427]}, {"category": "fork", "bbox": [0.442, 0.464, 0.818, 0.572]}, {"category": "fork", "bbox": [0.089, 0.233, 0.272, 0.456]}, {"category": "spoon", "bbox": [0.144, 0.218, 0.326, 0.413]}, {"category": "cup", "bbox": [0.688, 0.056, 0.812, 0.361]}]} +{"id": "000000416256", "image": "000000416256.jpg", "captions": ["A cat laying on the floor next to a keyboard.", "an orange and white cat is laying next to a keyboard and some wires", "A cat is laying next to a computer keyboard.", "a cat laying on a floor next to a keyboard", "A CAT LAYING ON THE FLOOR AMIDST A COMPUTER,SPEAKERS,CORDS"], "instances": [{"category": "cat", "bbox": [0.235, 0.23, 0.737, 0.639]}, {"category": "keyboard", "bbox": [0.243, 0.562, 0.631, 0.836]}, {"category": "keyboard", "bbox": [0.058, 0.33, 0.277, 0.608]}]} +{"id": "000000214367", "image": "000000214367.jpg", "captions": ["Wood shading on the side of a window with brick siding.", "A tree filled with lots of red fruit near a building.", "By the window outside is a apple tree, where the apples are ready to be picked.", "Some very nice looking red fruity by a window,", "A shuttered window has a fruit tree outside it."], "instances": [{"category": "apple", "bbox": [0.214, 0.112, 0.408, 0.266]}, {"category": "apple", "bbox": [0.472, 0.166, 0.618, 0.293]}, {"category": "apple", "bbox": [0.055, 0.592, 0.172, 0.686]}, {"category": "apple", "bbox": [0.126, 0.661, 0.236, 0.739]}, {"category": "apple", "bbox": [0.52, 0.09, 0.609, 0.143]}, {"category": "apple", "bbox": [0.226, 0.354, 0.285, 0.409]}, {"category": "apple", "bbox": [0.0, 0.698, 0.096, 0.771]}, {"category": "apple", "bbox": [0.001, 0.646, 0.042, 0.713]}, {"category": "apple", "bbox": [0.258, 0.719, 0.329, 0.778]}]} +{"id": "000000210299", "image": "000000210299.jpg", "captions": ["A little boy riding his bike and wearing a helmet", "A little boy raveling down a road on a bike, with a yellow helmet on.", "The boy wears a helmet while riding his bicycle.", "a small child wearing a helmet and riding a bike", "A little boy wearing a helmet and riding a bike."], "instances": [{"category": "person", "bbox": [0.198, 0.259, 0.399, 0.679]}, {"category": "bicycle", "bbox": [0.213, 0.383, 0.408, 0.835]}]} +{"id": "000000088218", "image": "000000088218.jpg", "captions": ["Signs proclaim the famous Haight Ashbury intersection and district.", "a pole with street lights, signs and wires attached to it", "A traffic light at the intersection of Haight and Ashbury", "A traffic sign is shown with traffic signs above it.", "The street signs and traffic signal are below wires attached to the pole."], "instances": [{"category": "traffic light", "bbox": [0.443, 0.435, 0.658, 0.721]}]} +{"id": "000000020650", "image": "000000020650.jpg", "captions": ["Burger with broccoli, pickle, and fork on orange plate", "On a plate is kept a burger and a bowl of broccoli and a fork.", "There is half a sandwich on an orange plate with a pickle and a bowl of broccoli", "A A bowl and a sandwich on an orange plate on a table.", "A plate has a sandwich, broccoli, and a pickle."], "instances": [{"category": "sandwich", "bbox": [0.436, 0.155, 0.805, 0.859]}, {"category": "sandwich", "bbox": [0.311, 0.006, 0.748, 0.293]}, {"category": "fork", "bbox": [0.0, 0.665, 0.578, 0.876]}, {"category": "bowl", "bbox": [0.002, 0.263, 0.487, 0.744]}, {"category": "bowl", "bbox": [0.708, 0.003, 0.828, 0.03]}, {"category": "broccoli", "bbox": [0.185, 0.288, 
0.366, 0.546]}, {"category": "broccoli", "bbox": [0.017, 0.344, 0.384, 0.654]}, {"category": "broccoli", "bbox": [0.31, 0.191, 0.466, 0.463]}, {"category": "broccoli", "bbox": [0.104, 0.107, 0.285, 0.342]}, {"category": "broccoli", "bbox": [0.092, 0.276, 0.242, 0.442]}, {"category": "dining table", "bbox": [0.002, 0.0, 0.999, 0.987]}]} +{"id": "000000514915", "image": "000000514915.jpg", "captions": ["A large black dog laying on a kitchen floor.", "A dog is laying down on the floor in the home.", "Black dog laying down on the kitchen floor next to it's bowls and toy", "A black dog with a red collar laying on a tiled floor.", "A black dog that is laying on the floor."], "instances": [{"category": "dog", "bbox": [0.087, 0.276, 0.812, 0.792]}, {"category": "bowl", "bbox": [0.437, 0.09, 0.533, 0.213]}, {"category": "bowl", "bbox": [0.537, 0.035, 0.665, 0.141]}]} +{"id": "000000205183", "image": "000000205183.jpg", "captions": ["A duck walking along a paved road next to a patch of grass.", "A close up of a duck walking on a path.", "a duck walks along a cement patch while looking down", "A white duck out of water, walking on the ground.", "A goose standing in the road, looking at the ground."], "instances": [{"category": "bird", "bbox": [0.291, 0.235, 0.859, 0.889]}]} +{"id": "000000534270", "image": "000000534270.jpg", "captions": ["Man and woman with umbrella hats sitting on top of a bridge.", "A couple equipped with umbrella hats taking a break from walking their dog on a bridge on a rainy day.", "Two people in ridiculous looking umbrella hats.", "two people with umbrella hats near one another", "A couple of people wearing umbrella hats next to the ocean."], "instances": [{"category": "dog", "bbox": [0.456, 0.832, 0.6, 0.983]}, {"category": "person", "bbox": [0.433, 0.464, 0.636, 0.975]}, {"category": "person", "bbox": [0.263, 0.321, 0.459, 0.978]}, {"category": "boat", "bbox": [0.912, 0.4, 0.978, 0.433]}, {"category": "boat", "bbox": [0.211, 0.236, 0.478, 0.304]}, {"category": "boat", "bbox": [0.144, 0.328, 0.189, 0.361]}, {"category": "umbrella", "bbox": [0.443, 0.402, 0.607, 0.473]}, {"category": "umbrella", "bbox": [0.325, 0.311, 0.483, 0.432]}, {"category": "umbrella", "bbox": [0.207, 0.738, 0.284, 0.778]}, {"category": "umbrella", "bbox": [0.489, 0.713, 0.649, 0.83]}]} +{"id": "000000408439", "image": "000000408439.jpg", "captions": ["Cliffs rise on the edge of a placid lake.", "A scenic view of a river with a train on the edge of it in the distance.", "A large lake surrounded by beautiful tree covered mountains.", "a landscape scene with water, mountains and trees", "A train on a waterfront track surrounded by mountains."], "instances": [{"category": "train", "bbox": [0.008, 0.591, 0.562, 0.644]}]} +{"id": "000000474253", "image": "000000474253.jpg", "captions": ["A man riding on the back of a horse through a river.", "A person is riding a horse through water.", "Horse and rider crossing waterway during competitive event.", "A woman riding a horse splashes through a large puddle.", "A young man riding a horse through some water."], "instances": [{"category": "horse", "bbox": [0.385, 0.235, 0.651, 0.814]}, {"category": "person", "bbox": [0.396, 0.06, 0.576, 0.675]}, {"category": "person", "bbox": [0.29, 0.148, 0.355, 0.333]}, {"category": "person", "bbox": [0.129, 0.163, 0.212, 0.349]}, {"category": "person", "bbox": [0.005, 0.014, 0.038, 0.165]}, {"category": "person", "bbox": [0.144, 0.011, 0.193, 0.155]}, {"category": "person", "bbox": [0.089, 0.007, 0.133, 0.162]}]} +{"id": 
"000000098029", "image": "000000098029.jpg", "captions": ["a table with many plates on it with a bread basket", "A table set for four has many foods and fruits on it.", "Several objects displayed on a kitchen table including bread, oranges and plating.", "Several dishes and food items sit on a table.", "An assortment of foods sitting on a round brown table."], "instances": [{"category": "refrigerator", "bbox": [0.013, 0.004, 0.37, 0.317]}, {"category": "bottle", "bbox": [0.467, 0.517, 0.555, 0.638]}, {"category": "bottle", "bbox": [0.602, 0.536, 0.658, 0.609]}, {"category": "chair", "bbox": [0.747, 0.367, 1.0, 0.592]}, {"category": "chair", "bbox": [0.044, 0.368, 0.358, 0.544]}, {"category": "cup", "bbox": [0.296, 0.465, 0.359, 0.54]}, {"category": "cup", "bbox": [0.709, 0.67, 0.782, 0.736]}, {"category": "cup", "bbox": [0.213, 0.684, 0.294, 0.753]}, {"category": "knife", "bbox": [0.787, 0.699, 0.922, 0.797]}, {"category": "knife", "bbox": [0.161, 0.539, 0.265, 0.584]}, {"category": "spoon", "bbox": [0.813, 0.674, 0.922, 0.759]}, {"category": "spoon", "bbox": [0.156, 0.555, 0.233, 0.587]}, {"category": "spoon", "bbox": [0.596, 0.467, 0.613, 0.509]}, {"category": "bowl", "bbox": [0.241, 0.753, 0.505, 0.935]}, {"category": "banana", "bbox": [0.632, 0.138, 0.718, 0.161]}, {"category": "apple", "bbox": [0.701, 0.152, 0.758, 0.191]}, {"category": "orange", "bbox": [0.607, 0.66, 0.692, 0.716]}, {"category": "orange", "bbox": [0.565, 0.636, 0.611, 0.667]}, {"category": "orange", "bbox": [0.526, 0.624, 0.572, 0.652]}, {"category": "orange", "bbox": [0.61, 0.628, 0.656, 0.657]}, {"category": "orange", "bbox": [0.599, 0.649, 0.643, 0.677]}, {"category": "dining table", "bbox": [0.013, 0.439, 0.964, 0.986]}, {"category": "cup", "bbox": [0.612, 0.489, 0.669, 0.548]}, {"category": "knife", "bbox": [0.605, 0.457, 0.638, 0.53]}, {"category": "apple", "bbox": [0.502, 0.137, 0.537, 0.159]}, {"category": "orange", "bbox": [0.54, 0.135, 0.563, 0.151]}, {"category": "orange", "bbox": [0.527, 0.129, 0.554, 0.142]}, {"category": "orange", "bbox": [0.611, 0.155, 0.641, 0.171]}, {"category": "chair", "bbox": [0.0, 0.843, 0.29, 0.989]}, {"category": "cup", "bbox": [0.353, 0.469, 0.411, 0.511]}, {"category": "cup", "bbox": [0.609, 0.716, 0.682, 0.786]}, {"category": "orange", "bbox": [0.638, 0.158, 0.679, 0.177]}, {"category": "cake", "bbox": [0.38, 0.821, 0.481, 0.895]}, {"category": "chair", "bbox": [0.79, 0.747, 1.0, 1.0]}, {"category": "bottle", "bbox": [0.719, 0.55, 0.769, 0.616]}, {"category": "bottle", "bbox": [0.795, 0.546, 0.873, 0.613]}, {"category": "knife", "bbox": [0.17, 0.799, 0.264, 0.88]}, {"category": "cup", "bbox": [0.317, 0.695, 0.391, 0.752]}]} +{"id": "000000294073", "image": "000000294073.jpg", "captions": ["A woman and a man standing between two brown horses.", "A COUPLE WEARING YELLOW DRESS STANDING NEAR TWO HORSES.", "An older couple stands between two horses.", "A man and a woman standing with two horses", "A man and a woman stand in between two horses."], "instances": [{"category": "horse", "bbox": [0.0, 0.052, 0.49, 0.989]}, {"category": "horse", "bbox": [0.632, 0.23, 1.0, 0.989]}, {"category": "person", "bbox": [0.425, 0.326, 0.696, 0.987]}, {"category": "person", "bbox": [0.627, 0.203, 0.828, 0.986]}, {"category": "book", "bbox": [0.525, 0.597, 0.644, 0.833]}]} +{"id": "000000203629", "image": "000000203629.jpg", "captions": ["A man on a cell phone in a public area holding his thumb up.", "A group of people gathered inside of a room.", "A man on his cellphone posing for a picture.", "A 
man giving a thumbs up while on a cell phone.", "The man is giving a thumbs up while on his phone."], "instances": [{"category": "cell phone", "bbox": [0.43, 0.459, 0.449, 0.503]}, {"category": "cup", "bbox": [0.756, 0.838, 0.865, 0.98]}, {"category": "person", "bbox": [0.232, 0.317, 0.603, 0.98]}, {"category": "person", "bbox": [0.602, 0.405, 1.0, 0.999]}, {"category": "person", "bbox": [0.003, 0.339, 0.313, 0.987]}, {"category": "person", "bbox": [0.164, 0.379, 0.258, 0.733]}, {"category": "person", "bbox": [0.564, 0.36, 0.673, 0.645]}, {"category": "person", "bbox": [0.241, 0.379, 0.336, 0.512]}, {"category": "person", "bbox": [0.682, 0.372, 0.736, 0.502]}, {"category": "person", "bbox": [0.654, 0.428, 0.734, 0.536]}, {"category": "person", "bbox": [0.718, 0.368, 0.787, 0.508]}, {"category": "person", "bbox": [0.148, 0.362, 0.205, 0.529]}, {"category": "person", "bbox": [0.001, 0.431, 0.044, 0.564]}, {"category": "cup", "bbox": [0.901, 0.808, 0.995, 0.982]}]} +{"id": "000000119876", "image": "000000119876.jpg", "captions": ["A man dressed loudly is using his cell phone.", "A man talking on the phone while he walks down the street.", "A man with pink hair talking on a cell phone.", "A man in a purple shirt and tie and purple hair.", "a man colored his hair in purple walking on the road"], "instances": [{"category": "bicycle", "bbox": [0.525, 0.222, 0.924, 0.608]}, {"category": "bicycle", "bbox": [0.895, 0.249, 1.0, 0.642]}, {"category": "person", "bbox": [0.0, 0.0, 0.738, 1.0]}, {"category": "tie", "bbox": [0.319, 0.255, 0.423, 0.638]}, {"category": "cell phone", "bbox": [0.411, 0.13, 0.426, 0.161]}, {"category": "handbag", "bbox": [0.369, 0.205, 0.575, 0.839]}]} +{"id": "000000164255", "image": "000000164255.jpg", "captions": ["An umbrella that is standing in the sand.", "An umbrella is stuck in the sand on the beach.", "a colorful striped umbrella on the beach near the ocean", "A colorful umbrella is set up at the beach.", "The colorful umbrella is sitting by the beach,"], "instances": [{"category": "umbrella", "bbox": [0.0, 0.101, 0.567, 0.575]}]} +{"id": "000000192817", "image": "000000192817.jpg", "captions": ["A view from a window high up in the sky.", "A bunch of mountains seen from a plane window.", "The window from a plane overlooking the ground.", "The view of a mountain area from an airplane window.", "An aerial view of mountains and lakes from an airplane window."], "instances": []} +{"id": "000000258285", "image": "000000258285.jpg", "captions": ["Two large passenger jets flying over a beach filled with birds.", "A plane is flying over a bird filed lake", "Two airplanes are in the sky over blue water.", "An airplane landing over an airplane on the ground.", "A photo of two plans with water and birds surrounding it , one plane in the air one one the ground."], "instances": [{"category": "bird", "bbox": [0.507, 0.941, 0.536, 0.973]}, {"category": "bird", "bbox": [0.304, 0.933, 0.315, 0.95]}, {"category": "bird", "bbox": [0.129, 0.885, 0.143, 0.912]}, {"category": "bird", "bbox": [0.158, 0.851, 0.165, 0.87]}, {"category": "bird", "bbox": [0.404, 0.839, 0.429, 0.864]}, {"category": "bird", "bbox": [0.498, 0.833, 0.513, 0.861]}, {"category": "airplane", "bbox": [0.276, 0.085, 0.825, 0.316]}, {"category": "airplane", "bbox": [0.478, 0.252, 0.983, 0.495]}, {"category": "bird", "bbox": [0.552, 0.828, 0.564, 0.844]}, {"category": "bird", "bbox": [0.789, 0.812, 0.798, 0.836]}, {"category": "bird", "bbox": [0.927, 0.82, 0.936, 0.838]}, {"category": "bird", "bbox": [0.65, 0.828, 0.664, 
0.849]}, {"category": "bird", "bbox": [0.752, 0.81, 0.763, 0.83]}, {"category": "bird", "bbox": [0.841, 0.817, 0.852, 0.828]}, {"category": "bird", "bbox": [0.292, 0.849, 0.311, 0.868]}, {"category": "bird", "bbox": [0.005, 0.727, 0.981, 0.998]}]} +{"id": "000000506483", "image": "000000506483.jpg", "captions": ["An art installation is placed by a street.", "People sit near a display of large artworks including an oversize bench and painted feline heads.", "Looking down on a giant rocking bench and large animal heads.", "An over sized wooden bench next to two massive animal art sculptures.", "artistic sculptures and images on a city street"], "instances": [{"category": "car", "bbox": [0.656, 0.939, 0.933, 1.0]}, {"category": "person", "bbox": [0.08, 0.664, 0.147, 0.805]}, {"category": "person", "bbox": [0.154, 0.646, 0.217, 0.821]}, {"category": "bench", "bbox": [0.316, 0.124, 0.951, 0.635]}, {"category": "backpack", "bbox": [0.062, 0.701, 0.097, 0.769]}, {"category": "person", "bbox": [0.0, 0.132, 0.031, 0.197]}]} +{"id": "000000502168", "image": "000000502168.jpg", "captions": ["a fleet of naval ships in the ocean", "A group of men on aircraft carrier with other boats in the distance.", "A large ship floating in the ocean next to other ships.", "Several men on a boat looking over the side.", "The men wear hardhats as they work on the aircraft carrier."], "instances": [{"category": "boat", "bbox": [0.634, 0.292, 1.0, 0.982]}, {"category": "person", "bbox": [0.675, 0.507, 0.736, 0.731]}, {"category": "person", "bbox": [0.684, 0.737, 0.817, 1.0]}, {"category": "person", "bbox": [0.803, 0.691, 0.883, 0.932]}, {"category": "person", "bbox": [0.741, 0.56, 0.798, 0.767]}, {"category": "person", "bbox": [0.924, 0.269, 0.951, 0.367]}, {"category": "boat", "bbox": [0.079, 0.171, 0.172, 0.231]}, {"category": "boat", "bbox": [0.863, 0.131, 0.961, 0.239]}, {"category": "boat", "bbox": [0.435, 0.288, 0.46, 0.313]}, {"category": "boat", "bbox": [0.591, 0.186, 0.605, 0.222]}, {"category": "person", "bbox": [0.451, 0.289, 0.455, 0.296]}, {"category": "person", "bbox": [0.446, 0.29, 0.451, 0.296]}, {"category": "person", "bbox": [0.872, 0.627, 0.957, 0.966]}, {"category": "person", "bbox": [0.44, 0.288, 0.446, 0.3]}]} +{"id": "000000319432", "image": "000000319432.jpg", "captions": ["Man holding two shirts with luggage and window", "A man holding clothes on a hanger with a suitcase in front of him.", "A man show a red and a white clothing hangers.", "A man holding his garment bags in both hands", "A man holding up some clothes in some hanger bags."], "instances": [{"category": "person", "bbox": [0.0, 0.092, 0.776, 0.852]}, {"category": "suitcase", "bbox": [0.153, 0.798, 0.587, 1.0]}]} +{"id": "000000131019", "image": "000000131019.jpg", "captions": ["Two zebras and two monkeys walking on the grass.", "Two giraffes and another animal are on green grass.", "A baboon and two zebras grazing on the savannah.", "A baboon and its baby eat by two zebras in the grass", "Monkey standing behind two zebras as they graze."], "instances": [{"category": "zebra", "bbox": [0.367, 0.258, 0.834, 0.646]}, {"category": "zebra", "bbox": [0.161, 0.13, 0.396, 0.375]}, {"category": "bird", "bbox": [0.309, 0.138, 0.34, 0.163]}]} diff --git a/llava/eval/table/model.jsonl b/llava/eval/table/model.jsonl new file mode 100644 index 0000000..61b6de1 --- /dev/null +++ b/llava/eval/table/model.jsonl @@ -0,0 +1,5 @@ +{"model_id": "vicuna-13b:20230322-clean-lang", "model_name": "vicuna-13b", "model_version": "20230322-clean-lang", 
"model_metadata": "vicuna-13b-20230322-clean-lang"} +{"model_id": "alpaca-13b:v1", "model_name": "alpaca-13b", "model_version": "v1", "model_metadata": "alpaca-13b"} +{"model_id": "llama-13b:v1", "model_name": "llama-13b", "model_version": "v1", "model_metadata": "hf-llama-13b"} +{"model_id": "bard:20230327", "model_name": "bard", "model_version": "20230327", "model_metadata": "Google Bard 20230327"} +{"model_id": "gpt-3.5-turbo:20230327", "model_name": "gpt-3.5-turbo", "model_version": "20230327", "model_metadata": "OpenAI ChatGPT gpt-3.5-turbo Chat Completion"} diff --git a/llava/eval/table/prompt.jsonl b/llava/eval/table/prompt.jsonl new file mode 100644 index 0000000..a46845a --- /dev/null +++ b/llava/eval/table/prompt.jsonl @@ -0,0 +1,4 @@ +{"prompt_id": 1, "system_prompt": "You are a helpful and precise assistant for checking the quality of the answer.", "prompt_template": "[Question]\n{question}\n\n[Assistant 1]\n{answer_1}\n\n[End of Assistant 1]\n\n[Assistant 2]\n{answer_2}\n\n[End of Assistant 2]\n\n[System]\n{prompt}\n\n", "defaults": {"prompt": "We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above.\nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space.\nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."}, "description": "Prompt for general questions"} +{"prompt_id": 2, "system_prompt": "You are a helpful and precise assistant for checking the quality of the answer.", "prompt_template": "[Question]\n{question}\n\n[Assistant 1]\n{answer_1}\n\n[End of Assistant 1]\n\n[Assistant 2]\n{answer_2}\n\n[End of Assistant 2]\n\n[System]\n{prompt}\n\n", "defaults": {"prompt": "Your task is to evaluate the coding abilities of the above two assistants. They have been asked to implement a program to solve a given problem. Please review their code submissions, paying close attention to their problem-solving approach, code structure, readability, and the inclusion of helpful comments.\n\nPlease ensure that the assistants' submissions:\n\n1. Correctly implement the given problem statement.\n2. Contain accurate and efficient code.\n3. Include clear and concise comments that explain the code's logic and functionality.\n4. Adhere to proper coding standards and best practices.\n\nOnce you have carefully reviewed both submissions, provide detailed feedback on their strengths and weaknesses, along with any suggestions for improvement. You should first output a single line containing two scores on the scale of 1-10 (1: no code/no sense; 10: perfect) for Assistant 1 and 2, respectively. 
Then give extra comments starting from the next line."}, "description": "Prompt for coding questions"} +{"prompt_id": 3, "system_prompt": "You are a helpful and precise assistant for checking the quality of the answer.", "prompt_template": "[Question]\n{question}\n\n[Assistant 1]\n{answer_1}\n\n[End of Assistant 1]\n\n[Assistant 2]\n{answer_2}\n\n[End of Assistant 2]\n\n[System]\n{prompt}\n\n", "defaults": {"prompt": "We would like to request your feedback on the mathematical proficiency of two AI assistants regarding the given user question.\nFirstly, please solve the problem independently, without referring to the answers provided by Assistant 1 and Assistant 2.\nAfterward, please examine the problem-solving process of Assistant 1 and Assistant 2 step-by-step to ensure their correctness, identifying any incorrect steps if present. Your evaluation should take into account not only the answer but also the problem-solving steps.\nFinally, please output a Python tuple containing two numerical scores for Assistant 1 and Assistant 2, ranging from 1 to 10, respectively. If applicable, explain the reasons for any variations in their scores and determine which assistant performed better."}, "description": "Prompt for math questions"} +{"prompt_id": 4, "system_prompt": "You are a helpful and precise assistant for checking the quality of the answer.", "prompt_template": "[Visual Context]\n{context}\n[Question]\n{question}\n\n[Assistant 1]\n{answer_1}\n\n[End of Assistant 1]\n\n[Assistant 2]\n{answer_2}\n\n[End of Assistant 2]\n\n[System]\n{prompt}\n\n", "defaults": {"prompt": "We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above. The user asks the question on observing an image. For your reference, the visual content in the image is represented with five descriptive sentences describing the same image and the bounding box coordinates of each object in the scene. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2) with floating numbers ranging from 0 to 1. These values correspond to the top left x, top left y, bottom right x, and bottom right y. \nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. 
The two scores are separated by a space.\nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."}, "description": "Prompt for visual questions"} diff --git a/llava/eval/table/question.jsonl b/llava/eval/table/question.jsonl new file mode 100644 index 0000000..c946b8f --- /dev/null +++ b/llava/eval/table/question.jsonl @@ -0,0 +1,80 @@ +{"question_id": 1, "text": "How can I improve my time management skills?", "category": "generic"} +{"question_id": 2, "text": "What are the most effective ways to deal with stress?", "category": "generic"} +{"question_id": 3, "text": "What are the main differences between Python and JavaScript programming languages?", "category": "generic"} +{"question_id": 4, "text": "How can I increase my productivity while working from home?", "category": "generic"} +{"question_id": 5, "text": "Can you explain the basics of quantum computing?", "category": "generic"} +{"question_id": 6, "text": "What are the differences between plant-based and animal-based protein sources?", "category": "generic"} +{"question_id": 7, "text": "How can I develop my critical thinking skills?", "category": "generic"} +{"question_id": 8, "text": "What are the major challenges faced by the education sector today?", "category": "generic"} +{"question_id": 9, "text": "What are the primary factors that influence consumer behavior?", "category": "generic"} +{"question_id": 10, "text": "What are the most effective strategies for conflict resolution in the workplace?", "category": "generic"} +{"question_id": 11, "text": "What are some potential implications of using a single-use plastic bottle versus a reusable bottle on both the environment and human health?", "category": "knowledge"} +{"question_id": 12, "text": "What factors would you consider when designing an inclusive and accessible public transportation system?", "category": "knowledge"} +{"question_id": 13, "text": "How can governments utilize fiscal and monetary policies to combat economic recessions?", "category": "knowledge"} +{"question_id": 14, "text": "How do language and cultural barriers affect the way people communicate and form relationships in multicultural societies?", "category": "knowledge"} +{"question_id": 15, "text": "Describe a scenario where artificial intelligence could be used to improve the quality and efficiency of healthcare delivery.", "category": "knowledge"} +{"question_id": 16, "text": "Explain the process of gene editing using CRISPR-Cas9 technology, and discuss its potential applications and ethical implications.", "category": "knowledge"} +{"question_id": 17, "text": "How do vaccinations work to protect individuals and communities from infectious diseases, and what is herd immunity?", "category": "knowledge"} +{"question_id": 18, "text": "How do social media platforms influence the way people consume and share news, and what are the potential implications for the spread of misinformation?", "category": "knowledge"} +{"question_id": 19, "text": "How do cultural, social, and economic factors influence people's food choices, and how can this knowledge be used to promote healthier diets?", "category": "knowledge"} +{"question_id": 20, "text": "Explain the process of natural selection and how it contributes to the evolution and adaptation of species.", "category": "knowledge"} +{"question_id": 21, "text": "How would you introduce yourself as a medieval knight at 
a royal banquet?", "category": "roleplay"} +{"question_id": 22, "text": "As a pirate captain, what would you say to your crew to motivate them to search for hidden treasure?", "category": "roleplay"} +{"question_id": 23, "text": "If you were a Shakespearean character, how would you declare your love for someone in a soliloquy?", "category": "roleplay"} +{"question_id": 24, "text": "As a superhero, how would you explain your origin story to a curious child?", "category": "roleplay"} +{"question_id": 25, "text": "Imagine you are a time traveler from the year 3000. What technological advancements would you tell people about?", "category": "roleplay"} +{"question_id": 26, "text": "As a sports commentator, describe the winning play in the final seconds of a championship game.", "category": "roleplay"} +{"question_id": 27, "text": "Pretend to be a world-famous chef. How would you describe your signature dish to a panel of judges?", "category": "roleplay"} +{"question_id": 28, "text": "You are a mountain climber reaching the summit of Mount Everest. Describe your emotions and the view from the top.", "category": "roleplay"} +{"question_id": 29, "text": "As a space colonist on Mars, describe your daily life and the challenges you face living on another planet.", "category": "roleplay"} +{"question_id": 30, "text": "Pretend to be a character in a post-apocalyptic world. Describe how you survive and the allies you encounter.", "category": "roleplay"} +{"question_id": 31, "text": "How can you determine if a restaurant is popular among locals or mainly attracts tourists, and why might this information be useful?", "category": "common-sense"} +{"question_id": 32, "text": "What are some subtle clues that suggest someone is pretending to understand a topic or conversation when they are actually confused or uninformed?", "category": "common-sense"} +{"question_id": 33, "text": "Why might someone choose to use a paper map or ask for directions instead of relying on a GPS device or smartphone app?", "category": "common-sense"} +{"question_id": 34, "text": "How can you determine if a person is genuinely interested in a conversation or simply being polite?", "category": "common-sense"} +{"question_id": 35, "text": "Why might someone prefer to shop at a small, locally-owned business instead of a large chain store, even if the prices are higher?", "category": "common-sense"} +{"question_id": 36, "text": "How can you assess the credibility of a source of information, such as a news article or blog post, without relying solely on the reputation of the author or publisher?", "category": "common-sense"} +{"question_id": 37, "text": "Why do some people enjoy the sensation of being scared, such as by watching horror movies or going on roller coasters, while others avoid these experiences?", "category": "common-sense"} +{"question_id": 38, "text": "How can observing the behavior of other people in a social situation provide clues about cultural norms and expectations?", "category": "common-sense"} +{"question_id": 39, "text": "Do we have a moral obligation to explore space, or should we focus on solving Earth's problems first?", "category": "common-sense"} +{"question_id": 40, "text": "In a world where automation is becoming increasingly prevalent, is it more important to prioritize job creation or technological progress?", "category": "common-sense"} +{"question_id": 41, "text": "How many times does the average human blink in a lifetime? Try to explain your answer. 
Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"} +{"question_id": 42, "text": "How many atoms are in a grain of salt? Try to explain your answer. Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"} +{"question_id": 43, "text": "How many lightning strikes occur on Earth each day? Try to explain your answer. Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"} +{"question_id": 44, "text": "How many balloons would it take to lift a house like in the movie \"Up\"? Try to explain your answer. Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"} +{"question_id": 45, "text": "How many text messages are sent globally in a minute? Try to explain your answer. Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"} +{"question_id": 46, "text": "How many words are spoken daily on Earth? Try to explain your answer. Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"} +{"question_id": 47, "text": "How many snowflakes fall during a typical winter? Try to explain your answer. Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"} +{"question_id": 48, "text": "How many pages are in all the books ever written? Try to explain your answer. Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"} +{"question_id": 49, "text": "How many times has the Earth orbited the Sun since the beginning of life? Try to explain your answer. Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"} +{"question_id": 50, "text": "How many songs have been recorded throughout history? Try to explain your answer. 
Your explanation should take the reader through your reasoning step-by-step.", "category": "fermi"} +{"question_id": 51, "text": "What if the Internet had been invented during the Renaissance period?", "category": "counterfactual"} +{"question_id": 52, "text": "What if the Aztecs had successfully repelled the Spanish conquistadors?", "category": "counterfactual"} +{"question_id": 53, "text": "What if the Black Death had not occurred in the 14th century?", "category": "counterfactual"} +{"question_id": 54, "text": "What if Isaac Newton had focused on biology instead of physics?", "category": "counterfactual"} +{"question_id": 55, "text": "What if the Beatles had never formed as a band?", "category": "counterfactual"} +{"question_id": 56, "text": "What if Alan Turing had not cracked the Enigma code during World War II?", "category": "counterfactual"} +{"question_id": 57, "text": "What if the Suez Canal had never been constructed?", "category": "counterfactual"} +{"question_id": 58, "text": "What if the Maya civilization had never mysteriously collapsed?", "category": "counterfactual"} +{"question_id": 59, "text": "What if Christopher Columbus had not discovered the Americas?", "category": "counterfactual"} +{"question_id": 60, "text": "What if Vincent van Gogh had been a successful artist during his lifetime?", "category": "counterfactual"} +{"question_id": 61, "text": "Develop a C++ program that reads a text file line by line and counts the number of occurrences of a specific word in the file.", "category": "coding"} +{"question_id": 62, "text": "Implement a Python function to find the longest common subsequence of two input strings using dynamic programming.", "category": "coding"} +{"question_id": 63, "text": "Implement a regular expression in Python to validate an email address.", "category": "coding"} +{"question_id": 64, "text": "Write a program to find the nth Fibonacci number using dynamic programming.", "category": "coding"} +{"question_id": 65, "text": "Implement a binary search algorithm to find a specific element in a sorted array.", "category": "coding"} +{"question_id": 66, "text": "Implement a queue data structure using two stacks in Python.", "category": "coding"} +{"question_id": 67, "text": "Implement a program to find the common elements in two arrays without using any extra data structures.", "category": "coding"} +{"question_id": 68, "text": "Given that f(x) = 5x^3 - 2x + 3, find the value of f(2).", "category": "math"} +{"question_id": 69, "text": "Solve for x in the equation 3x + 10 = 5(x - 2).", "category": "math"} +{"question_id": 70, "text": "If the endpoints of a line segment are (2, -2) and (10, 4), what is the length of the segment?", "category": "math"} +{"question_id": 71, "text": "Can you help me write a formal email to a potential business partner proposing a joint venture?", "category": "writing"} +{"question_id": 72, "text": "Can you help me write a resignation letter to my current employer, while leaving on good terms and expressing gratitude for the opportunities provided?", "category": "writing"} +{"question_id": 73, "text": "Use an appropriate format to structure a formal letter of recommendation for a student applying to a prestigious graduate program in computer science.", "category": "writing"} +{"question_id": 74, "text": "Write a compelling product launch announcement email to inform our customers of our new software solution.", "category": "writing"} +{"question_id": 75, "text": "Draft an apology email to a customer who experienced a delay in 
their order, and provide reassurance that the issue has been resolved.", "category": "writing"} +{"question_id": 76, "text": "Write a script for a YouTube video exploring the history and cultural significance of jazz.", "category": "writing"} +{"question_id": 77, "text": "Compose an engaging travel blog post about a recent trip to Hawaii, highlighting cultural experiences and must-see attractions.", "category": "writing"} +{"question_id": 78, "text": "Write a captivating movie review for a recently released science fiction film, discussing its plot, characters, and special effects.", "category": "writing"} +{"question_id": 79, "text": "Structure a podcast script for an episode discussing the influence of streaming platforms on the music industry.", "category": "writing"} +{"question_id": 80, "text": "Write a symphony concert review, discussing the orchestra's performance and overall audience experience.", "category": "writing"} diff --git a/llava/eval/table/reviewer.jsonl b/llava/eval/table/reviewer.jsonl new file mode 100644 index 0000000..e96127c --- /dev/null +++ b/llava/eval/table/reviewer.jsonl @@ -0,0 +1,4 @@ +{"reviewer_id": "gpt-4-0328-default", "prompt_id": 1, "metadata": {"temperature": 0.2, "max_tokens": 1024}, "description": "GPT-4 for general questions"} +{"reviewer_id": "gpt-4-0328-coding", "prompt_id": 2, "metadata": {"temperature": 0.2, "max_tokens": 1024}, "description": "GPT-4 for coding questions"} +{"reviewer_id": "gpt-4-0328-math", "prompt_id": 3, "metadata": {"temperature": 0.2, "max_tokens": 1024}, "description": "GPT-4 for math questions"} +{"reviewer_id": "gpt-4-0417-visual", "prompt_id": 4, "metadata": {"temperature": 0.2, "max_tokens": 1024}, "description": "GPT-4 for math questions"} diff --git a/llava/eval/table/rule.json b/llava/eval/table/rule.json new file mode 100644 index 0000000..26c7f4e --- /dev/null +++ b/llava/eval/table/rule.json @@ -0,0 +1,11 @@ +{ + "coding": {"role": "Assistant", "prompt": "Your task is to evaluate the coding abilities of the above two assistants. They have been asked to implement a program to solve a given problem. Please review their code submissions, paying close attention to their problem-solving approach, code structure, readability, and the inclusion of helpful comments.\n\nPlease ensure that the assistants' submissions:\n\n1. Correctly implement the given problem statement.\n2. Contain accurate and efficient code.\n3. Include clear and concise comments that explain the code's logic and functionality.\n4. Adhere to proper coding standards and best practices.\n\nOnce you have carefully reviewed both submissions, provide detailed feedback on their strengths and weaknesses, along with any suggestions for improvement. You should first output a single line containing two scores on the scale of 1-10 (1: no code/no sense; 10: perfect) for Assistant 1 and 2, respectively. Then give extra comments starting from the next line."}, + "math": {"role": "Assistant", "prompt": "We would like to request your feedback on the mathematical proficiency of two AI assistants regarding the given user question.\nFirstly, please solve the problem independently, without referring to the answers provided by Assistant 1 and Assistant 2.\nAfterward, please examine the problem-solving process of Assistant 1 and Assistant 2 step-by-step to ensure their correctness, identifying any incorrect steps if present. 
Your evaluation should take into account not only the answer but also the problem-solving steps.\nFinally, please output a Python tuple containing two numerical scores for Assistant 1 and Assistant 2, ranging from 1 to 10, respectively. If applicable, explain the reasons for any variations in their scores and determine which assistant performed better."}, + "default": {"role": "Assistant", "prompt": "We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above.\nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space.\nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."}, + "conv": {"role": "Assistant", "prompt": "We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above. The user asks the question on observing an image. For your reference, the visual content in the image is represented with five descriptive sentences describing the same image and the bounding box coordinates of each object in the scene. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2) with floating numbers ranging from 0 to 1. These values correspond to the top left x, top left y, bottom right x, and bottom right y. \nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space.\nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."}, + "detail": {"role": "Assistant", "prompt": "We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above. The user asks the question on observing an image. For your reference, the visual content in the image is represented with five descriptive sentences describing the same image and the bounding box coordinates of each object in the scene. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2) with floating numbers ranging from 0 to 1. These values correspond to the top left x, top left y, bottom right x, and bottom right y. \nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. 
The two scores are separated by a space.\nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."}, + "complex": {"role": "Assistant", "prompt": "We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above. The user asks the question on observing an image. For your reference, the visual content in the image is represented with five descriptive sentences describing the same image and the bounding box coordinates of each object in the scene. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2) with floating numbers ranging from 0 to 1. These values correspond to the top left x, top left y, bottom right x, and bottom right y. \nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space.\nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."}, + "llava_bench_conv": {"role": "Assistant", "prompt": "We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above. The user asks the question on observing an image. For your reference, the visual content in the image is represented with a few sentences describing the image. \nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space.\nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."}, + "llava_bench_detail": {"role": "Assistant", "prompt": "We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above. The user asks the question on observing an image. For your reference, the visual content in the image is represented with a few sentences describing the image. \nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space.\nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."}, + "llava_bench_complex": {"role": "Assistant", "prompt": "We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above. 
The user asks the question on observing an image. For your reference, the visual content in the image is represented with a few sentences describing the image. \nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space.\nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."} +} \ No newline at end of file diff --git a/llava/eval/video/eval_benchmark_1_correctness.py b/llava/eval/video/eval_benchmark_1_correctness.py new file mode 100644 index 0000000..324be0b --- /dev/null +++ b/llava/eval/video/eval_benchmark_1_correctness.py @@ -0,0 +1,191 @@ +import openai +import os +import argparse +import json +import ast +from multiprocessing.pool import Pool + + +def parse_args(): + parser = argparse.ArgumentParser(description="question-answer-generation-using-gpt-3") + parser.add_argument("--pred_path", required=True, help="The path to file containing prediction.") + parser.add_argument("--output_dir", required=True, help="The path to save annotation json files.") + parser.add_argument("--output_json", required=True, help="The path to save annotation final combined json file.") + parser.add_argument("--api_key", required=True, help="OpenAI API key.") + parser.add_argument("--api_base", default="", type=str, help="OpenAI API base.") + parser.add_argument("--num_tasks", required=True, type=int, help="Number of splits.") + args = parser.parse_args() + return args + + +def annotate(prediction_set, caption_files, output_dir, args): + """ + Evaluates question and answer pairs using GPT-3 + Returns a score for correctness. + """ + # Set the OpenAI API key. + openai.api_key = args.api_key + if args.api_base is not None: + openai.api_base = args.api_base + for file in caption_files: + key = file[:-5] # Strip file extension + qa_set = prediction_set[key] + question = qa_set['q'] + answer = qa_set['a'] + pred = qa_set['pred'] + try: + # Compute the correctness score + completion = openai.ChatCompletion.create( + model="gpt-3.5-turbo", + messages=[ + { + "role": "system", + "content": + "You are an intelligent chatbot designed for evaluating the factual accuracy of generative outputs for video-based question-answer pairs. " + "Your task is to compare the predicted answer with the correct answer and determine if they are factually consistent. Here's how you can accomplish the task:" + "------" + "##INSTRUCTIONS: " + "- Focus on the factual consistency between the predicted answer and the correct answer. The predicted answer should not contain any misinterpretations or misinformation.\n" + "- The predicted answer must be factually accurate and align with the video content.\n" + "- Consider synonyms or paraphrases as valid matches.\n" + "- Evaluate the factual accuracy of the prediction compared to the answer." 
+ }, + { + "role": "user", + "content": + "Please evaluate the following video-based question-answer pair:\n\n" + f"Question: {question}\n" + f"Correct Answer: {answer}\n" + f"Predicted Answer: {pred}\n\n" + "Provide your evaluation only as a factual accuracy score where the factual accuracy score is an integer value between 0 and 5, with 5 indicating the highest level of factual consistency. " + "Please generate the response in the form of a Python dictionary string with keys 'score', where its value is the factual accuracy score in INTEGER, not STRING." + "DO NOT PROVIDE ANY OTHER OUTPUT TEXT OR EXPLANATION. Only provide the Python dictionary string. " + "For example, your response should look like this: {''score': 4.8}." + } + ] + ) + # Convert response to a Python dictionary. + response_message = completion["choices"][0]["message"]["content"] + response_dict = ast.literal_eval(response_message) + result_qa_pair = [response_dict, qa_set] + + # Save the question-answer pairs to a json file. + with open(f"{output_dir}/{key}.json", "w") as f: + json.dump(result_qa_pair, f) + + except Exception as e: + print(f"Error processing file '{key}': {e}") + + +def main(): + """ + Main function to control the flow of the program. + """ + # Parse arguments. + args = parse_args() + + file = open(args.pred_path) + pred_contents = json.load(file) + + # Dictionary to store the count of occurrences for each video_id + video_id_counts = {} + new_pred_contents = [] + + # Iterate through each sample in pred_contents + for sample in pred_contents: + video_id = sample['video_name'] + if video_id in video_id_counts: + video_id_counts[video_id] += 1 + else: + video_id_counts[video_id] = 0 + + # Create a new sample with the modified key + new_sample = sample + new_sample['video_name'] = f"{video_id}_{video_id_counts[video_id]}" + new_pred_contents.append(new_sample) + + # Generating list of id's and corresponding files + id_list = [x['video_name'] for x in new_pred_contents] + caption_files = [f"{id}.json" for id in id_list] + + output_dir = args.output_dir + # Generate output directory if not exists. + if not os.path.exists(output_dir): + os.makedirs(output_dir) + + # Preparing dictionary of question-answer sets + prediction_set = {} + for sample in new_pred_contents: + id = sample['video_name'] + question = sample['Q'] + answer = sample['A'] + pred = sample['pred'] + qa_set = {"q": question, "a": answer, "pred": pred} + prediction_set[id] = qa_set + + # Set the OpenAI API key. + openai.api_key = args.api_key + num_tasks = args.num_tasks + + # While loop to ensure that all captions are processed. + while True: + try: + # Files that have not been processed yet. + completed_files = os.listdir(output_dir) + print(f"completed_files: {len(completed_files)}") + + # Files that have not been processed yet. + incomplete_files = [f for f in caption_files if f not in completed_files] + print(f"incomplete_files: {len(incomplete_files)}") + + # Break the loop when there are no incomplete files + if len(incomplete_files) == 0: + break + if len(incomplete_files) <= num_tasks: + num_tasks = 1 + + # Split tasks into parts. + part_len = len(incomplete_files) // num_tasks + all_parts = [incomplete_files[i:i + part_len] for i in range(0, len(incomplete_files), part_len)] + task_args = [(prediction_set, part, args.output_dir, args) for part in all_parts] + + # Use a pool of workers to process the files in parallel. 
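+ # Note (editorial comment, describing the code as written): annotate() catches
+ # per-file exceptions without writing an output JSON, so any file that fails in a
+ # worker is re-listed as incomplete and retried on the next pass of the enclosing
+ # while-loop.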
+ with Pool() as pool: + pool.starmap(annotate, task_args) + + except Exception as e: + print(f"Error: {e}") + + # Combine all the processed files into one + combined_contents = {} + json_path = args.output_json + + # Iterate through json files + for file_name in os.listdir(output_dir): + if file_name.endswith(".json"): + file_path = os.path.join(output_dir, file_name) + with open(file_path, "r") as json_file: + content = json.load(json_file) + combined_contents[file_name[:-5]] = content + + # Write combined content to a json file + with open(json_path, "w") as json_file: + json.dump(combined_contents, json_file) + print("All evaluation completed!") + + # Calculate average score + score_sum = 0 + count = 0 + for key, result in combined_contents.items(): + count += 1 + score_match = result[0]['score'] + score = int(score_match) + score_sum += score + average_score = score_sum / count + + print("Average score for correctness:", average_score) + + +if __name__ == "__main__": + main() + diff --git a/llava/eval/video/eval_benchmark_2_detailed_orientation.py b/llava/eval/video/eval_benchmark_2_detailed_orientation.py new file mode 100644 index 0000000..7a852ff --- /dev/null +++ b/llava/eval/video/eval_benchmark_2_detailed_orientation.py @@ -0,0 +1,191 @@ +import openai +import os +import argparse +import json +import ast +from multiprocessing.pool import Pool + + +def parse_args(): + parser = argparse.ArgumentParser(description="question-answer-generation-using-gpt-3") + parser.add_argument("--pred_path", required=True, help="The path to file containing prediction.") + parser.add_argument("--output_dir", required=True, help="The path to save annotation json files.") + parser.add_argument("--output_json", required=True, help="The path to save annotation final combined json file.") + parser.add_argument("--api_key", required=True, help="OpenAI API key.") + parser.add_argument("--api_base", default="", type=str, help="OpenAI API base.") + parser.add_argument("--num_tasks", required=True, type=int, help="Number of splits.") + args = parser.parse_args() + return args + + +def annotate(prediction_set, caption_files, output_dir, args): + """ + Evaluates question and answer pairs using GPT-3 and + returns a score for detailed orientation. + """ + # Set the OpenAI API key. + openai.api_key = args.api_key + if args.api_base is not None: + openai.api_base = args.api_base + for file in caption_files: + key = file[:-5] # Strip file extension + qa_set = prediction_set[key] + question = qa_set['q'] + answer = qa_set['a'] + pred = qa_set['pred'] + try: + # Compute the detailed-orientation score + completion = openai.ChatCompletion.create( + model="gpt-3.5-turbo", + messages=[ + { + "role": "system", + "content": + "You are an intelligent chatbot designed for evaluating the detail orientation of generative outputs for video-based question-answer pairs. " + "Your task is to compare the predicted answer with the correct answer and determine its level of detail, considering both completeness and specificity. Here's how you can accomplish the task:" + "------" + "##INSTRUCTIONS: " + "- Check if the predicted answer covers all major points from the video. The response should not leave out any key aspects.\n" + "- Evaluate whether the predicted answer includes specific details rather than just generic points. 
It should provide comprehensive information that is tied to specific elements of the video.\n" + "- Consider synonyms or paraphrases as valid matches.\n" + "- Provide a single evaluation score that reflects the level of detail orientation of the prediction, considering both completeness and specificity." + }, + { + "role": "user", + "content": + "Please evaluate the following video-based question-answer pair:\n\n" + f"Question: {question}\n" + f"Correct Answer: {answer}\n" + f"Predicted Answer: {pred}\n\n" + "Provide your evaluation only as a detail orientation score where the detail orientation score is an integer value between 0 and 5, with 5 indicating the highest level of detail orientation. " + "Please generate the response in the form of a Python dictionary string with keys 'score', where its value is the detail orientation score in INTEGER, not STRING." + "DO NOT PROVIDE ANY OTHER OUTPUT TEXT OR EXPLANATION. Only provide the Python dictionary string. " + "For example, your response should look like this: {''score': 4.8}." + } + ] + ) + # Convert response to a Python dictionary. + response_message = completion["choices"][0]["message"]["content"] + response_dict = ast.literal_eval(response_message) + result_qa_pair = [response_dict, qa_set] + + # Save the question-answer pairs to a json file. + with open(f"{output_dir}/{key}.json", "w") as f: + json.dump(result_qa_pair, f) + + except Exception as e: + print(f"Error processing file '{key}': {e}") + + +def main(): + """ + Main function to control the flow of the program. + """ + # Parse arguments. + args = parse_args() + + file = open(args.pred_path) + pred_contents = json.load(file) + + # Dictionary to store the count of occurrences for each video_id + video_id_counts = {} + new_pred_contents = [] + + # Iterate through each sample in pred_contents + for sample in pred_contents: + video_id = sample['video_name'] + if video_id in video_id_counts: + video_id_counts[video_id] += 1 + else: + video_id_counts[video_id] = 0 + + # Create a new sample with the modified key + new_sample = sample + new_sample['video_name'] = f"{video_id}_{video_id_counts[video_id]}" + new_pred_contents.append(new_sample) + + # Generating list of id's and corresponding files + id_list = [x['video_name'] for x in new_pred_contents] + caption_files = [f"{id}.json" for id in id_list] + + output_dir = args.output_dir + # Generate output directory if not exists. + if not os.path.exists(output_dir): + os.makedirs(output_dir) + + # Preparing dictionary of question-answer sets + prediction_set = {} + for sample in new_pred_contents: + id = sample['video_name'] + question = sample['Q'] + answer = sample['A'] + pred = sample['pred'] + qa_set = {"q": question, "a": answer, "pred": pred} + prediction_set[id] = qa_set + + # Set the OpenAI API key. + openai.api_key = args.api_key + num_tasks = args.num_tasks + + # While loop to ensure that all captions are processed. + while True: + try: + # Files that have not been processed yet. + completed_files = os.listdir(output_dir) + print(f"completed_files: {len(completed_files)}") + + # Files that have not been processed yet. + incomplete_files = [f for f in caption_files if f not in completed_files] + print(f"incomplete_files: {len(incomplete_files)}") + + # Break the loop when there are no incomplete files + if len(incomplete_files) == 0: + break + if len(incomplete_files) <= num_tasks: + num_tasks = 1 + + # Split tasks into parts. 
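+ # Note (editorial comment, describing the code as written): the integer division
+ # below can leave a remainder, so the list comprehension may yield more chunks than
+ # num_tasks; the leftover files simply land in additional, shorter parts.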
+ part_len = len(incomplete_files) // num_tasks + all_parts = [incomplete_files[i:i + part_len] for i in range(0, len(incomplete_files), part_len)] + task_args = [(prediction_set, part, args.output_dir, args) for part in all_parts] + + # Use a pool of workers to process the files in parallel. + with Pool() as pool: + pool.starmap(annotate, task_args) + + except Exception as e: + print(f"Error: {e}") + + # Combine all the processed files into one + combined_contents = {} + json_path = args.output_json + + # Iterate through json files + for file_name in os.listdir(output_dir): + if file_name.endswith(".json"): + file_path = os.path.join(output_dir, file_name) + with open(file_path, "r") as json_file: + content = json.load(json_file) + combined_contents[file_name[:-5]] = content + + # Write combined content to a json file + with open(json_path, "w") as json_file: + json.dump(combined_contents, json_file) + print("All evaluation completed!") + + # Calculate average score + score_sum = 0 + count = 0 + for key, result in combined_contents.items(): + count += 1 + score_match = result[0]['score'] + score = int(score_match) + score_sum += score + average_score = score_sum / count + + print("Average score for detailed orientation:", average_score) + + +if __name__ == "__main__": + main() + diff --git a/llava/eval/video/eval_benchmark_3_context.py b/llava/eval/video/eval_benchmark_3_context.py new file mode 100644 index 0000000..bb8d260 --- /dev/null +++ b/llava/eval/video/eval_benchmark_3_context.py @@ -0,0 +1,191 @@ +import openai +import os +import argparse +import json +import ast +from multiprocessing.pool import Pool + + +def parse_args(): + parser = argparse.ArgumentParser(description="question-answer-generation-using-gpt-3") + parser.add_argument("--pred_path", required=True, help="The path to file containing prediction.") + parser.add_argument("--output_dir", required=True, help="The path to save annotation json files.") + parser.add_argument("--output_json", required=True, help="The path to save annotation final combined json file.") + parser.add_argument("--api_key", required=True, help="OpenAI API key.") + parser.add_argument("--api_base", default="", type=str, help="OpenAI API base.") + parser.add_argument("--num_tasks", required=True, type=int, help="Number of splits.") + args = parser.parse_args() + return args + + +def annotate(prediction_set, caption_files, output_dir, args): + """ + Evaluates question and answer pairs using GPT-3 and + returns a score for contextual understanding. + """ + # Set the OpenAI API key. + openai.api_key = args.api_key + if args.api_base is not None: + openai.api_base = args.api_base + for file in caption_files: + key = file[:-5] # Strip file extension + qa_set = prediction_set[key] + question = qa_set['q'] + answer = qa_set['a'] + pred = qa_set['pred'] + try: + # Compute the contextual understanding score + completion = openai.ChatCompletion.create( + model="gpt-3.5-turbo", + messages=[ + { + "role": "system", + "content": + "You are an intelligent chatbot designed for evaluating the contextual understanding of generative outputs for video-based question-answer pairs. " + "Your task is to compare the predicted answer with the correct answer and determine if the generated response aligns with the overall context of the video content. Here's how you can accomplish the task:" + "------" + "##INSTRUCTIONS: " + "- Evaluate whether the predicted answer aligns with the overall context of the video content. 
It should not provide information that is out of context or misaligned.\n" + "- The predicted answer must capture the main themes and sentiments of the video.\n" + "- Consider synonyms or paraphrases as valid matches.\n" + "- Provide your evaluation of the contextual understanding of the prediction compared to the answer." + }, + { + "role": "user", + "content": + "Please evaluate the following video-based question-answer pair:\n\n" + f"Question: {question}\n" + f"Correct Answer: {answer}\n" + f"Predicted Answer: {pred}\n\n" + "Provide your evaluation only as a contextual understanding score where the contextual understanding score is an integer value between 0 and 5, with 5 indicating the highest level of contextual understanding. " + "Please generate the response in the form of a Python dictionary string with keys 'score', where its value is contextual understanding score in INTEGER, not STRING." + "DO NOT PROVIDE ANY OTHER OUTPUT TEXT OR EXPLANATION. Only provide the Python dictionary string. " + "For example, your response should look like this: {''score': 4.8}." + } + ] + ) + # Convert response to a Python dictionary. + response_message = completion["choices"][0]["message"]["content"] + response_dict = ast.literal_eval(response_message) + result_qa_pair = [response_dict, qa_set] + + # Save the question-answer pairs to a json file. + with open(f"{output_dir}/{key}.json", "w") as f: + json.dump(result_qa_pair, f) + + except Exception as e: + print(f"Error processing file '{key}': {e}") + + +def main(): + """ + Main function to control the flow of the program. + """ + # Parse arguments. + args = parse_args() + + file = open(args.pred_path) + pred_contents = json.load(file) + + # Dictionary to store the count of occurrences for each video_id + video_id_counts = {} + new_pred_contents = [] + + # Iterate through each sample in pred_contents + for sample in pred_contents: + video_id = sample['video_name'] + if video_id in video_id_counts: + video_id_counts[video_id] += 1 + else: + video_id_counts[video_id] = 0 + + # Create a new sample with the modified key + new_sample = sample + new_sample['video_name'] = f"{video_id}_{video_id_counts[video_id]}" + new_pred_contents.append(new_sample) + + # Generating list of id's and corresponding files + id_list = [x['video_name'] for x in new_pred_contents] + caption_files = [f"{id}.json" for id in id_list] + + output_dir = args.output_dir + # Generate output directory if not exists. + if not os.path.exists(output_dir): + os.makedirs(output_dir) + + # Preparing dictionary of question-answer sets + prediction_set = {} + for sample in new_pred_contents: + id = sample['video_name'] + question = sample['Q'] + answer = sample['A'] + pred = sample['pred'] + qa_set = {"q": question, "a": answer, "pred": pred} + prediction_set[id] = qa_set + + # Set the OpenAI API key. + openai.api_key = args.api_key + num_tasks = args.num_tasks + + # While loop to ensure that all captions are processed. + while True: + try: + # Files that have not been processed yet. + completed_files = os.listdir(output_dir) + print(f"completed_files: {len(completed_files)}") + + # Files that have not been processed yet. + incomplete_files = [f for f in caption_files if f not in completed_files] + print(f"incomplete_files: {len(incomplete_files)}") + + # Break the loop when there are no incomplete files + if len(incomplete_files) == 0: + break + if len(incomplete_files) <= num_tasks: + num_tasks = 1 + + # Split tasks into parts. 
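+ # Note (editorial comment, describing the code as written): each task_args entry
+ # built below pairs the shared prediction_set, one chunk of file names, the output
+ # directory, and the parsed args, so every worker process can run annotate()
+ # independently via Pool.starmap.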
+ part_len = len(incomplete_files) // num_tasks + all_parts = [incomplete_files[i:i + part_len] for i in range(0, len(incomplete_files), part_len)] + task_args = [(prediction_set, part, args.output_dir, args) for part in all_parts] + + # Use a pool of workers to process the files in parallel. + with Pool() as pool: + pool.starmap(annotate, task_args) + + except Exception as e: + print(f"Error: {e}") + + # Combine all the processed files into one + combined_contents = {} + json_path = args.output_json + + # Iterate through json files + for file_name in os.listdir(output_dir): + if file_name.endswith(".json"): + file_path = os.path.join(output_dir, file_name) + with open(file_path, "r") as json_file: + content = json.load(json_file) + combined_contents[file_name[:-5]] = content + + # Write combined content to a json file + with open(json_path, "w") as json_file: + json.dump(combined_contents, json_file) + print("All evaluation completed!") + + # Calculate average score + score_sum = 0 + count = 0 + for key, result in combined_contents.items(): + count += 1 + score_match = result[0]['score'] + score = int(score_match) + score_sum += score + average_score = score_sum / count + + print("Average score for contextual understanding:", average_score) + + +if __name__ == "__main__": + main() + diff --git a/llava/eval/video/eval_benchmark_4_temporal.py b/llava/eval/video/eval_benchmark_4_temporal.py new file mode 100644 index 0000000..46d3112 --- /dev/null +++ b/llava/eval/video/eval_benchmark_4_temporal.py @@ -0,0 +1,190 @@ +import openai +import os +import argparse +import json +import ast +from multiprocessing.pool import Pool + + +def parse_args(): + parser = argparse.ArgumentParser(description="question-answer-generation-using-gpt-3") + parser.add_argument("--pred_path", required=True, help="The path to file containing prediction.") + parser.add_argument("--output_dir", required=True, help="The path to save annotation json files.") + parser.add_argument("--output_json", required=True, help="The path to save annotation final combined json file.") + parser.add_argument("--api_key", required=True, help="OpenAI API key.") + parser.add_argument("--api_base", default="", type=str, help="OpenAI API base.") + parser.add_argument("--num_tasks", required=True, type=int, help="Number of splits.") + args = parser.parse_args() + return args + + +def annotate(prediction_set, caption_files, output_dir, args): + """ + Evaluates question and answer pairs using GPT-3 and + returns a score for temporal understanding. + """ + # Set the OpenAI API key. + openai.api_key = args.api_key + if args.api_base is not None: + openai.api_base = args.api_base + for file in caption_files: + key = file[:-5] # Strip file extension + qa_set = prediction_set[key] + question = qa_set['q'] + answer = qa_set['a'] + pred = qa_set['pred'] + try: + # Compute the temporal understanding score + completion = openai.ChatCompletion.create( + model="gpt-3.5-turbo", + messages=[ + { + "role": "system", + "content": + "You are an intelligent chatbot designed for evaluating the temporal understanding of generative outputs for video-based question-answer pairs. " + "Your task is to compare the predicted answer with the correct answer and determine if they correctly reflect the temporal sequence of events in the video content. Here's how you can accomplish the task:" + "------" + "##INSTRUCTIONS: " + "- Focus on the temporal consistency between the predicted answer and the correct answer. 
The predicted answer should correctly reflect the sequence of events or details as they are presented in the video content.\n" + "- Consider synonyms or paraphrases as valid matches, but only if the temporal order is maintained.\n" + "- Evaluate the temporal accuracy of the prediction compared to the answer." + }, + { + "role": "user", + "content": + "Please evaluate the following video-based question-answer pair:\n\n" + f"Question: {question}\n" + f"Correct Answer: {answer}\n" + f"Predicted Answer: {pred}\n\n" + "Provide your evaluation only as a temporal accuracy score where the temporal accuracy score is an integer value between 0 and 5, with 5 indicating the highest level of temporal consistency. " + "Please generate the response in the form of a Python dictionary string with keys 'score', where its value is the temporal accuracy score in INTEGER, not STRING." + "DO NOT PROVIDE ANY OTHER OUTPUT TEXT OR EXPLANATION. Only provide the Python dictionary string. " + "For example, your response should look like this: {''score': 4.8}." + } + ] + ) + # Convert response to a Python dictionary. + response_message = completion["choices"][0]["message"]["content"] + response_dict = ast.literal_eval(response_message) + result_qa_pair = [response_dict, qa_set] + + # Save the question-answer pairs to a json file. + with open(f"{output_dir}/{key}.json", "w") as f: + json.dump(result_qa_pair, f) + + except Exception as e: + print(f"Error processing file '{key}': {e}") + + +def main(): + """ + Main function to control the flow of the program. + """ + # Parse arguments. + args = parse_args() + + file = open(args.pred_path) + pred_contents = json.load(file) + + # Dictionary to store the count of occurrences for each video_id + video_id_counts = {} + new_pred_contents = [] + + # Iterate through each sample in pred_contents + for sample in pred_contents: + video_id = sample['video_name'] + if video_id in video_id_counts: + video_id_counts[video_id] += 1 + else: + video_id_counts[video_id] = 0 + + # Create a new sample with the modified key + new_sample = sample + new_sample['video_name'] = f"{video_id}_{video_id_counts[video_id]}" + new_pred_contents.append(new_sample) + + # Generating list of id's and corresponding files + id_list = [x['video_name'] for x in new_pred_contents] + caption_files = [f"{id}.json" for id in id_list] + + output_dir = args.output_dir + # Generate output directory if not exists. + if not os.path.exists(output_dir): + os.makedirs(output_dir) + + # Preparing dictionary of question-answer sets + prediction_set = {} + for sample in new_pred_contents: + id = sample['video_name'] + question = sample['Q'] + answer = sample['A'] + pred = sample['pred'] + qa_set = {"q": question, "a": answer, "pred": pred} + prediction_set[id] = qa_set + + # Set the OpenAI API key. + openai.api_key = args.api_key + num_tasks = args.num_tasks + + # While loop to ensure that all captions are processed. + while True: + try: + # Files that have not been processed yet. + completed_files = os.listdir(output_dir) + print(f"completed_files: {len(completed_files)}") + + # Files that have not been processed yet. + incomplete_files = [f for f in caption_files if f not in completed_files] + print(f"incomplete_files: {len(incomplete_files)}") + + # Break the loop when there are no incomplete files + if len(incomplete_files) == 0: + break + if len(incomplete_files) <= num_tasks: + num_tasks = 1 + + # Split tasks into parts. 
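+                # num_tasks was clamped to 1 above when only a few files remain, so part_len
+                # is always at least 1 and the slices built below are never empty.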
+ part_len = len(incomplete_files) // num_tasks + all_parts = [incomplete_files[i:i + part_len] for i in range(0, len(incomplete_files), part_len)] + task_args = [(prediction_set, part, args.output_dir, args) for part in all_parts] + + # Use a pool of workers to process the files in parallel. + with Pool() as pool: + pool.starmap(annotate, task_args) + + except Exception as e: + print(f"Error: {e}") + + # Combine all the processed files into one + combined_contents = {} + json_path = args.output_json + + # Iterate through json files + for file_name in os.listdir(output_dir): + if file_name.endswith(".json"): + file_path = os.path.join(output_dir, file_name) + with open(file_path, "r") as json_file: + content = json.load(json_file) + combined_contents[file_name[:-5]] = content + + # Write combined content to a json file + with open(json_path, "w") as json_file: + json.dump(combined_contents, json_file) + print("All evaluation completed!") + + # Calculate average score + score_sum = 0 + count = 0 + for key, result in combined_contents.items(): + count += 1 + score_match = result[0]['score'] + score = int(score_match) + score_sum += score + average_score = score_sum / count + + print("Average score temporal understanding:", average_score) + + +if __name__ == "__main__": + main() + diff --git a/llava/eval/video/eval_benchmark_5_consistency.py b/llava/eval/video/eval_benchmark_5_consistency.py new file mode 100644 index 0000000..86fdc55 --- /dev/null +++ b/llava/eval/video/eval_benchmark_5_consistency.py @@ -0,0 +1,198 @@ +import openai +import os +import argparse +import json +import ast +from multiprocessing.pool import Pool + + +def parse_args(): + parser = argparse.ArgumentParser(description="question-answer-generation-using-gpt-3") + parser.add_argument("--pred_path", required=True, help="The path to file containing prediction.") + parser.add_argument("--output_dir", required=True, help="The path to save annotation json files.") + parser.add_argument("--output_json", required=True, help="The path to save annotation final combined json file.") + parser.add_argument("--api_key", required=True, help="OpenAI API key.") + parser.add_argument("--api_base", default="", type=str, help="OpenAI API base.") + parser.add_argument("--num_tasks", required=True, type=int, help="Number of splits.") + args = parser.parse_args() + return args + + +def annotate(prediction_set, caption_files, output_dir, args): + """ + Evaluates question and answer pairs using GPT-3 and + returns a score for consistency. + """ + # Set the OpenAI API key. + openai.api_key = args.api_key + if args.api_base is not None: + openai.api_base = args.api_base + for file in caption_files: + key = file[:-5] # Strip file extension + qa_set = prediction_set[key] + question1 = qa_set['q1'] + question2 = qa_set['q2'] + answer = qa_set['a'] + pred1 = qa_set['pred1'] + pred2 = qa_set['pred2'] + try: + # Compute the consistency score + completion = openai.ChatCompletion.create( + model="gpt-3.5-turbo", + messages=[ + { + "role": "system", + "content": + "You are an intelligent chatbot designed for evaluating the consistency of generative outputs for similar video-based question-answer pairs. " + "You will be given two very similar questions, a common answer common to both the questions and predicted answers for the two questions ." + "Your task is to compare the predicted answers for two very similar question, with a common correct answer and determine if they are consistent. 
Here's how you can accomplish the task:" + "------" + "##INSTRUCTIONS: " + "- Focus on the consistency between the two predicted answers and the correct answer. Both predicted answers should correspond to the correct answer and to each other, and should not contain any contradictions or significant differences in the conveyed information.\n" + "- Both predicted answers must be consistent with each other and the correct answer, in terms of the information they provide about the video content.\n" + "- Consider synonyms or paraphrases as valid matches, but only if they maintain the consistency in the conveyed information.\n" + "- Evaluate the consistency of the two predicted answers compared to the correct answer." + }, + { + "role": "user", + "content": + "Please evaluate the following video-based question-answer pair:\n\n" + f"Question 1: {question1}\n" + f"Question 2: {question2}\n" + f"Correct Answer: {answer}\n" + f"Predicted Answer to Question 1: {pred1}\n" + f"Predicted Answer to Question 2: {pred2}\n\n" + "Provide your evaluation only as a consistency score where the consistency score is an integer value between 0 and 5, with 5 indicating the highest level of consistency. " + "Please generate the response in the form of a Python dictionary string with keys 'score', where its value is the consistency score in INTEGER, not STRING." + "DO NOT PROVIDE ANY OTHER OUTPUT TEXT OR EXPLANATION. Only provide the Python dictionary string. " + "For example, your response should look like this: {''score': 4.8}." + } + ] + ) + # Convert response to a Python dictionary. + response_message = completion["choices"][0]["message"]["content"] + response_dict = ast.literal_eval(response_message) + result_qa_pair = [response_dict, qa_set] + + # Save the question-answer pairs to a json file. + with open(f"{output_dir}/{key}.json", "w") as f: + json.dump(result_qa_pair, f) + + except Exception as e: + print(f"Error processing file '{key}': {e}") + + +def main(): + """ + Main function to control the flow of the program. + """ + # Parse arguments. + args = parse_args() + + file = open(args.pred_path) + pred_contents = json.load(file) + + # Dictionary to store the count of occurrences for each video_id + video_id_counts = {} + new_pred_contents = [] + + # Iterate through each sample in pred_contents + for sample in pred_contents: + video_id = sample['video_name'] + if video_id in video_id_counts: + video_id_counts[video_id] += 1 + else: + video_id_counts[video_id] = 0 + + # Create a new sample with the modified key + new_sample = sample + new_sample['video_name'] = f"{video_id}_{video_id_counts[video_id]}" + new_pred_contents.append(new_sample) + + # Generating list of id's and corresponding files + id_list = [x['video_name'] for x in new_pred_contents] + caption_files = [f"{id}.json" for id in id_list] + + output_dir = args.output_dir + # Generate output directory if not exists. + if not os.path.exists(output_dir): + os.makedirs(output_dir) + + # Preparing dictionary of question-answer sets + prediction_set = {} + for sample in new_pred_contents: + id = sample['video_name'] + question1 = sample['Q1'] + question2 = sample['Q1'] + answer = sample['A'] + pred1 = sample['pred1'] + pred2 = sample['pred2'] + qa_set = {"q1": question1, "q2": question2, "a": answer, "pred1": pred1, "pred2": pred2} + prediction_set[id] = qa_set + + # Set the OpenAI API key. + openai.api_key = args.api_key + num_tasks = args.num_tasks + + # While loop to ensure that all captions are processed. 
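+    # Every pass re-lists output_dir and only re-submits files without a saved result,
+    # so an interrupted run can be resumed by simply re-running the script.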
+ while True: + try: + # Files that have not been processed yet. + completed_files = os.listdir(output_dir) + print(f"completed_files: {len(completed_files)}") + + # Files that have not been processed yet. + incomplete_files = [f for f in caption_files if f not in completed_files] + print(f"incomplete_files: {len(incomplete_files)}") + + # Break the loop when there are no incomplete files + if len(incomplete_files) == 0: + break + if len(incomplete_files) <= num_tasks: + num_tasks = 1 + + # Split tasks into parts. + part_len = len(incomplete_files) // num_tasks + all_parts = [incomplete_files[i:i + part_len] for i in range(0, len(incomplete_files), part_len)] + task_args = [(prediction_set, part, args.output_dir, args) for part in all_parts] + + # Use a pool of workers to process the files in parallel. + with Pool() as pool: + pool.starmap(annotate, task_args) + + except Exception as e: + print(f"Error: {e}") + + # Combine all the processed files into one + combined_contents = {} + json_path = args.output_json + + # Iterate through json files + for file_name in os.listdir(output_dir): + if file_name.endswith(".json"): + file_path = os.path.join(output_dir, file_name) + with open(file_path, "r") as json_file: + content = json.load(json_file) + combined_contents[file_name[:-5]] = content + + # Write combined content to a json file + with open(json_path, "w") as json_file: + json.dump(combined_contents, json_file) + print("All evaluation completed!") + + # Calculate average score + score_sum = 0 + count = 0 + for key, result in combined_contents.items(): + count += 1 + score_match = result[0]['score'] + score = int(score_match) + score_sum += score + average_score = score_sum / count + + print("Average score for consistency:", average_score) + + +if __name__ == "__main__": + main() + diff --git a/llava/eval/video/eval_video_qa.py b/llava/eval/video/eval_video_qa.py new file mode 100644 index 0000000..61956df --- /dev/null +++ b/llava/eval/video/eval_video_qa.py @@ -0,0 +1,206 @@ +import openai +import os +import argparse +import json +import ast +from multiprocessing.pool import Pool +from tqdm import tqdm + +def parse_args(): + parser = argparse.ArgumentParser(description="question-answer-generation-using-gpt-3") + parser.add_argument("--pred_path", default=r'', help="The path to file containing prediction.") + parser.add_argument("--output_dir", default=r'', help="The path to save annotation json files.") + parser.add_argument("--output_json", default=r'', help="The path to save annotation final combined json file.") + parser.add_argument("--api_key", default="", help="OpenAI API key.") + parser.add_argument("--api_base", default="", type=str, help="OpenAI API base.") + parser.add_argument("--num_tasks", default=1, type=int, help="Number of splits.") + args = parser.parse_args() + return args + + +def annotate(prediction_set, caption_files, output_dir, args): + """ + Evaluates question and answer pairs using GPT-3 + Returns a score for correctness. + """ + # Set the OpenAI API key. 
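+    # NOTE: --api_base defaults to "", so the `is not None` check below always passes;
+    # `if args.api_base:` would avoid overriding the library's default endpoint with an empty string.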
+ openai.api_key = args.api_key + if args.api_base is not None: + openai.api_base = args.api_base + for file in caption_files: + key = file[:-5] # Strip file extension + qa_set = prediction_set[key] + question = qa_set['q'] + answer = qa_set['a'] + pred = qa_set['pred'] + try: + # Compute the correctness score + completion = openai.ChatCompletion.create( + model="gpt-3.5-turbo", + messages=[ + { + "role": "system", + "content": + "You are an intelligent chatbot designed for evaluating the correctness of generative outputs for question-answer pairs. " + "Your task is to compare the predicted answer with the correct answer and determine if they match meaningfully. Here's how you can accomplish the task:" + "------" + "##INSTRUCTIONS: " + "- Focus on the meaningful match between the predicted answer and the correct answer.\n" + "- Consider synonyms or paraphrases as valid matches.\n" + "- Evaluate the correctness of the prediction compared to the answer." + }, + { + "role": "user", + "content": + "Please evaluate the following video-based question-answer pair:\n\n" + f"Question: {question}\n" + f"Correct Answer: {answer}\n" + f"Predicted Answer: {pred}\n\n" + "Provide your evaluation only as a yes/no and score where the score is an integer value between 0 and 5, with 5 indicating the highest meaningful match. " + "Please generate the response in the form of a Python dictionary string with keys 'pred' and 'score', where value of 'pred' is a string of 'yes' or 'no' and value of 'score' is in INTEGER, not STRING." + "DO NOT PROVIDE ANY OTHER OUTPUT TEXT OR EXPLANATION. Only provide the Python dictionary string. " + "For example, your response should look like this: {'pred': 'yes', 'score': 4.8}." + } + ] + ) + # Convert response to a Python dictionary. + response_message = completion["choices"][0]["message"]["content"] + response_dict = ast.literal_eval(response_message) + result_qa_pair = [response_dict, qa_set] + + # Save the question-answer pairs to a json file. + with open(f"{output_dir}/{key}.json", "w") as f: + json.dump(result_qa_pair, f) + + except Exception as e: + print(f"Error processing file '{key}': {e}") + + +def main(): + """ + Main function to control the flow of the program. + """ + # Parse arguments. + args = parse_args() + + file = open(args.pred_path) + new_pred_contents = [eval(i.strip()) for i in file.readlines()] + + ''' + # Dictionary to store the count of occurrences for each video_id + video_id_counts = {} + new_pred_contents = [] + + # Iterate through each sample in pred_contents + for sample in pred_contents: + video_id = sample['video_name'] + if video_id in video_id_counts: + video_id_counts[video_id] += 1 + else: + video_id_counts[video_id] = 0 + + # Create a new sample with the modified key + new_sample = sample + new_sample['video_name'] = f"{video_id}_{video_id_counts[video_id]}" + new_pred_contents.append(new_sample) + ''' + # Generating list of id's and corresponding files + id_list = [x['id'] for x in new_pred_contents] + caption_files = [f"{id}.json" for id in id_list] + + output_dir = args.output_dir + # Generate output directory if not exists. 
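+    # (os.makedirs(output_dir, exist_ok=True) would be an equivalent, race-free form.)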
+ if not os.path.exists(output_dir): + os.makedirs(output_dir) + + # Preparing dictionary of question-answer sets + prediction_set = {} + for sample in new_pred_contents: + id = sample['id'] + question = sample['question'] + answer = sample['answer'] + pred = sample['pred'] + qa_set = {"q": question, "a": answer, "pred": pred} + prediction_set[id] = qa_set + + num_tasks = args.num_tasks + + # While loop to ensure that all captions are processed. + while True: + try: + # Files that have not been processed yet. + completed_files = os.listdir(output_dir) + print(f"completed_files: {len(completed_files)}") + + # Files that have not been processed yet. + incomplete_files = [f for f in caption_files if f not in completed_files] + print(f"incomplete_files: {len(incomplete_files)}") + + # Break the loop when there are no incomplete files + if len(incomplete_files) == 0: + break + if len(incomplete_files) <= num_tasks: + num_tasks = 1 + + # Split tasks into parts. + part_len = len(incomplete_files) // num_tasks + all_parts = [incomplete_files[i:i + part_len] for i in range(0, len(incomplete_files), part_len)] + task_args = [(prediction_set, part, args.output_dir, args) for part in all_parts] + + # Use a pool of workers to process the files in parallel. + with Pool() as pool: + pool.starmap(annotate, task_args) + + except Exception as e: + print(f"Error: {e}") + + # Combine all the processed files into one + combined_contents = {} + json_path = args.output_json + + # Iterate through json files + for file_name in os.listdir(output_dir): + if file_name.endswith(".json"): + file_path = os.path.join(output_dir, file_name) + with open(file_path, "r") as json_file: + content = json.load(json_file) + combined_contents[file_name[:-5]] = content + + # Write combined content to a json file + with open(json_path, "w") as json_file: + json.dump(combined_contents, json_file) + print("All evaluation completed!") + + # Calculate average score and accuracy + score_sum = 0 + count = 0 + yes_count = 0 + no_count = 0 + for key, result in tqdm(combined_contents.items()): + try: + # Computing score + count += 1 + score_match = result[0]['score'] + score = int(score_match) + score_sum += score + + # Computing accuracy + pred = result[0]['pred'] + if "yes" in pred.lower(): + yes_count += 1 + elif "no" in pred.lower(): + no_count += 1 + except: + print(result) + + average_score = score_sum / count + accuracy = yes_count / (yes_count + no_count) + print("Yes count:", yes_count) + print("No count:", no_count) + print("Accuracy:", accuracy) + print("Average score:", average_score) + + +if __name__ == "__main__": + main() + diff --git a/llava/eval/video/run_inference_benchmark_consistency.py b/llava/eval/video/run_inference_benchmark_consistency.py new file mode 100644 index 0000000..72c5030 --- /dev/null +++ b/llava/eval/video/run_inference_benchmark_consistency.py @@ -0,0 +1,96 @@ +import os +import argparse +import json +from tqdm import tqdm +# from video_chatgpt.eval.model_utils import initialize_model, load_video +# from video_chatgpt.inference import video_chatgpt_infer + +from llava.eval.video.run_inference_video_qa import get_model_output +from llava.mm_utils import get_model_name_from_path +from llava.model.builder import load_pretrained_model + + +def parse_args(): + """ + Parse command-line arguments. 
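+    Most arguments are required; --device defaults to 'cuda:0' and --model_base is optional.
+
+    Example invocation (paths are placeholders):
+        python run_inference_benchmark_consistency.py --model_path <ckpt> --cache_dir ./cache \
+            --video_dir <videos> --gt_file consistency_qa.json --output_dir results --output_name preds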
+ """ + parser = argparse.ArgumentParser() + + # Define the command-line arguments + parser.add_argument('--model_path', help='', required=True) + parser.add_argument('--cache_dir', help='', required=True) + parser.add_argument('--video_dir', help='Directory containing video files.', required=True) + parser.add_argument('--gt_file', help='Path to the ground truth file.', required=True) + parser.add_argument('--output_dir', help='Directory to save the model results JSON.', required=True) + parser.add_argument('--output_name', help='Name of the file for storing results JSON.', required=True) + # parser.add_argument("--model-name", type=str, required=True) + parser.add_argument("--device", type=str, required=False, default='cuda:0') + parser.add_argument('--model_base', help='', default=None, type=str, required=False) + parser.add_argument("--model_max_length", type=int, required=False, default=2048) + # parser.add_argument("--conv-mode", type=str, required=False, default='video-chatgpt_v1') + # parser.add_argument("--projection_path", type=str, required=True) + + return parser.parse_args() + + +def run_inference(args): + """ + Run inference on a set of video files using the provided model. + + Args: + args: Command-line arguments. + """ + # Initialize the model + model_name = get_model_name_from_path(args.model_path) + tokenizer, model, processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name) + model = model.to(args.device) + + # Load the ground truth file + with open(args.gt_file) as file: + gt_contents = json.load(file) + + # Create the output directory if it doesn't exist + if not os.path.exists(args.output_dir): + os.makedirs(args.output_dir) + + output_list = [] # List to store the output results + # conv_mode = args.conv_mode + + video_formats = ['.mp4', '.avi', '.mov', '.mkv'] + + # Iterate over each sample in the ground truth file + for sample in tqdm(gt_contents): + video_name = sample['video_name'] + sample_set = sample + question_1 = sample['Q1'] + question_2 = sample['Q2'] + + try: + # Load the video file + for fmt in video_formats: # Added this line + temp_path = os.path.join(args.video_dir, f"{video_name}{fmt}") + if os.path.exists(temp_path): + video_path = temp_path + + # Run inference on the video for the first question and add the output to the list + output_1 = get_model_output(model, processor['video'], tokenizer, video_path, question_1, args) + sample_set['pred1'] = output_1 + + # Run inference on the video for the second question and add the output to the list + output_2 = get_model_output(model, processor['video'], tokenizer, video_path, question_2, args) + sample_set['pred2'] = output_2 + + output_list.append(sample_set) + break + + except Exception as e: + print(f"Error processing video file '{video_name}': {e}") + + # Save the output list to a JSON file + with open(os.path.join(args.output_dir, f"{args.output_name}.json"), 'w') as file: + json.dump(output_list, file) + + +if __name__ == "__main__": + args = parse_args() + run_inference(args) diff --git a/llava/eval/video/run_inference_benchmark_general.py b/llava/eval/video/run_inference_benchmark_general.py new file mode 100644 index 0000000..8eed474 --- /dev/null +++ b/llava/eval/video/run_inference_benchmark_general.py @@ -0,0 +1,87 @@ +import os +import argparse +import json +from tqdm import tqdm +# from video_chatgpt.eval.model_utils import initialize_model, load_video +# from video_chatgpt.inference import video_chatgpt_infer + +from llava.eval.video.run_inference_video_qa 
import get_model_output +from llava.mm_utils import get_model_name_from_path +from llava.model.builder import load_pretrained_model + + +def parse_args(): + """ + Parse command-line arguments. + """ + parser = argparse.ArgumentParser() + + # Define the command-line arguments + parser.add_argument('--model_path', help='', required=True) + parser.add_argument('--cache_dir', help='', required=True) + parser.add_argument('--video_dir', help='Directory containing video files.', required=True) + parser.add_argument('--gt_file', help='Path to the ground truth file.', required=True) + parser.add_argument('--output_dir', help='Directory to save the model results JSON.', required=True) + parser.add_argument('--output_name', help='Name of the file for storing results JSON.', required=True) + # parser.add_argument("--model-name", type=str, required=True) + parser.add_argument("--device", type=str, required=False, default='cuda:0') + parser.add_argument('--model_base', help='', default=None, type=str, required=False) + parser.add_argument("--model_max_length", type=int, required=False, default=2048) + # parser.add_argument("--conv-mode", type=str, required=False, default='video-chatgpt_v1') + # parser.add_argument("--projection_path", type=str, required=True) + + return parser.parse_args() + + +def run_inference(args): + """ + Run inference on a set of video files using the provided model. + + Args: + args: Command-line arguments. + """# Initialize the model + model_name = get_model_name_from_path(args.model_path) + tokenizer, model, processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name) + model = model.to(args.device) + + # Load the ground truth file + with open(args.gt_file) as file: + gt_contents = json.load(file) + + # Create the output directory if it doesn't exist + if not os.path.exists(args.output_dir): + os.makedirs(args.output_dir) + + output_list = [] # List to store the output results + # conv_mode = args.conv_mode + + video_formats = ['.mp4', '.avi', '.mov', '.mkv'] + + # Iterate over each sample in the ground truth file + for sample in tqdm(gt_contents): + video_name = sample['video_name'] + sample_set = sample + question = sample['Q'] + + try: + # Load the video file + for fmt in video_formats: # Added this line + temp_path = os.path.join(args.video_dir, f"{video_name}{fmt}") + if os.path.exists(temp_path): + video_path = temp_path + output = get_model_output(model, processor['video'], tokenizer, video_path, question, args) + sample_set['pred'] = output + output_list.append(sample_set) + break + + except Exception as e: + print(f"Error processing video file '{video_name}': {e}") + + # Save the output list to a JSON file + with open(os.path.join(args.output_dir, f"{args.output_name}.json"), 'w') as file: + json.dump(output_list, file) + + +if __name__ == "__main__": + args = parse_args() + run_inference(args) diff --git a/llava/eval/video/run_inference_video_qa.py b/llava/eval/video/run_inference_video_qa.py new file mode 100644 index 0000000..0adf6af --- /dev/null +++ b/llava/eval/video/run_inference_video_qa.py @@ -0,0 +1,171 @@ +import math +import os +import argparse +import json + +import torch +import transformers +from tqdm import tqdm +from llava.conversation import conv_templates, SeparatorStyle +from llava.constants import DEFAULT_X_START_TOKEN, DEFAULT_X_TOKEN, DEFAULT_X_END_TOKEN, X_TOKEN_INDEX +from llava.mm_utils import get_model_name_from_path, tokenizer_X_token, KeywordsStoppingCriteria +from llava.model.builder import 
load_pretrained_model +from llava.model.language_model.llava_llama import LlavaLlamaForCausalLM +from llava.train.train import smart_tokenizer_and_embedding_resize + + +def split_list(lst, n): + """Split a list into n (roughly) equal-sized chunks""" + chunk_size = math.ceil(len(lst) / n) # integer division + return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)] + + +def get_chunk(lst, n, k): + chunks = split_list(lst, n) + return chunks[k] + +def parse_args(): + """ + Parse command-line arguments. + """ + parser = argparse.ArgumentParser() + + # Define the command-line arguments + parser.add_argument('--model_path', help='', required=True) + parser.add_argument('--cache_dir', help='', required=True) + parser.add_argument('--video_dir', help='Directory containing video files.', required=True) + parser.add_argument('--gt_file_question', help='Path to the ground truth file containing question.', required=True) + parser.add_argument('--gt_file_answers', help='Path to the ground truth file containing answers.', required=True) + parser.add_argument('--output_dir', help='Directory to save the model results JSON.', required=True) + parser.add_argument('--output_name', help='Name of the file for storing results JSON.', required=True) + parser.add_argument("--num_chunks", type=int, default=1) + parser.add_argument("--chunk_idx", type=int, default=0) + parser.add_argument("--device", type=str, required=False, default='cuda:0') + parser.add_argument('--model_base', help='', default=None, type=str, required=False) + parser.add_argument("--model_max_length", type=int, required=False, default=2048) + + return parser.parse_args() + +def get_model_output(model, video_processor, tokenizer, video, qs, args): + if model.config.mm_use_x_start_end: + qs = DEFAULT_X_START_TOKEN['VIDEO'] + DEFAULT_X_TOKEN['VIDEO'] + DEFAULT_X_END_TOKEN['VIDEO'] + '\n' + qs + else: + qs = DEFAULT_X_TOKEN['VIDEO'] + '\n' + qs + + conv_mode = "llava_v1" + args.conv_mode = conv_mode + + conv = conv_templates[args.conv_mode].copy() + conv.append_message(conv.roles[0], qs) + conv.append_message(conv.roles[1], None) + prompt = conv.get_prompt() + + + video_tensor = video_processor.preprocess(video, return_tensors='pt')['pixel_values'][0].half().to(args.device) + # print(video_tensor.shape) + input_ids = tokenizer_X_token(prompt, tokenizer, X_TOKEN_INDEX['VIDEO'], return_tensors='pt').unsqueeze(0).to(args.device) + + stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2 + keywords = [stop_str] + stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids) + ''' + images (X_modalities) [ + [img_feature, img_feature, video_feature, audio_feature], + ['image', 'image', 'video', 'audio'] + ] + ''' + + with torch.inference_mode(): + output_ids = model.generate( + input_ids, + images=[[video_tensor], ['video']], + do_sample=True, + temperature=0.2, + max_new_tokens=1024, + use_cache=True, + stopping_criteria=[stopping_criteria]) + + input_token_len = input_ids.shape[1] + n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item() + if n_diff_input_output > 0: + print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids') + outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0] + outputs = outputs.strip() + if outputs.endswith(stop_str): + outputs = outputs[:-len(stop_str)] + outputs = outputs.strip() + print(outputs) + return outputs + + +def run_inference(args): + """ + Run inference on ActivityNet QA 
DataSet using the Video-ChatGPT model. + + Args: + args: Command-line arguments. + """ + # Initialize the model + model_name = get_model_name_from_path(args.model_path) + tokenizer, model, processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name) + model = model.to(args.device) + + # Load both ground truth file containing questions and answers + # with open(args.gt_file_question) as file: + # gt_questions = json.load(file) + # with open(args.gt_file_answers) as file: + # gt_answers = json.load(file) + + gt_questions = json.load(open(args.gt_file_question, "r")) + gt_questions = get_chunk(gt_questions, args.num_chunks, args.chunk_idx) + gt_answers = json.load(open(args.gt_file_answers, "r")) + # gt_answers = get_chunk(gt_answers, args.num_chunks, args.chunk_idx) + + answers_file = os.path.join(args.output_dir, f"{args.output_name}.json") + os.makedirs(args.output_dir, exist_ok=True) + ans_file = open(answers_file, "w") + + # Create the output directory if it doesn't exist + if not os.path.exists(args.output_dir): + os.makedirs(args.output_dir) + + output_list = [] # List to store the output results + + + video_formats = ['.mp4', '.avi', '.mov', '.mkv'] + + # Iterate over each sample in the ground truth file + index = 0 + for sample in tqdm(gt_questions): + video_name = sample['video_name'] + question = sample['question'] + id = sample['question_id'] + answer = gt_answers[index]['answer'] + index += 1 + + sample_set = {'id': id, 'question': question, 'answer': answer} + + # Load the video file + for fmt in tqdm(video_formats): # Added this line + temp_path = os.path.join(args.video_dir, f"{video_name}{fmt}") + if os.path.exists(temp_path): + video_path = temp_path + # try: + # Run inference on the video and add the output to the list + output = get_model_output(model, processor['video'], tokenizer, video_path, question, args) + sample_set['pred'] = output + output_list.append(sample_set) + # except Exception as e: + # print(f"Error processing video file '{video_name}': {e}") + ans_file.write(json.dumps(sample_set) + "\n") + break + + ans_file.close() + # Save the output list to a JSON file + # with open(os.path.join(args.output_dir, f"{args.output_name}.json"), 'w') as file: + # json.dump(output_list, file) + + +if __name__ == "__main__": + args = parse_args() + run_inference(args) diff --git a/llava/eval/video/run_inference_video_qa_act.py b/llava/eval/video/run_inference_video_qa_act.py new file mode 100644 index 0000000..61eb1ba --- /dev/null +++ b/llava/eval/video/run_inference_video_qa_act.py @@ -0,0 +1,171 @@ +import math +import os +import argparse +import json + +import torch +import transformers +from tqdm import tqdm +from llava.conversation import conv_templates, SeparatorStyle +from llava.constants import DEFAULT_X_START_TOKEN, DEFAULT_X_TOKEN, DEFAULT_X_END_TOKEN, X_TOKEN_INDEX +from llava.mm_utils import get_model_name_from_path, tokenizer_X_token, KeywordsStoppingCriteria +from llava.model.builder import load_pretrained_model +from llava.model.language_model.llava_llama import LlavaLlamaForCausalLM +from llava.train.train import smart_tokenizer_and_embedding_resize + + +def split_list(lst, n): + """Split a list into n (roughly) equal-sized chunks""" + chunk_size = math.ceil(len(lst) / n) # integer division + return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)] + + +def get_chunk(lst, n, k): + chunks = split_list(lst, n) + return chunks[k] + +def parse_args(): + """ + Parse command-line arguments. 
+ """ + parser = argparse.ArgumentParser() + + # Define the command-line arguments + parser.add_argument('--model_path', help='', required=True) + parser.add_argument('--cache_dir', help='', required=True) + parser.add_argument('--video_dir', help='Directory containing video files.', required=True) + parser.add_argument('--gt_file_question', help='Path to the ground truth file containing question.', required=True) + parser.add_argument('--gt_file_answers', help='Path to the ground truth file containing answers.', required=True) + parser.add_argument('--output_dir', help='Directory to save the model results JSON.', required=True) + parser.add_argument('--output_name', help='Name of the file for storing results JSON.', required=True) + parser.add_argument("--num_chunks", type=int, default=1) + parser.add_argument("--chunk_idx", type=int, default=0) + parser.add_argument("--device", type=str, required=False, default='cuda:0') + parser.add_argument('--model_base', help='', default=None, type=str, required=False) + parser.add_argument("--model_max_length", type=int, required=False, default=2048) + + return parser.parse_args() + +def get_model_output(model, video_processor, tokenizer, video, qs, args): + if model.config.mm_use_x_start_end: + qs = DEFAULT_X_START_TOKEN['VIDEO'] + DEFAULT_X_TOKEN['VIDEO'] + DEFAULT_X_END_TOKEN['VIDEO'] + '\n' + qs + else: + qs = DEFAULT_X_TOKEN['VIDEO'] + '\n' + qs + + conv_mode = "llava_v1" + args.conv_mode = conv_mode + + conv = conv_templates[args.conv_mode].copy() + conv.append_message(conv.roles[0], qs) + conv.append_message(conv.roles[1], None) + prompt = conv.get_prompt() + + + video_tensor = video_processor.preprocess(video, return_tensors='pt')['pixel_values'][0].half().to(args.device) + # print(video_tensor.shape) + input_ids = tokenizer_X_token(prompt, tokenizer, X_TOKEN_INDEX['VIDEO'], return_tensors='pt').unsqueeze(0).to(args.device) + + stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2 + keywords = [stop_str] + stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids) + ''' + images (X_modalities) [ + [img_feature, img_feature, video_feature, audio_feature], + ['image', 'image', 'video', 'audio'] + ] + ''' + + with torch.inference_mode(): + output_ids = model.generate( + input_ids, + images=[[video_tensor], ['video']], + do_sample=True, + temperature=0.2, + max_new_tokens=1024, + use_cache=True, + stopping_criteria=[stopping_criteria]) + + input_token_len = input_ids.shape[1] + n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item() + if n_diff_input_output > 0: + print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids') + outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0] + outputs = outputs.strip() + if outputs.endswith(stop_str): + outputs = outputs[:-len(stop_str)] + outputs = outputs.strip() + print(outputs) + return outputs + + +def run_inference(args): + """ + Run inference on ActivityNet QA DataSet using the Video-ChatGPT model. + + Args: + args: Command-line arguments. 
+ """ + # Initialize the model + model_name = get_model_name_from_path(args.model_path) + tokenizer, model, processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name) + model = model.to(args.device) + + # Load both ground truth file containing questions and answers + # with open(args.gt_file_question) as file: + # gt_questions = json.load(file) + # with open(args.gt_file_answers) as file: + # gt_answers = json.load(file) + + gt_questions = json.load(open(args.gt_file_question, "r")) + gt_questions = get_chunk(gt_questions, args.num_chunks, args.chunk_idx) + gt_answers = json.load(open(args.gt_file_answers, "r")) + # gt_answers = get_chunk(gt_answers, args.num_chunks, args.chunk_idx) + + answers_file = os.path.join(args.output_dir, f"{args.output_name}.json") + os.makedirs(args.output_dir, exist_ok=True) + ans_file = open(answers_file, "w") + + # Create the output directory if it doesn't exist + if not os.path.exists(args.output_dir): + os.makedirs(args.output_dir) + + output_list = [] # List to store the output results + + + video_formats = ['.mp4', '.avi', '.mov', '.mkv'] + + # Iterate over each sample in the ground truth file + index = 0 + for sample in tqdm(gt_questions): + video_name = sample['video_name'] + question = sample['question'] + id = sample['question_id'] + answer = gt_answers[index]['answer'] + index += 1 + + sample_set = {'id': id, 'question': question, 'answer': answer} + + # Load the video file + for fmt in tqdm(video_formats): # Added this line + temp_path = os.path.join(args.video_dir, f"v_{video_name}{fmt}") + if os.path.exists(temp_path): + video_path = temp_path + # try: + # Run inference on the video and add the output to the list + output = get_model_output(model, processor['video'], tokenizer, video_path, question, args) + sample_set['pred'] = output + output_list.append(sample_set) + # except Exception as e: + # print(f"Error processing video file '{video_name}': {e}") + ans_file.write(json.dumps(sample_set) + "\n") + break + + ans_file.close() + # Save the output list to a JSON file + # with open(os.path.join(args.output_dir, f"{args.output_name}.json"), 'w') as file: + # json.dump(output_list, file) + + +if __name__ == "__main__": + args = parse_args() + run_inference(args) diff --git a/llava/eval/webpage/index.html b/llava/eval/webpage/index.html new file mode 100644 index 0000000..c2e3cf0 --- /dev/null +++ b/llava/eval/webpage/index.html @@ -0,0 +1,162 @@ + + + + + + Who's GPT-4's favorite? Battles between State-of-the-Art Chatbots + + + + + + + + 🏔️ Vicuna Evaluation Examples + + + + + + + Demo + + + Blog + + + Github + + + + + + + Who's GPT-4's favorite? Battles between State-of-the-Art Chatbots + + + + + Category + + + + Question + + + + + + keyboard_arrow_left + keyboard_arrow_right + + + + + + + + */10 + + + + + + + + + + + + + + + + + + + */10 + + + + + + + + + + + + + + + + Assistant #1 + + + + + + + + + + + + + + + + + + Assistant #2 (Vicuna, our model) + + + + + + + + + + + + + + GPT-4 Evaluation + + + + + + + + + + + + This website is co-authored with GPT-4. + + + + + + + + + + + + + + + diff --git a/llava/eval/webpage/script.js b/llava/eval/webpage/script.js new file mode 100644 index 0000000..4b71e3d --- /dev/null +++ b/llava/eval/webpage/script.js @@ -0,0 +1,245 @@ +// Description: Script for the evaluation webpage. + +let currentQuestionIndex = 1; + +// Store the model name mapping for later use. 
+modelNameMapping = { + "gpt35": "ChatGPT-3.5", + "gpt4": "GPT-4", + "alpaca": "Alpaca-13b", + "vicuna": "Vicuna-13b", + "llama": "LLaMA-13b", + "bard": "Bard", +}; + +modelFigureMapping = { + "vicuna": "figures/vicuna.jpeg", + // Image from: https://commons.wikimedia.org/wiki/File:ChatGPT_logo.svg + "gpt35": "figures/chatgpt.svg", + // Image from: https://www.reddit.com/r/logodesign/comments/1128aat/google_ai_bard_logo_design/ + "bard": "figures/bard.jpg", + // Image from: https://crfm.stanford.edu/2023/03/13/alpaca.html + "alpaca": "figures/alpaca.png", + // Image adapted from https://commons.wikimedia.org/wiki/File:Llama_on_Machu_Picchu.jpg + "llama": "figures/llama.jpg", +} + +// Store the question data in a mapping for later use. +questionMapping = {}; +// Store the question ids in a mapping for later use. +categoryMapping = {}; +// Store the number of questions for later use. +questionsCount = 0; + + +function text2Markdown(text) { + // Normalize the text for markdown rendering. + text = text.trim().replaceAll('\n\n', '\n').replaceAll('\n', '\n\n'); + return marked.parse(text); +} + +function capitalizeFirstChar(str) { + if (!str || str.length === 0) { + return str; + } + return str.charAt(0).toUpperCase() + str.slice(1); +} + +function updateQuestionSelect(question_id) { + const select = document.getElementById('question-select'); + // Clear the question select. + select.innerHTML = ''; + // Populate the question select. + category = questionMapping[question_id].category; + categoryMapping[category].forEach(question_id => { + const question = questionMapping[question_id]; + const option = document.createElement('option'); + option.value = question_id; + option.textContent = 'Q' + question_id.toString() + ': ' + question.question; + select.appendChild(option); + }); + select.value = question_id; +} + +function updateModelSelect() { + const select = document.getElementById('model-select'); + img_path = modelFigureMapping[select.value]; + document.getElementById('other-model-figure').src = img_path; +} + +function populateModels(models) { + const select = document.getElementById('model-select'); + models.forEach(model => { + const option = document.createElement('option'); + option.value = model; + option.textContent = modelNameMapping[model]; + select.appendChild(option); + }); + updateModelSelect(); +} + +function populateQuestions(questions) { + const category_select = document.getElementById('category-select'); + + questionsCount = questions.length; + questions.forEach(question => { + const option = document.createElement('option'); + // Store the question data in a mapping for later use. + questionMapping[question.id] = { + category: question.category, + question: question.question, + answers: question.answers, + evaluations: question.evaluations, + scores: question.scores, + }; + // Store the question id in the category mapping. + if (question.category in categoryMapping) { + categoryMapping[question.category].push(question.id); + } else { + categoryMapping[question.category] = [question.id]; + const category_option = document.createElement('option'); + category_option.value = question.category; + category_option.textContent = capitalizeFirstChar(question.category); + category_select.appendChild(category_option); + } + }); + // Set the default category. 
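+    // updateQuestionSelect derives the category from the current question, so this also
+    // fills the question dropdown for that default category.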
+ updateQuestionSelect(currentQuestionIndex); +} + +function displayQuestion(index) { + const question = questionMapping[index].question; + document.getElementById('selected-question').innerHTML = text2Markdown('**Question:** ' + question); + displayAnswers(index); +} + +function displayAnswers(index) { + const question = questionMapping[index]; + const otherModel = document.getElementById('model-select').value; + // render the answers with markdown + document.getElementById('other-model-answer').innerHTML = text2Markdown(question.answers[otherModel]); + document.getElementById('our-model-answer').innerHTML = text2Markdown(question.answers.vicuna); + + // Display evaluation + score = question.scores[otherModel]; + score_text = modelNameMapping[otherModel] + " " + score[0] + "/10, Vicuna-13b " + score[1] + "/10"; + document.getElementById('evaluation-header').textContent = "GPT-4 Evaluation" + " (Score: " + score_text + ")"; + document.getElementById('evaluation-result').innerHTML = text2Markdown(question.evaluations[otherModel]); + + // Update model names + let assistant1_title = "Assistant #1"; // (" + modelNameMapping[otherModel] + ")"; + let assistant2_title = "Assistant #2 (Vicuna-13b, our model)"; + // Update scores/labels. + let assistant1_score_label = score[0].toString() + '/10'; + let assistant2_score_label = score[1].toString() + '/10'; + + const colorRed ='#fa9'; // '#eb978d'; + // const colorGreen = '#c9f2c9'; + const colorBlue = '#8ef'; // '#71dbf9'; + const colorYellow = '#fe7'; // '#fada57'; + let otherModelHeaderColor = ''; + let ourModelHeaderColor = ''; + // Update the winner. + if (score[0] == score[1]) { + assistant1_title = '🏆 ' + assistant1_title; + assistant1_score_label = '🏆 ' + assistant1_score_label; + assistant2_title = '🏆 ' + assistant2_title; + assistant2_score_label = '🏆 ' + assistant2_score_label; + otherModelHeaderColor = colorYellow; + ourModelHeaderColor = colorYellow; + } else if (score[0] > score[1]) { + assistant1_title = '🏆 ' + assistant1_title; + assistant1_score_label = '🏆 ' + assistant1_score_label; + otherModelHeaderColor = colorBlue; + ourModelHeaderColor = colorRed; + } else if (score[0] < score[1]) { + assistant2_title = '🏆 ' + assistant2_title; + assistant2_score_label = '🏆 ' + assistant2_score_label; + otherModelHeaderColor = colorRed; + ourModelHeaderColor = colorBlue; + } + + document.getElementById('other-model-header-bg').style.backgroundColor = otherModelHeaderColor; + document.getElementById('our-model-header').style.backgroundColor = ourModelHeaderColor; + + document.getElementById('other-model-header').textContent = assistant1_title; + document.getElementById('our-model-header').textContent = assistant2_title; + + document.getElementById('other-score-label').textContent = assistant1_score_label; + document.getElementById('our-score-label').textContent = assistant2_score_label; + + // Update expand buttons visibility for both cards after displaying answers + // Reset the expanded state and update expand buttons visibility for both cards after displaying answers + document.querySelectorAll('.expandable-card').forEach(card => { + card.classList.remove('expanded'); + updateExpandButtonVisibility(card); + const expandBtn = card.querySelector('.expand-btn'); + expandBtn.innerHTML = 'keyboard_arrow_down Show more'; // .textContent = 'Show more'; + }); +} + +document.getElementById('question-select').addEventListener('change', e => { + currentQuestionIndex = parseInt(e.target.value); + displayQuestion(currentQuestionIndex); +}); + 
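+// Switching category jumps to the first question in that category.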
+document.getElementById('category-select').addEventListener('change', e => { + let currentCategory = e.target.value; + const questionIds = categoryMapping[currentCategory]; + currentQuestionIndex = questionIds[0]; + updateQuestionSelect(currentQuestionIndex); + displayQuestion(currentQuestionIndex); +}); + +// Update expand buttons whenever the model is changed +document.getElementById('model-select').addEventListener('change', () => { + displayAnswers(currentQuestionIndex); + document.querySelectorAll('.expandable-card').forEach(card => { + updateExpandButtonVisibility(card); + }); + updateModelSelect(); +}); + +function switchQuestionAndCategory() { + document.getElementById('question-select').value = currentQuestionIndex; + old_category = document.getElementById('category-select').value; + new_category = questionMapping[currentQuestionIndex].category; + if (old_category != new_category) { + document.getElementById('category-select').value = new_category; + updateQuestionSelect(currentQuestionIndex); + } + displayQuestion(currentQuestionIndex); +} + +document.getElementById('prev-question').addEventListener('click', () => { + // Question index starts from 1. + currentQuestionIndex = Math.max(1, currentQuestionIndex - 1); + switchQuestionAndCategory(); +}); + +document.getElementById('next-question').addEventListener('click', () => { + // Question index starts from 1. + currentQuestionIndex = Math.min(questionsCount, currentQuestionIndex + 1); + switchQuestionAndCategory(); +}); + +function updateExpandButtonVisibility(card) { + const cardTextContainer = card.querySelector('.card-text-container'); + const expandBtn = card.querySelector('.expand-btn'); + if (cardTextContainer.scrollHeight > cardTextContainer.offsetHeight) { + expandBtn.style.display = 'flex'; + } else { + expandBtn.style.display = 'none'; + card.classList.add('expanded'); + } +} + +document.querySelectorAll('.expand-btn').forEach(btn => { + btn.addEventListener('click', e => { + const card = e.target.closest('.expandable-card'); + card.classList.toggle('expanded'); + const more = 'keyboard_arrow_down Show more'; + const less = 'keyboard_arrow_up Show less'; + e.target.innerHTML = card.classList.contains('expanded') ? 
less : more; + }); +}); diff --git a/llava/eval/webpage/styles.css b/llava/eval/webpage/styles.css new file mode 100644 index 0000000..7b6d6fc --- /dev/null +++ b/llava/eval/webpage/styles.css @@ -0,0 +1,105 @@ +body { + font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; + background-color: #f8f9fa; +} + +.navbar-dark .navbar-nav .nav-link { + color: #f1cf68; + font-size: 1.1rem; + padding: 0.5rem 0.6rem; +} + +.card-header { + font-weight: bold; +} + +.card { + box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); + transition: 0.3s; +} + +.card:hover { + box-shadow: 0 8px 16px rgba(0, 0, 0, 0.2); +} + +button { + transition: background-color 0.3s; +} + +button:hover { + background-color: #007bff; +} + +@media (max-width: 767px) { + .form-row .form-group { + margin-bottom: 10px; + } +} + +/* Extra styles */ + +.expandable-card .card-text-container { + max-height: 200px; + overflow-y: hidden; + position: relative; +} + +.expandable-card.expanded .card-text-container { + max-height: none; +} + +.expand-btn { + position: relative; + display: none; + background-color: rgba(255, 255, 255, 0.8); + color: #510c75; + border-color: transparent; +} + +.expand-btn:hover { + background-color: rgba(200, 200, 200, 0.8); + text-decoration: none; + border-color: transparent; + color: #510c75; +} + +.expand-btn:focus { + outline: none; + text-decoration: none; +} + +.expandable-card:not(.expanded) .card-text-container:after { + content: ""; + position: absolute; + bottom: 0; + left: 0; + width: 100%; + height: 90px; + background: linear-gradient(rgba(255, 255, 255, 0.2), rgba(255, 255, 255, 1)); +} + +.expandable-card:not(.expanded) .expand-btn { + margin-top: -40px; +} + +.card-body { + padding-bottom: 5px; +} + +.vertical-flex-layout { + justify-content: center; + align-items: center; + height: 100%; + display: flex; + flex-direction: column; + gap: 5px; +} + +.figure-img { + max-width: 100%; + height: auto; +} + +.adjustable-font-size { + font-size: calc(0.5rem + 2vw); +} diff --git a/llava/mm_utils.py b/llava/mm_utils.py new file mode 100644 index 0000000..1027216 --- /dev/null +++ b/llava/mm_utils.py @@ -0,0 +1,123 @@ +from PIL import Image +from io import BytesIO +import base64 + +import torch +from transformers import StoppingCriteria +from llava.constants import X_INDEX_TOKEN + + +def load_image_from_base64(image): + return Image.open(BytesIO(base64.b64decode(image))) + + +def expand2square(pil_img, background_color): + width, height = pil_img.size + if width == height: + return pil_img + elif width > height: + result = Image.new(pil_img.mode, (width, width), background_color) + result.paste(pil_img, (0, (width - height) // 2)) + return result + else: + result = Image.new(pil_img.mode, (height, height), background_color) + result.paste(pil_img, ((height - width) // 2, 0)) + return result + + +def process_images(images, image_processor, model_cfg): + image_aspect_ratio = getattr(model_cfg, "image_aspect_ratio", None) + new_images = [] + if image_aspect_ratio == 'pad': + for image in images: + image = expand2square(image, tuple(int(x*255) for x in image_processor.image_mean)) + image = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0] + new_images.append(image) + else: + return image_processor(images, return_tensors='pt')['pixel_values'] + if all(x.shape == new_images[0].shape for x in new_images): + new_images = torch.stack(new_images, dim=0) + return new_images + + +# def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None): +# 
prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('')] +# +# def insert_separator(X, sep): +# return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1] +# +# input_ids = [] +# offset = 0 +# if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id: +# offset = 1 +# input_ids.append(prompt_chunks[0][0]) +# +# for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)): +# input_ids.extend(x[offset:]) +# +# if return_tensors is not None: +# if return_tensors == 'pt': +# return torch.tensor(input_ids, dtype=torch.long) +# raise ValueError(f'Unsupported tensor type: {return_tensors}') +# return input_ids + + +def tokenizer_X_token(prompt, tokenizer, X_token_index, return_tensors=None): + prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split(f'<{X_INDEX_TOKEN[X_token_index].lower()}>')] + + def insert_separator(X, sep): + return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1] + + input_ids = [] + offset = 0 + if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id: + offset = 1 + input_ids.append(prompt_chunks[0][0]) + + for x in insert_separator(prompt_chunks, [X_token_index] * (offset + 1)): + input_ids.extend(x[offset:]) + + if return_tensors is not None: + if return_tensors == 'pt': + return torch.tensor(input_ids, dtype=torch.long) + raise ValueError(f'Unsupported tensor type: {return_tensors}') + return input_ids + +def get_model_name_from_path(model_path): + model_path = model_path.strip("/") + model_paths = model_path.split("/") + if model_paths[-1].startswith('checkpoint-'): + return model_paths[-2] + "_" + model_paths[-1] + else: + return model_paths[-1] + + + + +class KeywordsStoppingCriteria(StoppingCriteria): + def __init__(self, keywords, tokenizer, input_ids): + self.keywords = keywords + self.keyword_ids = [] + self.max_keyword_len = 0 + for keyword in keywords: + cur_keyword_ids = tokenizer(keyword).input_ids + if len(cur_keyword_ids) > 1 and cur_keyword_ids[0] == tokenizer.bos_token_id: + cur_keyword_ids = cur_keyword_ids[1:] + if len(cur_keyword_ids) > self.max_keyword_len: + self.max_keyword_len = len(cur_keyword_ids) + self.keyword_ids.append(torch.tensor(cur_keyword_ids)) + self.tokenizer = tokenizer + self.start_len = input_ids.shape[1] + + def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: + assert output_ids.shape[0] == 1, "Only support batch size 1 (yet)" # TODO + offset = min(output_ids.shape[1] - self.start_len, self.max_keyword_len) + self.keyword_ids = [keyword_id.to(output_ids.device) for keyword_id in self.keyword_ids] + for keyword_id in self.keyword_ids: + if (output_ids[0, -keyword_id.shape[0]:] == keyword_id).all(): + return True + outputs = self.tokenizer.batch_decode(output_ids[:, -offset:], skip_special_tokens=True)[0] + for keyword in self.keywords: + if keyword in outputs: + return True + return False \ No newline at end of file diff --git a/llava/model/__init__.py b/llava/model/__init__.py new file mode 100644 index 0000000..fa79960 --- /dev/null +++ b/llava/model/__init__.py @@ -0,0 +1,2 @@ +from .language_model.llava_llama import LlavaLlamaForCausalLM, LlavaConfig +from .language_model.llava_mpt import LlavaMPTForCausalLM, LlavaMPTConfig diff --git a/llava/model/apply_delta.py b/llava/model/apply_delta.py new file mode 100644 index 0000000..666dd96 --- /dev/null +++ b/llava/model/apply_delta.py @@ -0,0 +1,48 @@ +""" +Usage: 
+python3 -m fastchat.model.apply_delta --base ~/model_weights/llama-7b --target ~/model_weights/vicuna-7b --delta lmsys/vicuna-7b-delta +""" +import argparse + +import torch +from tqdm import tqdm +from transformers import AutoTokenizer, AutoModelForCausalLM +from llava import LlavaLlamaForCausalLM + + +def apply_delta(base_model_path, target_model_path, delta_path): + print("Loading base model") + base = AutoModelForCausalLM.from_pretrained( + base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True) + + print("Loading delta") + delta = LlavaLlamaForCausalLM.from_pretrained(delta_path, torch_dtype=torch.float16, low_cpu_mem_usage=True) + delta_tokenizer = AutoTokenizer.from_pretrained(delta_path) + + print("Applying delta") + for name, param in tqdm(delta.state_dict().items(), desc="Applying delta"): + if name not in base.state_dict(): + assert name in ['model.mm_projector.weight', 'model.mm_projector.bias'], f'{name} not in base model' + continue + if param.data.shape == base.state_dict()[name].shape: + param.data += base.state_dict()[name] + else: + assert name in ['model.embed_tokens.weight', 'lm_head.weight'], \ + f'{name} dimension mismatch: {param.data.shape} vs {base.state_dict()[name].shape}' + bparam = base.state_dict()[name] + param.data[:bparam.shape[0], :bparam.shape[1]] += bparam + + print("Saving target model") + delta.save_pretrained(target_model_path) + delta_tokenizer.save_pretrained(target_model_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--base-model-path", type=str, required=True) + parser.add_argument("--target-model-path", type=str, required=True) + parser.add_argument("--delta-path", type=str, required=True) + + args = parser.parse_args() + + apply_delta(args.base_model_path, args.target_model_path, args.delta_path) diff --git a/llava/model/builder.py b/llava/model/builder.py new file mode 100644 index 0000000..cf0cff4 --- /dev/null +++ b/llava/model/builder.py @@ -0,0 +1,173 @@ +# Copyright 2023 Haotian Liu +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
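For orientation, here is a minimal usage sketch of the loader defined below in llava/model/builder.py. The checkpoint path is a hypothetical placeholder and not part of this patch; the call signature and return values follow load_pretrained_model as added here.

# Hypothetical usage sketch (assumed local checkpoint path; not shipped with this patch).
from llava.mm_utils import get_model_name_from_path
from llava.model.builder import load_pretrained_model

model_path = "./checkpoints/llava-v1.5-7b"         # placeholder path
model_name = get_model_name_from_path(model_path)  # e.g. "llava-v1.5-7b"
tokenizer, model, processor, context_len = load_pretrained_model(
    model_path, model_base=None, model_name=model_name,
    load_8bit=False, load_4bit=False, device="cuda")
# `processor` is a dict keyed by modality ("image" and/or "video"),
# filled according to the towers listed in the checkpoint's config.X.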
+ + +import os +import warnings +import shutil + +from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig, BitsAndBytesConfig +import torch +from llava.model import * +from llava.constants import DEFAULT_X_PATCH_TOKEN, DEFAULT_X_START_TOKEN, DEFAULT_X_END_TOKEN + + +def load_pretrained_model(model_path, model_base, model_name, load_8bit=False, load_4bit=False, device_map="auto", device="cuda"): + kwargs = {"device_map": device_map, + # "offload_folder": model_path, + "cache_dir": r'./' + } + + if load_8bit: + kwargs['load_in_8bit'] = True + elif load_4bit: + kwargs['load_in_4bit'] = True + kwargs['quantization_config'] = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_compute_dtype=torch.float16, + bnb_4bit_use_double_quant=True, + bnb_4bit_quant_type='nf4' + ) + else: + kwargs['torch_dtype'] = torch.float16 + + if 'llava' in model_name.lower(): + # Load LLaVA model + if 'lora' in model_name.lower() and model_base is None: + warnings.warn('There is `lora` in model name but no `model_base` is provided. If you are loading a LoRA model, please provide the `model_base` argument. Detailed instruction: https://github.com/haotian-liu/LLaVA#launch-a-model-worker-lora-weights-unmerged.') + if 'lora' in model_name.lower() and model_base is not None: + lora_cfg_pretrained = AutoConfig.from_pretrained(model_path) + tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False) + print('Loading LLaVA from base model...') + model = LlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs) + token_num, tokem_dim = model.lm_head.out_features, model.lm_head.in_features + if model.lm_head.weight.shape[0] != token_num: + model.lm_head.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype)) + model.model.embed_tokens.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype)) + + print('Loading additional LLaVA weights...') + if os.path.exists(os.path.join(model_path, 'non_lora_trainables.bin')): + non_lora_trainables = torch.load(os.path.join(model_path, 'non_lora_trainables.bin'), map_location='cpu') + else: + # this is probably from HF Hub + from huggingface_hub import hf_hub_download + def load_from_hf(repo_id, filename, subfolder=None): + cache_file = hf_hub_download( + repo_id=repo_id, + filename=filename, + subfolder=subfolder) + return torch.load(cache_file, map_location='cpu') + non_lora_trainables = load_from_hf(model_path, 'non_lora_trainables.bin') + non_lora_trainables = {(k[11:] if k.startswith('base_model.') else k): v for k, v in non_lora_trainables.items()} + if any(k.startswith('model.model.') for k in non_lora_trainables): + non_lora_trainables = {(k[6:] if k.startswith('model.') else k): v for k, v in non_lora_trainables.items()} + model.load_state_dict(non_lora_trainables, strict=False) + + from peft import PeftModel + print('Loading LoRA weights...') + model = PeftModel.from_pretrained(model, model_path) + print('Merging LoRA weights...') + model = model.merge_and_unload() + print('Model is loaded...') + elif model_base is not None: + # this may be mm projector only + print('Loading LLaVA from base model...') + if 'mpt' in model_name.lower(): + if not os.path.isfile(os.path.join(model_path, 'configuration_mpt.py')): + shutil.copyfile(os.path.join(model_base, 'configuration_mpt.py'), os.path.join(model_path, 'configuration_mpt.py')) + tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=True) + cfg_pretrained 
= AutoConfig.from_pretrained(model_path, trust_remote_code=True) + model = LlavaMPTForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs) + else: + tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False) + cfg_pretrained = AutoConfig.from_pretrained(model_path) + model = LlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs) + + mm_projector_weights = torch.load(os.path.join(model_path, 'mm_projector.bin'), map_location='cpu') + mm_projector_weights = {k: v.to(torch.float16) for k, v in mm_projector_weights.items()} + model.load_state_dict(mm_projector_weights, strict=False) + else: + if 'mpt' in model_name.lower(): + tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True) + model = LlavaMPTForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs) + else: + tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False) + # config = AutoConfig.from_pretrained(model_path) + # model1 = LlavaLlamaForCausalLM(config) + # a = torch.load(rf'{model_path}/pytorch_model-00001-of-00003.bin') + # b = torch.load(rf'{model_path}/pytorch_model-00002-of-00003.bin') + # c = torch.load(rf'{model_path}/pytorch_model-00003-of-00003.bin') + # model1.load_state_dict(a, strict=False) + # model1.load_state_dict(b, strict=False) + # model1.load_state_dict(c, strict=False) + model = LlavaLlamaForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs) + print() + else: + # Load language model + if model_base is not None: + # PEFT model + from peft import PeftModel + tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False) + model = AutoModelForCausalLM.from_pretrained(model_base, torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map="auto") + print(f"Loading LoRA weights from {model_path}") + model = PeftModel.from_pretrained(model, model_path) + print(f"Merging weights") + model = model.merge_and_unload() + print('Convert to FP16...') + model.to(torch.float16) + else: + use_fast = False + if 'mpt' in model_name.lower(): + tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True) + model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, trust_remote_code=True, **kwargs) + else: + tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False) + model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs) + + processor = {} + if 'llava' in model_name.lower(): + mm_use_x_start_end = getattr(model.config, "mm_use_x_start_end", False) + mm_use_x_patch_token = getattr(model.config, "mm_use_x_patch_token", True) + X = model.config.X + if mm_use_x_patch_token: + for x in X: + tokenizer.add_tokens([DEFAULT_X_PATCH_TOKEN[x.upper()]], special_tokens=True) + if mm_use_x_start_end: + for x in X: + tokenizer.add_tokens([DEFAULT_X_START_TOKEN[x.upper()], DEFAULT_X_END_TOKEN[x.upper()]], special_tokens=True) + model.resize_token_embeddings(len(tokenizer)) + print(X) + if 'Image' in X: + image_tower = model.get_image_tower() + if not image_tower.is_loaded: + image_tower.load_model() + image_tower.to(device=device, dtype=torch.float16) + image_processor = image_tower.image_processor + processor['image'] = image_processor + + if 'Video' in X: + video_tower = model.get_video_tower() + if not video_tower.is_loaded: + video_tower.load_model() + video_tower.to(device=device, dtype=torch.float16) + video_processor = video_tower.video_processor + processor['video'] = video_processor + + if 
hasattr(model.config, "max_sequence_length"): + context_len = model.config.max_sequence_length + else: + context_len = 2048 + + return tokenizer, model, processor, context_len + # return tokenizer, model1, processor, context_len diff --git a/llava/model/consolidate.py b/llava/model/consolidate.py new file mode 100644 index 0000000..1e32421 --- /dev/null +++ b/llava/model/consolidate.py @@ -0,0 +1,29 @@ +""" +Usage: +python3 -m llava.model.consolidate --src ~/model_weights/llava-7b --dst ~/model_weights/llava-7b_consolidate +""" +import argparse + +import torch +from transformers import AutoTokenizer, AutoModelForCausalLM +from llava.model import * +from llava.model.utils import auto_upgrade + + +def consolidate_ckpt(src_path, dst_path): + print("Loading model") + auto_upgrade(src_path) + src_model = AutoModelForCausalLM.from_pretrained(src_path, torch_dtype=torch.float16, low_cpu_mem_usage=True) + src_tokenizer = AutoTokenizer.from_pretrained(src_path, use_fast=False) + src_model.save_pretrained(dst_path) + src_tokenizer.save_pretrained(dst_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--src", type=str, required=True) + parser.add_argument("--dst", type=str, required=True) + + args = parser.parse_args() + + consolidate_ckpt(args.src, args.dst) diff --git a/llava/model/language_model/llava_llama.py b/llava/model/language_model/llava_llama.py new file mode 100644 index 0000000..ef032cd --- /dev/null +++ b/llava/model/language_model/llava_llama.py @@ -0,0 +1,145 @@ +# Copyright 2023 Haotian Liu +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
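The forward pass defined below fuses modality features into inputs_embeds and then trains with the usual causal-LM objective: logits and labels are shifted by one position so each token predicts the next one. A self-contained sketch of that shift, with purely illustrative shapes:

import torch
from torch.nn import CrossEntropyLoss

# Illustrative shapes only: batch=1, seq_len=5, vocab=32000 (assumptions, not model values).
logits = torch.randn(1, 5, 32000)
labels = torch.randint(0, 32000, (1, 5))

shift_logits = logits[..., :-1, :].contiguous()  # predictions at positions 0..3
shift_labels = labels[..., 1:].contiguous()      # targets are the next tokens, positions 1..4
loss = CrossEntropyLoss()(shift_logits.view(-1, 32000), shift_labels.view(-1))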
+import time
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.nn as nn
+from torch.nn import CrossEntropyLoss
+
+from transformers import AutoConfig, AutoModelForCausalLM, \
+    LlamaConfig, LlamaModel, LlamaForCausalLM
+
+from transformers.modeling_outputs import CausalLMOutputWithPast
+
+from ..llava_arch import LlavaMetaModel, LlavaMetaForCausalLM
+
+
+class LlavaConfig(LlamaConfig):
+    model_type = "llava"
+
+
+class LlavaLlamaModel(LlavaMetaModel, LlamaModel):
+    config_class = LlavaConfig
+
+    def __init__(self, config: LlamaConfig):
+        super(LlavaLlamaModel, self).__init__(config)
+
+
+class LlavaLlamaForCausalLM(LlamaForCausalLM, LlavaMetaForCausalLM):
+    config_class = LlavaConfig
+
+    def __init__(self, config):
+        super(LlamaForCausalLM, self).__init__(config)
+        self.model = LlavaLlamaModel(config)
+
+        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_model(self):
+        return self.model
+
+    def forward(
+        self,
+        input_ids: torch.LongTensor = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        past_key_values: Optional[List[torch.FloatTensor]] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        images: Optional[torch.FloatTensor] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple, CausalLMOutputWithPast]:
+        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+        output_hidden_states = (
+            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+        )
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+        # print(222222222222222222222222222222222222222222222)
+        input_ids, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images)
+        # input_ids is gone at this point: the text has already been fused into inputs_embeds (leading text + image + trailing text, several hundred tokens in total)
+        # in later decoding steps input_ids is only the token predicted in the previous round; the image and earlier text information are carried in past_key_values
+        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+
+        # print(66666666666666666666666)
+        outputs = self.model(
+            input_ids=input_ids,
+            attention_mask=attention_mask,
+            past_key_values=past_key_values,
+            inputs_embeds=inputs_embeds,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict
+        )
+
+        # print(777777777)
+        hidden_states = outputs[0]
+        logits = self.lm_head(hidden_states)
+
+        # print(88888888888888)
+        loss = None
+        if labels is not None:
+            # Shift so that tokens < n predict n
+            shift_logits = logits[..., :-1, :].contiguous()
+            shift_labels = labels[..., 1:].contiguous()
+            # Flatten the tokens
+            loss_fct = CrossEntropyLoss()
+            shift_logits = shift_logits.view(-1, self.config.vocab_size)
+            shift_labels = shift_labels.view(-1)
+            # Enable model/pipeline parallelism
+            shift_labels = shift_labels.to(shift_logits.device)
+            loss = loss_fct(shift_logits, shift_labels)
+
+        if not return_dict:
+            output = (logits,) + outputs[1:]
+            return (loss,) + output if loss is not None else output
+
+        # print(99999999999)
+        return CausalLMOutputWithPast(
+            loss=loss,
+            logits=logits,
+            past_key_values=outputs.past_key_values,
+            hidden_states=outputs.hidden_states,
attentions=outputs.attentions, + ) + + def prepare_inputs_for_generation( + self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs + ): + if past_key_values: + input_ids = input_ids[:, -1:] + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and past_key_values is None: + model_inputs = {"inputs_embeds": inputs_embeds} + else: + model_inputs = {"input_ids": input_ids} + + model_inputs.update( + { + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + "attention_mask": attention_mask, + "images": kwargs.get("images", None), + } + ) + return model_inputs + +AutoConfig.register("llava", LlavaConfig) +AutoModelForCausalLM.register(LlavaConfig, LlavaLlamaForCausalLM) \ No newline at end of file diff --git a/llava/model/language_model/llava_llama_v1.py b/llava/model/language_model/llava_llama_v1.py new file mode 100644 index 0000000..1419d20 --- /dev/null +++ b/llava/model/language_model/llava_llama_v1.py @@ -0,0 +1,155 @@ +# Copyright 2023 Haotian Liu +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import time +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn as nn +from torch.nn import CrossEntropyLoss + +from transformers import AutoConfig, AutoModelForCausalLM, \ + LlamaConfig, LlamaModel, LlamaForCausalLM + +from transformers.modeling_outputs import CausalLMOutputWithPast + +from ..llava_arch import LlavaMetaModel, LlavaMetaForCausalLM + + +class LlavaConfig(LlamaConfig): + model_type = "llava" + + +class LlavaLlamaModel(LlavaMetaModel, LlamaModel): + config_class = LlavaConfig + + def __init__(self, config: LlamaConfig): + super(LlavaLlamaModel, self).__init__(config) + + +class LlavaLlamaForCausalLM(LlamaForCausalLM, LlavaMetaForCausalLM): + config_class = LlavaConfig + + def __init__(self, config): + super(LlamaForCausalLM, self).__init__(config) + self.model = LlavaLlamaModel(config) + + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_model(self): + return self.model + + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + images: Optional[torch.FloatTensor] = None, + paths: Optional[str] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithPast]: + # print(paths) + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else 
self.config.use_return_dict
+
+        # print(images)
+        if past_key_values is None:
+            input_ids, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images, paths)
+        else:
+            input_ids, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images)
+        # input_ids is gone at this point: the text has already been fused into inputs_embeds (leading text + image + trailing text, several hundred tokens in total)
+        # in later decoding steps input_ids is only the token predicted in the previous round; the image and earlier text information are carried in past_key_values
+        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+
+        # print(66666666666666666666666)
+        outputs = self.model(
+            input_ids=input_ids,
+            attention_mask=attention_mask,
+            past_key_values=past_key_values,
+            inputs_embeds=inputs_embeds,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict
+        )
+
+        # print(777777777)
+        hidden_states = outputs[0]
+        logits = self.lm_head(hidden_states)
+
+        # print(88888888888888)
+        loss = None
+        if labels is not None:
+            # Shift so that tokens < n predict n
+            shift_logits = logits[..., :-1, :].contiguous()
+            shift_labels = labels[..., 1:].contiguous()
+            # Flatten the tokens
+            loss_fct = CrossEntropyLoss()
+            shift_logits = shift_logits.view(-1, self.config.vocab_size)
+            shift_labels = shift_labels.view(-1)
+            # Enable model/pipeline parallelism
+            shift_labels = shift_labels.to(shift_logits.device)
+            loss = loss_fct(shift_logits, shift_labels)
+
+        if not return_dict:
+            output = (logits,) + outputs[1:]
+            return (loss,) + output if loss is not None else output
+
+        # print(99999999999)
+        return CausalLMOutputWithPast(
+            loss=loss,
+            logits=logits,
+            past_key_values=outputs.past_key_values,
+            hidden_states=outputs.hidden_states,
+            attentions=outputs.attentions,
+        )
+
+    def prepare_inputs_for_generation(
+        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
+    ):
+        if past_key_values:
+            input_ids = input_ids[:, -1:]
+
+        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+        if inputs_embeds is not None and past_key_values is None:
+            model_inputs = {"inputs_embeds": inputs_embeds}
+        else:
+            model_inputs = {"input_ids": input_ids}
+
+        if past_key_values is None:
+            model_inputs.update({"paths": kwargs.get("paths", None)})
+
+        # print(kwargs.get("paths", None))
+        model_inputs.update(
+            {
+                "past_key_values": past_key_values,
+                "use_cache": kwargs.get("use_cache"),
+                "attention_mask": attention_mask,
+                "images": kwargs.get("images", None),
+            }
+        )
+        return model_inputs
+
+AutoConfig.register("llava", LlavaConfig)
+AutoModelForCausalLM.register(LlavaConfig, LlavaLlamaForCausalLM)
diff --git a/llava/model/language_model/llava_mpt.py b/llava/model/language_model/llava_mpt.py
new file mode 100644
index 0000000..39dc880
--- /dev/null
+++ b/llava/model/language_model/llava_mpt.py
@@ -0,0 +1,113 @@
+# Copyright 2023 Haotian Liu
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import List, Optional, Tuple +import warnings + +import torch +import torch.nn.functional as F +import math + +from transformers import AutoConfig, AutoModelForCausalLM +from transformers.modeling_outputs import CausalLMOutputWithPast + +from .mpt.modeling_mpt import MPTConfig, MPTForCausalLM, MPTModel +from llava.model.llava_arch import LlavaMetaModel, LlavaMetaForCausalLM + + +class LlavaMPTConfig(MPTConfig): + model_type = "llava_mpt" + + +class LlavaMPTModel(LlavaMetaModel, MPTModel): + config_class = LlavaMPTConfig + + def __init__(self, config: MPTConfig): + config.hidden_size = config.d_model + super(LlavaMPTModel, self).__init__(config) + + def embed_tokens(self, x): + return self.wte(x) + + +class LlavaMPTForCausalLM(MPTForCausalLM, LlavaMetaForCausalLM): + config_class = LlavaMPTConfig + supports_gradient_checkpointing = True + + def __init__(self, config): + super(MPTForCausalLM, self).__init__(config) + + if not config.tie_word_embeddings: + raise ValueError('MPTForCausalLM only supports tied word embeddings') + self.transformer = LlavaMPTModel(config) + self.logit_scale = None + if config.logit_scale is not None: + logit_scale = config.logit_scale + if isinstance(logit_scale, str): + if logit_scale == 'inv_sqrt_d_model': + logit_scale = 1 / math.sqrt(config.d_model) + else: + raise ValueError(f"logit_scale={logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.") + self.logit_scale = logit_scale + + def get_model(self): + return self.transformer + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, LlavaMPTModel): + module.gradient_checkpointing = value + + def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, images=None): + return_dict = return_dict if return_dict is not None else self.config.return_dict + use_cache = use_cache if use_cache is not None else self.config.use_cache + + input_ids, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images) + outputs = self.transformer(input_ids=input_ids, inputs_embeds=inputs_embeds, past_key_values=past_key_values, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id, return_dict=return_dict, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache) + # FIXME: this is a hack to fix the multiple gpu inference issue in https://github.com/haotian-liu/LLaVA/issues/338 + logits = F.linear(outputs.last_hidden_state.to(self.transformer.wte.weight.device), self.transformer.wte.weight) + if self.logit_scale is not None: + if self.logit_scale == 0: + warnings.warn(f'Multiplying logits by self.logit_scale={self.logit_scale!r}. 
This will produce uniform (uninformative) outputs.') + logits *= self.logit_scale + loss = None + if labels is not None: + labels = torch.roll(labels, shifts=-1) + labels[:, -1] = -100 + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), labels.to(logits.device).view(-1)) + return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states) + + def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs): + if inputs_embeds is not None: + raise NotImplementedError('inputs_embeds is not implemented for MPT yet') + attention_mask = kwargs['attention_mask'].bool() + if attention_mask[:, -1].sum() != attention_mask.shape[0]: + raise NotImplementedError('MPT does not support generation with right padding.') + if self.transformer.attn_uses_sequence_id and self.training: + sequence_id = torch.zeros_like(input_ids[:1]) + else: + sequence_id = None + if past_key_values is not None: + input_ids = input_ids[:, -1].unsqueeze(-1) + if self.transformer.prefix_lm: + prefix_mask = torch.ones_like(attention_mask) + if kwargs.get('use_cache') == False: + raise NotImplementedError('MPT with prefix_lm=True does not support use_cache=False.') + else: + prefix_mask = None + return {'input_ids': input_ids, 'attention_mask': attention_mask, 'prefix_mask': prefix_mask, 'sequence_id': sequence_id, 'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache', True), "images": kwargs.get("images", None)} + + +AutoConfig.register("llava_mpt", LlavaMPTConfig) +AutoModelForCausalLM.register(LlavaMPTConfig, LlavaMPTForCausalLM) diff --git a/llava/model/llava_arch.py b/llava/model/llava_arch.py new file mode 100644 index 0000000..96431c8 --- /dev/null +++ b/llava/model/llava_arch.py @@ -0,0 +1,346 @@ +# Copyright 2023 Haotian Liu +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
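The prepare_inputs_labels_for_multimodal method added below consumes an X_modalities pair: a list of per-placeholder features and a parallel list of modality keys that select encode_images or encode_videos. A sketch of that structure with placeholder tensors (all shapes are assumptions for illustration; real features come from the image/video towers):

import torch

img_feat = torch.randn(256, 4096)      # assumed (num_patches, hidden_size)
vid_feat = torch.randn(8 * 256, 4096)  # assumed (frames * patches, hidden_size)

X_modalities = [
    [img_feat, vid_feat],   # one feature per modality placeholder in the prompt
    ["image", "video"],     # parallel modality keys
]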
+ + +from abc import ABC, abstractmethod + +import torch +import torch.nn as nn + +from .multimodal_encoder.builder import build_image_tower, build_video_tower +from .multimodal_projector.builder import build_vision_projector + +from llava.constants import IGNORE_INDEX, X_TOKEN_INDEX, DEFAULT_X_PATCH_TOKEN, DEFAULT_X_START_TOKEN, DEFAULT_X_END_TOKEN + + +class LlavaMetaModel: + + def __init__(self, config): + super(LlavaMetaModel, self).__init__(config) + + if hasattr(config, "mm_image_tower"): + self.image_tower = build_image_tower(config, delay_load=True) + self.mm_projector = build_vision_projector(config) + if hasattr(config, "mm_video_tower"): + self.video_tower = build_video_tower(config, delay_load=True) + self.mm_projector = build_vision_projector(config) + + def get_image_tower(self): + image_tower = getattr(self, 'image_tower', None) + if type(image_tower) is list: + image_tower = image_tower[0] + return image_tower + + def get_video_tower(self): + video_tower = getattr(self, 'video_tower', None) + if type(video_tower) is list: + video_tower = video_tower[0] + return video_tower + + def initialize_image_modules(self, model_args, fsdp=None): + image_tower = model_args.image_tower + mm_vision_select_layer = model_args.mm_vision_select_layer + mm_vision_select_feature = model_args.mm_vision_select_feature + pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter + + self.config.mm_image_tower = image_tower + + image_tower = build_image_tower(model_args) + + if fsdp is not None and len(fsdp) > 0: + self.image_tower = [image_tower] + else: + self.image_tower = image_tower + + self.config.use_mm_proj = True + self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear') + self.config.mm_hidden_size = image_tower.hidden_size + self.config.mm_vision_select_layer = mm_vision_select_layer + self.config.mm_vision_select_feature = mm_vision_select_feature + + self.mm_projector = build_vision_projector(self.config) + + if pretrain_mm_mlp_adapter is not None: + mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu') + def get_w(weights, keyword): + return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k} + + self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector')) + + def initialize_video_modules(self, model_args, fsdp=None): + video_tower = model_args.video_tower + mm_vision_select_layer = model_args.mm_vision_select_layer + mm_vision_select_feature = model_args.mm_vision_select_feature + pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter + + self.config.mm_video_tower = video_tower + + video_tower = build_video_tower(model_args) + + if fsdp is not None and len(fsdp) > 0: + self.video_tower = [video_tower] + else: + self.video_tower = video_tower + + self.config.use_mm_proj = True + self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear') + self.config.mm_hidden_size = video_tower.hidden_size + self.config.mm_vision_select_layer = mm_vision_select_layer + self.config.mm_vision_select_feature = mm_vision_select_feature + + self.mm_projector = build_vision_projector(self.config) + + if pretrain_mm_mlp_adapter is not None: + mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu') + def get_w(weights, keyword): + return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k} + + self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector')) + +class LlavaMetaForCausalLM(ABC): + + @abstractmethod + def 
get_model(self): + pass + + def get_image_tower(self): + return self.get_model().get_image_tower() + + def get_video_tower(self): + return self.get_model().get_video_tower() + + def get_all_tower(self, keys): + tower = {key: getattr(self, f'get_{key}_tower') for key in keys} + return tower + + def encode_images(self, images): + image_features = self.get_model().get_image_tower()(images) + image_features = self.get_model().mm_projector(image_features) + return image_features + + def encode_videos(self, videos): + video_features = self.get_model().get_video_tower()(videos) + print("1111",video_features.shape) + video_features = self.get_model().mm_projector(video_features) + print("2222",video_features.shape) + import pdb; pdb.set_trace() + return video_features + + def prepare_inputs_labels_for_multimodal( + self, input_ids, attention_mask, past_key_values, labels, X_modalities + ): + ''' + X_modalities [ + [img_feature, img_feature, video_feature, audio_feature], + ['image', 'image', 'video', 'audio'] + ] + ''' + Xs, keys = X_modalities + all_tower = self.get_all_tower(set(keys)) if len(keys) > 0 else None + + # print(2.5) + if all_tower is None or X_modalities[0][0] is None or input_ids.shape[1] == 1: + if past_key_values is not None and all_tower is not None and Xs is not None and input_ids.shape[1] == 1: + attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device) + return input_ids, attention_mask, past_key_values, None, labels + + + # print(keys) + # X_features = [getattr(self, f'encode_{key}s')(X.unsqueeze(0)) for X, key in zip(Xs, keys)] # expand to get batchsize + # X_features = [x.flatten(0, 1) for x in X_features] + # print(X_features[0].size()) + # import pdb; pdb.set_trace() + + X_features = [] + # Xs = [Xs] + + for X, key in zip(Xs, keys): + stackX = torch.stack(X, dim=0).view(3,3,8,224,224) + # stackX = torch.stack(X, dim=0) + # print(stackX.shape) + + encoded_feature = getattr(self, f'encode_{key}s')(stackX) + print("Feature Dim", encoded_feature.shape) + X_features.append(encoded_feature) + + # flattened_X_features = [] + # for x in X_features: + # flattened_feature = x.flatten(0, 1) + # flattened_X_features.append(flattened_feature) + + # X_features = flattened_X_features + + + new_input_embeds = [] + new_labels = [] if labels is not None else None + + # print("BATCH size", len(X_features), input_ids.shape) + for batch_idx, cur_input_ids in enumerate(input_ids): + cur_X_idx = 0 + if (torch.any(torch.stack([cur_input_ids == X_TOKEN_INDEX[key.upper()] for key in keys]), dim=0)).sum() == 0: + # multimodal LLM, but the current sample is not multimodal + # FIXME: this is a hacky fix, for deepspeed zero3 to work + half_len = cur_input_ids.shape[0] // 2 + cur_X_features = X_features[cur_X_idx] + cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids[:half_len]) + cur_input_embeds_2 = self.get_model().embed_tokens(cur_input_ids[half_len:]) + cur_input_embeds = torch.cat([cur_input_embeds_1, cur_X_features[0:0], cur_input_embeds_2], dim=0) + new_input_embeds.append(cur_input_embeds) + if labels is not None: + new_labels.append(labels[batch_idx]) + cur_X_idx += 1 + continue + X_token_indices = torch.where(torch.any(torch.stack([cur_input_ids == X_TOKEN_INDEX[key.upper()] for key in keys]), dim=0))[0] + # print(batch_idx, cur_input_ids.size(),X_token_indices) + cur_new_input_embeds = [] + if labels is not None: + cur_labels = labels[batch_idx] + cur_new_labels = [] + assert 
cur_labels.shape == cur_input_ids.shape + # print(4444444444) + while X_token_indices.numel() > 0: + cur_X_features = X_features[batch_idx][cur_X_idx] + X_token_start = X_token_indices[0] + if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_x_start_end', False): + cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:X_token_start-1]).detach()) + cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[X_token_start-1:X_token_start])) + cur_new_input_embeds.append(cur_X_features) + cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[X_token_start+1:X_token_start+2])) + if labels is not None: + cur_new_labels.append(cur_labels[:X_token_start]) + cur_new_labels.append(torch.full((cur_X_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype)) + cur_new_labels.append(cur_labels[X_token_start:X_token_start+1]) + cur_labels = cur_labels[X_token_start+2:] + else: + cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:X_token_start])) + cur_new_input_embeds.append(cur_X_features) + if labels is not None: + cur_new_labels.append(cur_labels[:X_token_start]) + cur_new_labels.append(torch.full((cur_X_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype)) + cur_labels = cur_labels[X_token_start+1:] + cur_X_idx += 1 + if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_x_start_end', False): + cur_input_ids = cur_input_ids[X_token_start+2:] + else: + cur_input_ids = cur_input_ids[X_token_start+1:] + X_token_indices = torch.where(torch.any(torch.stack([cur_input_ids == X_TOKEN_INDEX[key.upper()] for key in keys]), dim=0))[0] + # print(55555555555555555) + if cur_input_ids.numel() > 0: + if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_x_start_end', False): + cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids).detach()) + else: + cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids)) + if labels is not None: + cur_new_labels.append(cur_labels) + + for i in cur_new_input_embeds: + print(i.size()) + cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds] + cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0) + + + + new_input_embeds.append(cur_new_input_embeds) + if labels is not None: + cur_new_labels = torch.cat(cur_new_labels, dim=0) + new_labels.append(cur_new_labels) + + if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds): + max_len = max(x.shape[0] for x in new_input_embeds) + + new_input_embeds_align = [] + for cur_new_embed in new_input_embeds: + cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0) + new_input_embeds_align.append(cur_new_embed) + new_input_embeds = torch.stack(new_input_embeds_align, dim=0) + + print("New emb", new_input_embeds.shape) + # import pdb;pdb.set_trace() + + if labels is not None: + new_labels_align = [] + _new_labels = new_labels + for cur_new_label in new_labels: + cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0) + new_labels_align.append(cur_new_label) + new_labels = torch.stack(new_labels_align, dim=0) + + if attention_mask is not None: + new_attention_mask = [] + for cur_attention_mask, cur_new_labels, 
cur_new_labels_align in zip(attention_mask, _new_labels, new_labels): + new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device) + new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device) + cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0) + new_attention_mask.append(cur_new_attention_mask) + attention_mask = torch.stack(new_attention_mask, dim=0) + assert attention_mask.shape == new_labels.shape + else: + new_input_embeds = torch.stack(new_input_embeds, dim=0) + if labels is not None: + new_labels = torch.stack(new_labels, dim=0) + + if attention_mask is not None: + new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device) + attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1) + assert attention_mask.shape == new_input_embeds.shape[:2] + + return None, attention_mask, past_key_values, new_input_embeds, new_labels + + def initialize_X_tokenizer(self, model_args, tokenizer): + if model_args.mm_use_x_patch_token: + for x in model_args.X: + tokenizer.add_tokens([DEFAULT_X_PATCH_TOKEN[x.upper()]], special_tokens=True) + # tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True) + self.resize_token_embeddings(len(tokenizer)) + + if model_args.mm_use_x_start_end: + num_new_tokens = 0 + for x in model_args.X: + num_new_tokens += tokenizer.add_tokens([DEFAULT_X_START_TOKEN[x.upper()], DEFAULT_X_END_TOKEN[x.upper()]], special_tokens=True) + self.resize_token_embeddings(len(tokenizer)) + + if num_new_tokens > 0: + input_embeddings = self.get_input_embeddings().weight.data + output_embeddings = self.get_output_embeddings().weight.data + + input_embeddings_avg = input_embeddings[:-num_new_tokens].mean( + dim=0, keepdim=True) + output_embeddings_avg = output_embeddings[:-num_new_tokens].mean( + dim=0, keepdim=True) + + input_embeddings[-num_new_tokens:] = input_embeddings_avg + output_embeddings[-num_new_tokens:] = output_embeddings_avg + + if model_args.tune_mm_mlp_adapter: + for p in self.get_input_embeddings().parameters(): + p.requires_grad = True + for p in self.get_output_embeddings().parameters(): + p.requires_grad = False + + if model_args.pretrain_mm_mlp_adapter: + mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu') + embed_tokens_weight = mm_projector_weights['model.embed_tokens.weight'] + assert num_new_tokens == 2 + if input_embeddings.shape == embed_tokens_weight.shape: + input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:] + elif embed_tokens_weight.shape[0] == num_new_tokens: + input_embeddings[-num_new_tokens:] = embed_tokens_weight + else: + raise ValueError(f"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. 
Numer of new tokens: {num_new_tokens}.") + elif model_args.mm_use_x_patch_token: + if model_args.tune_mm_mlp_adapter: + for p in self.get_input_embeddings().parameters(): + p.requires_grad = False + for p in self.get_output_embeddings().parameters(): + p.requires_grad = False \ No newline at end of file diff --git a/llava/model/make_delta.py b/llava/model/make_delta.py new file mode 100644 index 0000000..4ae55d5 --- /dev/null +++ b/llava/model/make_delta.py @@ -0,0 +1,52 @@ +""" +Usage: +python3 -m llava.model.make_delta --base ~/model_weights/llama-7b --target ~/model_weights/llava-7b --delta ~/model_weights/llava-7b-delta --hub-repo-id liuhaotian/llava-7b-delta +""" +import argparse + +import torch +from tqdm import tqdm +from transformers import AutoTokenizer, AutoModelForCausalLM +from llava.model.utils import auto_upgrade + + +def make_delta(base_model_path, target_model_path, delta_path, hub_repo_id): + print("Loading base model") + base = AutoModelForCausalLM.from_pretrained( + base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True) + + print("Loading target model") + auto_upgrade(target_model_path) + target = AutoModelForCausalLM.from_pretrained(target_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True) + + print("Calculating delta") + for name, param in tqdm(target.state_dict().items(), desc="Calculating delta"): + if name not in base.state_dict(): + assert name in ['model.mm_projector.weight', 'model.mm_projector.bias'], f'{name} not in base model' + continue + if param.data.shape == base.state_dict()[name].shape: + param.data -= base.state_dict()[name] + else: + assert name in ['model.embed_tokens.weight', 'lm_head.weight'], f'{name} dimension mismatch: {param.data.shape} vs {base.state_dict()[name].shape}' + bparam = base.state_dict()[name] + param.data[:bparam.shape[0], :bparam.shape[1]] -= bparam + + print("Saving delta") + if hub_repo_id: + kwargs = {"push_to_hub": True, "repo_id": hub_repo_id} + else: + kwargs = {} + target.save_pretrained(delta_path, **kwargs) + target_tokenizer = AutoTokenizer.from_pretrained(target_model_path) + target_tokenizer.save_pretrained(delta_path, **kwargs) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--base-model-path", type=str, required=True) + parser.add_argument("--target-model-path", type=str, required=True) + parser.add_argument("--delta-path", type=str, required=True) + parser.add_argument("--hub-repo-id", type=str, default=None) + args = parser.parse_args() + + make_delta(args.base_model_path, args.target_model_path, args.delta_path, args.hub_repo_id) diff --git a/llava/model/multimodal_encoder/builder.py b/llava/model/multimodal_encoder/builder.py new file mode 100644 index 0000000..e44b7c7 --- /dev/null +++ b/llava/model/multimodal_encoder/builder.py @@ -0,0 +1,69 @@ +import os +from .clip_encoder import CLIPVisionTower +from .languagebind import LanguageBindImageTower, LanguageBindVideoTower +from .mae_encoder import MAEVisionTower +from transformers import CLIPModel + +def build_image_tower(image_tower_cfg, **kwargs): + image_tower = getattr(image_tower_cfg, 'mm_image_tower', getattr(image_tower_cfg, 'image_tower', None)) + # is_absolute_path_exists = os.path.exists(image_tower) + is_absolute_path_exists = False + if is_absolute_path_exists or image_tower.startswith("openai") or image_tower.startswith("laion"): + return CLIPVisionTower(image_tower, args=image_tower_cfg, **kwargs) + if image_tower.endswith('LanguageBind_Image'): + return 
LanguageBindImageTower(image_tower, args=image_tower_cfg, cache_dir='./cache_dir', **kwargs) + if 'mae' in image_tower: + print('maemaemaemaemaemaemaemae') + print('maemaemaemaemaemaemaemae') + print('maemaemaemaemaemaemaemae') + print('maemaemaemaemaemaemaemae') + print('maemaemaemaemaemaemaemae') + return MAEVisionTower(image_tower, args=image_tower_cfg, cache_dir='./cache_dir', **kwargs) + raise ValueError(f'Unknown image tower: {image_tower}') + +def build_video_tower(video_tower_cfg, **kwargs): + video_tower = getattr(video_tower_cfg, 'mm_video_tower', getattr(video_tower_cfg, 'video_tower', None)) + + if video_tower.endswith('LanguageBind_Video_merge'): + return LanguageBindVideoTower(video_tower, args=video_tower_cfg, cache_dir='./cache_dir', **kwargs) + raise ValueError(f'Unknown video tower: {video_tower}') + +def extractor(**kwargs): + video_tower = "./cache_dir/LanguageBind_Video_merge" + class VideoTowerConfig: + def __init__(self): + self.mm_video_tower = "./cache_dir/LanguageBind_Video_merge" + self.mm_vision_select_feature = "patch" + self.mm_vision_select_layer = -2 + self.model_type = "llava" + self.num_attention_heads = 32 + self.num_hidden_layers = 32 + self.num_key_value_heads = 32 + self.pad_token_id = 0 + self.pretraining_tp = 1 + self.rms_norm_eps = 1e-05 + self.vocab_size = 32000 + + video_tower_cfg = VideoTowerConfig() + return LanguageBindVideoTower(video_tower, args=video_tower_cfg, cache_dir='./cache_dir', **kwargs) + + +# import os +# from .clip_encoder import CLIPVisionTower +# from .languagebind import LanguageBindImageTower, LanguageBindVideoTower +# from transformers import CLIPModel + +# def build_image_tower(image_tower_cfg, **kwargs): +# image_tower = getattr(image_tower_cfg, 'mm_image_tower', getattr(image_tower_cfg, 'image_tower', None)) +# is_absolute_path_exists = os.path.exists(image_tower) +# if is_absolute_path_exists or image_tower.startswith("openai") or image_tower.startswith("laion"): +# return CLIPVisionTower(image_tower, args=image_tower_cfg, **kwargs) +# if image_tower.endswith('LanguageBind_Image'): +# return LanguageBindImageTower(image_tower, args=image_tower_cfg, cache_dir='./cache_dir', **kwargs) +# raise ValueError(f'Unknown image tower: {image_tower}') + +# def build_video_tower(video_tower_cfg, **kwargs): +# video_tower = getattr(video_tower_cfg, 'mm_video_tower', getattr(video_tower_cfg, 'video_tower', None)) +# if video_tower.endswith('LanguageBind_Video'): +# return LanguageBindVideoTower(video_tower, args=video_tower_cfg, cache_dir='./cache_dir', **kwargs) +# raise ValueError(f'Unknown video tower: {video_tower}') \ No newline at end of file diff --git a/llava/model/multimodal_encoder/clip_encoder.py b/llava/model/multimodal_encoder/clip_encoder.py new file mode 100644 index 0000000..dbb9015 --- /dev/null +++ b/llava/model/multimodal_encoder/clip_encoder.py @@ -0,0 +1,78 @@ +import torch +import torch.nn as nn + +from transformers import CLIPVisionModel, CLIPImageProcessor, CLIPVisionConfig + + +class CLIPVisionTower(nn.Module): + def __init__(self, vision_tower, args, delay_load=False): + super().__init__() + + self.is_loaded = False + + self.vision_tower_name = vision_tower + self.select_layer = args.mm_vision_select_layer + self.select_feature = getattr(args, 'mm_vision_select_feature', 'patch') + + if not delay_load: + self.load_model() + else: + self.cfg_only = CLIPVisionConfig.from_pretrained(self.vision_tower_name) + + def load_model(self): + self.image_processor = 
CLIPImageProcessor.from_pretrained(self.vision_tower_name) + self.vision_tower = CLIPVisionModel.from_pretrained(self.vision_tower_name) + self.vision_tower.requires_grad_(False) + + self.is_loaded = True + + def feature_select(self, image_forward_outs): + image_features = image_forward_outs.hidden_states[self.select_layer] + if self.select_feature == 'patch': + image_features = image_features[:, 1:] + elif self.select_feature == 'cls_patch': + image_features = image_features + else: + raise ValueError(f'Unexpected select feature: {self.select_feature}') + return image_features + + @torch.no_grad() + def forward(self, images): + if type(images) is list: + image_features = [] + for image in images: + image_forward_out = self.vision_tower(image.to(device=self.device, dtype=self.dtype).unsqueeze(0), output_hidden_states=True) + image_feature = self.feature_select(image_forward_out).to(image.dtype) + image_features.append(image_feature) + else: + image_forward_outs = self.vision_tower(images.to(device=self.device, dtype=self.dtype), output_hidden_states=True) + image_features = self.feature_select(image_forward_outs).to(images.dtype) + + return image_features + + @property + def dummy_feature(self): + return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype) + + @property + def dtype(self): + return self.vision_tower.dtype + + @property + def device(self): + return self.vision_tower.device + + @property + def config(self): + if self.is_loaded: + return self.vision_tower.config + else: + return self.cfg_only + + @property + def hidden_size(self): + return self.config.hidden_size + + @property + def num_patches(self): + return (self.config.image_size // self.config.patch_size) ** 2 diff --git a/llava/model/multimodal_encoder/mae_encoder.py b/llava/model/multimodal_encoder/mae_encoder.py new file mode 100644 index 0000000..377883d --- /dev/null +++ b/llava/model/multimodal_encoder/mae_encoder.py @@ -0,0 +1,80 @@ +import torch +import torch.nn as nn + +from transformers import ViTMAEForPreTraining, AutoConfig, AutoImageProcessor + + +class MAEVisionTower(nn.Module): + def __init__(self, vision_tower, args, cache_dir='./cache_dir', delay_load=False): + super().__init__() + + self.is_loaded = False + self.cache_dir = cache_dir + self.vision_tower_name = vision_tower + self.select_layer = args.mm_vision_select_layer + self.select_feature = getattr(args, 'mm_vision_select_feature', 'patch') + + if not delay_load: + self.load_model() + else: + self.cfg_only = AutoConfig.from_pretrained(self.vision_tower_name, cache_dir=self.cache_dir) + + def load_model(self): + self.image_processor = AutoImageProcessor.from_pretrained(self.vision_tower_name, cache_dir=self.cache_dir) + vision_tower = ViTMAEForPreTraining.from_pretrained(self.vision_tower_name, cache_dir=self.cache_dir) + self.vision_tower = vision_tower.vit + self.vision_tower.requires_grad_(False) + + self.is_loaded = True + + def feature_select(self, image_forward_outs): + image_features = image_forward_outs.hidden_states[self.select_layer] + if self.select_feature == 'patch': + image_features = image_features[:, 1:] + elif self.select_feature == 'cls_patch': + image_features = image_features + else: + raise ValueError(f'Unexpected select feature: {self.select_feature}') + # print(image_features.shape) + return image_features + + @torch.no_grad() + def forward(self, images): + if type(images) is list: + image_features = [] + for image in images: + image_forward_out = self.vision_tower(image.to(device=self.device, 
dtype=self.dtype).unsqueeze(0), output_hidden_states=True) + image_feature = self.feature_select(image_forward_out).to(image.dtype) + image_features.append(image_feature) + else: + image_forward_outs = self.vision_tower(images.to(device=self.device, dtype=self.dtype), output_hidden_states=True) + image_features = self.feature_select(image_forward_outs).to(images.dtype) + + return image_features + + @property + def dummy_feature(self): + return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype) + + @property + def dtype(self): + return self.vision_tower.dtype + + @property + def device(self): + return self.vision_tower.device + + @property + def config(self): + if self.is_loaded: + return self.vision_tower.config + else: + return self.cfg_only + + @property + def hidden_size(self): + return self.config.hidden_size + + @property + def num_patches(self): + return (self.config.image_size // self.config.patch_size) ** 2 diff --git a/llava/model/multimodal_projector/builder.py b/llava/model/multimodal_projector/builder.py new file mode 100644 index 0000000..8cc8e9d --- /dev/null +++ b/llava/model/multimodal_projector/builder.py @@ -0,0 +1,257 @@ +from typing import Optional + +import torch +import torch.nn as nn +import re + +from transformers import PretrainedConfig, Blip2PreTrainedModel, Blip2Config, Blip2QFormerModel + + +class IdentityMap(nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x, *args, **kwargs): + return x + + @property + def config(self): + return {"mm_projector_type": 'identity'} + + +class SimpleResBlock(nn.Module): + def __init__(self, channels): + super().__init__() + self.pre_norm = nn.LayerNorm(channels) + + self.proj = nn.Sequential( + nn.Linear(channels, channels), + nn.GELU(), + nn.Linear(channels, channels) + ) + def forward(self, x): + x = self.pre_norm(x) + return x + self.proj(x) + + +# def build_vision_projector(config, delay_load=False, **kwargs): +# projector_type = getattr(config, 'mm_projector_type', 'linear') +# +# if projector_type == 'linear': +# return nn.Linear(config.mm_hidden_size, config.hidden_size) +# +# mlp_gelu_match = re.match(r'^mlp(\d+)x_gelu$', projector_type) +# if mlp_gelu_match: +# mlp_depth = int(mlp_gelu_match.group(1)) +# modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)] +# for _ in range(1, mlp_depth): +# modules.append(nn.GELU()) +# modules.append(nn.Linear(config.hidden_size, config.hidden_size)) +# return nn.Sequential(*modules) +# +# if projector_type == 'identity': +# return IdentityMap() +# +# raise ValueError(f'Unknown projector type: {projector_type}') + + +class Blip2Model(Blip2PreTrainedModel): + def __init__(self, config: Blip2Config): + super().__init__(config) + + self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size)) + self.qformer = Blip2QFormerModel(config.qformer_config) + + # self.proj = nn.Linear(config.mm_hidden_size, config.hidden_size) + modules = [nn.Linear(config.mm_hidden_size, config.hidden_size), nn.GELU(), nn.Linear(config.hidden_size, config.hidden_size)] + self.proj = nn.Sequential(*modules) + + # Initialize weights and apply final processing + self.post_init() + + def forward( + self, + pixel_values: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + r""" + Returns: + vision_outputs (`BaseModelOutputWithPooling` or tuple of `torch.FloatTensor`): + The vision model outputs. 
If `return_dict=True`, the output is a [`BaseModelOutputWithPooling`] that + contains the image features, the pooled image features and the hidden states if + `output_hidden_states=True`. + Examples: + ```python + >>> import torch + >>> from PIL import Image + >>> import requests + >>> from transformers import Blip2Processor, Blip2Model + + >>> device = "cuda" if torch.cuda.is_available() else "cpu" + + >>> processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b") + >>> model = Blip2Model.from_pretrained("Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16) + >>> model.to(device) # doctest: +IGNORE_RESULT + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + >>> inputs = processor(images=image, return_tensors="pt").to(device, torch.float16) + >>> qformer_outputs = model.get_qformer_features(**inputs) + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # vision_outputs = self.vision_model( + # pixel_values=pixel_values, + # output_attentions=output_attentions, + # output_hidden_states=output_hidden_states, + # return_dict=return_dict, + # ) + # + # image_embeds = vision_outputs[0] + # image_embeds = self.proj(pixel_values) + image_embeds = pixel_values + + + # print('pixel_values to proj', pixel_values.shape, image_embeds.shape) + # step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention + image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) + + query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) + query_outputs = self.qformer( + query_embeds=query_tokens, + encoder_hidden_states=image_embeds, + encoder_attention_mask=image_attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ).last_hidden_state + # print('qformer out', query_outputs.shape) + query_outputs = self.proj(query_outputs) + return query_outputs + + +def qformer_config_template(config, projector_type): + pattern = r"qformer(\d+)_(\d+)" + + match = re.search(pattern, projector_type) + num_hidden_layers = int(match.group(1)) + num_query_tokens = int(match.group(2)) + + qformer_config = type('Blip2Config', (PretrainedConfig,), { + "initializer_factor": 1.0, + "initializer_range": 0.02, + "model_type": "blip-2", + "num_query_tokens": num_query_tokens, + "hidden_size": config.hidden_size, + "mm_hidden_size": config.mm_hidden_size, + "qformer_config": type('qformer_config', (PretrainedConfig,), { + "_name_or_path": "", + "add_cross_attention": False, + "architectures": None, + "attention_probs_dropout_prob": 0.0, + "bad_words_ids": None, + "begin_suppress_tokens": None, + "bos_token_id": None, + "chunk_size_feed_forward": 0, + "classifier_dropout": None, + "cross_attention_frequency": 1, + "cross_attention_hidden_size": None, + "decoder_start_token_id": None, + "diversity_penalty": 0.0, + "do_sample": False, + "early_stopping": False, + "encoder_hidden_size": config.mm_hidden_size, + "encoder_no_repeat_ngram_size": 0, + "eos_token_id": None, + "exponential_decay_length_penalty": None, + "finetuning_task": None, + "forced_bos_token_id": None, + "forced_eos_token_id": None, + 
"hidden_act": "gelu", + "hidden_dropout_prob": 0.0, + "hidden_size": config.mm_hidden_size, + "id2label": { + "0": "LABEL_0", + "1": "LABEL_1" + }, + "initializer_range": 0.02, + "intermediate_size": config.mm_hidden_size * 4, + "is_decoder": False, + "is_encoder_decoder": False, + "label2id": { + "LABEL_0": 0, + "LABEL_1": 1 + }, + "layer_norm_eps": 1e-12, + "length_penalty": 1.0, + "max_length": 20, + "max_position_embeddings": 512, + "min_length": 0, + "model_type": "blip_2_qformer", + "no_repeat_ngram_size": 0, + "num_attention_heads": 32, + "num_beam_groups": 1, + "num_beams": 1, + "num_hidden_layers": num_hidden_layers, + "num_return_sequences": 1, + "output_attentions": False, + "output_hidden_states": False, + "output_scores": False, + "pad_token_id": 0, + "position_embedding_type": "absolute", + "prefix": None, + "problem_type": None, + "pruned_heads": {}, + "remove_invalid_values": False, + "repetition_penalty": 1.0, + "return_dict": True, + "return_dict_in_generate": False, + "sep_token_id": None, + "suppress_tokens": None, + "task_specific_params": None, + "temperature": 1.0, + "tf_legacy_loss": False, + "tie_encoder_decoder": False, + "tie_word_embeddings": True, + "tokenizer_class": None, + "top_k": 50, + "top_p": 1.0, + "torch_dtype": None, + "torchscript": False, + "transformers_version": "4.27.0.dev0", + "typical_p": 1.0, + "use_bfloat16": False, + "vocab_size": 30522 + })() + })() + return qformer_config + +def build_vision_projector(config, delay_load=False, **kwargs): + projector_type = getattr(config, 'mm_projector_type', 'linear') + + if projector_type == 'linear': + return nn.Linear(config.mm_hidden_size, config.hidden_size) + + elif projector_type == 'identity': + return IdentityMap() + + elif projector_type.startswith('qformer'): # qformer2_64 + qformer_config = qformer_config_template(config, projector_type) + return Blip2Model(qformer_config) + else: + mlp_gelu_match = re.match(r'^mlp(\d+)x_gelu$', projector_type) + if mlp_gelu_match: + mlp_depth = int(mlp_gelu_match.group(1)) + modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)] + for _ in range(1, mlp_depth): + modules.append(nn.GELU()) + modules.append(nn.Linear(config.hidden_size, config.hidden_size)) + return nn.Sequential(*modules) + + raise ValueError(f'Unknown projector type: {projector_type}') \ No newline at end of file diff --git a/llava/model/utils.py b/llava/model/utils.py new file mode 100644 index 0000000..2563f89 --- /dev/null +++ b/llava/model/utils.py @@ -0,0 +1,20 @@ +from transformers import AutoConfig + + +def auto_upgrade(config): + cfg = AutoConfig.from_pretrained(config) + if 'llava' in config and 'llava' not in cfg.model_type: + assert cfg.model_type == 'llama' + print("You are using newer LLaVA code base, while the checkpoint of v0 is from older code base.") + print("You must upgrade the checkpoint to the new code base (this can be done automatically).") + confirm = input("Please confirm that you want to upgrade the checkpoint. 
[Y/N]") + if confirm.lower() in ["y", "yes"]: + print("Upgrading checkpoint...") + assert len(cfg.architectures) == 1 + setattr(cfg.__class__, "model_type", "llava") + cfg.architectures[0] = 'LlavaLlamaForCausalLM' + cfg.save_pretrained(config) + print("Checkpoint upgraded.") + else: + print("Checkpoint upgrade aborted.") + exit(1) diff --git a/llava/serve/__init__.py b/llava/serve/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/llava/serve/cli.py b/llava/serve/cli.py new file mode 100644 index 0000000..4bd366d --- /dev/null +++ b/llava/serve/cli.py @@ -0,0 +1,145 @@ +import argparse +import torch + +from llava.constants import X_TOKEN_INDEX, DEFAULT_X_TOKEN, DEFAULT_X_START_TOKEN, DEFAULT_X_END_TOKEN +from llava.conversation import conv_templates, SeparatorStyle +from llava.model.builder import load_pretrained_model +from llava.utils import disable_torch_init +from llava.mm_utils import process_images, tokenizer_X_token, get_model_name_from_path, KeywordsStoppingCriteria + +from PIL import Image + +import requests +from PIL import Image +from io import BytesIO +from transformers import TextStreamer + + +def load_image(image_file): + if image_file.startswith('http://') or image_file.startswith('https://'): + response = requests.get(image_file) + image = Image.open(BytesIO(response.content)).convert('RGB') + else: + image = Image.open(image_file).convert('RGB') + return image + + +def main(args): + # Model + disable_torch_init() + assert not (args.image_file and args.video_file) + model_name = get_model_name_from_path(args.model_path) + tokenizer, model, processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, + args.load_8bit, args.load_4bit, device=args.device) + # print(model, tokenizer, processor) + image_processor = processor['image'] + video_processor = processor['video'] + if 'llama-2' in model_name.lower(): + conv_mode = "llava_llama_2" + elif "v1" in model_name.lower(): + conv_mode = "llava_v1" + elif "mpt" in model_name.lower(): + conv_mode = "mpt" + else: + conv_mode = "llava_v0" + + if args.conv_mode is not None and conv_mode != args.conv_mode: + print('[WARNING] the auto inferred conversation mode is {}, while `--conv-mode` is {}, using {}'.format(conv_mode, args.conv_mode, args.conv_mode)) + else: + args.conv_mode = conv_mode + + conv = conv_templates[args.conv_mode].copy() + if "mpt" in model_name.lower(): + roles = ('user', 'assistant') + else: + roles = conv.roles + image = args.image_file + video = args.video_file + # print(image, video) + if args.image_file: + image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'] + if type(image_tensor) is list: + tensor = [image.to(model.device, dtype=torch.float16) for image in image_tensor] + else: + tensor = image_tensor.to(model.device, dtype=torch.float16) + key = ['image'] + # print(tensor.shape) + elif args.video_file: + video_tensor = video_processor(video, return_tensors='pt')['pixel_values'] + if type(video_tensor) is list: + tensor = [video.to(model.device, dtype=torch.float16) for video in video_tensor] + else: + tensor = video_tensor.to(model.device, dtype=torch.float16) + key = ['video'] + # print(tensor.shape) + while True: + try: + inp = input(f"{roles[0]}: ") + except EOFError: + inp = "" + if not inp: + print("exit...") + break + + print(f"{roles[1]}: ", end="") + + if image is not None: + # first message + inp = DEFAULT_X_TOKEN['IMAGE'] + '\n' + inp + conv.append_message(conv.roles[0], inp) + image = None + elif video is not None: 
+ # first message + inp = DEFAULT_X_TOKEN['VIDEO'] + '\n' + inp + conv.append_message(conv.roles[0], inp) + video = None + else: + # later messages + conv.append_message(conv.roles[0], inp) + conv.append_message(conv.roles[1], None) + prompt = conv.get_prompt() + if args.image_file: + input_ids = tokenizer_X_token(prompt, tokenizer, X_TOKEN_INDEX['IMAGE'], return_tensors='pt').unsqueeze(0).cuda() + elif args.video_file: + input_ids = tokenizer_X_token(prompt, tokenizer, X_TOKEN_INDEX['VIDEO'], return_tensors='pt').unsqueeze(0).cuda() + + # print(input_ids.shape) + stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2 + keywords = [stop_str] + stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids) + streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True) + + with torch.inference_mode(): + output_ids = model.generate( + input_ids, + images=[tensor, key], + do_sample=True, + temperature=args.temperature, + max_new_tokens=args.max_new_tokens, + streamer=streamer, + use_cache=True, + stopping_criteria=[stopping_criteria]) + + outputs = tokenizer.decode(output_ids[0, input_ids.shape[1]:]).strip() + conv.messages[-1][-1] = outputs + + if args.debug: + print("\n", {"prompt": prompt, "outputs": outputs}, "\n") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--model-path", type=str, default="facebook/opt-350m") + parser.add_argument("--model-base", type=str, default=None) + parser.add_argument("--image-file", type=str, default=None) + parser.add_argument("--video-file", type=str) + parser.add_argument("--device", type=str, default="cuda") + parser.add_argument("--conv-mode", type=str, default=None) + parser.add_argument("--temperature", type=float, default=0.2) + parser.add_argument("--max-new-tokens", type=int, default=512) + parser.add_argument("--load-8bit", action="store_true") + parser.add_argument("--load-4bit", action="store_true") + parser.add_argument("--debug", action="store_true") + parser.add_argument("--image-aspect-ratio", type=str, default='pad') + args = parser.parse_args() + main(args) diff --git a/llava/serve/controller.py b/llava/serve/controller.py new file mode 100644 index 0000000..b61fca6 --- /dev/null +++ b/llava/serve/controller.py @@ -0,0 +1,298 @@ +""" +A controller manages distributed workers. +It sends worker addresses to clients. 
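+Workers register themselves here and send periodic heart beats; requests are
+dispatched to a worker either by speed-weighted lottery or by shortest queue,
+as selected with --dispatch-method.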
+""" +import argparse +import asyncio +import dataclasses +from enum import Enum, auto +import json +import logging +import time +from typing import List, Union +import threading + +from fastapi import FastAPI, Request +from fastapi.responses import StreamingResponse +import numpy as np +import requests +import uvicorn + +from llava.constants import CONTROLLER_HEART_BEAT_EXPIRATION +from llava.utils import build_logger, server_error_msg + + +logger = build_logger("controller", "controller.log") + + +class DispatchMethod(Enum): + LOTTERY = auto() + SHORTEST_QUEUE = auto() + + @classmethod + def from_str(cls, name): + if name == "lottery": + return cls.LOTTERY + elif name == "shortest_queue": + return cls.SHORTEST_QUEUE + else: + raise ValueError(f"Invalid dispatch method") + + +@dataclasses.dataclass +class WorkerInfo: + model_names: List[str] + speed: int + queue_length: int + check_heart_beat: bool + last_heart_beat: str + + +def heart_beat_controller(controller): + while True: + time.sleep(CONTROLLER_HEART_BEAT_EXPIRATION) + controller.remove_stable_workers_by_expiration() + + +class Controller: + def __init__(self, dispatch_method: str): + # Dict[str -> WorkerInfo] + self.worker_info = {} + self.dispatch_method = DispatchMethod.from_str(dispatch_method) + + self.heart_beat_thread = threading.Thread( + target=heart_beat_controller, args=(self,)) + self.heart_beat_thread.start() + + logger.info("Init controller") + + def register_worker(self, worker_name: str, check_heart_beat: bool, + worker_status: dict): + if worker_name not in self.worker_info: + logger.info(f"Register a new worker: {worker_name}") + else: + logger.info(f"Register an existing worker: {worker_name}") + + if not worker_status: + worker_status = self.get_worker_status(worker_name) + if not worker_status: + return False + + self.worker_info[worker_name] = WorkerInfo( + worker_status["model_names"], worker_status["speed"], worker_status["queue_length"], + check_heart_beat, time.time()) + + logger.info(f"Register done: {worker_name}, {worker_status}") + return True + + def get_worker_status(self, worker_name: str): + try: + r = requests.post(worker_name + "/worker_get_status", timeout=5) + except requests.exceptions.RequestException as e: + logger.error(f"Get status fails: {worker_name}, {e}") + return None + + if r.status_code != 200: + logger.error(f"Get status fails: {worker_name}, {r}") + return None + + return r.json() + + def remove_worker(self, worker_name: str): + del self.worker_info[worker_name] + + def refresh_all_workers(self): + old_info = dict(self.worker_info) + self.worker_info = {} + + for w_name, w_info in old_info.items(): + if not self.register_worker(w_name, w_info.check_heart_beat, None): + logger.info(f"Remove stale worker: {w_name}") + + def list_models(self): + model_names = set() + + for w_name, w_info in self.worker_info.items(): + model_names.update(w_info.model_names) + + return list(model_names) + + def get_worker_address(self, model_name: str): + if self.dispatch_method == DispatchMethod.LOTTERY: + worker_names = [] + worker_speeds = [] + for w_name, w_info in self.worker_info.items(): + if model_name in w_info.model_names: + worker_names.append(w_name) + worker_speeds.append(w_info.speed) + worker_speeds = np.array(worker_speeds, dtype=np.float32) + norm = np.sum(worker_speeds) + if norm < 1e-4: + return "" + worker_speeds = worker_speeds / norm + if True: # Directly return address + pt = np.random.choice(np.arange(len(worker_names)), + p=worker_speeds) + worker_name = worker_names[pt] + 
return worker_name + + # Check status before returning + while True: + pt = np.random.choice(np.arange(len(worker_names)), + p=worker_speeds) + worker_name = worker_names[pt] + + if self.get_worker_status(worker_name): + break + else: + self.remove_worker(worker_name) + worker_speeds[pt] = 0 + norm = np.sum(worker_speeds) + if norm < 1e-4: + return "" + worker_speeds = worker_speeds / norm + continue + return worker_name + elif self.dispatch_method == DispatchMethod.SHORTEST_QUEUE: + worker_names = [] + worker_qlen = [] + for w_name, w_info in self.worker_info.items(): + if model_name in w_info.model_names: + worker_names.append(w_name) + worker_qlen.append(w_info.queue_length / w_info.speed) + if len(worker_names) == 0: + return "" + min_index = np.argmin(worker_qlen) + w_name = worker_names[min_index] + self.worker_info[w_name].queue_length += 1 + logger.info(f"names: {worker_names}, queue_lens: {worker_qlen}, ret: {w_name}") + return w_name + else: + raise ValueError(f"Invalid dispatch method: {self.dispatch_method}") + + def receive_heart_beat(self, worker_name: str, queue_length: int): + if worker_name not in self.worker_info: + logger.info(f"Receive unknown heart beat. {worker_name}") + return False + + self.worker_info[worker_name].queue_length = queue_length + self.worker_info[worker_name].last_heart_beat = time.time() + logger.info(f"Receive heart beat. {worker_name}") + return True + + def remove_stable_workers_by_expiration(self): + expire = time.time() - CONTROLLER_HEART_BEAT_EXPIRATION + to_delete = [] + for worker_name, w_info in self.worker_info.items(): + if w_info.check_heart_beat and w_info.last_heart_beat < expire: + to_delete.append(worker_name) + + for worker_name in to_delete: + self.remove_worker(worker_name) + + def worker_api_generate_stream(self, params): + worker_addr = self.get_worker_address(params["model"]) + if not worker_addr: + logger.info(f"no worker: {params['model']}") + ret = { + "text": server_error_msg, + "error_code": 2, + } + yield json.dumps(ret).encode() + b"\0" + + try: + response = requests.post(worker_addr + "/worker_generate_stream", + json=params, stream=True, timeout=5) + for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"): + if chunk: + yield chunk + b"\0" + except requests.exceptions.RequestException as e: + logger.info(f"worker timeout: {worker_addr}") + ret = { + "text": server_error_msg, + "error_code": 3, + } + yield json.dumps(ret).encode() + b"\0" + + + # Let the controller act as a worker to achieve hierarchical + # management. This can be used to connect isolated sub networks. 
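+    # The aggregate status below unions the model names and sums speed and queue
+    # length over all registered workers, so a parent controller sees this
+    # controller as a single worker.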
+ def worker_api_get_status(self): + model_names = set() + speed = 0 + queue_length = 0 + + for w_name in self.worker_info: + worker_status = self.get_worker_status(w_name) + if worker_status is not None: + model_names.update(worker_status["model_names"]) + speed += worker_status["speed"] + queue_length += worker_status["queue_length"] + + return { + "model_names": list(model_names), + "speed": speed, + "queue_length": queue_length, + } + + +app = FastAPI() + + +@app.post("/register_worker") +async def register_worker(request: Request): + data = await request.json() + controller.register_worker( + data["worker_name"], data["check_heart_beat"], + data.get("worker_status", None)) + + +@app.post("/refresh_all_workers") +async def refresh_all_workers(): + models = controller.refresh_all_workers() + + +@app.post("/list_models") +async def list_models(): + models = controller.list_models() + return {"models": models} + + +@app.post("/get_worker_address") +async def get_worker_address(request: Request): + data = await request.json() + addr = controller.get_worker_address(data["model"]) + return {"address": addr} + + +@app.post("/receive_heart_beat") +async def receive_heart_beat(request: Request): + data = await request.json() + exist = controller.receive_heart_beat( + data["worker_name"], data["queue_length"]) + return {"exist": exist} + + +@app.post("/worker_generate_stream") +async def worker_api_generate_stream(request: Request): + params = await request.json() + generator = controller.worker_api_generate_stream(params) + return StreamingResponse(generator) + + +@app.post("/worker_get_status") +async def worker_api_get_status(request: Request): + return controller.worker_api_get_status() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--host", type=str, default="localhost") + parser.add_argument("--port", type=int, default=21001) + parser.add_argument("--dispatch-method", type=str, choices=[ + "lottery", "shortest_queue"], default="shortest_queue") + args = parser.parse_args() + logger.info(f"args: {args}") + + controller = Controller(args.dispatch_method) + uvicorn.run(app, host=args.host, port=args.port, log_level="info") diff --git a/llava/serve/eval_custom.py b/llava/serve/eval_custom.py new file mode 100644 index 0000000..6a6d43d --- /dev/null +++ b/llava/serve/eval_custom.py @@ -0,0 +1,159 @@ +import argparse +import torch + +from llava.constants import X_TOKEN_INDEX, DEFAULT_X_TOKEN, DEFAULT_X_START_TOKEN, DEFAULT_X_END_TOKEN +from llava.conversation import conv_templates, SeparatorStyle +from llava.model.builder import load_pretrained_model +from llava.utils import disable_torch_init +from llava.mm_utils import process_images, tokenizer_X_token, get_model_name_from_path, KeywordsStoppingCriteria +import requests +from PIL import Image +from io import BytesIO +from transformers import TextStreamer + +import os +import json +from tqdm import tqdm + + +def load_image(image_file): + if image_file.startswith('http://') or image_file.startswith('https://'): + response = requests.get(image_file) + image = Image.open(BytesIO(response.content)).convert('RGB') + else: + image = Image.open(image_file).convert('RGB') + return image + + +def main(args): + # Questions: + # q1 = "What is the action of ego car?" + # q2 = "Why does the ego car doing this?" 
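+    # Each BDD-X test clip is evaluated as a two-turn dialogue: the first question
+    # asks for the ego vehicle's action and the second for its justification, and
+    # the two answer streams are written to separate caption files under --output.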
+ # json_file = "./video_process/conv_new_icl/conversation_bddx_eval.json" + json_file = args.input + # out_json_paths = [f"./video_process/conv_new_base_vicuna_iclzs/BDDX_Test_pred_{cap}.json" for cap in ['action','justification']] + os.makedirs(args.output, exist_ok=True) + out_json_paths = [f"{args.output}/BDDX_Test_pred_{cap}.json" for cap in ['action','justification']] + + # Model + disable_torch_init() + model_name = get_model_name_from_path(args.model_path) + tokenizer, model, processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, + args.load_8bit, args.load_4bit, device=args.device) + # print(model, tokenizer, processor) + # image_processor = processor['image'] + video_processor = processor['video'] + + + conv_mode = "driving" + + if args.conv_mode is not None and conv_mode != args.conv_mode: + print('[WARNING] the auto inferred conversation mode is {}, while `--conv-mode` is {}, using {}'.format(conv_mode, args.conv_mode, args.conv_mode)) + else: + args.conv_mode = conv_mode + + conv = conv_templates[args.conv_mode].copy() + + + # gt + with open(json_file, 'r') as file: + data = json.load(file) + + # Pred + out_jsons = [[],[]] + + for item in tqdm(data): + q1, q2 = item["conversations"][0]["value"], item["conversations"][2]["value"] + conv.messages.clear() + if "mpt" in model_name.lower(): + roles = ('user', 'assistant') + else: + roles = conv.roles + + video, vid = item["video"], item['id'] + video = os.path.join("./video_process",video) + + video_tensor = video_processor(video, return_tensors='pt')['pixel_values'] + if type(video_tensor) is list: + tensor = [video.to(model.device, dtype=torch.float16) for video in video_tensor] + else: + tensor = video_tensor.to(model.device, dtype=torch.float16) + key = ['video'] + + + inst_answers = [] + for qid, question in enumerate([q1,q2]): + # print(question) + inp = question + + if video is not None: + # First Message + # inp = DEFAULT_X_TOKEN['VIDEO'] + '\n' + inp + inp = inp + conv.append_message(conv.roles[0], inp) + video = None + else: + # later messages + conv.append_message(conv.roles[0], inp) + + conv.append_message(conv.roles[1], None) + prompt = conv.get_prompt() + input_ids = tokenizer_X_token(prompt, tokenizer, X_TOKEN_INDEX['VIDEO'], return_tensors='pt').unsqueeze(0).cuda() + + # print(input_ids.shape) + # print(prompt) + + stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2 + keywords = [stop_str] + stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids) + streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True) + + + with torch.inference_mode(): + output_ids = model.generate( + input_ids, + images=[tensor, key], + do_sample=True, + temperature=args.temperature, + max_new_tokens=args.max_new_tokens, + streamer=streamer, + use_cache=True, + stopping_criteria=[stopping_criteria]) + + outputs = tokenizer.decode(output_ids[0, input_ids.shape[1]:]).strip() + conv.messages[-1][-1] = outputs + + if args.debug: + print("\n", {"prompt": prompt, "outputs": outputs}, "\n") + + + inst_pred = { + "image_id":vid, + "caption":outputs.replace("","") + } + + out_jsons[qid].append(inst_pred) + # break + + # Save separate json for action and justification + for i in range(2): + with open(out_json_paths[i],"w") as of: + json.dump(out_jsons[i], of, indent=4) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--model-path", type=str, default="facebook/opt-350m") + parser.add_argument("--model-base", 
type=str, default=None) + parser.add_argument("--device", type=str, default="cuda") + parser.add_argument("--conv-mode", type=str, default=None) + parser.add_argument("--temperature", type=float, default=0.2) + parser.add_argument("--max-new-tokens", type=int, default=512) + parser.add_argument("--load-8bit", action="store_true") + parser.add_argument("--load-4bit", action="store_true") + parser.add_argument("--debug", action="store_true") + parser.add_argument("--image-aspect-ratio", type=str, default='pad') + parser.add_argument("--input", type=str, default=None) + parser.add_argument("--output", type=str, default=None) + args = parser.parse_args() + main(args) diff --git a/llava/serve/eval_custom_chunck.py b/llava/serve/eval_custom_chunck.py new file mode 100644 index 0000000..b2d5329 --- /dev/null +++ b/llava/serve/eval_custom_chunck.py @@ -0,0 +1,184 @@ +import argparse +import torch + +from llava.constants import X_TOKEN_INDEX, DEFAULT_X_TOKEN, DEFAULT_X_START_TOKEN, DEFAULT_X_END_TOKEN +from llava.conversation import conv_templates, SeparatorStyle +from llava.model.builder import load_pretrained_model +from llava.utils import disable_torch_init +from llava.mm_utils import process_images, tokenizer_X_token, get_model_name_from_path, KeywordsStoppingCriteria +import requests +from PIL import Image +from io import BytesIO +from transformers import TextStreamer + +import os +import json +from tqdm import tqdm + +# video_root = "/zhaobai46g/Project/Video-LLaVA/video_process/" +# retr_strategy = 'visual' +# train_match_file, test_match_file = [f"./retrieval/BDDX_RAG_{retr_strategy}_vpmatch_{split}.json" for split in ['train','test']] +# with open(train_match_file, "r") as fm: +# train_match = json.load(fm) +# with open(test_match_file, "r") as ft: +# test_match = json.load(ft) +# PATH_RETRIEVAL_MATCH = {**train_match, **test_match} + +def load_image(image_file): + if image_file.startswith('http://') or image_file.startswith('https://'): + response = requests.get(image_file) + image = Image.open(BytesIO(response.content)).convert('RGB') + else: + image = Image.open(image_file).convert('RGB') + return image + + +def split_list_into_subsets(real_dst_train, cur_worker, num_chunks=4): + # num_chunks = 8 + chunk_size = len(real_dst_train) // num_chunks + chunk_index = cur_worker + if chunk_index == num_chunks - 1: + subset_indices = range(chunk_index * chunk_size, len(real_dst_train)) + else: + subset_indices = range(chunk_index * chunk_size, (chunk_index + 1) * chunk_size) + subset = [real_dst_train[i] for i in subset_indices] + return subset + + +def main(args): + # Questions: + + json_file = args.input + os.makedirs(args.output, exist_ok=True) + out_json_paths = [f"{args.output}/BDDX_Test_pred_{cap}_{args.cur_worker}.json" for cap in ['action','justification','control_signal']] + + # Model + disable_torch_init() + model_name = get_model_name_from_path(args.model_path) + tokenizer, model, processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, + args.load_8bit, args.load_4bit, device=args.device) + # print(model, tokenizer, processor) + # image_processor = processor['image'] + video_processor = processor['video'] + + + conv_mode = "driving" + + if args.conv_mode is not None and conv_mode != args.conv_mode: + print('[WARNING] the auto inferred conversation mode is {}, while `--conv-mode` is {}, using {}'.format(conv_mode, args.conv_mode, args.conv_mode)) + else: + args.conv_mode = conv_mode + + conv = conv_templates[args.conv_mode].copy() + + + # gt + with 
open(json_file, 'r') as file: + data = json.load(file) + + # Split data + sub_data = split_list_into_subsets(data, cur_worker=args.cur_worker, num_chunks=args.total_worker) + data = sub_data + + # Pred + out_jsons = [[],[],[]] + + for item in tqdm(data): + q1, q2, q3 = item["conversations"][0]["value"], item["conversations"][2]["value"], item["conversations"][4]["value"] + conv.messages.clear() + if "mpt" in model_name.lower(): + roles = ('user', 'assistant') + else: + roles = conv.roles + + vps, vid = item["video"], item['id'] + + video_paths = [os.path.join("./video_process",vp) for vp in vps] + + + video_tensor = [video_processor(video_path, return_tensors='pt')['pixel_values'] for video_path in video_paths] + if type(video_tensor) is list: + tensor = [[video.to(model.device, dtype=torch.float16) for video in video_tensor]] + else: + tensor = video_tensor.to(model.device, dtype=torch.float16) + key = ['video'] + + + inst_answers = [] + for qid, question in enumerate([q1,q2,q3]): + # print(question) + inp = question + + if vps is not None: + # First Message + # inp = DEFAULT_X_TOKEN['VIDEO'] + '\n' + inp + inp = inp + conv.append_message(conv.roles[0], inp) + video = None + else: + # later messages + conv.append_message(conv.roles[0], inp) + + conv.append_message(conv.roles[1], None) + prompt = conv.get_prompt() + input_ids = tokenizer_X_token(prompt, tokenizer, X_TOKEN_INDEX['VIDEO'], return_tensors='pt').unsqueeze(0).cuda() + + stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2 + keywords = [stop_str] + stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids) + streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True) + + + print(len(tensor), key) + # import pdb;pdb.set_trace() + with torch.inference_mode(): + output_ids = model.generate( + input_ids, + images=[tensor, key], + do_sample=True, + temperature=args.temperature, + max_new_tokens=args.max_new_tokens, + streamer=streamer, + use_cache=True, + stopping_criteria=[stopping_criteria]) + + outputs = tokenizer.decode(output_ids[0, input_ids.shape[1]:]).strip() + conv.messages[-1][-1] = outputs + + if args.debug: + print("\n", {"prompt": prompt, "outputs": outputs}, "\n") + + + inst_pred = { + "image_id":vid, + "caption":outputs.replace("","") + } + + out_jsons[qid].append(inst_pred) + # import pdb; pdb.set_trace() + # break + + # Save separate json for action and justification + for i in range(3): + with open(out_json_paths[i],"w") as of: + json.dump(out_jsons[i], of, indent=4) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--model-path", type=str, default="facebook/opt-350m") + parser.add_argument("--model-base", type=str, default=None) + parser.add_argument("--device", type=str, default="cuda") + parser.add_argument("--conv-mode", type=str, default=None) + parser.add_argument("--temperature", type=float, default=0.2) + parser.add_argument("--max-new-tokens", type=int, default=512) + parser.add_argument("--load-8bit", action="store_true") + parser.add_argument("--load-4bit", action="store_true") + parser.add_argument("--debug", action="store_true") + parser.add_argument("--image-aspect-ratio", type=str, default='pad') + parser.add_argument("--input", type=str, default=None) + parser.add_argument("--output", type=str, default=None) + parser.add_argument("--total_worker", type=int, default=4) + parser.add_argument("--cur_worker", type=int, default=0) + args = parser.parse_args() + main(args) diff --git 
a/llava/serve/eval_custom_predsig.py b/llava/serve/eval_custom_predsig.py new file mode 100644 index 0000000..cf4feb9 --- /dev/null +++ b/llava/serve/eval_custom_predsig.py @@ -0,0 +1,166 @@ +import argparse +import torch + +from llava.constants import X_TOKEN_INDEX, DEFAULT_X_TOKEN, DEFAULT_X_START_TOKEN, DEFAULT_X_END_TOKEN +from llava.conversation import conv_templates, SeparatorStyle +from llava.model.builder import load_pretrained_model +from llava.utils import disable_torch_init +from llava.mm_utils import process_images, tokenizer_X_token, get_model_name_from_path, KeywordsStoppingCriteria +import requests +from PIL import Image +from io import BytesIO +from transformers import TextStreamer + +import os +import json +from tqdm import tqdm + +# video_root = "/zhaobai46g/Project/Video-LLaVA/video_process/" +# retr_strategy = 'visual' +# train_match_file, test_match_file = [f"./retrieval/BDDX_RAG_{retr_strategy}_vpmatch_{split}.json" for split in ['train','test']] +# with open(train_match_file, "r") as fm: +# train_match = json.load(fm) +# with open(test_match_file, "r") as ft: +# test_match = json.load(ft) +# PATH_RETRIEVAL_MATCH = {**train_match, **test_match} + +def load_image(image_file): + if image_file.startswith('http://') or image_file.startswith('https://'): + response = requests.get(image_file) + image = Image.open(BytesIO(response.content)).convert('RGB') + else: + image = Image.open(image_file).convert('RGB') + return image + + +def main(args): + # Questions: + + json_file = args.input + os.makedirs(args.output, exist_ok=True) + out_json_paths = [f"{args.output}/BDDX_Test_pred_{cap}.json" for cap in ['action','justification','control_signal']] + + # Model + disable_torch_init() + model_name = get_model_name_from_path(args.model_path) + tokenizer, model, processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, + args.load_8bit, args.load_4bit, device=args.device) + # print(model, tokenizer, processor) + # image_processor = processor['image'] + video_processor = processor['video'] + + + conv_mode = "driving" + + if args.conv_mode is not None and conv_mode != args.conv_mode: + print('[WARNING] the auto inferred conversation mode is {}, while `--conv-mode` is {}, using {}'.format(conv_mode, args.conv_mode, args.conv_mode)) + else: + args.conv_mode = conv_mode + + conv = conv_templates[args.conv_mode].copy() + + + # gt + with open(json_file, 'r') as file: + data = json.load(file) + + # Pred + out_jsons = [[],[],[]] + + for item in tqdm(data): + q1, q2, q3 = item["conversations"][0]["value"], item["conversations"][2]["value"], item["conversations"][4]["value"] + conv.messages.clear() + if "mpt" in model_name.lower(): + roles = ('user', 'assistant') + else: + roles = conv.roles + + vps, vid = item["video"], item['id'] + + video_paths = [os.path.join("./video_process",vp) for vp in vps] + + + video_tensor = [video_processor(video_path, return_tensors='pt')['pixel_values'] for video_path in video_paths] + if type(video_tensor) is list: + tensor = [[video.to(model.device, dtype=torch.float16) for video in video_tensor]] + else: + tensor = video_tensor.to(model.device, dtype=torch.float16) + key = ['video'] + + + inst_answers = [] + for qid, question in enumerate([q1,q2,q3]): + # print(question) + inp = question + + if vps is not None: + # First Message + # inp = DEFAULT_X_TOKEN['VIDEO'] + '\n' + inp + inp = inp + conv.append_message(conv.roles[0], inp) + video = None + else: + # later messages + conv.append_message(conv.roles[0], inp) + + 
conv.append_message(conv.roles[1], None) + prompt = conv.get_prompt() + input_ids = tokenizer_X_token(prompt, tokenizer, X_TOKEN_INDEX['VIDEO'], return_tensors='pt').unsqueeze(0).cuda() + + stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2 + keywords = [stop_str] + stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids) + streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True) + + # Generate the answer for the current question, stopping on the conversation separator + with torch.inference_mode(): + output_ids = model.generate( + input_ids, + images=[tensor, key], + do_sample=True, + temperature=args.temperature, + max_new_tokens=args.max_new_tokens, + streamer=streamer, + use_cache=True, + stopping_criteria=[stopping_criteria]) + + outputs = tokenizer.decode(output_ids[0, input_ids.shape[1]:]).strip() + conv.messages[-1][-1] = outputs + + if args.debug: + print("\n", {"prompt": prompt, "outputs": outputs}, "\n") + + # Strip the stop token before saving the prediction + inst_pred = { + "image_id": vid, + "caption": outputs.replace(stop_str, "") + } + + out_jsons[qid].append(inst_pred) + + # Save a separate json file for each of action, justification and control signal + for i in range(3): + with open(out_json_paths[i], "w") as of: + json.dump(out_jsons[i], of, indent=4) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--model-path", type=str, default="facebook/opt-350m") + parser.add_argument("--model-base", type=str, default=None) + parser.add_argument("--device", type=str, default="cuda") + parser.add_argument("--conv-mode", type=str, default=None) + parser.add_argument("--temperature", type=float, default=0.2) + parser.add_argument("--max-new-tokens", type=int, default=512) + parser.add_argument("--load-8bit", action="store_true") + parser.add_argument("--load-4bit", action="store_true") + parser.add_argument("--debug", action="store_true") + parser.add_argument("--image-aspect-ratio", type=str, default='pad') + parser.add_argument("--input", type=str, default=None) + parser.add_argument("--output", type=str, default=None) + args = parser.parse_args() + main(args)
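The chunked variant above (eval_custom_chunck.py) writes one BDDX_Test_pred_<cap>_<worker>.json per worker, whereas eval_custom.py and eval_custom_predsig.py write a single BDDX_Test_pred_<cap>.json per caption type. A minimal sketch of recombining the per-worker chunks into that single-file layout (not part of the files added by this patch; the results directory below is hypothetical, and four workers is just the script's default):

import json
import os


def merge_chunked_predictions(output_dir, num_workers=4,
                              caps=("action", "justification", "control_signal")):
    # Concatenate the per-worker prediction lists into one file per caption type.
    for cap in caps:
        merged = []
        for worker in range(num_workers):
            path = os.path.join(output_dir, f"BDDX_Test_pred_{cap}_{worker}.json")
            with open(path, "r") as f:
                merged.extend(json.load(f))  # each entry: {"image_id": ..., "caption": ...}
        with open(os.path.join(output_dir, f"BDDX_Test_pred_{cap}.json"), "w") as f:
            json.dump(merged, f, indent=4)


if __name__ == "__main__":
    merge_chunked_predictions("./results/bddx_test")  # hypothetical --output directory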
diff --git a/llava/serve/examples/desert.jpg b/llava/serve/examples/desert.jpg new file mode 100644 index 0000000000000000000000000000000000000000..57c63ec5adcbe91ae6abb2a885715fb6d1e1fcb5 GIT binary patch literal 262144
7*DomV z{(#E9PpBUFjJn~kXdL;5*3s{18~={>$?xcR^eeiie?#}o@93HR9ldjZpzrbT=zshh z2A=%Bh{30SVCdPO7LOaHw@5vwN<+*$3_+E3rDb%{Ytxw)tA@9%j z317qW_dVtNv;8~zd0qp#_C?5f`EwccO#ixw?nl3&Yw}lgPVjdz{%yW(ANz{dkU>ARfYxgKs`lsge&f&f75Ti@JYHum&znQ!8nca-@0r#58JVr0klykE zY0bYNjo{0$O~*S_x4uDj%NtZQy+&!>3l!EoMM2dQMSdkIe}bHn zIb;@0BcosjX?fE~$(crc`XfXpParg50zt9k@QWUUXT%s>Lq^~jFbEspepq_-;l6t> z?l^bjmSZPw*mvNXO*<}Iwc?UxE6(3^)zOy%#DpE>>V4k?qnRxLiee{1t)cxL&U~dZU3f;@C}% zp*kWsh%jw;F46%Tmv-2?w87p@=zy(zC+yt2d0aQ!F4%jr_3VO!cQ>4Td*R~O2OI9Q z^5|1oyY;H`*gErE58Gkw&;~2JW?0%b!Q7?^_a8Ljo>c?xSk;q8+_rAOZLZ%UH^~hP zVqTBy_X%5BzHPx{E$d)u(*PUW20VCB3+o3pu(Ga!g=Hno%t~Q?zZ~Xf<**=@_ba$u zf&1j%y(-+fSB>kpt8w*K4X)j(!?k-npG6Za?f5e~w!zV{39e2}aCWGJJds`WV!LuD*^z=(bl26geK#F;H+`1d*X&8h^1T^Yekc!Xj?)p3mt)MeBw2@VF6^X4leN3j zv37Sl*6pQ8lXd$+Oz<%?epxEX_QxyobD(Ai}JRvIz3V z;{unJ{Fm`sHMWRIvy)-0Z{=$Qk?5b#){t6?bR9*ST##$6M(x)tmMJG;Mc2cUPZO2LLR-Clzz)4E> zDXSiwe$a#S_C2`b(ub?AeYoK{h}%BHFbf!g1#c`5ctf-2&Bv8De4msl_@+(6FMS67 z85HZRSp?@iMnu62L>0YIVjaT=WkUH2KGa?!t@aht>t7?c=?w~6-=VPm7nD$@D|$bn zmLlCi!D*)SG*bjc=-MdKZI39_QxwS=3gs+?dX|F8)(~Aoc140sWmVdx5^G@WH)OV( z=achy&islF%B!^6x<=3JALyA8DAXdnBEJ;rUSfLQzY}QVeHNh$bT7{@bWeSsr;9&x zH|4pD(%dQ1tc$WB;@mPU0xj!bHAeK7w8}AKU(hnj;{A#Ayv2E$T`E9an3I*A8U5PW_+raePO9k3ssYut6TK;S`BFv+|YK(lR zI2+RZ6=nTjQQG$fCB2_f)cqL+9Tev_3Ub>A%HRiN39ZupGEa8<2a4k-HQT2l6V%@bAlloI_6A?%C?&TXbmd#bRKF)55L5dJ zadn@NK#5iY&E?d33UlLUq_%v~<(bkBoZDYyuf>sTH8@Q1JtP8rkx-luP@MOjuf*PSl;-ml*nPGPyU0!ov*K*&B6d-( z_gtiClf9QH-Ir@go#MciIvl)Ok3-iQaQJ#74qvOs;Tv^|qqiv5x0`V6ZVOJDwc(_B z2Togc;p_t~U~|r{2NxXraOq(`E;$e2DrNc_x#>QP+g?Ms?LCZp{-d}bFa}FLZ|s8E zkq8@yPvkiKqs9@)&P6bvJ)v=Ptm|nz2^&*l29Twh) z&~UaQ&m=r*fD%255dOXc*;(<8oq#8CkDi1Z<=HEI9Nv7!c~YL;_&0D47=RNc*D0VM z&i=iW-Yz`!>VlJZ4_nGK<=26N?BK?29zqA4J=)>y-bOmqx+7Z`O0zSU-MqWt;?)6H z?@oC7b;HBI8xOh7mir&Lb;6eCv2&)BI<>*!VH@lmT3}<>1Z#?~l}$ZqfW?Cb+_$PD z^)RC}-?Oa8U6EvqI^4c5)T;Il<@qkT$L(eowUp%=SXxw*DlS)ZTQ%{z6t`}b;Oey^T)I+(3zrIT?mPvW@_d5weE38L4j)O!VT$v?{b|^{HyJ`25)y6dX#N%U9>H63>4cCPP8AavSY$@JC?tlyW1_4LQ}hl}W1biwo0*m1iJ z`|SpC*kuey+$V6%YYN9bCKm*H<>qKzpktLp|A>{sRxVSdS8WxcmZ&r@OyC8r-kzc% zcE2-~7et|^NUzBtb(FiNnzEYP;Wdb z(O*rdO(WAHzD5N3uQckS+=!pgHBy$jjjgf#ck-;>M@7Qt2Uyc7{? zVHKp`b{x6ifurP@c_&WZ zr#LG*aEdp%%Ds7dr_Umg54eC#KJ+Nmp*gdBNIpSa$x|d$JVPoUq}dJc zRAf50*o9Qi1><3>q7h^fi@)C^cZ7V z3A6|~&n0y88oDOGX-o*;(J}T_lV^!Gn=mephr(|2dCoH2TLZNc6wLRFDNzc~;0a9aDiem1s?#zw2nx zW-a3U8^!roR8pJ;E$aLo@LWnRmGpj57|UsVZHmBkK_Z=tpopd}L0%)I^F5c! 
zvx;2n^89zQoJ$EdBG1Npe*bS2W&=vH;M6{E66g06W|8Lw;;Y^uuJScv%PF)a&k$2eX)b=sPBz7v@*GNO z4k7-za=)0PC_h1v+&hRL?<3ynbMQ`^g%9ygorP!0EZmYc+|#(9BJIQ1_~lTx^Isr{ zG96O%3SlL$5nlEd5tZ-AFNmuCgy`DOh$Zn9XA$VchA&8N{EkElv_zqk*da(}=OBeL zovNbyACSx6dr9{fl(XYi+4TW6o$pcK@eWOGuh7`?9JTdNQBnOE<<(D7RPhwql;y0_ zIiwX$Beh@}Dfu%<%9}+T#W^x{91+Rm2uq;E#f-o=as*zX!*C0suu<9^0)}Aa(+@L> z*&XLjigU-jIG?v@!8wZa8M7vwx^xnH9b`NC`E&_>KD8LxzT=FDHIJ3o@Em(CRL{2tdoR{t-zAZ2Lb2Y@ zYdAo$K6stnY~nMh8AtAFfgBag;WOui1a2Pm;H+IA&O7$w;==)4cA`kT5|2S#r$pcK z8^eA75!h3p-6^$R;Un;i;O~o_jF8At1ji2Z_cx5FxPHXO_9KqO#`GhG;;e}5LqtRm z1zO~}kK){qi0A=Ch(PnT!Lg$VqJ;a$vExFS_ThPbcz*w|Q3QmHz$bVR-hl(~2<(S@ zKrh?_`?%c4!gRPfuH@tm1;iG7Wmv=iny*s#%vP?<#X6wy$ zAL8rV317cX_yu&r-It>6*-p`J!$UWUw@Wizom=7J)Pjc&O|ZACgPmwgjxBx2}~yFvWTG9{TFubgbK-jcrFtuA*wkXTByM5s-p)S5`oOSCe3vYqUpOjD#+?-J6m zjv~E&UzUnYOH_J2FIG@OeSku3!X`=ikZAQG%C*i$uCH4p%#`B2x$~@9!fHj{JW8l_ zRx1kTS+k!qZX(y~4;EwnAuUa0-N6z{wxoZQVx6ReP_EZetk)l}m`8-#kZMDw4WXXr zOr4Te)siizdCU@2s)$0QnVjHx6_qLyzEL9LO6n=j5|L*IW8+atZ;^F_5^R>JG})p{ zw2DNZs=^kzKC+c!zE!_I5qFVj14--Hb-59Hu27_}HY+rlZX``grX|H=|LqnWpiCbm zhq-*@Hl>;EQITx6hh)DTr=WPj zTjVsqM?u>!C~2oicYj1>&nHy(eWFZ%M%~a?)C;iTj%-enq3m7tc{Q@&$Di%6fsz4HR~var7JC;~URS!Q}Dk{6r$+?G*S9 z(#iMf=6QQWq$%pXbHDws1X_?|6%_D3k!PMq=vLzVRna;A1)ZaxDa0bh6yzZavMGZf z&_4K!#(=c4u5D`_`hZr-v_eTVrCCR0TB7?BjW$i)n3re%T&e@X-TR!r$ zCuv5%AYDnJhPM0*1@ZlSG*yu06z-o#1X5p4rFbPaP$s!7By&AgM2s>l+Y@RFGQagknP6n<-A!G=X80` z_RsSoFV`YJQxdN=MX2Y`SjV4BBw3`{fa?i5N}j9VDtV5fJWI+(EWi6v#m^AN`+$gZ zC_C5z8SHqcO~E~73LZ&Q@JyV77dz&jMD8KZ@snhV_m>&C#LsYf299x43g<+MbjlpO zGM^BNboMj&=eUkuLRGdnFg?Z2S7gZCtgb2Y`ew*4&{TVJEOAET*Z4h?m)sIHzxMb#XN z%I1($G=r?-S)>(ClUWM1A^xP5U8l=Q8Fq*qlWix96JUVcLLM_i8inKgSh$Z$#F z=%Fz8Au@*hVh0e-<6{#>5SuuP$b?aZ$B!T^ZUmvRBM6QjK@dAKq2a>_4I4yoXg`8N z`Vbt>Hmn~3p}iU*y$B=$K|Kfv>_%W2(e5ry@Nm}BBAgr=;9yrv>fm5s z4_n(>*il?5Xv(ssZ4K-k>fz!{;eXfy#|QPWme<`Z z!_6DTxN=<+=t~sni|6xj@pLXOQlQVA%)oI<^pV4<6zCM}+n<6x`;xJHUlMjuptqBs zcTuEwQlu%-o3|%n>-I!!-I0t*+I%ILhzHQM(=- zVW;eb$g}4ZPWn)w-6!ym)te)+YEuM^cGae6tlmUX{sF5tnWlPZk!q3XRa+7?DAEQi zw@J8MLgXUSBGg7?It{BNI!&22SSOL`eYuN}bPrQDP^#CnUAHd}YmErANb#PWzh(9R zz^XlY^Q_*RugUa&U8oHx)@$|`Db^m+Bzg^zcCE-XS%0(~8z{CLRCJmWeX?er&8O=Y zpcrq_*($pL#2C2v|3uq^$i_<@D6g{{;Lhx zeXSn5xW4N;&&TE6*JN8Gmz&s9#&1cqx)bMZdvMLAA9uY5Vc|Cn>!2}M1P;S0WK>}h zJOpdD4xtlp2TSsGw9=_I*Z`K(VeK_%ct8F!&WU!`~KBP4TW8{EP}7U)uAL0!8>BEGE(l zrQBBDBl*~TK=xDG2R@^Aka8{3KJtx1tqHp(&=gA|0^LQ?6oKxZ{;nkYe`$&Iy( zexz`Wn}!o5+BIbso*9}%duKg^U+!}R6~0DD@mqwJyrxLMp+vu@L`x+4BVuYkXc8?^ zX#Q<-_`A<%r>U^vR?Rr44n6?4cdn?-Kf6J!<5s??3-ycxu#Pbh&7O&rtYIdTZ@p_J#qJ~#&U zYq`!IJt}46mQxq5+P2}673JBy1!wL`esdF!-wxON4nZ1$NIl zNzc$C%sVN*BG1B(la$~SCHVPx5w?=e#|jmjjur4ZCFJYv0-KKW7|OJ;g|cnRR*`Ut zUQ?)lrku}@gE~`zZNk|~Y?pthpRd903$@sDi4x6cjFM;x+Q>Ma`yDuL*`=op_T$3C zeq3}Jz!i@nT=g2lEuUdL;Il=3H!i{ba0?!Q2V4KJK7>RKA}odi9W{UmN^T^jI#L8W zs+$tsg(#6{%5;>Zb0iEPHhvIs2}4Lu8ADRi2ogy`qR8|JVkp&7al@4CVZ`v9F|k94 zrgTR~4{*I7v2lETTtBzVI`?yXEZ-|Ox({)Y-AIV)LIU42k;{n@or)B$CsE|%!`cxY z+)k-(M?_EuBH2a-wjsj51z`cr2nlRLpkFI|yj$VvNjzHN<=O;KO0=6(Bb*!?;Amfq zhYodcRMabxwxPUQSyxeti6uqayc!RPHRaig;%udanPN?(6?wL{u2&+h1e(X*6DZ4f z%_I_Ch5JO4Z*Hdu-;>s&P6@P`O$+XkJJwAwd(cFAZiKBv1Dsu(Dg7;Qr$jqSZuPxt z3Umbpx)fKh731oaLR_UlU!_1_J)MKgCo^&WR2EL1$iUGf6zIb#*ncn?dk+xGv`F;s zy~)_MCmB10y{XtvY2LXfh07_}zBe7)_GMz*!CdU*=W`!Fe@FN|ILz-Xj;k)Ch&iL-zW2UKX?gh`YGFMP9&@Rd$MYPQYKXKv_r-diljq=ZyCjl8374Gev(`PhVBd{vu6?-a*^fJ3 z{V?|%gjL{(l4(n}w!vdcq8-D>;X;Y_h@XTHACv)nXoh6YAv{<9h5H=Qr7sX){t`)* zuaHvxS_yS}-CJa<2iXVY@}ZW)2U>0mTRzmXo8EH$9oOF>pAWbK5p4=~VaG=lcYh{d zP)Om(!QqoCT7g6v1tfX8d zx`7g{MQw=;%%f;Z+K8lpEKcPx&4rf1g_5!%+kaDtI7`|_@2tGWkmv6TO`yLjR1}&L 
z-D5)6s0g)2H|1IcyMuz=KJsy%wqc4hMY@FoEl5g-qCEo$$RgQYY&*4xvkW}UX#d9d{;DG9NzGc6Jb{8A z%ML~?uTk1Kk|2?16>Zj#&VzI!gxCrZ=a-12G>1u~nbI6Cg9G!Pz&~piK@{hJv}yPz zPr*BJ3hr@}coULl?E2SU&&JLl|>^L}zaXax7Td^d0u=9DU{5K45|BiKZc z=qcF6Pvc?IG~Ci2!y}y%ogul=FA$jj3c-aM^XVLv=!lA65L5LLan+xA{k*Rcu5%;hDYciT!Q-H;MWIR-vL;9 z_TjEeH*P-c!WFv?oVTJl-*3Wcvu2#Q)1(IQ9lBbNgI8*?kN4=k7bTsd3cD%KyUtc9 zcAPHL(lSm`mdVx=#n>j%}j`XC3=+!KDwE-1;fe1Gr0pw)P)_y?;NP{CnXVASo2R@DJ%nFoiid zoP-S^ghCxA5>2U%iteRAcT=7z($PJLj_so$Q+O%Q@gmMCqex91LR#7|(o#o|k}`ti zthjb$|t{d5LUC2>% zBP+HWX;GaN=?=t(wjn0C4Y4F9sD*8-A~LWUp?=K>_Gv+Y7x8F@pIZ}r+$68K0WJ^g zDbY1>cBp}iLoHk#>)>opk)}X9*jB^QrW*DSs$pwY0~@Pq*gU9#t)x;=a39##>omaH zt_ct9n_j-}$jkrW&_oQnOzAz4nNP^S`Ug>>vY$o615b{)vTj{TX~btDIe&J^L)l?oiW zSdRVY%h{2vz}{=MIAGb1Lk@j7<1vBr-j8tJ=MgUZQJ`0B2%$8GD|yxgI@|MfM%AH#hj&Ptl~ z0gQSMwk&Uv<7M!wf!#VqQTy^fk9rr$%`wYN>H$4jqv{k^cl4!e7 z5;g*d$Wge)Ou&l|OwYIpcqdK4FHHs>KIX&qDZ=xgA-d=VrG-+WkbKFP+@}4@H>!lf zO`=Plb04L<@+IOco+FMVQg{-pDB)GFDC5t`3pG$lM*7F`JVFc~XmMrF)qutX9+O=8 z0%=t{*l8T2eZ+T#B>H#C^Y4`B-%(3(t{MJLw8*m|&{A!qY~U+O`@f>3?RQ@@2qZ zyW})~Kx)G~q)-SGC30Lv$))^7RJ=eWKU2~AKsOQSP<{qOgyQE2;O8%ht*nO^OJr3W zuqK3+y+J4mDSwS1e)dAjUm=|545y4oR=!avnWjXGJR6Z|L88wht`c$9qRv`GIE%8M zLxGk`9#YvOpAuX^X*STJ%!@HnF(k4qWb!;2JWmEiSxDo#g$zn_I;B~V!E?z?pH-Au zq&cxcDphC`QN8u4R442?R zJPa66X&V-Lg^lYD9k^`Uj`P;7IBnj96L)1$UIPx_kU)osbFIodmR#hWl0G5QEa?;! z=bt6rLYL+($3<|9lqAn{I3HWcW}OX(^RVu4u447U96lTK7P0OS#ad_m5;hzz;B!k8 zX{od&NUGw-#Ig&=ZF+Fh zz6WO>_TqwbFRoCc?|2Tv!n+?2eEVSUD^)AH;p*3;@{m1)`ryOPhhK0X0)l%HN{J4S z&?GuCx{vfzo(B;dKZLl%VZ_A`s;F~X#so4m$B~^if$U6@KB>sc5XRNM%&al)A4N{~ zDDrbhQJ6D=B0<)xD%0TCwqxh6URn8bZUl!QwwQ@9a~#squMsuI<_j2wv-%d z>l#=RBSLLuErS%BU`5HcvZ>=S_1sV4wr+wsv9O_l+w=9dO|a(cZRNdOo8j!z2q%Yn zI9gZ3fztipZaK_}*_~3D-z-a~jw2`1aP)W@ z4pXK@sE-^=$Kj*0o`Hi$3EM+Q$k9yfKbnQTM>28fR6b5!D#f|0>Wh*C?*}jPrgmf!hHS?0}DH0Lu}#_~-f&^nta z(wpKGE4L&nkrttr$n<=g$F^jwRJqa%GCg0xgRG-S3nJ8pEE`A)h_F(k*-Hdk+W$Qw z;5s`f&^sy33K43NZ(XECqW9)0iI((^ReLp7?=wm7ph$~E8yF&OD$qYdq*`OKG%NYl zQ4+07v#B_5I>ejV5=5XkNL7(TQtFS_An+XPP2u_0AJ^DGY1eYM1#M8IpbcOYq1L2Y zZdis~D`cPKQmb5V%C!--79v#j)sNPF%gzPxT&5-Z4jptAz zeYFlp@3*Qlc$XZzaMPs+x7~Vi+p`CEeEVS@FbYe4a4myIaX)Z~56K}|2amuuWRy}p z42OtOI7$g2$#G1WhFbz3u!*y9PnzNK6x`w`;YQi^NSK6s;v;w@KZ0lKj5hi{{V{y_ zK=w(O+{tOK&%jTrfMm=efXhK#4$hiW`ITY0j}b2aHZEXWsQnu`yx@tF^4LeDcKj&C<9`z9&RM9|kZ4m(1$rrq<#}luhZcQa5a{oVBhX5ozbH!jzM!b*GYYys zBClPlYkWjT%X?j*-yyM1&(*DxiVf^+QL17q*@-KEi71IUQ<9ZH7d}G}J9$BcltXs- zgaCfF0`i~2KmUo!yH)U*K)xoJ9k(#bbU2Bq5P_y-N|aj{Xwx(f5oi^8raVguMV4NI zE?XkU-IQpBmcFq#!YtCP#WE0Jiu`hBV2nRGnfAc{X@Bs;!g^7|=x4&R)ol-S4c zOrPWTcM6WN<8X+Xgk6M4Gw%%)W;2mv%JNOGLEP{rp8dG))`#2f{kUbcM7CXLd&7gr zcn#sEH}M_DT}t%5kV%+@@gW&Gi3f4BuuFIhr<7-K&v*f^tQWkF7x2rKoak2wD0+?H zlGo}#2N^Z5P{!|kZOa?9wLM2)$1HkVXVB9;i>}5QG}q0bu6`Ev6y)ZXXJ~JIj`pUf zXs1B8H9baS?KJAEr%*#gp3BN-P(*nyD49i0;S^GHrx2gOju?^8W@yu-D7ErC@-hC{m)=AEQW9pp`@`ftKfn_DquJ z&5lbA*eRu0uQg-ujaKZv(}w+&=);zsIQF0mr|o)h-k}GVo%lC*>cc(PKA3y-;DKj1 zCAtqzDjMAbXBo&BK#30SM_@=lf-j?77>e^r))30F z2T+;ahqA0*6lM0IFs&O!sa+K6E)*qqQKY+&7vDvh?xIY0A|t9DsS#~R32#MGXfqNi z(eXjei1cejjBg7fJR1<^B@*3$K=*q1xz@qowO$cG{9NkcO__Fgsv#6>=Q_3$scwK1 zadv5fvs(+rwhfLht#ELmJUcI-B>G`1Z0s9hYYa}L49md82X+m3;LwZ*jud6e@dF1Q zW7`Z{it__W+jt;(&l25k;<}cHZEvrolsMY*Tvk1I+W_7q=*N$uKc-`?I#Xa8@9 z$~>kbZD8a~OWKFQ{A+$r!HTWiwk=g_m#|#e_V=v#nXmgB<|SK2vbE^+%H4Y2^lsi9 z^xSD9PkJp`cR(tHXeIU5nG8Zy6+U#Cl@fVBWf}@~98qX~J0>-uy+P`JlVyIe=U8P?U!p zAB@*M`f!C0iEAEw81ln?*K-JWCCcQ}ueXEzuyeaanF_dizxJ0_V^)h?@nUw0>Ifc}jm47Kq z-D-(0#`3|RB9UlG>u7$bQbI~7TIIc(MAr^})dnw8q6O0dcv2Na(m1qK4o#+2s)ooi z>7`8fu0QJz)lItp|;|K4eh?~y{0PHuRwsz}7wz9o|HOqniwK`E3}1WL5X 
zGetQdPa>O7;hi}L@2tlPPj=Qkx$c|>GRGGW{I>1r*8Rmk2GD)B(zK#oNC`phcWz zP@Wo?M}bYJ&@KqHHc(JYy;vZNBAsbUmqe=rjFcAm`wD82m7-xG0mrxdf(eZ{#K7oB<(7wn~Ga0jkBcH{a( z-ZP|jup@=)A&;X_U3ZiF$sjJf58$!~?=k*kxE(lw+Z5@$5{-_T#e?|Aa7=m%my{>) zOnV9`br-?ENqo^uBvbIy*(u7ce2$9RXJ~4AhGq(KTjMl38Ya+EKZ&-68MINHTSTCh zM9-j6gjpiZq`v+!w>?Hp%^WH#r%_QkgW~dOinGY`9P&z@AhTc=@f5X))CmM9j>9i* z9G;OB=MX7THv${qAy~4rU`9c_&7bX>T_-MCx8t;V3y$-CdgNvU4qdOqfvb|LQH@>a zD;GtaMVz;uD4{g-IU~8uMz1eK}to$3+ z9LS$%^?ng=iA-zd!`AaTw@&7IPy$^FHMfBX^vMb>c%uh$Byh9!d>yu3sK@q8jo5Lu z3EQtVVdqWC^xak*Fl)n6iw>N$?!tN7K3t|u-{SA)9zP@ZUAtlK)}?a9t-ZQo>(h;g zzPdyQ4Zx2Q?N5mgj2u7+XiA^G8vhH;BsoAynlLqd2!8 zMOi&4&+b80b}uS3yHSzO^|T%oC3c`7z61FrH>Mq#QLRW1Z$?^p3z8|)36$tq|0YEH zH6hZc0pTRrvmSwNbqI2kwgCa|4HW7Ic#AYUQ=*9ovDAP8SXOU_fM>&q7O$oPkC339PwdS!BahAMi5obG! z^g|x+Y)^6L`a=q|6NTKxz8WsJm3U}VNr^5YWq5G61lG5TVR54ncdz8&*2OH`yp)4$ zZMSKDcdV0 z9YossNH&irt0kJPN4lj|%Var^Gi4dy^Cz-Q);IGwQ`z2AN2+=hU@cjvi?pdsE14xKB24DXO>*a@dhEGgkK^~-aM89Km*ih?yy=}M=N!7! z@>xC<&+&nHp8GC3_TsWk)bg+kmmDQ>%?GA^hw3?AqiB~8RCa9FkGlyAwuw&cUvCWsOp{s_*|kKh(ZkxrBwfEvX%4uaHprO4X>&Y{C`iN|B7jZx^q?}`p2|UtlQ^6 z{#47Rw5kZTM59Hbe^)e3{-$VT2SyX~3Z-ZxraS6zDG$=r1UvI18n{M)|rgs&t(i zJSY;)j#75(FUV|>@^yOE4U$BWPN;iJQIyE)D@0JDCHfpHHHu~MTc(zd;E_5FcXrMM zxAbXvWX>vq_NT0gKnE7SL@)(9oB|zDM#-dXO7vL-I<{65Q$wJ&^bIZgYzVX=!&xHH zCM=|5@L2vlhA>N&4k>eIx~%0>e?zK5>KZRZowZVJBJLVSBwVGGup^OF_fCiFyk5!O zmOO5`{z!>7vr`a8aSqCVfq>i>@J@dM_v9Hkv-9e}@1JGpn5r9pU(z!8{k`GdgR5>m zxb8|}bn3A%TCl?Bb^=tayTwidmFa&Z4q< z4z-lvCdzUv#ksY90u6OjsH>eqeeEMO)J{{9r%_uwgX-E@RMpL)rv4eKNoCDblvO@P zQTZJ5OJ|WIb)NHQm7FE<{t}b=2!V+c@QxXU8$11uL4&aM*Cs@{$Dd!s@TznctlMzb zq7|piG;uzBqX7r5)nVV2I#rfy&!rmdI9Gw~XDQ8R%CPOUw#~p>qrBP9J$RiF|i z=pxS}J@s(IDXyQX)=~~B(_1bzVB6J3{49gjDbo9Ix8l%UnFy={XD#^|;OF2vpARy5 z%w6YB{_eVP-@OYKQa70*?daPJSN}eE1`ohHlp-B9gdhrZSp29e6Bv?6QYH~7@H-ov zK7*L7X(Z>)s1%z5%CSgsLGdJViYAa-^a#0yk0{L)YYMcGTRe+A3UnR?x{$|~l}@3u zWD+%{lc*{hM@8W%DoJ_a04fUlP*d24+WcPB=JudEyBk%RU8qd$LPc^HN<^gN+L068 zimb>MWJa`5qMKDTIxeUQF#(N;3ZOvy)+5BL4#6IE2yt&jAPMkjgfB(f(}mLPQVS2t zv&giUQ$0MLnkdvwaC2=|0`213hKDZAct{a;;5J+Cv!hJgQ;r=d#g5Lhza36ebD9!u zFCzW02@Y=Uu;Xj(-8u>e>z$r)E{U&XvczDH=6+9c~Y62_@RT ziV|H8d#f@ySd_!TtQ>ZCOJQ@T2v#=>V17Lhx3A{n=9OIBxRi(M7jtp>LLM%i&&3tC zS1;yMuJdq_+b>?u!+9Qaj!1j{QUR{rs=$qVHMniwfLj&~xM@Rq<~`*WJJ+}6e&8{L z>+EFTU?=;I4?FH&qxi=P%JV9bXB{QbCWtWq9i|e!g3`Q<>%wx8YLVy-BGp=o#|jZ? 
zwXBKs@{N+}5vR#Cg;otZ)DhX0DjsT`%kya;BHFT#uhCjT7v@cT-NtyH^T#aPC@Cci z%UW8ANVfsyoDwapQc}%zk!Z^FDoXT9L!x(PVdV}<^I-3qtfow_+0CB#9vR%o8-bvg z%3Hf%>Q#$O^Cl#FS)Pw%>msaLU7AfXqR@s!3+st`Ezc(cEvR$o5otAv3?=&S zP-Xgz!H~RZnWCzI7>HcU&5P~&6O>=F;Ur~QkY)WCkz-Z%&lGu|!@Ra5MObsBgp?}Q z9_7vQSQ)nRX1M)gHTFv_(z`75 z3zbuys0J~LNWVdLlgyqd`Ou$G)n*_r~!*Q zCflWS-co@!mFT4cZJO#~)Z6~wAky4LITn5-&8D;wk!MYywRHm{65Yb@HUNlm5#{fkO#Nst_8U7VvTr4*c`K)ggi;S2tZ&)~_UMoDv#tY-}35L;(U|Rc*ThwYDrU2;>_>4Z99=xo2~8~k#sPT=nJ|;3$iS7ErXV& z1Ad7Tec6d3?b1gfAHa35Vchf?#clsln1xJGq9{@l&EP+GG(e{PSto$ z&!dE8KSF%g6k^h+5S~1Vkc0^}8H#7DF#IEyr!inAfj8U=ig6i}G8 zNb$d7X_R^Ke$&_IV-?S5%4)vH0(>?sc{UmUO;!Iz<#WYILtHD*7(Q?0znmK=*6SsR zbFKy(&(~6>>#_ZE19nLjsq1`R+-Xsri;H|dTopmGZ^v!^9`Cwz!kiLq#ow(B1=@}R z?G)4pkFX*5MvfvNau^{o!w8KZr$F=X!p}e;J5mAZkJa1+k?i0k<*`FmIERu_{=Lg) zQC>cy^4E)04*MfT5&!N5B{S?G%^{~`7MV(@XHdk?MP&s$SLKxGvT-tjYEo4)gxZop zG?oscxnuxMMZKuY>qbp>2WrwgQJvO>^5jkwC$u9!woQ|0i9|;X4Q^5b z9qC_>NS}IydrLY;6M{WOpc~*vd??X^pB|MCaBGA=McK!_8D1XE@bG9+l`&k1GjVcn z#Y5sGb*5cKo<*XY;Ygu&c5jCpah3Yhu3YDNoY+33NIUuTz%_6Pu0ca^4;X-#cP~76 zoVyz(-Ie0ZW8En09vV%W>W)a%L>?=6~Wi@A)1bTh236y3NR#TLhGB42U zCH*5zv7F1xdAzhMDA5|>MDnU5RjZg!@zByd<|TRwBFLu8^T;-NjpSJK^(!gP!U~b+ z4cyMwKt%cPFqLb;Gitt0~k5Yj$bXJXX#RK4cGEuSYFq z_oQ~UF$v6%(WF@-*ea^6N3&&lJ!RYw=k+{~V7hFe)yUQ)dVcmrIj4ax)pCPUHz|Qq zZJ=gm6ZG2`$~dV~OqPoPZlk~N2*oX3g~ zigZ~16GTy_r5Z>wB`3Z1Ez;{`VBs$)q-020z0!`4DDV1=%APN%qL5Yf=?s2B&9Icv z(-4^xv6JYi)SGT(EA^_4$x759M4~2SnbAt%8MUNM(?5(#9;V2^M8(s^45@8AMon!_ z$rkC>G0r#7{IukMs7Sb))sZ6q|5TtCMVw6pT0uEB5NS4DZ|8M%JpNN5%clHDq78wT z2(*;5(|*_A^_d92sZ z6{K=OpckrfP*N$ZM)X(eMN2eT)qrl*Cr{z$FO#ynz$eJecrtlQI;CILY5v~`G{sp# z0Z&ozJw&AW`b4Qk-TVm&Y~$)bszHNtjdHz`zpVyGmcCKdECTXh!aM7!n!v;*NsIJK zIXTOaahM0n1QzciadAY zJP~=8+*sj~gQRx!sT|qsE?QpnO>a%2Z~2emZtxf^!^Ysq@4P2F&wfc02&F_tranS+ z#w2319wCl0omeo3=s_kF2aV&&TpYyW=O@^9cIQRuJ{_=!Zvjw2}H5rUGZ5s=F7sEnOSX9t%89mq~@ z3_G}K{JR%Yj>{;}6_pg}s>di|n_n@Dg7P^Ol|N-C>jiRTusN3#OP?XB^f|K2pP;mA z4%O^r)mJ@2WA!66R86A3d=!o4BWNifLQ82snu~i;pWlVr+z!-ccA_@DTScNv6FX6u zprvwTMYSP=0-YAxf|QWf`JCw30Lh7NM3f9JBq0>(K(BfPde$qE4$=o3hH{&U;nfZ=uQrNzD__^51l-%JRZRrr zAJl`O@F9e-LlP<#PlCD-;LGEDDA9b4yH6)vWniaIH{88D)xwPKnQbokg zr5^4QA$O>Qr)@QytxDlyQ3faTQaqqQKe$swN?=J*zJI$EcWxEo?%fiY-!Frec{%LN zE8s{REURE|SqVFf3fNM_?QH8*&bF0(Gw#{9;I=~>Zp-(M!hD6_IpLyJD^6Rt;3V%c zrzqP0ShYS7BGCp*C3>+u%euBr1bPMcuUJQMCd<}^kRS?lu#Txr>+-CKr0AMTv@z)L zuPl>j^=3_$7a@7oeBFu-f~-@B*Yhr(!%jt`6)9J*1b&7O6q0jKWq z=4jT4^Y@!@!J-M5tebJ!wwb-hHr$XNCFS|H3{0dz-{yl@awhM1^s<#@jT0cr00{WGp*>QX!m_YMgJ#M z4M=_JZ>Sskjs{9y^Vn}{Fk&mkuZ;pIW#o*s4@vo0h)Vya1|F)xh&m$3L(c>e=07p? ze1YK?(sDb|s(<`FMyWlKY01ep%IX81cL@aJME z%LbjihR(+#(S*xFyFhu?1X}W>C5=M_`gh8+NVKGJP@)Y)rX{6Agqk!+jx^WnCcdj_ zzGae?8hw_+s^RabP*Z#BgA2Q*tlekiwtrMpd8f;yC=wa2l}ZhiUWr1NyhOM}q1oAz zPFFzooQ5ptK1Oiv6L!R&sa$C(br&Y3@5=ZZ8H8B*nj%hd)&~%30xhNO-XMk@wOGo! zRJTy&;V98+aurH+GCyk)8CEkG%ET%Xp(e&8E7A(aY=$W+B}1ah+Emo36zJ5JubM!! 
z6^Tx1(NZ@OdEWS@4~XN>8pEG8l0R!CuOXb$98y71rZfkYQ=Y z2^)oH)FeD&r{Iw=4Ugm*xTlcRX?U{3>m?G+j&3*wI;rRha>^d7|3nm2K0zMEIKAvC zQp%rEn4cq~;w94gJxG>PmFx)1c$pY}zY{6Z8D$i23UpJ=B-(2y&{i{!ma0)SR}3qG zZWe(q=|e+d59$lLP@mI-y6kRLr+1(#tpjDr?I?%Lkgk09?x1b8*T&$EF-tjV+& zh1OHeib%=z=~A;Ldi%(1i0x`(mr(yUg!{E3GN2uifgOkr=|X%|7ZPK7kR0Eyh>z|@ zR49)NYEhHN_y+JifxYn5XK3^Y;Ie-=yk&MpnPpLAn&R%^&i8lYwNR#go$BCiR|PMd zD!5x!z@CyU0}k!2Dit=?)v%RPcn+=q+mQn9Py=_q-px*;+A{dD z7M3=3Fte62dri1y)qtz_>u~9w(1>$pbd0<8IC-xb$Ju%P$4Vv88Y|WXK&`K*OjCjl zjMR>$Xb0y?kvjmI>v7 z4Usm`W^NRj{?{0iZOZy3RYlejqvo|BX&0+iBtTo9hue&(fx6+zO;B!_a#Q4uav5)| zKk*mHmgGVHQ0Trh7S$4 zWG^2g5`8&(vtC6OPVhs1=64!mA$%N;d@w$Y9)WYr7~J9~ zRDQHq@)Z2kwBs`frwm0E$V_Q6V;U)Yp^!-+A}K;rl_9a}6_Tr8A*GfyyhLi_Yh*OO zMP}1GWH-MaNcy?bfL1{enu$nhZ!(DImjIed#7WIxRCEYSC#K$fpae zq*th1X+0t>Jo$|xE&NGQ28CHLB>E49Cen)q+rWrm8v?D>rCz8cvLMmgbu2`nDaT7h zc`-bO7*nRpph=;JQv4%&pZqybH#-MikNG+UU&m|f;%klcjy7GOB_CSlL=zF|W?o+l zuXO>DYb`ph@}+r9Bijc4Uh5}*NA1|}s1}JH{T(vRci8~{Hk4>poul)kibCg5ptC5j z84VOzN~Jap^;^VHCZj0ClI|me2}25=Dr8wjr$w+!wRDdNepVtRcUng(Z$z>)rYdlV zOe>Mrw^ehQoiwQz9mV&Gp~OZ@li2hHNsV71(^V%leCF|=5L^F|=l=!aRhldZQ4#`5N!e@omb`)|h1rwe1Mh-o z@XCD(ZwiNdx>UB9gKfeL%qb@p6le4B3ET@F!`*-pHR;6-uVGwu>&Io6UR-dZ^g4D@ ze7kU&;w!n%2AAwQDV^Q8B%;jat6aXq9q>kz&b>zDns9F7dS&xL*_KE+Xgbq(t*v z+%H_>{o$H(AMUzI={w1{9#rM;%>9S(fC6k6JPMnTA%&FFl>bU;t)zMla=T0hC1Wav zV8Q!^Ie(UW{8{cu4y9{9u1Z-oryg}JO6YmbXCAcUv?WEE_pX!on{YzX)NVKO{?>>? zlE=(@or*ADmD>8%*m0>!rMhi9U#Zw~R;p7-IZ`Q2S^*iPEo`7PuRmT2sb(R_fB01T z9K~5!&HLR1`p zNQxmPdNU!_T92c*_&cUVOCp#F|_p`)-5 zpM;&{!N<+OE^!w2DYJN(HV4;?CveYv0?#b@PtjAv6+c5t=~GJbQ>2zss7sz8p5O0i zBKLe5Tpq{oMRdt)c8mpn|49VDBe9h4g7O)()=i_geiB`^<7lm>Kvxc^<_lqR)Nq}xys+lsvC7UV=VBRjGMS&_}i2ya4q zm_(_$j%9|D8A z5kLWspd=?mbRj*a8#%FED2VGsfm92L<9eJ_4(UX$40ufFL=KlTVx$CLE8_V65rHiT z4QN%fD+UJjATYEaej$DE;(Pe|cXE3d{QbJrbmiWjt?>11hOb8xd|fHg4{P8{5%#vL zfy;v$INR01iGu9xTnks1CU|*M!OElX>rZl zf`vsDZr!cG%{!I2a!TF1Y%0u) z;Qo~ogjM((vr`Pf2<)|rn`lNBmbtqne$uj)aO7FMbf ze3CzHpygB>tdV?bQd&BG4kt`Zi(J!D6gDphuJ?x~y)%IU2ht&b+byw2L=i{yu(UN8=}UIF{|m zz;g0Wb~t|8%?=1}&@y4p`h$7caI^>;WpwzN3jBPw8oT+>*nO!QyZ8{vcGK zqmBYCJwy5TTnjExo-bLqs+qS$ps(@GROqq7=r8HBV-8(7JT}q^H zQlf<$t~}nkQ{@`obmFp$9M`7?FWvL*hZP^DmVpC!z=x)N_^6f}9XqBZ+Bane0W#Pq zYYt(#PY|J}bjVc0YPw-*3m+q#Z3NpW-aMmuJ!4vMJYj6l0X#sv1aHS>gHuyDtvrHn?5j6t2a%#q@YTgg`{`%Q!-UV z`l*O8bTly*L6%6g2Il#ZK>sxwP3E-Lh)Qc>t)+AH%>03F6;;+SM0qJo#o1JpP3e0I zoj=jT>kvewMV@=OO-VFgucdYTH9xwQ*QN2>Jlej+RXt>Kmc~baP@+YkMV^0ER1V7& z-(OX!J1vdllQy|Z+edbUG>J}cc&}Dtn8dk~avX00{ntui#tof^sZ^`~p7tFW{B;0v@@~;hysx zE?LjuMA3Md@feQ$UN|I8!!e#B5;KJdQB#!WN!$(_$DQC2+@?6+@RhQ1gSf)Z?G=${ zimVJIzWA_*0!=8K7p01ZV=slc4_BS~lu%#cF*0CSxaQQ0Ydq!#_p7L~+9vx%SS9zF z;%qPH(sHP!Qpb6TXj7;!@?4jcNXr02iF)f1Y0rLD0q71z+RS%|NIRgU`o78awGwHjR0?Y( zjqadCb0xAXDHF!Mn(%p{1v(627SYX~m)?K!-u=(rne+Ta7D=<**LCC`|4-iU4gQ~Gotx(47P@;F7tHD10 z9uHq{#1X0QYu131)-CGa@#pwCxaQKw=Rm(!GdzHQSAHhV*;z6Vm6C>!U=ceFtAtrR zNMgq?^$8sKxpvfXmsBh%Wm-6kM3Pke1c@b2`Q3i0CQJ%0VnHBCAuHA1wBfnHJR=~wG1wl zh;#}`ZbwB@JIWHATWL}U zN)m)l6esdHigX?YIxD;dsq$Vyjfkhr#|H7-!JP=9poau?At+Eo1lrfH1K!^4@DU`X z!-Ep-E)uPkqb)C08-=okRHoxjFxhLQY*VrMcG3=>f!5D z0}s0@I7`I(emU;nE5)r_rMPjU99OPVps!G#uax7=)pDG>Rf$XVjH`DWaqdpN3?lTS z{A#RRFZdHfswu}5WJ&EXT1oLR(miC`@--B1k!V4eWmADRmFRia36euid1kBfs5eNS z^a9H%)xt_H|3!?~a~oN)j>nM|N~E=Yvb?~;Yh-yT#y$gM#>JJAR%9 z#2=AJsn#lbEMqH}md#tJLH$=u^Qn!TYJ=4~xXx{Bd1G0}n~Ld_<%T?)uFng!lI(@l zkiSEs*?&RmTT`r6nLY}&k@}&c%e+}h{4WQj8CQ3>8mm$SpL zf|9&!M;exGPgVT%bBaQimy>^P=Q>$Iu~v1z_hhP^y!A&4v4szapU+gO5#+lrRAM&; zT9C3#$FA4nI6u&*%$iiqQ<<*z@&iqvukhw51B@hR^4h~rKFB(iJYRRFD7#A3xm(He z9nWrsM4}b$g!}GFl3!fpu|>6bh|`;RrL2) 
z^gcGyIz)iAD72|SkGvFVHWg`Yo5A9J#%uL_X^O3pH?72)@~Y)ZOH0w!rC3-RT{abG zDcxr(%Kf}XQx@b|;4!@RUSf!JFOLz4Rw6C%JYBj_3!VHONZQAIst2#p$d@)`Tg#bV zD1Rp@9g@aTJ@Q>k;~4o36+i(uuIr7>+QKUa1rUz3!$ZyV?L0Bi(Vn9Q0hg$KmY~S zKkqqP!gZGY4maYO z$^Ge+X`I2=K@ueUe0pIs<2~XkJLYmdm-xPtYkiLIbLK%ePRaxy<{fJ0)x&pNaPYRwqSu6dHyW_# zS_5`dn0H-~2(y&*)FztxnbIc{Pigg!E3o-Yxnk2PEyBFTf)NYi>ih;?CA*2sZ;pRXM{u!4tSGwZS8%VG%t8%h(xM$BRfmhE4Kg z*rh&&qg0H^CBK(``Rr`wy@C(%BC;&`{~_ETPvI`8 zm_c>bBh;3Uqpo}mjb$S`W2i43LtTkRZP6$hKs6=0x^NJ+g+r(*7*HbJkk^gIoK7@m zb)Ye`6Ah9poz{jLigj(edBe z)WXlH67II;aIz|gtywYd-_ZvcUMs=1E2Wg^3Y@=8iKa}Szg3Q__iAwcu9nvEkCkhE zv0{x6q+KNvy+ou}tfffnviz?ETD9v%mNk~~b;9y>+$ZTF^ZN|B)+BoQS}l*-7>sBj zdDX(o^%AAlgn0oidaX&gzDegbWn(a3=CPxa|kPVPLb!ID9%4gl$l6$ zc?Ex-6+fqAnFura=hmcoe%h9-lZ53`&QR6-PQ|KS=~%Tl6YCGsKdTGbm zGVHljjeUIB9J)q%zFDsZ5T2$$OZ54QHO1M6;%qCqgp_HAc0Ncu6q25CgYv8*&u(3m zhM7kX?t97hK~?5X{yl3MG(vfnSrNzJAQd{I z#^4$^29JaZ_#{740v(h&ry|jzLbgoAGD~ruMsVg7w@U>N3N~-L0o?B=In;cOPue73 zJBfhwNnNsM5Ro^B7#WnPCTe-EBs#NR1o{`1LtWJI5hY!pQP%Sr<$a$~)h`u3zMyvK zi&iR+A}!TDS|)!(t3L3sWBR+6BR#7PG!)4-1lou&izJV}`rkA1Qj=-ZR1ibRwFtGy zv?kYm0)8EIinKo6w}`Y%1+Pu_t<74POQFt^ z0fsHVAieQD(kRm+)F}&k%`L0xjZP{z^@#A``F#7K$v(#2(M! zL)rDtdSZfBQ(Bh2vPsrcco<|phI_^w+|uXZO7V3~r2r?*z&`#FY!jwo6F*Jiol@9P zf(>jb4z_W#u!-gVm?>COh^?XxxE?hH%g8BMNW_lqePR|NJc4-yr8#^O_d~~F7BY@| zl;=Bv6RKp~4c`%5_Zh}jFHNAYQes4)C57WU#pb#b*D1@FDAAW)Da{m}t0FtDgShNM z!KUDfA>j+?P?ZOX=PmoAf91+mSSz> zOkd#Zg^Lo&b~500u1h+IL@@;^vw525J^7#m$F16N+@c*v@3-U7y*3=W+p5G_>fNgW z(n^?XvE!14%+M-P=B?){6^qg`PL*QA$r8o-6Phe92(w6|AOb1kEN!tGgR)AOX490q zMbf-aqP!xiBB&B+7J((ZD7GTEDpD+xYd`^}B>&f6BsqgpOn9#sWZQr8+4UbkXDDRZ zl>ZQTj35&IA3BCm8`v54%125o<+csQNo^>IZ$TcVHYc_n*)i?N zpv=l_hsh)_%94 zH-dt@;1}2q|9}nz1$H7dNYX|+5gpQw_^>vlMs*-FwhMWQJt#^sW^(L6p~!o3D=L$l zQ5M&P+=xab1=J(Ps}7N#weWYUpg@C82> z$h0XU*G6kZrhmlJTRHUTp7dH@c=LvRQg)ER(XkZkW~I|TdS5!g$_na4=Jqh06- zrFk4KG2?KHlN{&?cqQvKqD7puwHXT|^PV9p|2ZP_pK$pxB68*st|HG`^f^SJTnA;& z@^xCITEsbw;v7}*1hGYuU-uLVl;`9MJpx_(8rco+DA4ba-|`+sQq`kNO5{ZrbJhySBo_NIWNXzf5Yf29`jm9 zmbpz{XNa_sL%opZu@IFu#Mlt!r7VcECdLDl^8Xng&-WAhD9|!fo|sS(2!^~(2z(vNA2ius2TYc)oKPqqe_RAz%xqU z>FFJ++}&s7w0}f)+b>$4w5lKd0qIS@sQS?|2}`o%BiFn}Y_$v;q(oP06+C1xVMqz( znH{PCel}#*LO*`aWTrlmXdgbT<)JMTvv^UGJw=qIG#o|QIc*LPQ)XeGI0ZYA<=98C zk*IJKg*RNpb`)mp6qzlThDZjv$K|_W8fM&g zFLVNTLnd%1XbiUl##EJyn|`D70)5SU2-iI|i56s%lUwdX?Cf&8+aRvEYm=T_;r8pE zax6uM%d$=CIA3z5OiP|I5s|)3`IZuON}wsalD2V9Npla*TTyr|I&g*(b(*4k+KM8r z+IF0@Y{N;mr_?qc^FYs2);S}2%M@|p6h-^wgEpL?fD6Z@)aLzG3Udn%N~D<_xYL5Y zH!026CEBcs^R6qBmQkYyN^d(S$TZcIJ&7=%E>oDMW~@72gtf;CDTi$5qs*Exs)`hb zFzcv6+mfcCi}T9;e-&sofR7k*s>!O6TCorzR)VW!mojXii?YFg{G4urCd|_Q`?hq2 zA<_o_F7S1NocrH(453~s(CRvMei(f|yjYwi>MS!B%7iOwmfTAX*nUN(|8Bqm3iMI4 zcAT*6=HIyor|tT5fga)KXcQMcMsUSv6j$Z5DIGlN=#g9O^xY0+$1m&=?y-}2pCWA) zJ*Otzv6dX^xM?_1svjoIs%hX|Q)R&6EMX@#?HRv=6zPm7@Rz*ljK_%1eS(C-CrB%n za)#3=qC}UMPoug*Ce)cidBr5kD9;sT6R4zASCu_Nb@>#Lb`sT;?RvJ&mE&lu7(r*{ zFuE%H(N*4yjxti(gZ7ec3UxPH3pJT;%9k{YPE=8%%hEb1o1Mr{>Of9HJF?=tkQvv7 z^f-z#Nsgm9$M+#QVE`$KgGf&7M{06E(o*}8nck1QtU=^u_92hLnwQ#*+~iK4L(bEI zEXr{@Wjrma6`9fPifpM_t>;5$$H+{IZAhoACq}d(HoODTkzI(4>`~$z5hZOe1-ci( zVciI%Fb9QpalHpo+#eUMNqq*zJ3o=|`iqh~P@2+7X{N9%+EA9*g0lE#ltfdYLn+X{ z^@wt>LYP~XCeU`JcxYJydy8T`FfW1iePUJu^LwSF0_OLtVPRDZE8BWCo1)aecJ$~F zf%e9#bqlOqt3{&?R#A=(R{jMF_iBnZk!4eXHjPdjEJUs~R!S6FUsfooHc~uHSuD@S zYqet+_Wc!+Y(to3c_|ArEf^xbVqFB5t&PwmTB6m4Nb4+LFV#Q(I!IBmJ{HTQB8aLA zBBl1UGI`5%WLf1^r+%2Wt9cXr zTTEW_7YP@k<_%S%)T_B{l;)GceuA;C;PESY4q=6=M3PSVT}ahXQD>3oZOQm2W%-|5 z;wjE?_)qd5!WOcX+lUDCGN}W;JsHa=(93tFV$J?6$W*Z#j})nCX1AX%#m@5;Y8u)@ zd=QC1pQJ#aF>6re>m>So%|ROkc-65@Ro1xW+J!qrMp-|QDR2Emock%v18@!-fNR7M 
z+#-kJ86zX?g%Nnhk@zvVb6rN$yGx2j^aPw`WPR)yyb>l5kTQjU)M*5!i}(^sZ%E!8 z!en9`BMbHOvj6cT9*o+_d#KSE@&@Kjanh?Zl7q9J_qU-yoO5 zEYh6a@D@3ZS{`&E#jCjOJxV&>quj`ara)Iyplb&{DS>Vr`h+G5bo1CZHLy?(;Cu8P zJ=4FcL4R8MhLp7XO$o0i&LYKsVC>DG7^fWn9b>{9?Kr^@=>>^41|rJ&P0HpGi71;2 z^n&OL6ki^%$uNbRGXB3u6Xc~65y@25qjyG$gyeU8#>BGDqyZBu_Li8dn9 zTIG)Kl<417b&mPccREr_+9-QBU-FKW_I*Q9k0#RvUD`w}BGj3!TIstqsoWu`84cee zvF9^Xh;qqyeu3c9X9y~Jjv#)1v}p8m_~$-{pNMppM84+Wkv_`@IUnj2 zXURd92=fDp97l=lQhLKDR7Chr;HW}!ku|jCTmIVeoj}Sm_nT3k6?}~ug;=J&R!FX> zKvBLQPPtI>{0MhKr*J2Pt;k0ZB_v>6MV)W@QK0?!zT_4~<)+`Tn(^zl&Kr?lL}=8pydDaUaMf!FmnqQ~-TQIDtq&K8M546_ai5xeNg~BkaYNEFlnh&U z;jCpR&RU2>6HAKl{Wi!1PDd%fBEUz@TXD>+6(`84`;xD$@8kMWndOcGCLH1N;d`wr zE#nYH`GBNm+>$7>)E{18H^pto%!{*0s>NRgno?}Szi&&!zY%FQ z((!LwxlP9qX@h?gDBFTa^uOsCLcLU?jeKZRdDg0M@b_?pzaLc_x|E%cauvDXc23e@ z>agRYKIQkdX6(7ug8lsaNT=o)|Nh5qdU1mCe9Ey8=bR|et|HK5IPX4;OFooncI?#W zEnq@P^tHfoTw|y420M$AyMH%=pLOYcMza$dJ;m=8zh5%_d;DYACp>`@zjLlB?7*fx zWheF-+$Ac_&aFHf;z$yo1Eswy6#zG@OJHRI^08ADgi2zqKq&|5o*-kJgQR1qc9edsDB#l2`R>{00z z4Y^&Y%IZQ{x=a(^jlAS8WG7Ob6MB#q--{HIL~%}_IL9XpA}*f94RU!1aa@m0=tm6Y zIy$-=(NSH9j_5#CcspXEIw`j#q7&g^Z3qi(MMMY*Zf7g`)9n=HP7MliWKbKz16vUq z*p7gJHu(6p!pFZ2et{hn<}L(>_8=&@2LU87xSJx~i|~kEL`8KYA+{G8iG7szZWK|P zi*#u&P3b^+N;}HAT+Z#qiESv3Ye8XTBeFvpknCHl1Ul5U5&<#>(X-!d=I zTwcD$-vklqh4mF8+$x$avP_}o{*@xlYeckVU6*Vn$3c|&V1*&d22xtjRJd126nha1 z0xjtw{}LXrSh9WD+E5~CBqGt8P^*EC3PY&1!HY{-zAi?wjQbZPT4SLGwU$G@AkvaI zous9A=sDC%q?JUcV3mk8S#2WJiv(JPnm3z;+SEpywP?G6l-n~{TGLvh*%}g=7Kzpc zn#&@|^T>X6jE0)(T#!6x5olhQsX$8vT1(wX#((RP=KtCpkN>1FD|z0=mi+fN$}{&X zfu=k!+ev}morbj(=#7W+)U>i&PZU$4i?Q=ug^EHSx>AEvH|ufwZavPKH{r5%t2Wb= z%5m8_2qi%o zj}Vyl2tjF-V~R*n#+-^S2W3A&MD`q_a~>licMcJGvuaSF8XU+QVq_tum@*t)^qfNc z9I?gE5l0eAUm}sjmA*hM1v$3-IpQhFi4`x9Qt=XLGJ|3DD`eHbRid0r@|)hHNYXS& zF(s?0|l3OSDLTWGIS5)F({q=#Srt_d_56p5zL4oX^w zNU=nV=S5Z|*GSV4H2IZdgh7h2f>N%5e*s^|_nQ}Lbq*~;ZHV-b=+&dpk{>M+ty+mj z&rzsn$;_XW=s(nCHmy@y1&>z#K3h~R=|5D3jz)G)8rVrOrG8w}J$^^+s6IWoO81a7 z52N(mS2ZIdm(| zJ96Eg(rnv>(-i1aR%|KGXUJLePMp5qfuodPnY`(+iX3Z_CFvFiDZ__lM!VaTuUk#n zM-EWL4ED>^(btXWvg9&r?7B>8qqvE{Nh>n9?R*Wko|l>Ps<7#d3~tkA(%X2t3L8#V zsEMki*1j?El+@riX5~{QLUlx(4S8NE1Gi1&Sy;ZGlB~v~$iP5Nm`%l5$+KPw<1f-o zG+8Fwxh!y5NiLr~ru@6eug?F)XOv>Ah9=P5zD48Tw(|Al-?s@|{}%$SBGJ5#zX-Ja zEb}QF#d;lRiO%zP$KT;5$rYDUewQfC^83Hqh}{(D-Pc>N`<6`a-HwCi?KmuHHT)bL zv+u!i%JV7bew=d|qa8=u4g>xXMo3HGcMQ@w0e0V3MEfN4OnI!Y3)tlApUDCp?8y>SGmkc29o_&&+4=%93#|Pt>zO%HE|EQKE~VAc1W> zJHd%;GuSCkD}Ic4K11S*pCP&UDN;)wBdhc=@c-GpKY~8e z-!P2+xPbCbG|mC&Wq zGo;L1BIP+Qu8)%3k7$Z>cuYUSBKr|eLMgC;VciG_?V{v%!#}W-(%TLX%CDDy2i$$z z6mGuFY@6Zf*QW52*0%+oK27lQX;yf8X}A+tuNJs?x53r718#op@C@vNPjDZ60%dyj zKKO_9BP4>?BG(bogSgl}WTy5*a1tWrIFgHBw7YgmL_%}Us6*dn~@#Xi1ffZ zBzjjP#p%37W;a5I5)!2s}*6v>>z~qu#?b(NEQ7a zpui9RD}nZ=6dM>Kt)Yb3M4SzgmTjhJ+qj>s;xSqzJCIPGDb*`QqF3v1S=*!PZzR#)~l-=*ZWp_4$3KQS7BiYKWFAy)>sJq&Wjh3_yCDBWx(mJcQE}789gn6MBnf?*R{=Y%eL>401>6B=RW@{-TN|1L^ zo_|OVSuD^ihzt}okjfg%cuqs0mv3jsfc$e?5(PRLKPi&9o`C<}PO09(<)0H(4)jVM zCwb7TD94{YP?}4htCB_0B`;O^x>zO3FAzsr zmZ)-k=~E<>y;KvdBv6>OL4hxkUh@(;^{=rbCIKcjj03tGoSm<_(7^U*hSO@F62YxSTd-+7R7I80F;p5+q@z+|4$1oWZ;hh)R1)*KYk=7#8#`@o3M8}8! 
zM}htq$UsM(r9v$d-}~gZ`G~j(b&rTNI~qn+5LIGN)vBggQ=mJh|5UVco0jT9p%$rD z{HY;n9}|C|X+qLIepiDJYfVx;Br2^^Jfw!Sn$hqx@+HlKTzeKMo(APzv zJq9VyeK_qxiFO{qxrY=YdoDZl;;cQf?WQ<);nag}9J8Vw+c z6pNtb{vD=~Wz$IWkNCf~P^yWsu3jVP0Egc+ofcBfU;Q;0NkR|`#l;;!n-8dy3E7t)nWy5O> zS3UU|_2Fk)K3n{Z-3g&AhfTvQd`6`tSw!(O86zD~N!OTxecUYUweza4dn+v*8m(z`kj2@Jv_o5)B2U&^TNKfcik!MNMh^L%JM0F!9 zst=*jg9wPAz=rm~FSwgR+euOGgu5R_*QWz6UTtvjXoamu3vAt6U`K3S*t)jD-i`a* zTH)x{LYfs0u1)jH4|%MkOCwvZ^Bnf>ZLs(3fW3D&?Dbh89fAhn6g&vG&|!Fm4#6+1 zA3@>$h)ozka%vAU)BBLiwlK9zMWXYQ+L0IEhQio(6vVV6Q%d(q9qix+#QWDFRwB^e zHHh%8RRZnlSP3`BD!4n>!riqFK0d7o^lwFEXeSb52aps$h}48Zq$CU>DRBfzNu!8P zoWws?Q`bG;r(^AI ztqRBLeKLqJ51Wq`V%y15>^M`7-4`mc?@A30U9ZEbyA8M?k!RaB+;i=LrKD*14ZtB} z7%mYbDjg#zc>>|-Q;5!sQmP{;)smVa(dcmAEJF+C5Xl>7 zbm3FPNd&r*=c|5=)SB0-nnq63TjV#tLm|bvsDt9%`GLaxNzG7LL%FIW^%O0s`)peK zS>-m5N_xguw2ps8hc3+BlHWWnlc;=0pDI^pl&YhQ8sbce9HWemlZjW9W)WxdBT=5B zB>xDJXHB3rnO+d+KPk|9BwD0bqR`KmNHVWOW<6Y(*oD_;AlGSnEHM@6ArbSx;fLaE zut=a6OT9$BRo=9;dRm88{X+&d3Q`$FO6!SC%km%So~2+*bXp&9s9{v{&?;8(|uCK^$|B6hR6;LKU zY5Ii3`VVTVY$3MlJ)$e$LZV16QE6THhi9k!l_>M@p z4 z^bFym8>QK00B0!AXPt*|_TeB-+YjJ`T`!K?^x%YuvqdM4ns?w3Md~0qq^DdQx-GMu zQj#gq`))N-oEz1IO(MqzBFMY1YLYC2{Vf$#drC z@j>hyY$?n336CkvPn0k_QFL zv2!ak5GF}grSexuEPsu762`8fmZS^q*x|i(gHCld&LuPU>viY-Q#djh-Mg|qOAU&cL zsbMWh32i}KV56FiG1^Ziim5}Cj|g-fLOkmb;86>IuSWQKHzGj9nVkknTS-YCMshNR zIe8Q*X=6xEn?!8t6oQiub&3z`US}hW-p`y|z zQEZ7wFRaV5i1Y#yr8dwcT8~Imc2xwMNLq*tU{vrm%T@>cOBMuMgqhoT4h>oVt3;c! zSg0jEMDnIZq^(E4>s|-9wjXHP}!jT2eg7vh@p5X+xYBt^Y`%jrA>y_nBhkRhzPC zzp#zVN@$meG$q)WQBlpRXd=r$5@?yiTqIghDII+MN=mdL zYbAZ+H8QwOklbTM-CG|28tFAMfbS(Th%Bd6zT^#1BFwz`$pjcuwkYQ@{Ih1^M|`tp z2wO=9QG*JfA%-`~c#&|vUg|rmfq-rAQA}|z>!LXKh&X?yIDbanz*jU3eM6H#$&%Rz zHNK%?^s5r*mWeND)5W=G`a60l&Al_fqMxES_~bW=>hBm<@+$erx;Rrb$H@eld`)z5 zo_hCp{BH$XlW0w#|CrB*)&$yQ)6t=$QP0B{DPbgigeo-q|1zg zjUSQH_yI|j==l_nN^Rhw%9k#B&WG^}1WAr`j!b4ERXHBRo!=3c@TK>H);-Mq!!6M4qfI%ZaT7=m=Lh_>x zRqmiLTT-AcV+mW?CRob9$0MhC?6fK&Ch~ko25<#U^1UQ?nzHUaj4N(~xJY?E@6?Yo z4t+S~(2tY$eKB#+d9M&sgXw@)Uw8--H231OK@6876R38I8yOUsFbn&nIXI>~#luu7+xQ%= z8P6%o&*72v0-ia-OMaK0!8eEZ{=Ao}G@W1G3wRT$^Bh_97O|!85mWkB335#7E5w(* zMjWL%x{T{Y>cLB0_?XIf?EJn*SlN4o@-?BQ?+{WZ$Q1Z*5L59M@gz>fyW$n{YhIwa z=_&f!W--)0h0)F_Om<9QqGbeQ%~~{ifCAl1f$pY2w-@)LF|P+znLQ|xfq#i2os{P| zDN9Fjj1_tAMX01`gml3tup1u!-Ej5mg0pug9(s1b!Mz=Ju5EbW+(z+jg@r>i%*b8J z@*TS-+_ohT8gbj2SP2bWHmINH4%hGSxLZ8_reNQKTaInG<|xR!dacK;&89zMN9NjOSQJ30JK4(-E2W$4{$-h4sZkEtNwIxUMiHItV{; z*_34z_<592i$pJGVPK-EOiM&sLk1-NM2Y@seY7spk|V7RI#j5r^j{;>E0jb_I!DqE zS-vHau$6&`I+}1(m<1DgmON`ykzOYIDbNOsL|WvRH!jmjUls-+{#c+5iB{4ruhRv3 z4PU>OVyvRd3JNlhUArd>Yxi=UtlXQ96??e6FB@z2
TR!r$ zCuv5%AYDnJhPM0*1@ZlSG*yu06z-o#1X5p4rFbPaP$s!7By&AgM2s>l+Y@RFGQagknP6n<-A!G=X80` z_RsSoFV`YJQxdN=MX2Y`SjV4BBw3`{fa?i5N}j9VDtV5fJWI+(EWi6v#m^AN`+$gZ zC_C5z8SHqcO~E~73LZ&Q@JyV77dz&jMD8KZ@snhV_m>&C#LsYf299x43g<+MbjlpO zGM^BNboMj&=eUkuLRGdnFg?Z2S7gZCtgb2Y`ew*4&{TVJEOAET*Z4h?m)sIHzxMb#XN z%I1($G=r?-S)>(ClUWM1A^xP5U8l=Q8Fq*qlWix96JUVcLLM_i8inKgSh$Z$#F z=%Fz8Au@*hVh0e-<6{#>5SuuP$b?aZ$B!T^ZUmvRBM6QjK@dAKq2a>_4I4yoXg`8N z`Vbt>Hmn~3p}iU*y$B=$K|Kfv>_%W2(e5ry@Nm}BBAgr=;9yrv>fm5s z4_n(>*il?5Xv(ssZ4K-k>fz!{;eXfy#|QPWme<`Z z!_6DTxN=<+=t~sni|6xj@pLXOQlQVA%)oI<^pV4<6zCM}+n<6x`;xJHUlMjuptqBs zcTuEwQlu%-o3|%n>-I!!-I0t*+I%ILhzHQM(=- zVW;eb$g}4ZPWn)w-6!ym)te)+YEuM^cGae6tlmUX{sF5tnWlPZk!q3XRa+7?DAEQi zw@J8MLgXUSBGg7?It{BNI!&22SSOL`eYuN}bPrQDP^#CnUAHd}YmErANb#PWzh(9R zz^XlY^Q_*RugUa&U8oHx)@$|`Db^m+Bzg^zcCE-XS%0(~8z{CLRCJmWeX?er&8O=Y zpcrq_*($pL#2C2v|3uq^$i_<@D6g{{;Lhx zeXSn5xW4N;&&TE6*JN8Gmz&s9#&1cqx)bMZdvMLAA9uY5Vc|Cn>!2}M1P;S0WK>}h zJOpdD4xtlp2TSsGw9=_I*Z`K(VeK_%ct8F!&WU!`~KBP4TW8{EP}7U)uAL0!8>BEGE(l zrQBBDBl*~TK=xDG2R@^Aka8{3KJtx1tqHp(&=gA|0^LQ?6oKxZ{;nkYe`$&Iy( zexz`Wn}!o5+BIbso*9}%duKg^U+!}R6~0DD@mqwJyrxLMp+vu@L`x+4BVuYkXc8?^ zX#Q<-_`A<%r>U^vR?Rr44n6?4cdn?-Kf6J!<5s??3-ycxu#Pbh&7O&rtYIdTZ@p_J#qJ~#&U zYq`!IJt}46mQxq5+P2}673JBy1!wL`esdF!-wxON4nZ1$NIl zNzc$C%sVN*BG1B(la$~SCHVPx5w?=e#|jmjjur4ZCFJYv0-KKW7|OJ;g|cnRR*`Ut zUQ?)lrku}@gE~`zZNk|~Y?pthpRd903$@sDi4x6cjFM;x+Q>Ma`yDuL*`=op_T$3C zeq3}Jz!i@nT=g2lEuUdL;Il=3H!i{ba0?!Q2V4KJK7>RKA}odi9W{UmN^T^jI#L8W zs+$tsg(#6{%5;>Zb0iEPHhvIs2}4Lu8ADRi2ogy`qR8|JVkp&7al@4CVZ`v9F|k94 zrgTR~4{*I7v2lETTtBzVI`?yXEZ-|Ox({)Y-AIV)LIU42k;{n@or)B$CsE|%!`cxY z+)k-(M?_EuBH2a-wjsj51z`cr2nlRLpkFI|yj$VvNjzHN<=O;KO0=6(Bb*!?;Amfq zhYodcRMabxwxPUQSyxeti6uqayc!RPHRaig;%udanPN?(6?wL{u2&+h1e(X*6DZ4f z%_I_Ch5JO4Z*Hdu-;>s&P6@P`O$+XkJJwAwd(cFAZiKBv1Dsu(Dg7;Qr$jqSZuPxt z3Umbpx)fKh731oaLR_UlU!_1_J)MKgCo^&WR2EL1$iUGf6zIb#*ncn?dk+xGv`F;s zy~)_MCmB10y{XtvY2LXfh07_}zBe7)_GMz*!CdU*=W`!Fe@FN|ILz-Xj;k)Ch&iL-zW2UKX?gh`YGFMP9&@Rd$MYPQYKXKv_r-diljq=ZyCjl8374Gev(`PhVBd{vu6?-a*^fJ3 z{V?|%gjL{(l4(n}w!vdcq8-D>;X;Y_h@XTHACv)nXoh6YAv{<9h5H=Qr7sX){t`)* zuaHvxS_yS}-CJa<2iXVY@}ZW)2U>0mTRzmXo8EH$9oOF>pAWbK5p4=~VaG=lcYh{d zP)Om(!QqoCT7g6v1tfX8d zx`7g{MQw=;%%f;Z+K8lpEKcPx&4rf1g_5!%+kaDtI7`|_@2tGWkmv6TO`yLjR1}&L z-D5)6s0g)2H|1IcyMuz=KJsy%wqc4hMY@FoEl5g-qCEo$$RgQYY&*4xvkW}UX#d9d{;DG9NzGc6Jb{8A z%ML~?uTk1Kk|2?16>Zj#&VzI!gxCrZ=a-12G>1u~nbI6Cg9G!Pz&~piK@{hJv}yPz zPr*BJ3hr@}coULl?E2SU&&JLl|>^L}zaXax7Td^d0u=9DU{5K45|BiKZc z=qcF6Pvc?IG~Ci2!y}y%ogul=FA$jj3c-aM^XVLv=!lA65L5LLan+xA{k*Rcu5%;hDYciT!Q-H;MWIR-vL;9 z_TjEeH*P-c!WFv?oVTJl-*3Wcvu2#Q)1(IQ9lBbNgI8*?kN4=k7bTsd3cD%KyUtc9 zcAPHL(lSm`mdVx=#n>j%}j`XC3=+!KDwE-1;fe1Gr0pw)P)_y?;NP{CnXVASo2R@DJ%nFoiid zoP-S^ghCxA5>2U%iteRAcT=7z($PJLj_so$Q+O%Q@gmMCqex91LR#7|(o#o|k}`ti zthjb$|t{d5LUC2>% zBP+HWX;GaN=?=t(wjn0C4Y4F9sD*8-A~LWUp?=K>_Gv+Y7x8F@pIZ}r+$68K0WJ^g zDbY1>cBp}iLoHk#>)>opk)}X9*jB^QrW*DSs$pwY0~@Pq*gU9#t)x;=a39##>omaH zt_ct9n_j-}$jkrW&_oQnOzAz4nNP^S`Ug>>vY$o615b{)vTj{TX~btDIe&J^L)l?oiW zSdRVY%h{2vz}{=MIAGb1Lk@j7<1vBr-j8tJ=MgUZQJ`0B2%$8GD|yxgI@|MfM%AH#hj&Ptl~ z0gQSMwk&Uv<7M!wf!#VqQTy^fk9rr$%`wYN>H$4jqv{k^cl4!e7 z5;g*d$Wge)Ou&l|OwYIpcqdK4FHHs>KIX&qDZ=xgA-d=VrG-+WkbKFP+@}4@H>!lf zO`=Plb04L<@+IOco+FMVQg{-pDB)GFDC5t`3pG$lM*7F`JVFc~XmMrF)qutX9+O=8 z0%=t{*l8T2eZ+T#B>H#C^Y4`B-%(3(t{MJLw8*m|&{A!qY~U+O`@f>3?RQ@@2qZ zyW})~Kx)G~q)-SGC30Lv$))^7RJ=eWKU2~AKsOQSP<{qOgyQE2;O8%ht*nO^OJr3W zuqK3+y+J4mDSwS1e)dAjUm=|545y4oR=!avnWjXGJR6Z|L88wht`c$9qRv`GIE%8M zLxGk`9#YvOpAuX^X*STJ%!@HnF(k4qWb!;2JWmEiSxDo#g$zn_I;B~V!E?z?pH-Au zq&cxcDphC`QN8u4R442?R 
zJPa66X&V-Lg^lYD9k^`Uj`P;7IBnj96L)1$UIPx_kU)osbFIodmR#hWl0G5QEa?;! z=bt6rLYL+($3<|9lqAn{I3HWcW}OX(^RVu4u447U96lTK7P0OS#ad_m5;hzz;B!k8 zX{od&NUGw-#Ig&=ZF+Fh zz6WO>_TqwbFRoCc?|2Tv!n+?2eEVSUD^)AH;p*3;@{m1)`ryOPhhK0X0)l%HN{J4S z&?GuCx{vfzo(B;dKZLl%VZ_A`s;F~X#so4m$B~^if$U6@KB>sc5XRNM%&al)A4N{~ zDDrbhQJ6D=B0<)xD%0TCwqxh6URn8bZUl!QwwQ@9a~#squMsuI<_j2wv-%d z>l#=RBSLLuErS%BU`5HcvZ>=S_1sV4wr+wsv9O_l+w=9dO|a(cZRNdOo8j!z2q%Yn zI9gZ3fztipZaK_}*_~3D-z-a~jw2`1aP)W@ z4pXK@sE-^=$Kj*0o`Hi$3EM+Q$k9yfKbnQTM>28fR6b5!D#f|0>Wh*C?*}jPrgmf!hHS?0}DH0Lu}#_~-f&^nta z(wpKGE4L&nkrttr$n<=g$F^jwRJqa%GCg0xgRG-S3nJ8pEE`A)h_F(k*-Hdk+W$Qw z;5s`f&^sy33K43NZ(XECqW9)0iI((^ReLp7?=wm7ph$~E8yF&OD$qYdq*`OKG%NYl zQ4+07v#B_5I>ejV5=5XkNL7(TQtFS_An+XPP2u_0AJ^DGY1eYM1#M8IpbcOYq1L2Y zZdis~D`cPKQmb5V%C!--79v#j)sNPF%gzPxT&5-Z4jptAz zeYFlp@3*Qlc$XZzaMPs+x7~Vi+p`CEeEVS@FbYe4a4myIaX)Z~56K}|2amuuWRy}p z42OtOI7$g2$#G1WhFbz3u!*y9PnzNK6x`w`;YQi^NSK6s;v;w@KZ0lKj5hi{{V{y_ zK=w(O+{tOK&%jTrfMm=efXhK#4$hiW`ITY0j}b2aHZEXWsQnu`yx@tF^4LeDcKj&C<9`z9&RM9|kZ4m(1$rrq<#}luhZcQa5a{oVBhX5ozbH!jzM!b*GYYys zBClPlYkWjT%X?j*-yyM1&(*DxiVf^+QL17q*@-KEi71IUQ<9ZH7d}G}J9$BcltXs- zgaCfF0`i~2KmUo!yH)U*K)xoJ9k(#bbU2Bq5P_y-N|aj{Xwx(f5oi^8raVguMV4NI zE?XkU-IQpBmcFq#!YtCP#WE0Jiu`hBV2nRGnfAc{X@Bs;!g^7|=x4&R)ol-S4c zOrPWTcM6WN<8X+Xgk6M4Gw%%)W;2mv%JNOGLEP{rp8dG))`#2f{kUbcM7CXLd&7gr zcn#sEH}M_DT}t%5kV%+@@gW&Gi3f4BuuFIhr<7-K&v*f^tQWkF7x2rKoak2wD0+?H zlGo}#2N^Z5P{!|kZOa?9wLM2)$1HkVXVB9;i>}5QG}q0bu6`Ev6y)ZXXJ~JIj`pUf zXs1B8H9baS?KJAEr%*#gp3BN-P(*nyD49i0;S^GHrx2gOju?^8W@yu-D7ErC@-hC{m)=AEQW9pp`@`ftKfn_DquJ z&5lbA*eRu0uQg-ujaKZv(}w+&=);zsIQF0mr|o)h-k}GVo%lC*>cc(PKA3y-;DKj1 zCAtqzDjMAbXBo&BK#30SM_@=lf-j?77>e^r))30F z2T+;ahqA0*6lM0IFs&O!sa+K6E)*qqQKY+&7vDvh?xIY0A|t9DsS#~R32#MGXfqNi z(eXjei1cejjBg7fJR1<^B@*3$K=*q1xz@qowO$cG{9NkcO__Fgsv#6>=Q_3$scwK1 zadv5fvs(+rwhfLht#ELmJUcI-B>G`1Z0s9hYYa}L49md82X+m3;LwZ*jud6e@dF1Q zW7`Z{it__W+jt;(&l25k;<}cHZEvrolsMY*Tvk1I+W_7q=*N$uKc-`?I#Xa8@9 z$~>kbZD8a~OWKFQ{A+$r!HTWiwk=g_m#|#e_V=v#nXmgB<|SK2vbE^+%H4Y2^lsi9 z^xSD9PkJp`cR(tHXeIU5nG8Zy6+U#Cl@fVBWf}@~98qX~J0>-uy+P`JlVyIe=U8P?U!p zAB@*M`f!C0iEAEw81ln?*K-JWCCcQ}ueXEzuyeaanF_dizxJ0_V^)h?@nUw0>Ifc}jm47Kq z-D-(0#`3|RB9UlG>u7$bQbI~7TIIc(MAr^})dnw8q6O0dcv2Na(m1qK4o#+2s)ooi z>7`8fu0QJz)lItp|;|K4eh?~y{0PHuRwsz}7wz9o|HOqniwK`E3}1WL5X zGetQdPa>O7;hi}L@2tlPPj=Qkx$c|>GRGGW{I>1r*8Rmk2GD)B(zK#oNC`phcWz zP@Wo?M}bYJ&@KqHHc(JYy;vZNBAsbUmqe=rjFcAm`wD82m7-xG0mrxdf(eZ{#K7oB<(7wn~Ga0jkBcH{a( z-ZP|jup@=)A&;X_U3ZiF$sjJf58$!~?=k*kxE(lw+Z5@$5{-_T#e?|Aa7=m%my{>) zOnV9`br-?ENqo^uBvbIy*(u7ce2$9RXJ~4AhGq(KTjMl38Ya+EKZ&-68MINHTSTCh zM9-j6gjpiZq`v+!w>?Hp%^WH#r%_QkgW~dOinGY`9P&z@AhTc=@f5X))CmM9j>9i* z9G;OB=MX7THv${qAy~4rU`9c_&7bX>T_-MCx8t;V3y$-CdgNvU4qdOqfvb|LQH@>a zD;GtaMVz;uD4{g-IU~8uMz1eK}to$3+ z9LS$%^?ng=iA-zd!`AaTw@&7IPy$^FHMfBX^vMb>c%uh$Byh9!d>yu3sK@q8jo5Lu z3EQtVVdqWC^xak*Fl)n6iw>N$?!tN7K3t|u-{SA)9zP@ZUAtlK)}?a9t-ZQo>(h;g zzPdyQ4Zx2Q?N5mgj2u7+XiA^G8vhH;BsoAynlLqd2!8 zMOi&4&+b80b}uS3yHSzO^|T%oC3c`7z61FrH>Mq#QLRW1Z$?^p3z8|)36$tq|0YEH zH6hZc0pTRrvmSwNbqI2kwgCa|4HW7Ic#AYUQ=*9ovDAP8SXOU_fM>&q7O$oPkC339PwdS!BahAMi5obG! z^g|x+Y)^6L`a=q|6NTKxz8WsJm3U}VNr^5YWq5G61lG5TVR54ncdz8&*2OH`yp)4$ zZMSKDcdV0 z9YossNH&irt0kJPN4lj|%Var^Gi4dy^Cz-Q);IGwQ`z2AN2+=hU@cjvi?pdsE14xKB24DXO>*a@dhEGgkK^~-aM89Km*ih?yy=}M=N!7! 
z@>xC<&+&nHp8GC3_TsWk)bg+kmmDQ>%?GA^hw3?AqiB~8RCa9FkGlyAwuw&cUvCWsOp{s_*|kKh(ZkxrBwfEvX%4uaHprO4X>&Y{C`iN|B7jZx^q?}`p2|UtlQ^6 z{#47Rw5kZTM59Hbe^)e3{-$VT2SyX~3Z-ZxraS6zDG$=r1UvI18n{M)|rgs&t(i zJSY;)j#75(FUV|>@^yOE4U$BWPN;iJQIyE)D@0JDCHfpHHHu~MTc(zd;E_5FcXrMM zxAbXvWX>vq_NT0gKnE7SL@)(9oB|zDM#-dXO7vL-I<{65Q$wJ&^bIZgYzVX=!&xHH zCM=|5@L2vlhA>N&4k>eIx~%0>e?zK5>KZRZowZVJBJLVSBwVGGup^OF_fCiFyk5!O zmOO5`{z!>7vr`a8aSqCVfq>i>@J@dM_v9Hkv-9e}@1JGpn5r9pU(z!8{k`GdgR5>m zxb8|}bn3A%TCl?Bb^=tayTwidmFa&Z4q< z4z-lvCdzUv#ksY90u6OjsH>eqeeEMO)J{{9r%_uwgX-E@RMpL)rv4eKNoCDblvO@P zQTZJ5OJ|WIb)NHQm7FE<{t}b=2!V+c@QxXU8$11uL4&aM*Cs@{$Dd!s@TznctlMzb zq7|piG;uzBqX7r5)nVV2I#rfy&!rmdI9Gw~XDQ8R%CPOUw#~p>qrBP9J$RiF|i z=pxS}J@s(IDXyQX)=~~B(_1bzVB6J3{49gjDbo9Ix8l%UnFy={XD#^|;OF2vpARy5 z%w6YB{_eVP-@OYKQa70*?daPJSN}eE1`ohHlp-B9gdhrZSp29e6Bv?6QYH~7@H-ov zK7*L7X(Z>)s1%z5%CSgsLGdJViYAa-^a#0yk0{L)YYMcGTRe+A3UnR?x{$|~l}@3u zWD+%{lc*{hM@8W%DoJ_a04fUlP*d24+WcPB=JudEyBk%RU8qd$LPc^HN<^gN+L068 zimb>MWJa`5qMKDTIxeUQF#(N;3ZOvy)+5BL4#6IE2yt&jAPMkjgfB(f(}mLPQVS2t zv&giUQ$0MLnkdvwaC2=|0`213hKDZAct{a;;5J+Cv!hJgQ;r=d#g5Lhza36ebD9!u zFCzW02@Y=Uu;Xj(-8u>e>z$r)E{U&XvczDH=6+9c~Y62_@RT ziV|H8d#f@ySd_!TtQ>ZCOJQ@T2v#=>V17Lhx3A{n=9OIBxRi(M7jtp>LLM%i&&3tC zS1;yMuJdq_+b>?u!+9Qaj!1j{QUR{rs=$qVHMniwfLj&~xM@Rq<~`*WJJ+}6e&8{L z>+EFTU?=;I4?FH&qxi=P%JV9bXB{QbCWtWq9i|e!g3`Q<>%wx8YLVy-BGp=o#|jZ? zwXBKs@{N+}5vR#Cg;otZ)DhX0DjsT`%kya;BHFT#uhCjT7v@cT-NtyH^T#aPC@Cci z%UW8ANVfsyoDwapQc}%zk!Z^FDoXT9L!x(PVdV}<^I-3qtfow_+0CB#9vR%o8-bvg z%3Hf%>Q#$O^Cl#FS)Pw%>msaLU7AfXqR@s!3+st`Ezc(cEvR$o5otAv3?=&S zP-Xgz!H~RZnWCzI7>HcU&5P~&6O>=F;Ur~QkY)WCkz-Z%&lGu|!@Ra5MObsBgp?}Q z9_7vQSQ)nRX1M)gHTFv_(z`75 z3zbuys0J~LNWVdLlgyqd`Ou$G)n*_r~!*Q zCflWS-co@!mFT4cZJO#~)Z6~wAky4LITn5-&8D;wk!MYywRHm{65Yb@HUNlm5#{fkO#Nst_8U7VvTr4*c`K)ggi;S2tZ&)~_UMoDv#tY-}35L;(U|Rc*ThwYDrU2;>_>4Z99=xo2~8~k#sPT=nJ|;3$iS7ErXV& z1Ad7Tec6d3?b1gfAHa35Vchf?#clsln1xJGq9{@l&EP+GG(e{PSto$ z&!dE8KSF%g6k^h+5S~1Vkc0^}8H#7DF#IEyr!inAfj8U=ig6i}G8 zNb$d7X_R^Ke$&_IV-?S5%4)vH0(>?sc{UmUO;!Iz<#WYILtHD*7(Q?0znmK=*6SsR zbFKy(&(~6>>#_ZE19nLjsq1`R+-Xsri;H|dTopmGZ^v!^9`Cwz!kiLq#ow(B1=@}R z?G)4pkFX*5MvfvNau^{o!w8KZr$F=X!p}e;J5mAZkJa1+k?i0k<*`FmIERu_{=Lg) zQC>cy^4E)04*MfT5&!N5B{S?G%^{~`7MV(@XHdk?MP&s$SLKxGvT-tjYEo4)gxZop zG?oscxnuxMMZKuY>qbp>2WrwgQJvO>^5jkwC$u9!woQ|0i9|;X4Q^5b z9qC_>NS}IydrLY;6M{WOpc~*vd??X^pB|MCaBGA=McK!_8D1XE@bG9+l`&k1GjVcn z#Y5sGb*5cKo<*XY;Ygu&c5jCpah3Yhu3YDNoY+33NIUuTz%_6Pu0ca^4;X-#cP~76 zoVyz(-Ie0ZW8En09vV%W>W)a%L>?=6~Wi@A)1bTh236y3NR#TLhGB42U zCH*5zv7F1xdAzhMDA5|>MDnU5RjZg!@zByd<|TRwBFLu8^T;-NjpSJK^(!gP!U~b+ z4cyMwKt%cPFqLb;Gitt0~k5Yj$bXJXX#RK4cGEuSYFq z_oQ~UF$v6%(WF@-*ea^6N3&&lJ!RYw=k+{~V7hFe)yUQ)dVcmrIj4ax)pCPUHz|Qq zZJ=gm6ZG2`$~dV~OqPoPZlk~N2*oX3g~ zigZ~16GTy_r5Z>wB`3Z1Ez;{`VBs$)q-020z0!`4DDV1=%APN%qL5Yf=?s2B&9Icv z(-4^xv6JYi)SGT(EA^_4$x759M4~2SnbAt%8MUNM(?5(#9;V2^M8(s^45@8AMon!_ z$rkC>G0r#7{IukMs7Sb))sZ6q|5TtCMVw6pT0uEB5NS4DZ|8M%JpNN5%clHDq78wT z2(*;5(|*_A^_d92sZ z6{K=OpckrfP*N$ZM)X(eMN2eT)qrl*Cr{z$FO#ynz$eJecrtlQI;CILY5v~`G{sp# z0Z&ozJw&AW`b4Qk-TVm&Y~$)bszHNtjdHz`zpVyGmcCKdECTXh!aM7!n!v;*NsIJK zIXTOaahM0n1QzciadAY zJP~=8+*sj~gQRx!sT|qsE?QpnO>a%2Z~2emZtxf^!^Ysq@4P2F&wfc02&F_tranS+ z#w2319wCl0omeo3=s_kF2aV&&TpYyW=O@^9cIQRuJ{_=!Zvjw2}H5rUGZ5s=F7sEnOSX9t%89mq~@ z3_G}K{JR%Yj>{;}6_pg}s>di|n_n@Dg7P^Ol|N-C>jiRTusN3#OP?XB^f|K2pP;mA z4%O^r)mJ@2WA!66R86A3d=!o4BWNifLQ82snu~i;pWlVr+z!-ccA_@DTScNv6FX6u zprvwTMYSP=0-YAxf|QWf`JCw30Lh7NM3f9JBq0>(K(BfPde$qE4$=o3hH{&U;nfZ=uQrNzD__^51l-%JRZRrr zAJl`O@F9e-LlP<#PlCD-;LGEDDA9b4yH6)vWniaIH{88D)xwPKnQbokg zr5^4QA$O>Qr)@QytxDlyQ3faTQaqqQKe$swN?=J*zJI$EcWxEo?%fiY-!Frec{%LN 
zE8s{REURE|SqVFf3fNM_?QH8*&bF0(Gw#{9;I=~>Zp-(M!hD6_IpLyJD^6Rt;3V%c zrzqP0ShYS7BGCp*C3>+u%euBr1bPMcuUJQMCd<}^kRS?lu#Txr>+-CKr0AMTv@z)L zuPl>j^=3_$7a@7oeBFu-f~-@B*Yhr(!%jt`6)9J*1b&7O6q0jKWq z=4jT4^Y@!@!J-M5tebJ!wwb-hHr$XNCFS|H3{0dz-{yl@awhM1^s<#@jT0cr00{WGp*>QX!m_YMgJ#M z4M=_JZ>Sskjs{9y^Vn}{Fk&mkuZ;pIW#o*s4@vo0h)Vya1|F)xh&m$3L(c>e=07p? ze1YK?(sDb|s(<`FMyWlKY01ep%IX81cL@aJME z%LbjihR(+#(S*xFyFhu?1X}W>C5=M_`gh8+NVKGJP@)Y)rX{6Agqk!+jx^WnCcdj_ zzGae?8hw_+s^RabP*Z#BgA2Q*tlekiwtrMpd8f;yC=wa2l}ZhiUWr1NyhOM}q1oAz zPFFzooQ5ptK1Oiv6L!R&sa$C(br&Y3@5=ZZ8H8B*nj%hd)&~%30xhNO-XMk@wOGo! zRJTy&;V98+aurH+GCyk)8CEkG%ET%Xp(e&8E7A(aY=$W+B}1ah+Emo36zJ5JubM!! z6^Tx1(NZ@OdEWS@4~XN>8pEG8l0R!CuOXb$98y71rZfkYQ=Y z2^)oH)FeD&r{Iw=4Ugm*xTlcRX?U{3>m?G+j&3*wI;rRha>^d7|3nm2K0zMEIKAvC zQp%rEn4cq~;w94gJxG>PmFx)1c$pY}zY{6Z8D$i23UpJ=B-(2y&{i{!ma0)SR}3qG zZWe(q=|e+d59$lLP@mI-y6kRLr+1(#tpjDr?I?%Lkgk09?x1b8*T&$EF-tjV+& zh1OHeib%=z=~A;Ldi%(1i0x`(mr(yUg!{E3GN2uifgOkr=|X%|7ZPK7kR0Eyh>z|@ zR49)NYEhHN_y+JifxYn5XK3^Y;Ie-=yk&MpnPpLAn&R%^&i8lYwNR#go$BCiR|PMd zD!5x!z@CyU0}k!2Dit=?)v%RPcn+=q+mQn9Py=_q-px*;+A{dD z7M3=3Fte62dri1y)qtz_>u~9w(1>$pbd0<8IC-xb$Ju%P$4Vv88Y|WXK&`K*OjCjl zjMR>$Xb0y?kvjmI>v7 z4Usm`W^NRj{?{0iZOZy3RYlejqvo|BX&0+iBtTo9hue&(fx6+zO;B!_a#Q4uav5)| zKk*mHmgGVHQ0Trh7S$4 zWG^2g5`8&(vtC6OPVhs1=64!mA$%N;d@w$Y9)WYr7~J9~ zRDQHq@)Z2kwBs`frwm0E$V_Q6V;U)Yp^!-+A}K;rl_9a}6_Tr8A*GfyyhLi_Yh*OO zMP}1GWH-MaNcy?bfL1{enu$nhZ!(DImjIed#7WIxRCEYSC#K$fpae zq*th1X+0t>Jo$|xE&NGQ28CHLB>E49Cen)q+rWrm8v?D>rCz8cvLMmgbu2`nDaT7h zc`-bO7*nRpph=;JQv4%&pZqybH#-MikNG+UU&m|f;%klcjy7GOB_CSlL=zF|W?o+l zuXO>DYb`ph@}+r9Bijc4Uh5}*NA1|}s1}JH{T(vRci8~{Hk4>poul)kibCg5ptC5j z84VOzN~Jap^;^VHCZj0ClI|me2}25=Dr8wjr$w+!wRDdNepVtRcUng(Z$z>)rYdlV zOe>Mrw^ehQoiwQz9mV&Gp~OZ@li2hHNsV71(^V%leCF|=5L^F|=l=!aRhldZQ4#`5N!e@omb`)|h1rwe1Mh-o z@XCD(ZwiNdx>UB9gKfeL%qb@p6le4B3ET@F!`*-pHR;6-uVGwu>&Io6UR-dZ^g4D@ ze7kU&;w!n%2AAwQDV^Q8B%;jat6aXq9q>kz&b>zDns9F7dS&xL*_KE+Xgbq(t*v z+%H_>{o$H(AMUzI={w1{9#rM;%>9S(fC6k6JPMnTA%&FFl>bU;t)zMla=T0hC1Wav zV8Q!^Ie(UW{8{cu4y9{9u1Z-oryg}JO6YmbXCAcUv?WEE_pX!on{YzX)NVKO{?>>? 
zlE=(@or*ADmD>8%*m0>!rMhi9U#Zw~R;p7-IZ`Q2S^*iPEo`7PuRmT2sb(R_fB01T z9K~5!&HLR1`p zNQxmPdNU!_T92c*_&cUVOCp#F|_p`)-5 zpM;&{!N<+OE^!w2DYJN(HV4;?CveYv0?#b@PtjAv6+c5t=~GJbQ>2zss7sz8p5O0i zBKLe5Tpq{oMRdt)c8mpn|49VDBe9h4g7O)()=i_geiB`^<7lm>Kvxc^<_lqR)Nq}xys+lsvC7UV=VBRjGMS&_}i2ya4q zm_(_$j%9|D8A z5kLWspd=?mbRj*a8#%FED2VGsfm92L<9eJ_4(UX$40ufFL=KlTVx$CLE8_V65rHiT z4QN%fD+UJjATYEaej$DE;(Pe|cXE3d{QbJrbmiWjt?>11hOb8xd|fHg4{P8{5%#vL zfy;v$INR01iGu9xTnks1CU|*M!OElX>rZl zf`vsDZr!cG%{!I2a!TF1Y%0u) z;Qo~ogjM((vr`Pf2<)|rn`lNBmbtqne$uj)aO7FMbf ze3CzHpygB>tdV?bQd&BG4kt`Zi(J!D6gDphuJ?x~y)%IU2ht&b+byw2L=i{yu(UN8=}UIF{|m zz;g0Wb~t|8%?=1}&@y4p`h$7caI^>;WpwzN3jBPw8oT+>*nO!QyZ8{vcGK zqmBYCJwy5TTnjExo-bLqs+qS$ps(@GROqq7=r8HBV-8(7JT}q^H zQlf<$t~}nkQ{@`obmFp$9M`7?FWvL*hZP^DmVpC!z=x)N_^6f}9XqBZ+Bane0W#Pq zYYt(#PY|J}bjVc0YPw-*3m+q#Z3NpW-aMmuJ!4vMJYj6l0X#sv1aHS>gHuyDtvrHn?5j6t2a%#q@YTgg`{`%Q!-UV z`l*O8bTly*L6%6g2Il#ZK>sxwP3E-Lh)Qc>t)+AH%>03F6;;+SM0qJo#o1JpP3e0I zoj=jT>kvewMV@=OO-VFgucdYTH9xwQ*QN2>Jlej+RXt>Kmc~baP@+YkMV^0ER1V7& z-(OX!J1vdllQy|Z+edbUG>J}cc&}Dtn8dk~avX00{ntui#tof^sZ^`~p7tFW{B;0v@@~;hysx zE?LjuMA3Md@feQ$UN|I8!!e#B5;KJdQB#!WN!$(_$DQC2+@?6+@RhQ1gSf)Z?G=${ zimVJIzWA_*0!=8K7p01ZV=slc4_BS~lu%#cF*0CSxaQQ0Ydq!#_p7L~+9vx%SS9zF z;%qPH(sHP!Qpb6TXj7;!@?4jcNXr02iF)f1Y0rLD0q71z+RS%|NIRgU`o78awGwHjR0?Y( zjqadCb0xAXDHF!Mn(%p{1v(627SYX~m)?K!-u=(rne+Ta7D=<**LCC`|4-iU4gQ~Gotx(47P@;F7tHD10 z9uHq{#1X0QYu131)-CGa@#pwCxaQKw=Rm(!GdzHQSAHhV*;z6Vm6C>!U=ceFtAtrR zNMgq?^$8sKxpvfXmsBh%Wm-6kM3Pke1c@b2`Q3i0CQJ%0VnHBCAuHA1wBfnHJR=~wG1wl zh;#}`ZbwB@JIWHATWL}U zN)m)l6esdHigX?YIxD;dsq$Vyjfkhr#|H7-!JP=9poau?At+Eo1lrfH1K!^4@DU`X z!-Ep-E)uPkqb)C08-=okRHoxjFxhLQY*VrMcG3=>f!5D z0}s0@I7`I(emU;nE5)r_rMPjU99OPVps!G#uax7=)pDG>Rf$XVjH`DWaqdpN3?lTS z{A#RRFZdHfswu}5WJ&EXT1oLR(miC`@--B1k!V4eWmADRmFRia36euid1kBfs5eNS z^a9H%)xt_H|3!?~a~oN)j>nM|N~E=Yvb?~;Yh-yT#y$gM#>JJAR%9 z#2=AJsn#lbEMqH}md#tJLH$=u^Qn!TYJ=4~xXx{Bd1G0}n~Ld_<%T?)uFng!lI(@l zkiSEs*?&RmTT`r6nLY}&k@}&c%e+}h{4WQj8CQ3>8mm$SpL zf|9&!M;exGPgVT%bBaQimy>^P=Q>$Iu~v1z_hhP^y!A&4v4szapU+gO5#+lrRAM&; zT9C3#$FA4nI6u&*%$iiqQ<<*z@&iqvukhw51B@hR^4h~rKFB(iJYRRFD7#A3xm(He z9nWrsM4}b$g!}GFl3!fpu|>6bh|`;RrL2) z^gcGyIz)iAD72|SkGvFVHWg`Yo5A9J#%uL_X^O3pH?72)@~Y)ZOH0w!rC3-RT{abG zDcxr(%Kf}XQx@b|;4!@RUSf!JFOLz4Rw6C%JYBj_3!VHONZQAIst2#p$d@)`Tg#bV zD1Rp@9g@aTJ@Q>k;~4o36+i(uuIr7>+QKUa1rUz3!$ZyV?L0Bi(Vn9Q0hg$KmY~S zKkqqP!gZGY4maYO z$^Ge+X`I2=K@ueUe0pIs<2~XkJLYmdm-xPtYkiLIbLK%ePRaxy<{fJ0)x&pNaPYRwqSu6dHyW_# zS_5`dn0H-~2(y&*)FztxnbIc{Pigg!E3o-Yxnk2PEyBFTf)NYi>ih;?CA*2sZ;pRXM{u!4tSGwZS8%VG%t8%h(xM$BRfmhE4Kg z*rh&&qg0H^CBK(``Rr`wy@C(%BC;&`{~_ETPvI`8 zm_c>bBh;3Uqpo}mjb$S`W2i43LtTkRZP6$hKs6=0x^NJ+g+r(*7*HbJkk^gIoK7@m zb)Ye`6Ah9poz{jLigj(edBe z)WXlH67II;aIz|gtywYd-_ZvcUMs=1E2Wg^3Y@=8iKa}Szg3Q__iAwcu9nvEkCkhE zv0{x6q+KNvy+ou}tfffnviz?ETD9v%mNk~~b;9y>+$ZTF^ZN|B)+BoQS}l*-7>sBj zdDX(o^%AAlgn0oidaX&gzDegbWn(a3=CPxa|kPVPLb!ID9%4gl$l6$ zc?Ex-6+fqAnFura=hmcoe%h9-lZ53`&QR6-PQ|KS=~%Tl6YCGsKdTGbm zGVHljjeUIB9J)q%zFDsZ5T2$$OZ54QHO1M6;%qCqgp_HAc0Ncu6q25CgYv8*&u(3m zhM7kX?t97hK~?5X{yl3MG(vfnSrNzJAQd{I z#^4$^29JaZ_#{740v(h&ry|jzLbgoAGD~ruMsVg7w@U>N3N~-L0o?B=In;cOPue73 zJBfhwNnNsM5Ro^B7#WnPCTe-EBs#NR1o{`1LtWJI5hY!pQP%Sr<$a$~)h`u3zMyvK zi&iR+A}!TDS|)!(t3L3sWBR+6BR#7PG!)4-1lou&izJV}`rkA1Qj=-ZR1ibRwFtGy zv?kYm0)8EIinKo6w}`Y%1+Pu_t<74POQFt^ z0fsHVAieQD(kRm+)F}&k%`L0xjZP{z^@#A``F#7K$v(#2(M! 
zL)rDtdSZfBQ(Bh2vPsrcco<|phI_^w+|uXZO7V3~r2r?*z&`#FY!jwo6F*Jiol@9P zf(>jb4z_W#u!-gVm?>COh^?XxxE?hH%g8BMNW_lqePR|NJc4-yr8#^O_d~~F7BY@| zl;=Bv6RKp~4c`%5_Zh}jFHNAYQes4)C57WU#pb#b*D1@FDAAW)Da{m}t0FtDgShNM z!KUDfA>j+?P?ZOX=PmoAf91+mSSz> zOkd#Zg^Lo&b~500u1h+IL@@;^vw525J^7#m$F16N+@c*v@3-U7y*3=W+p5G_>fNgW z(n^?XvE!14%+M-P=B?){6^qg`PL*QA$r8o-6Phe92(w6|AOb1kEN!tGgR)AOX490q zMbf-aqP!xiBB&B+7J((ZD7GTEDpD+xYd`^}B>&f6BsqgpOn9#sWZQr8+4UbkXDDRZ zl>ZQTj35&IA3BCm8`v54%125o<+csQNo^>IZ$TcVHYc_n*)i?N zpv=l_hsh)_%94 zH-dt@;1}2q|9}nz1$H7dNYX|+5gpQw_^>vlMs*-FwhMWQJt#^sW^(L6p~!o3D=L$l zQ5M&P+=xab1=J(Ps}7N#weWYUpg@C82> z$h0XU*G6kZrhmlJTRHUTp7dH@c=LvRQg)ER(XkZkW~I|TdS5!g$_na4=Jqh06- zrFk4KG2?KHlN{&?cqQvKqD7puwHXT|^PV9p|2ZP_pK$pxB68*st|HG`^f^SJTnA;& z@^xCITEsbw;v7}*1hGYuU-uLVl;`9MJpx_(8rco+DA4ba-|`+sQq`kNO5{ZrbJhySBo_NIWNXzf5Yf29`jm9 zmbpz{XNa_sL%opZu@IFu#Mlt!r7VcECdLDl^8Xng&-WAhD9|!fo|sS(2!^~(2z(vNA2ius2TYc)oKPqqe_RAz%xqU z>FFJ++}&s7w0}f)+b>$4w5lKd0qIS@sQS?|2}`o%BiFn}Y_$v;q(oP06+C1xVMqz( znH{PCel}#*LO*`aWTrlmXdgbT<)JMTvv^UGJw=qIG#o|QIc*LPQ)XeGI0ZYA<=98C zk*IJKg*RNpb`)mp6qzlThDZjv$K|_W8fM&g zFLVNTLnd%1XbiUl##EJyn|`D70)5SU2-iI|i56s%lUwdX?Cf&8+aRvEYm=T_;r8pE zax6uM%d$=CIA3z5OiP|I5s|)3`IZuON}wsalD2V9Npla*TTyr|I&g*(b(*4k+KM8r z+IF0@Y{N;mr_?qc^FYs2);S}2%M@|p6h-^wgEpL?fD6Z@)aLzG3Udn%N~D<_xYL5Y zH!026CEBcs^R6qBmQkYyN^d(S$TZcIJ&7=%E>oDMW~@72gtf;CDTi$5qs*Exs)`hb zFzcv6+mfcCi}T9;e-&sofR7k*s>!O6TCorzR)VW!mojXii?YFg{G4urCd|_Q`?hq2 zA<_o_F7S1NocrH(453~s(CRvMei(f|yjYwi>MS!B%7iOwmfTAX*nUN(|8Bqm3iMI4 zcAT*6=HIyor|tT5fga)KXcQMcMsUSv6j$Z5DIGlN=#g9O^xY0+$1m&=?y-}2pCWA) zJ*Otzv6dX^xM?_1svjoIs%hX|Q)R&6EMX@#?HRv=6zPm7@Rz*ljK_%1eS(C-CrB%n za)#3=qC}UMPoug*Ce)cidBr5kD9;sT6R4zASCu_Nb@>#Lb`sT;?RvJ&mE&lu7(r*{ zFuE%H(N*4yjxti(gZ7ec3UxPH3pJT;%9k{YPE=8%%hEb1o1Mr{>Of9HJF?=tkQvv7 z^f-z#Nsgm9$M+#QVE`$KgGf&7M{06E(o*}8nck1QtU=^u_92hLnwQ#*+~iK4L(bEI zEXr{@Wjrma6`9fPifpM_t>;5$$H+{IZAhoACq}d(HoODTkzI(4>`~$z5hZOe1-ci( zVciI%Fb9QpalHpo+#eUMNqq*zJ3o=|`iqh~P@2+7X{N9%+EA9*g0lE#ltfdYLn+X{ z^@wt>LYP~XCeU`JcxYJydy8T`FfW1iePUJu^LwSF0_OLtVPRDZE8BWCo1)aecJ$~F zf%e9#bqlOqt3{&?R#A=(R{jMF_iBnZk!4eXHjPdjEJUs~R!S6FUsfooHc~uHSuD@S zYqet+_Wc!+Y(to3c_|ArEf^xbVqFB5t&PwmTB6m4Nb4+LFV#Q(I!IBmJ{HTQB8aLA zBBl1UGI`5%WLf1^r+%2Wt9cXr zTTEW_7YP@k<_%S%)T_B{l;)GceuA;C;PESY4q=6=M3PSVT}ahXQD>3oZOQm2W%-|5 z;wjE?_)qd5!WOcX+lUDCGN}W;JsHa=(93tFV$J?6$W*Z#j})nCX1AX%#m@5;Y8u)@ zd=QC1pQJ#aF>6re>m>So%|ROkc-65@Ro1xW+J!qrMp-|QDR2Emock%v18@!-fNR7M z+#-kJ86zX?g%Nnhk@zvVb6rN$yGx2j^aPw`WPR)yyb>l5kTQjU)M*5!i}(^sZ%E!8 z!en9`BMbHOvj6cT9*o+_d#KSE@&@Kjanh?Zl7q9J_qU-yoO5 zEYh6a@D@3ZS{`&E#jCjOJxV&>quj`ara)Iyplb&{DS>Vr`h+G5bo1CZHLy?(;Cu8P zJ=4FcL4R8MhLp7XO$o0i&LYKsVC>DG7^fWn9b>{9?Kr^@=>>^41|rJ&P0HpGi71;2 z^n&OL6ki^%$uNbRGXB3u6Xc~65y@25qjyG$gyeU8#>BGDqyZBu_Li8dn9 zTIG)Kl<417b&mPccREr_+9-QBU-FKW_I*Q9k0#RvUD`w}BGj3!TIstqsoWu`84cee zvF9^Xh;qqyeu3c9X9y~Jjv#)1v}p8m_~$-{pNMppM84+Wkv_`@IUnj2 zXURd92=fDp97l=lQhLKDR7Chr;HW}!ku|jCTmIVeoj}Sm_nT3k6?}~ug;=J&R!FX> zKvBLQPPtI>{0MhKr*J2Pt;k0ZB_v>6MV)W@QK0?!zT_4~<)+`Tn(^zl&Kr?lL}=8pydDaUaMf!FmnqQ~-TQIDtq&K8M546_ai5xeNg~BkaYNEFlnh&U z;jCpR&RU2>6HAKl{Wi!1PDd%fBEUz@TXD>+6(`84`;xD$@8kMWndOcGCLH1N;d`wr zE#nYH`GBNm+>$7>)E{18H^pto%!{*0s>NRgno?}Szi&&!zY%FQ z((!LwxlP9qX@h?gDBFTa^uOsCLcLU?jeKZRdDg0M@b_?pzaLc_x|E%cauvDXc23e@ z>agRYKIQkdX6(7ug8lsaNT=o)|Nh5qdU1mCe9Ey8=bR|et|HK5IPX4;OFooncI?#W zEnq@P^tHfoTw|y420M$AyMH%=pLOYcMza$dJ;m=8zh5%_d;DYACp>`@zjLlB?7*fx zWheF-+$Ac_&aFHf;z$yo1Eswy6#zG@OJHRI^08ADgi2zqKq&|5o*-kJgQR1qc9edsDB#l2`R>{00z z4Y^&Y%IZQ{x=a(^jlAS8WG7Ob6MB#q--{HIL~%}_IL9XpA}*f94RU!1aa@m0=tm6Y zIy$-=(NSH9j_5#CcspXEIw`j#q7&g^Z3qi(MMMY*Zf7g`)9n=HP7MliWKbKz16vUq 
z*p7gJHu(6p!pFZ2et{hn<}L(>_8=&@2LU87xSJx~i|~kEL`8KYA+{G8iG7szZWK|P zi*#u&P3b^+N;}HAT+Z#qiESv3Ye8XTBeFvpknCHl1Ul5U5&<#>(X-!d=I zTwcD$-vklqh4mF8+$x$avP_}o{*@xlYeckVU6*Vn$3c|&V1*&d22xtjRJd126nha1 z0xjtw{}LXrSh9WD+E5~CBqGt8P^*EC3PY&1!HY{-zAi?wjQbZPT4SLGwU$G@AkvaI zous9A=sDC%q?JUcV3mk8S#2WJiv(JPnm3z;+SEpywP?G6l-n~{TGLvh*%}g=7Kzpc zn#&@|^T>X6jE0)(T#!6x5olhQsX$8vT1(wX#((RP=KtCpkN>1FD|z0=mi+fN$}{&X zfu=k!+ev}morbj(=#7W+)U>i&PZU$4i?Q=ug^EHSx>AEvH|ufwZavPKH{r5%t2Wb= z%5m8_2qi%o zj}Vyl2tjF-V~R*n#+-^S2W3A&MD`q_a~>licMcJGvuaSF8XU+QVq_tum@*t)^qfNc z9I?gE5l0eAUm}sjmA*hM1v$3-IpQhFi4`x9Qt=XLGJ|3DD`eHbRid0r@|)hHNYXS& zF(s?0|l3OSDLTWGIS5)F({q=#Srt_d_56p5zL4oX^w zNU=nV=S5Z|*GSV4H2IZdgh7h2f>N%5e*s^|_nQ}Lbq*~;ZHV-b=+&dpk{>M+ty+mj z&rzsn$;_XW=s(nCHmy@y1&>z#K3h~R=|5D3jz)G)8rVrOrG8w}J$^^+s6IWoO81a7 z52N(mS2ZIdm(| zJ96Eg(rnv>(-i1aR%|KGXUJLePMp5qfuodPnY`(+iX3Z_CFvFiDZ__lM!VaTuUk#n zM-EWL4ED>^(btXWvg9&r?7B>8qqvE{Nh>n9?R*Wko|l>Ps<7#d3~tkA(%X2t3L8#V zsEMki*1j?El+@riX5~{QLUlx(4S8NE1Gi1&Sy;ZGlB~v~$iP5Nm`%l5$+KPw<1f-o zG+8Fwxh!y5NiLr~ru@6eug?F)XOv>Ah9=P5zD48Tw(|Al-?s@|{}%$SBGJ5#zX-Ja zEb}QF#d;lRiO%zP$KT;5$rYDUewQfC^83Hqh}{(D-Pc>N`<6`a-HwCi?KmuHHT)bL zv+u!i%JV7bew=d|qa8=u4g>xXMo3HGcMQ@w0e0V3MEfN4OnI!Y3)tlApUDCp?8y>SGmkc29o_&&+4=%93#|Pt>zO%HE|EQKE~VAc1W> zJHd%;GuSCkD}Ic4K11S*pCP&UDN;)wBdhc=@c-GpKY~8e z-!P2+xPbCbG|mC&Wq zGo;L1BIP+Qu8)%3k7$Z>cuYUSBKr|eLMgC;VciG_?V{v%!#}W-(%TLX%CDDy2i$$z z6mGuFY@6Zf*QW52*0%+oK27lQX;yf8X}A+tuNJs?x53r718#op@C@vNPjDZ60%dyj zKKO_9BP4>?BG(bogSgl}WTy5*a1tWrIFgHBw7YgmL_%}Us6*dn~@#Xi1ffZ zBzjjP#p%37W;a5I5)!2s}*6v>>z~qu#?b(NEQ7a zpui9RD}nZ=6dM>Kt)Yb3M4SzgmTjhJ+qj>s;xSqzJCIPGDb*`QqF3v1S=*!PZzR#)~l-=*ZWp_4$3KQS7BiYKWFAy)>sJq&Wjh3_yCDBWx(mJcQE}789gn6MBnf?*R{=Y%eL>401>6B=RW@{-TN|1L^ zo_|OVSuD^ihzt}okjfg%cuqs0mv3jsfc$e?5(PRLKPi&9o`C<}PO09(<)0H(4)jVM zCwb7TD94{YP?}4htCB_0B`;O^x>zO3FAzsr zmZ)-k=~E<>y;KvdBv6>OL4hxkUh@(;^{=rbCIKcjj03tGoSm<_(7^U*hSO@F62YxSTd-+7R7I80F;p5+q@z+|4$1oWZ;hh)R1)*KYk=7#8#`@o3M8}8! zM}htq$UsM(r9v$d-}~gZ`G~j(b&rTNI~qn+5LIGN)vBggQ=mJh|5UVco0jT9p%$rD z{HY;n9}|C|X+qLIepiDJYfVx;Br2^^Jfw!Sn$hqx@+HlKTzeKMo(APzv zJq9VyeK_qxiFO{qxrY=YdoDZl;;cQf?WQ<);nag}9J8Vw+c z6pNtb{vD=~Wz$IWkNCf~P^yWsu3jVP0Egc+ofcBfU;Q;0NkR|`#l;;!n-8dy3E7t)nWy5O> zS3UU|_2Fk)K3n{Z-3g&AhfTvQd`6`tSw!(O86zD~N!OTxecUYUweza4dn+v*8m(z`kj2@Jv_o5)B2U&^TNKfcik!MNMh^L%JM0F!9 zst=*jg9wPAz=rm~FSwgR+euOGgu5R_*QWz6UTtvjXoamu3vAt6U`K3S*t)jD-i`a* zTH)x{LYfs0u1)jH4|%MkOCwvZ^Bnf>ZLs(3fW3D&?Dbh89fAhn6g&vG&|!Fm4#6+1 zA3@>$h)ozka%vAU)BBLiwlK9zMWXYQ+L0IEhQio(6vVV6Q%d(q9qix+#QWDFRwB^e zHHh%8RRZnlSP3`BD!4n>!riqFK0d7o^lwFEXeSb52aps$h}48Zq$CU>DRBfzNu!8P zoWws?Q`bG;r(^AI ztqRBLeKLqJ51Wq`V%y15>^M`7-4`mc?@A30U9ZEbyA8M?k!RaB+;i=LrKD*14ZtB} z7%mYbDjg#zc>>|-Q;5!sQmP{;)smVa(dcmAEJF+C5Xl>7 zbm3FPNd&r*=c|5=)SB0-nnq63TjV#tLm|bvsDt9%`GLaxNzG7LL%FIW^%O0s`)peK zS>-m5N_xguw2ps8hc3+BlHWWnlc;=0pDI^pl&YhQ8sbce9HWemlZjW9W)WxdBT=5B zB>xDJXHB3rnO+d+KPk|9BwD0bqR`KmNHVWOW<6Y(*oD_;AlGSnEHM@6ArbSx;fLaE zut=a6OT9$BRo=9;dRm88{X+&d3Q`$FO6!SC%km%So~2+*bXp&9s9{v{&?;8(|uCK^$|B6hR6;LKU zY5Ii3`VVTVY$3MlJ)$e$LZV16QE6THhi9k!l_>M@p z4 z^bFym8>QK00B0!AXPt*|_TeB-+YjJ`T`!K?^x%YuvqdM4ns?w3Md~0qq^DdQx-GMu zQj#gq`))N-oEz1IO(MqzBFMY1YLYC2{Vf$#drC z@j>hyY$?n336CkvPn0k_QFL zv2!ak5GF}grSexuEPsu762`8fmZS^q*x|i(gHCld&LuPU>viY-Q#djh-Mg|qOAU&cL zsbMWh32i}KV56FiG1^Ziim5}Cj|g-fLOkmb;86>IuSWQKHzGj9nVkknTS-YCMshNR zIe8Q*X=6xEn?!8t6oQiub&3z`US}hW-p`y|z zQEZ7wFRaV5i1Y#yr8dwcT8~Imc2xwMNLq*tU{vrm%T@>cOBMuMgqhoT4h>oVt3;c! 
zSg0jEMDnIZq^(E4>s|-9wjXHP}!jT2eg7vh@p5X+xYBt^Y`%jrA>y_nBhkRhzPC zzp#zVN@$meG$q)WQBlpRXd=r$5@?yiTqIghDII+MN=mdL zYbAZ+H8QwOklbTM-CG|28tFAMfbS(Th%Bd6zT^#1BFwz`$pjcuwkYQ@{Ih1^M|`tp z2wO=9QG*JfA%-`~c#&|vUg|rmfq-rAQA}|z>!LXKh&X?yIDbanz*jU3eM6H#$&%Rz zHNK%?^s5r*mWeND)5W=G`a60l&Al_fqMxES_~bW=>hBm<@+$erx;Rrb$H@eld`)z5 zo_hCp{BH$XlW0w#|CrB*)&$yQ)6t=$QP0B{DPbgigeo-q|1zg zjUSQH_yI|j==l_nN^Rhw%9k#B&WG^}1WAr`j!b4ERXHBRo!=3c@TK>H);-Mq!!6M4qfI%ZaT7=m=Lh_>x zRqmiLTT-AcV+mW?CRob9$0MhC?6fK&Ch~ko25<#U^1UQ?nzHUaj4N(~xJY?E@6?Yo z4t+S~(2tY$eKB#+d9M&sgXw@)Uw8--H231OK@6876R38I8yOUsFbn&nIXI>~#luu7+xQ%= z8P6%o&*72v0-ia-OMaK0!8eEZ{=Ao}G@W1G3wRT$^Bh_97O|!85mWkB335#7E5w(* zMjWL%x{T{Y>cLB0_?XIf?EJn*SlN4o@-?BQ?+{WZ$Q1Z*5L59M@gz>fyW$n{YhIwa z=_&f!W--)0h0)F_Om<9QqGbeQ%~~{ifCAl1f$pY2w-@)LF|P+znLQ|xfq#i2os{P| zDN9Fjj1_tAMX01`gml3tup1u!-Ej5mg0pug9(s1b!Mz=Ju5EbW+(z+jg@r>i%*b8J z@*TS-+_ohT8gbj2SP2bWHmINH4%hGSxLZ8_reNQKTaInG<|xR!dacK;&89zMN9NjOSQJ30JK4(-E2W$4{$-h4sZkEtNwIxUMiHItV{; z*_34z_<592i$pJGVPK-EOiM&sLk1-NM2Y@seY7spk|V7RI#j5r^j{;>E0jb_I!DqE zS-vHau$6&`I+}1(m<1DgmON`ykzOYIDbNOsL|WvRH!jmjUls-+{#c+5iB{4ruhRv3 z4PU>OVyvRd3JNlhUArd>Yxi=UtlXQ96??e6FB@z2
^xP5U8l=Q8Fq*qlWix96JUVcLLM_i8inKgSh$Z$#F z=%Fz8Au@*hVh0e-<6{#>5SuuP$b?aZ$B!T^ZUmvRBM6QjK@dAKq2a>_4I4yoXg`8N z`Vbt>Hmn~3p}iU*y$B=$K|Kfv>_%W2(e5ry@Nm}BBAgr=;9yrv>fm5s z4_n(>*il?5Xv(ssZ4K-k>fz!{;eXfy#|QPWme<`Z z!_6DTxN=<+=t~sni|6xj@pLXOQlQVA%)oI<^pV4<6zCM}+n<6x`;xJHUlMjuptqBs zcTuEwQlu%-o3|%n>-I!!-I0t*+I%ILhzHQM(=- zVW;eb$g}4ZPWn)w-6!ym)te)+YEuM^cGae6tlmUX{sF5tnWlPZk!q3XRa+7?DAEQi zw@J8MLgXUSBGg7?It{BNI!&22SSOL`eYuN}bPrQDP^#CnUAHd}YmErANb#PWzh(9R zz^XlY^Q_*RugUa&U8oHx)@$|`Db^m+Bzg^zcCE-XS%0(~8z{CLRCJmWeX?er&8O=Y zpcrq_*($pL#2C2v|3uq^$i_<@D6g{{;Lhx zeXSn5xW4N;&&TE6*JN8Gmz&s9#&1cqx)bMZdvMLAA9uY5Vc|Cn>!2}M1P;S0WK>}h zJOpdD4xtlp2TSsGw9=_I*Z`K(VeK_%ct8F!&WU!`~KBP4TW8{EP}7U)uAL0!8>BEGE(l zrQBBDBl*~TK=xDG2R@^Aka8{3KJtx1tqHp(&=gA|0^LQ?6oKxZ{;nkYe`$&Iy( zexz`Wn}!o5+BIbso*9}%duKg^U+!}R6~0DD@mqwJyrxLMp+vu@L`x+4BVuYkXc8?^ zX#Q<-_`A<%r>U^vR?Rr44n6?4cdn?-Kf6J!<5s??3-ycxu#Pbh&7O&rtYIdTZ@p_J#qJ~#&U zYq`!IJt}46mQxq5+P2}673JBy1!wL`esdF!-wxON4nZ1$NIl zNzc$C%sVN*BG1B(la$~SCHVPx5w?=e#|jmjjur4ZCFJYv0-KKW7|OJ;g|cnRR*`Ut zUQ?)lrku}@gE~`zZNk|~Y?pthpRd903$@sDi4x6cjFM;x+Q>Ma`yDuL*`=op_T$3C zeq3}Jz!i@nT=g2lEuUdL;Il=3H!i{ba0?!Q2V4KJK7>RKA}odi9W{UmN^T^jI#L8W zs+$tsg(#6{%5;>Zb0iEPHhvIs2}4Lu8ADRi2ogy`qR8|JVkp&7al@4CVZ`v9F|k94 zrgTR~4{*I7v2lETTtBzVI`?yXEZ-|Ox({)Y-AIV)LIU42k;{n@or)B$CsE|%!`cxY z+)k-(M?_EuBH2a-wjsj51z`cr2nlRLpkFI|yj$VvNjzHN<=O;KO0=6(Bb*!?;Amfq zhYodcRMabxwxPUQSyxeti6uqayc!RPHRaig;%udanPN?(6?wL{u2&+h1e(X*6DZ4f z%_I_Ch5JO4Z*Hdu-;>s&P6@P`O$+XkJJwAwd(cFAZiKBv1Dsu(Dg7;Qr$jqSZuPxt z3Umbpx)fKh731oaLR_UlU!_1_J)MKgCo^&WR2EL1$iUGf6zIb#*ncn?dk+xGv`F;s zy~)_MCmB10y{XtvY2LXfh07_}zBe7)_GMz*!CdU*=W`!Fe@FN|ILz-Xj;k)Ch&iL-zW2UKX?gh`YGFMP9&@Rd$MYPQYKXKv_r-diljq=ZyCjl8374Gev(`PhVBd{vu6?-a*^fJ3 z{V?|%gjL{(l4(n}w!vdcq8-D>;X;Y_h@XTHACv)nXoh6YAv{<9h5H=Qr7sX){t`)* zuaHvxS_yS}-CJa<2iXVY@}ZW)2U>0mTRzmXo8EH$9oOF>pAWbK5p4=~VaG=lcYh{d zP)Om(!QqoCT7g6v1tfX8d zx`7g{MQw=;%%f;Z+K8lpEKcPx&4rf1g_5!%+kaDtI7`|_@2tGWkmv6TO`yLjR1}&L z-D5)6s0g)2H|1IcyMuz=KJsy%wqc4hMY@FoEl5g-qCEo$$RgQYY&*4xvkW}UX#d9d{;DG9NzGc6Jb{8A z%ML~?uTk1Kk|2?16>Zj#&VzI!gxCrZ=a-12G>1u~nbI6Cg9G!Pz&~piK@{hJv}yPz zPr*BJ3hr@}coULl?E2SU&&JLl|>^L}zaXax7Td^d0u=9DU{5K45|BiKZc z=qcF6Pvc?IG~Ci2!y}y%ogul=FA$jj3c-aM^XVLv=!lA65L5LLan+xA{k*Rcu5%;hDYciT!Q-H;MWIR-vL;9 z_TjEeH*P-c!WFv?oVTJl-*3Wcvu2#Q)1(IQ9lBbNgI8*?kN4=k7bTsd3cD%KyUtc9 zcAPHL(lSm`mdVx=#n>j%}j`XC3=+!KDwE-1;fe1Gr0pw)P)_y?;NP{CnXVASo2R@DJ%nFoiid zoP-S^ghCxA5>2U%iteRAcT=7z($PJLj_so$Q+O%Q@gmMCqex91LR#7|(o#o|k}`ti zthjb$|t{d5LUC2>% zBP+HWX;GaN=?=t(wjn0C4Y4F9sD*8-A~LWUp?=K>_Gv+Y7x8F@pIZ}r+$68K0WJ^g zDbY1>cBp}iLoHk#>)>opk)}X9*jB^QrW*DSs$pwY0~@Pq*gU9#t)x;=a39##>omaH zt_ct9n_j-}$jkrW&_oQnOzAz4nNP^S`Ug>>vY$o615b{)vTj{TX~btDIe&J^L)l?oiW zSdRVY%h{2vz}{=MIAGb1Lk@j7<1vBr-j8tJ=MgUZQJ`0B2%$8GD|yxgI@|MfM%AH#hj&Ptl~ z0gQSMwk&Uv<7M!wf!#VqQTy^fk9rr$%`wYN>H$4jqv{k^cl4!e7 z5;g*d$Wge)Ou&l|OwYIpcqdK4FHHs>KIX&qDZ=xgA-d=VrG-+WkbKFP+@}4@H>!lf zO`=Plb04L<@+IOco+FMVQg{-pDB)GFDC5t`3pG$lM*7F`JVFc~XmMrF)qutX9+O=8 z0%=t{*l8T2eZ+T#B>H#C^Y4`B-%(3(t{MJLw8*m|&{A!qY~U+O`@f>3?RQ@@2qZ zyW})~Kx)G~q)-SGC30Lv$))^7RJ=eWKU2~AKsOQSP<{qOgyQE2;O8%ht*nO^OJr3W zuqK3+y+J4mDSwS1e)dAjUm=|545y4oR=!avnWjXGJR6Z|L88wht`c$9qRv`GIE%8M zLxGk`9#YvOpAuX^X*STJ%!@HnF(k4qWb!;2JWmEiSxDo#g$zn_I;B~V!E?z?pH-Au zq&cxcDphC`QN8u4R442?R zJPa66X&V-Lg^lYD9k^`Uj`P;7IBnj96L)1$UIPx_kU)osbFIodmR#hWl0G5QEa?;! 
z=bt6rLYL+($3<|9lqAn{I3HWcW}OX(^RVu4u447U96lTK7P0OS#ad_m5;hzz;B!k8 zX{od&NUGw-#Ig&=ZF+Fh zz6WO>_TqwbFRoCc?|2Tv!n+?2eEVSUD^)AH;p*3;@{m1)`ryOPhhK0X0)l%HN{J4S z&?GuCx{vfzo(B;dKZLl%VZ_A`s;F~X#so4m$B~^if$U6@KB>sc5XRNM%&al)A4N{~ zDDrbhQJ6D=B0<)xD%0TCwqxh6URn8bZUl!QwwQ@9a~#squMsuI<_j2wv-%d z>l#=RBSLLuErS%BU`5HcvZ>=S_1sV4wr+wsv9O_l+w=9dO|a(cZRNdOo8j!z2q%Yn zI9gZ3fztipZaK_}*_~3D-z-a~jw2`1aP)W@ z4pXK@sE-^=$Kj*0o`Hi$3EM+Q$k9yfKbnQTM>28fR6b5!D#f|0>Wh*C?*}jPrgmf!hHS?0}DH0Lu}#_~-f&^nta z(wpKGE4L&nkrttr$n<=g$F^jwRJqa%GCg0xgRG-S3nJ8pEE`A)h_F(k*-Hdk+W$Qw z;5s`f&^sy33K43NZ(XECqW9)0iI((^ReLp7?=wm7ph$~E8yF&OD$qYdq*`OKG%NYl zQ4+07v#B_5I>ejV5=5XkNL7(TQtFS_An+XPP2u_0AJ^DGY1eYM1#M8IpbcOYq1L2Y zZdis~D`cPKQmb5V%C!--79v#j)sNPF%gzPxT&5-Z4jptAz zeYFlp@3*Qlc$XZzaMPs+x7~Vi+p`CEeEVS@FbYe4a4myIaX)Z~56K}|2amuuWRy}p z42OtOI7$g2$#G1WhFbz3u!*y9PnzNK6x`w`;YQi^NSK6s;v;w@KZ0lKj5hi{{V{y_ zK=w(O+{tOK&%jTrfMm=efXhK#4$hiW`ITY0j}b2aHZEXWsQnu`yx@tF^4LeDcKj&C<9`z9&RM9|kZ4m(1$rrq<#}luhZcQa5a{oVBhX5ozbH!jzM!b*GYYys zBClPlYkWjT%X?j*-yyM1&(*DxiVf^+QL17q*@-KEi71IUQ<9ZH7d}G}J9$BcltXs- zgaCfF0`i~2KmUo!yH)U*K)xoJ9k(#bbU2Bq5P_y-N|aj{Xwx(f5oi^8raVguMV4NI zE?XkU-IQpBmcFq#!YtCP#WE0Jiu`hBV2nRGnfAc{X@Bs;!g^7|=x4&R)ol-S4c zOrPWTcM6WN<8X+Xgk6M4Gw%%)W;2mv%JNOGLEP{rp8dG))`#2f{kUbcM7CXLd&7gr zcn#sEH}M_DT}t%5kV%+@@gW&Gi3f4BuuFIhr<7-K&v*f^tQWkF7x2rKoak2wD0+?H zlGo}#2N^Z5P{!|kZOa?9wLM2)$1HkVXVB9;i>}5QG}q0bu6`Ev6y)ZXXJ~JIj`pUf zXs1B8H9baS?KJAEr%*#gp3BN-P(*nyD49i0;S^GHrx2gOju?^8W@yu-D7ErC@-hC{m)=AEQW9pp`@`ftKfn_DquJ z&5lbA*eRu0uQg-ujaKZv(}w+&=);zsIQF0mr|o)h-k}GVo%lC*>cc(PKA3y-;DKj1 zCAtqzDjMAbXBo&BK#30SM_@=lf-j?77>e^r))30F z2T+;ahqA0*6lM0IFs&O!sa+K6E)*qqQKY+&7vDvh?xIY0A|t9DsS#~R32#MGXfqNi z(eXjei1cejjBg7fJR1<^B@*3$K=*q1xz@qowO$cG{9NkcO__Fgsv#6>=Q_3$scwK1 zadv5fvs(+rwhfLht#ELmJUcI-B>G`1Z0s9hYYa}L49md82X+m3;LwZ*jud6e@dF1Q zW7`Z{it__W+jt;(&l25k;<}cHZEvrolsMY*Tvk1I+W_7q=*N$uKc-`?I#Xa8@9 z$~>kbZD8a~OWKFQ{A+$r!HTWiwk=g_m#|#e_V=v#nXmgB<|SK2vbE^+%H4Y2^lsi9 z^xSD9PkJp`cR(tHXeIU5nG8Zy6+U#Cl@fVBWf}@~98qX~J0>-uy+P`JlVyIe=U8P?U!p zAB@*M`f!C0iEAEw81ln?*K-JWCCcQ}ueXEzuyeaanF_dizxJ0_V^)h?@nUw0>Ifc}jm47Kq z-D-(0#`3|RB9UlG>u7$bQbI~7TIIc(MAr^})dnw8q6O0dcv2Na(m1qK4o#+2s)ooi z>7`8fu0QJz)lItp|;|K4eh?~y{0PHuRwsz}7wz9o|HOqniwK`E3}1WL5X zGetQdPa>O7;hi}L@2tlPPj=Qkx$c|>GRGGW{I>1r*8Rmk2GD)B(zK#oNC`phcWz zP@Wo?M}bYJ&@KqHHc(JYy;vZNBAsbUmqe=rjFcAm`wD82m7-xG0mrxdf(eZ{#K7oB<(7wn~Ga0jkBcH{a( z-ZP|jup@=)A&;X_U3ZiF$sjJf58$!~?=k*kxE(lw+Z5@$5{-_T#e?|Aa7=m%my{>) zOnV9`br-?ENqo^uBvbIy*(u7ce2$9RXJ~4AhGq(KTjMl38Ya+EKZ&-68MINHTSTCh zM9-j6gjpiZq`v+!w>?Hp%^WH#r%_QkgW~dOinGY`9P&z@AhTc=@f5X))CmM9j>9i* z9G;OB=MX7THv${qAy~4rU`9c_&7bX>T_-MCx8t;V3y$-CdgNvU4qdOqfvb|LQH@>a zD;GtaMVz;uD4{g-IU~8uMz1eK}to$3+ z9LS$%^?ng=iA-zd!`AaTw@&7IPy$^FHMfBX^vMb>c%uh$Byh9!d>yu3sK@q8jo5Lu z3EQtVVdqWC^xak*Fl)n6iw>N$?!tN7K3t|u-{SA)9zP@ZUAtlK)}?a9t-ZQo>(h;g zzPdyQ4Zx2Q?N5mgj2u7+XiA^G8vhH;BsoAynlLqd2!8 zMOi&4&+b80b}uS3yHSzO^|T%oC3c`7z61FrH>Mq#QLRW1Z$?^p3z8|)36$tq|0YEH zH6hZc0pTRrvmSwNbqI2kwgCa|4HW7Ic#AYUQ=*9ovDAP8SXOU_fM>&q7O$oPkC339PwdS!BahAMi5obG! z^g|x+Y)^6L`a=q|6NTKxz8WsJm3U}VNr^5YWq5G61lG5TVR54ncdz8&*2OH`yp)4$ zZMSKDcdV0 z9YossNH&irt0kJPN4lj|%Var^Gi4dy^Cz-Q);IGwQ`z2AN2+=hU@cjvi?pdsE14xKB24DXO>*a@dhEGgkK^~-aM89Km*ih?yy=}M=N!7! 
z@>xC<&+&nHp8GC3_TsWk)bg+kmmDQ>%?GA^hw3?AqiB~8RCa9FkGlyAwuw&cUvCWsOp{s_*|kKh(ZkxrBwfEvX%4uaHprO4X>&Y{C`iN|B7jZx^q?}`p2|UtlQ^6 z{#47Rw5kZTM59Hbe^)e3{-$VT2SyX~3Z-ZxraS6zDG$=r1UvI18n{M)|rgs&t(i zJSY;)j#75(FUV|>@^yOE4U$BWPN;iJQIyE)D@0JDCHfpHHHu~MTc(zd;E_5FcXrMM zxAbXvWX>vq_NT0gKnE7SL@)(9oB|zDM#-dXO7vL-I<{65Q$wJ&^bIZgYzVX=!&xHH zCM=|5@L2vlhA>N&4k>eIx~%0>e?zK5>KZRZowZVJBJLVSBwVGGup^OF_fCiFyk5!O zmOO5`{z!>7vr`a8aSqCVfq>i>@J@dM_v9Hkv-9e}@1JGpn5r9pU(z!8{k`GdgR5>m zxb8|}bn3A%TCl?Bb^=tayTwidmFa&Z4q< z4z-lvCdzUv#ksY90u6OjsH>eqeeEMO)J{{9r%_uwgX-E@RMpL)rv4eKNoCDblvO@P zQTZJ5OJ|WIb)NHQm7FE<{t}b=2!V+c@QxXU8$11uL4&aM*Cs@{$Dd!s@TznctlMzb zq7|piG;uzBqX7r5)nVV2I#rfy&!rmdI9Gw~XDQ8R%CPOUw#~p>qrBP9J$RiF|i z=pxS}J@s(IDXyQX)=~~B(_1bzVB6J3{49gjDbo9Ix8l%UnFy={XD#^|;OF2vpARy5 z%w6YB{_eVP-@OYKQa70*?daPJSN}eE1`ohHlp-B9gdhrZSp29e6Bv?6QYH~7@H-ov zK7*L7X(Z>)s1%z5%CSgsLGdJViYAa-^a#0yk0{L)YYMcGTRe+A3UnR?x{$|~l}@3u zWD+%{lc*{hM@8W%DoJ_a04fUlP*d24+WcPB=JudEyBk%RU8qd$LPc^HN<^gN+L068 zimb>MWJa`5qMKDTIxeUQF#(N;3ZOvy)+5BL4#6IE2yt&jAPMkjgfB(f(}mLPQVS2t zv&giUQ$0MLnkdvwaC2=|0`213hKDZAct{a;;5J+Cv!hJgQ;r=d#g5Lhza36ebD9!u zFCzW02@Y=Uu;Xj(-8u>e>z$r)E{U&XvczDH=6+9c~Y62_@RT ziV|H8d#f@ySd_!TtQ>ZCOJQ@T2v#=>V17Lhx3A{n=9OIBxRi(M7jtp>LLM%i&&3tC zS1;yMuJdq_+b>?u!+9Qaj!1j{QUR{rs=$qVHMniwfLj&~xM@Rq<~`*WJJ+}6e&8{L z>+EFTU?=;I4?FH&qxi=P%JV9bXB{QbCWtWq9i|e!g3`Q<>%wx8YLVy-BGp=o#|jZ? zwXBKs@{N+}5vR#Cg;otZ)DhX0DjsT`%kya;BHFT#uhCjT7v@cT-NtyH^T#aPC@Cci z%UW8ANVfsyoDwapQc}%zk!Z^FDoXT9L!x(PVdV}<^I-3qtfow_+0CB#9vR%o8-bvg z%3Hf%>Q#$O^Cl#FS)Pw%>msaLU7AfXqR@s!3+st`Ezc(cEvR$o5otAv3?=&S zP-Xgz!H~RZnWCzI7>HcU&5P~&6O>=F;Ur~QkY)WCkz-Z%&lGu|!@Ra5MObsBgp?}Q z9_7vQSQ)nRX1M)gHTFv_(z`75 z3zbuys0J~LNWVdLlgyqd`Ou$G)n*_r~!*Q zCflWS-co@!mFT4cZJO#~)Z6~wAky4LITn5-&8D;wk!MYywRHm{65Yb@HUNlm5#{fkO#Nst_8U7VvTr4*c`K)ggi;S2tZ&)~_UMoDv#tY-}35L;(U|Rc*ThwYDrU2;>_>4Z99=xo2~8~k#sPT=nJ|;3$iS7ErXV& z1Ad7Tec6d3?b1gfAHa35Vchf?#clsln1xJGq9{@l&EP+GG(e{PSto$ z&!dE8KSF%g6k^h+5S~1Vkc0^}8H#7DF#IEyr!inAfj8U=ig6i}G8 zNb$d7X_R^Ke$&_IV-?S5%4)vH0(>?sc{UmUO;!Iz<#WYILtHD*7(Q?0znmK=*6SsR zbFKy(&(~6>>#_ZE19nLjsq1`R+-Xsri;H|dTopmGZ^v!^9`Cwz!kiLq#ow(B1=@}R z?G)4pkFX*5MvfvNau^{o!w8KZr$F=X!p}e;J5mAZkJa1+k?i0k<*`FmIERu_{=Lg) zQC>cy^4E)04*MfT5&!N5B{S?G%^{~`7MV(@XHdk?MP&s$SLKxGvT-tjYEo4)gxZop zG?oscxnuxMMZKuY>qbp>2WrwgQJvO>^5jkwC$u9!woQ|0i9|;X4Q^5b z9qC_>NS}IydrLY;6M{WOpc~*vd??X^pB|MCaBGA=McK!_8D1XE@bG9+l`&k1GjVcn z#Y5sGb*5cKo<*XY;Ygu&c5jCpah3Yhu3YDNoY+33NIUuTz%_6Pu0ca^4;X-#cP~76 zoVyz(-Ie0ZW8En09vV%W>W)a%L>?=6~Wi@A)1bTh236y3NR#TLhGB42U zCH*5zv7F1xdAzhMDA5|>MDnU5RjZg!@zByd<|TRwBFLu8^T;-NjpSJK^(!gP!U~b+ z4cyMwKt%cPFqLb;Gitt0~k5Yj$bXJXX#RK4cGEuSYFq z_oQ~UF$v6%(WF@-*ea^6N3&&lJ!RYw=k+{~V7hFe)yUQ)dVcmrIj4ax)pCPUHz|Qq zZJ=gm6ZG2`$~dV~OqPoPZlk~N2*oX3g~ zigZ~16GTy_r5Z>wB`3Z1Ez;{`VBs$)q-020z0!`4DDV1=%APN%qL5Yf=?s2B&9Icv z(-4^xv6JYi)SGT(EA^_4$x759M4~2SnbAt%8MUNM(?5(#9;V2^M8(s^45@8AMon!_ z$rkC>G0r#7{IukMs7Sb))sZ6q|5TtCMVw6pT0uEB5NS4DZ|8M%JpNN5%clHDq78wT z2(*;5(|*_A^_d92sZ z6{K=OpckrfP*N$ZM)X(eMN2eT)qrl*Cr{z$FO#ynz$eJecrtlQI;CILY5v~`G{sp# z0Z&ozJw&AW`b4Qk-TVm&Y~$)bszHNtjdHz`zpVyGmcCKdECTXh!aM7!n!v;*NsIJK zIXTOaahM0n1QzciadAY zJP~=8+*sj~gQRx!sT|qsE?QpnO>a%2Z~2emZtxf^!^Ysq@4P2F&wfc02&F_tranS+ z#w2319wCl0omeo3=s_kF2aV&&TpYyW=O@^9cIQRuJ{_=!Zvjw2}H5rUGZ5s=F7sEnOSX9t%89mq~@ z3_G}K{JR%Yj>{;}6_pg}s>di|n_n@Dg7P^Ol|N-C>jiRTusN3#OP?XB^f|K2pP;mA z4%O^r)mJ@2WA!66R86A3d=!o4BWNifLQ82snu~i;pWlVr+z!-ccA_@DTScNv6FX6u zprvwTMYSP=0-YAxf|QWf`JCw30Lh7NM3f9JBq0>(K(BfPde$qE4$=o3hH{&U;nfZ=uQrNzD__^51l-%JRZRrr zAJl`O@F9e-LlP<#PlCD-;LGEDDA9b4yH6)vWniaIH{88D)xwPKnQbokg zr5^4QA$O>Qr)@QytxDlyQ3faTQaqqQKe$swN?=J*zJI$EcWxEo?%fiY-!Frec{%LN 
zE8s{REURE|SqVFf3fNM_?QH8*&bF0(Gw#{9;I=~>Zp-(M!hD6_IpLyJD^6Rt;3V%c zrzqP0ShYS7BGCp*C3>+u%euBr1bPMcuUJQMCd<}^kRS?lu#Txr>+-CKr0AMTv@z)L zuPl>j^=3_$7a@7oeBFu-f~-@B*Yhr(!%jt`6)9J*1b&7O6q0jKWq z=4jT4^Y@!@!J-M5tebJ!wwb-hHr$XNCFS|H3{0dz-{yl@awhM1^s<#@jT0cr00{WGp*>QX!m_YMgJ#M z4M=_JZ>Sskjs{9y^Vn}{Fk&mkuZ;pIW#o*s4@vo0h)Vya1|F)xh&m$3L(c>e=07p? ze1YK?(sDb|s(<`FMyWlKY01ep%IX81cL@aJME z%LbjihR(+#(S*xFyFhu?1X}W>C5=M_`gh8+NVKGJP@)Y)rX{6Agqk!+jx^WnCcdj_ zzGae?8hw_+s^RabP*Z#BgA2Q*tlekiwtrMpd8f;yC=wa2l}ZhiUWr1NyhOM}q1oAz zPFFzooQ5ptK1Oiv6L!R&sa$C(br&Y3@5=ZZ8H8B*nj%hd)&~%30xhNO-XMk@wOGo! zRJTy&;V98+aurH+GCyk)8CEkG%ET%Xp(e&8E7A(aY=$W+B}1ah+Emo36zJ5JubM!! z6^Tx1(NZ@OdEWS@4~XN>8pEG8l0R!CuOXb$98y71rZfkYQ=Y z2^)oH)FeD&r{Iw=4Ugm*xTlcRX?U{3>m?G+j&3*wI;rRha>^d7|3nm2K0zMEIKAvC zQp%rEn4cq~;w94gJxG>PmFx)1c$pY}zY{6Z8D$i23UpJ=B-(2y&{i{!ma0)SR}3qG zZWe(q=|e+d59$lLP@mI-y6kRLr+1(#tpjDr?I?%Lkgk09?x1b8*T&$EF-tjV+& zh1OHeib%=z=~A;Ldi%(1i0x`(mr(yUg!{E3GN2uifgOkr=|X%|7ZPK7kR0Eyh>z|@ zR49)NYEhHN_y+JifxYn5XK3^Y;Ie-=yk&MpnPpLAn&R%^&i8lYwNR#go$BCiR|PMd zD!5x!z@CyU0}k!2Dit=?)v%RPcn+=q+mQn9Py=_q-px*;+A{dD z7M3=3Fte62dri1y)qtz_>u~9w(1>$pbd0<8IC-xb$Ju%P$4Vv88Y|WXK&`K*OjCjl zjMR>$Xb0y?kvjmI>v7 z4Usm`W^NRj{?{0iZOZy3RYlejqvo|BX&0+iBtTo9hue&(fx6+zO;B!_a#Q4uav5)| zKk*mHmgGVHQ0Trh7S$4 zWG^2g5`8&(vtC6OPVhs1=64!mA$%N;d@w$Y9)WYr7~J9~ zRDQHq@)Z2kwBs`frwm0E$V_Q6V;U)Yp^!-+A}K;rl_9a}6_Tr8A*GfyyhLi_Yh*OO zMP}1GWH-MaNcy?bfL1{enu$nhZ!(DImjIed#7WIxRCEYSC#K$fpae zq*th1X+0t>Jo$|xE&NGQ28CHLB>E49Cen)q+rWrm8v?D>rCz8cvLMmgbu2`nDaT7h zc`-bO7*nRpph=;JQv4%&pZqybH#-MikNG+UU&m|f;%klcjy7GOB_CSlL=zF|W?o+l zuXO>DYb`ph@}+r9Bijc4Uh5}*NA1|}s1}JH{T(vRci8~{Hk4>poul)kibCg5ptC5j z84VOzN~Jap^;^VHCZj0ClI|me2}25=Dr8wjr$w+!wRDdNepVtRcUng(Z$z>)rYdlV zOe>Mrw^ehQoiwQz9mV&Gp~OZ@li2hHNsV71(^V%leCF|=5L^F|=l=!aRhldZQ4#`5N!e@omb`)|h1rwe1Mh-o z@XCD(ZwiNdx>UB9gKfeL%qb@p6le4B3ET@F!`*-pHR;6-uVGwu>&Io6UR-dZ^g4D@ ze7kU&;w!n%2AAwQDV^Q8B%;jat6aXq9q>kz&b>zDns9F7dS&xL*_KE+Xgbq(t*v z+%H_>{o$H(AMUzI={w1{9#rM;%>9S(fC6k6JPMnTA%&FFl>bU;t)zMla=T0hC1Wav zV8Q!^Ie(UW{8{cu4y9{9u1Z-oryg}JO6YmbXCAcUv?WEE_pX!on{YzX)NVKO{?>>? 
zlE=(@or*ADmD>8%*m0>!rMhi9U#Zw~R;p7-IZ`Q2S^*iPEo`7PuRmT2sb(R_fB01T z9K~5!&HLR1`p zNQxmPdNU!_T92c*_&cUVOCp#F|_p`)-5 zpM;&{!N<+OE^!w2DYJN(HV4;?CveYv0?#b@PtjAv6+c5t=~GJbQ>2zss7sz8p5O0i zBKLe5Tpq{oMRdt)c8mpn|49VDBe9h4g7O)()=i_geiB`^<7lm>Kvxc^<_lqR)Nq}xys+lsvC7UV=VBRjGMS&_}i2ya4q zm_(_$j%9|D8A z5kLWspd=?mbRj*a8#%FED2VGsfm92L<9eJ_4(UX$40ufFL=KlTVx$CLE8_V65rHiT z4QN%fD+UJjATYEaej$DE;(Pe|cXE3d{QbJrbmiWjt?>11hOb8xd|fHg4{P8{5%#vL zfy;v$INR01iGu9xTnks1CU|*M!OElX>rZl zf`vsDZr!cG%{!I2a!TF1Y%0u) z;Qo~ogjM((vr`Pf2<)|rn`lNBmbtqne$uj)aO7FMbf ze3CzHpygB>tdV?bQd&BG4kt`Zi(J!D6gDphuJ?x~y)%IU2ht&b+byw2L=i{yu(UN8=}UIF{|m zz;g0Wb~t|8%?=1}&@y4p`h$7caI^>;WpwzN3jBPw8oT+>*nO!QyZ8{vcGK zqmBYCJwy5TTnjExo-bLqs+qS$ps(@GROqq7=r8HBV-8(7JT}q^H zQlf<$t~}nkQ{@`obmFp$9M`7?FWvL*hZP^DmVpC!z=x)N_^6f}9XqBZ+Bane0W#Pq zYYt(#PY|J}bjVc0YPw-*3m+q#Z3NpW-aMmuJ!4vMJYj6l0X#sv1aHS>gHuyDtvrHn?5j6t2a%#q@YTgg`{`%Q!-UV z`l*O8bTly*L6%6g2Il#ZK>sxwP3E-Lh)Qc>t)+AH%>03F6;;+SM0qJo#o1JpP3e0I zoj=jT>kvewMV@=OO-VFgucdYTH9xwQ*QN2>Jlej+RXt>Kmc~baP@+YkMV^0ER1V7& z-(OX!J1vdllQy|Z+edbUG>J}cc&}Dtn8dk~avX00{ntui#tof^sZ^`~p7tFW{B;0v@@~;hysx zE?LjuMA3Md@feQ$UN|I8!!e#B5;KJdQB#!WN!$(_$DQC2+@?6+@RhQ1gSf)Z?G=${ zimVJIzWA_*0!=8K7p01ZV=slc4_BS~lu%#cF*0CSxaQQ0Ydq!#_p7L~+9vx%SS9zF z;%qPH(sHP!Qpb6TXj7;!@?4jcNXr02iF)f1Y0rLD0q71z+RS%|NIRgU`o78awGwHjR0?Y( zjqadCb0xAXDHF!Mn(%p{1v(627SYX~m)?K!-u=(rne+Ta7D=<**LCC`|4-iU4gQ~Gotx(47P@;F7tHD10 z9uHq{#1X0QYu131)-CGa@#pwCxaQKw=Rm(!GdzHQSAHhV*;z6Vm6C>!U=ceFtAtrR zNMgq?^$8sKxpvfXmsBh%Wm-6kM3Pke1c@b2`Q3i0CQJ%0VnHBCAuHA1wBfnHJR=~wG1wl zh;#}`ZbwB@JIWHATWL}U zN)m)l6esdHigX?YIxD;dsq$Vyjfkhr#|H7-!JP=9poau?At+Eo1lrfH1K!^4@DU`X z!-Ep-E)uPkqb)C08-=okRHoxjFxhLQY*VrMcG3=>f!5D z0}s0@I7`I(emU;nE5)r_rMPjU99OPVps!G#uax7=)pDG>Rf$XVjH`DWaqdpN3?lTS z{A#RRFZdHfswu}5WJ&EXT1oLR(miC`@--B1k!V4eWmADRmFRia36euid1kBfs5eNS z^a9H%)xt_H|3!?~a~oN)j>nM|N~E=Yvb?~;Yh-yT#y$gM#>JJAR%9 z#2=AJsn#lbEMqH}md#tJLH$=u^Qn!TYJ=4~xXx{Bd1G0}n~Ld_<%T?)uFng!lI(@l zkiSEs*?&RmTT`r6nLY}&k@}&c%e+}h{4WQj8CQ3>8mm$SpL zf|9&!M;exGPgVT%bBaQimy>^P=Q>$Iu~v1z_hhP^y!A&4v4szapU+gO5#+lrRAM&; zT9C3#$FA4nI6u&*%$iiqQ<<*z@&iqvukhw51B@hR^4h~rKFB(iJYRRFD7#A3xm(He z9nWrsM4}b$g!}GFl3!fpu|>6bh|`;RrL2) z^gcGyIz)iAD72|SkGvFVHWg`Yo5A9J#%uL_X^O3pH?72)@~Y)ZOH0w!rC3-RT{abG zDcxr(%Kf}XQx@b|;4!@RUSf!JFOLz4Rw6C%JYBj_3!VHONZQAIst2#p$d@)`Tg#bV zD1Rp@9g@aTJ@Q>k;~4o36+i(uuIr7>+QKUa1rUz3!$ZyV?L0Bi(Vn9Q0hg$KmY~S zKkqqP!gZGY4maYO z$^Ge+X`I2=K@ueUe0pIs<2~XkJLYmdm-xPtYkiLIbLK%ePRaxy<{fJ0)x&pNaPYRwqSu6dHyW_# zS_5`dn0H-~2(y&*)FztxnbIc{Pigg!E3o-Yxnk2PEyBFTf)NYi>ih;?CA*2sZ;pRXM{u!4tSGwZS8%VG%t8%h(xM$BRfmhE4Kg z*rh&&qg0H^CBK(``Rr`wy@C(%BC;&`{~_ETPvI`8 zm_c>bBh;3Uqpo}mjb$S`W2i43LtTkRZP6$hKs6=0x^NJ+g+r(*7*HbJkk^gIoK7@m zb)Ye`6Ah9poz{jLigj(edBe z)WXlH67II;aIz|gtywYd-_ZvcUMs=1E2Wg^3Y@=8iKa}Szg3Q__iAwcu9nvEkCkhE zv0{x6q+KNvy+ou}tfffnviz?ETD9v%mNk~~b;9y>+$ZTF^ZN|B)+BoQS}l*-7>sBj zdDX(o^%AAlgn0oidaX&gzDegbWn(a3=CPxa|kPVPLb!ID9%4gl$l6$ zc?Ex-6+fqAnFura=hmcoe%h9-lZ53`&QR6-PQ|KS=~%Tl6YCGsKdTGbm zGVHljjeUIB9J)q%zFDsZ5T2$$OZ54QHO1M6;%qCqgp_HAc0Ncu6q25CgYv8*&u(3m zhM7kX?t97hK~?5X{yl3MG(vfnSrNzJAQd{I z#^4$^29JaZ_#{740v(h&ry|jzLbgoAGD~ruMsVg7w@U>N3N~-L0o?B=In;cOPue73 zJBfhwNnNsM5Ro^B7#WnPCTe-EBs#NR1o{`1LtWJI5hY!pQP%Sr<$a$~)h`u3zMyvK zi&iR+A}!TDS|)!(t3L3sWBR+6BR#7PG!)4-1lou&izJV}`rkA1Qj=-ZR1ibRwFtGy zv?kYm0)8EIinKo6w}`Y%1+Pu_t<74POQFt^ z0fsHVAieQD(kRm+)F}&k%`L0xjZP{z^@#A``F#7K$v(#2(M! 
zL)rDtdSZfBQ(Bh2vPsrcco<|phI_^w+|uXZO7V3~r2r?*z&`#FY!jwo6F*Jiol@9P zf(>jb4z_W#u!-gVm?>COh^?XxxE?hH%g8BMNW_lqePR|NJc4-yr8#^O_d~~F7BY@| zl;=Bv6RKp~4c`%5_Zh}jFHNAYQes4)C57WU#pb#b*D1@FDAAW)Da{m}t0FtDgShNM z!KUDfA>j+?P?ZOX=PmoAf91+mSSz> zOkd#Zg^Lo&b~500u1h+IL@@;^vw525J^7#m$F16N+@c*v@3-U7y*3=W+p5G_>fNgW z(n^?XvE!14%+M-P=B?){6^qg`PL*QA$r8o-6Phe92(w6|AOb1kEN!tGgR)AOX490q zMbf-aqP!xiBB&B+7J((ZD7GTEDpD+xYd`^}B>&f6BsqgpOn9#sWZQr8+4UbkXDDRZ zl>ZQTj35&IA3BCm8`v54%125o<+csQNo^>IZ$TcVHYc_n*)i?N zpv=l_hsh)_%94 zH-dt@;1}2q|9}nz1$H7dNYX|+5gpQw_^>vlMs*-FwhMWQJt#^sW^(L6p~!o3D=L$l zQ5M&P+=xab1=J(Ps}7N#weWYUpg@C82> z$h0XU*G6kZrhmlJTRHUTp7dH@c=LvRQg)ER(XkZkW~I|TdS5!g$_na4=Jqh06- zrFk4KG2?KHlN{&?cqQvKqD7puwHXT|^PV9p|2ZP_pK$pxB68*st|HG`^f^SJTnA;& z@^xCITEsbw;v7}*1hGYuU-uLVl;`9MJpx_(8rco+DA4ba-|`+sQq`kNO5{ZrbJhySBo_NIWNXzf5Yf29`jm9 zmbpz{XNa_sL%opZu@IFu#Mlt!r7VcECdLDl^8Xng&-WAhD9|!fo|sS(2!^~(2z(vNA2ius2TYc)oKPqqe_RAz%xqU z>FFJ++}&s7w0}f)+b>$4w5lKd0qIS@sQS?|2}`o%BiFn}Y_$v;q(oP06+C1xVMqz( znH{PCel}#*LO*`aWTrlmXdgbT<)JMTvv^UGJw=qIG#o|QIc*LPQ)XeGI0ZYA<=98C zk*IJKg*RNpb`)mp6qzlThDZjv$K|_W8fM&g zFLVNTLnd%1XbiUl##EJyn|`D70)5SU2-iI|i56s%lUwdX?Cf&8+aRvEYm=T_;r8pE zax6uM%d$=CIA3z5OiP|I5s|)3`IZuON}wsalD2V9Npla*TTyr|I&g*(b(*4k+KM8r z+IF0@Y{N;mr_?qc^FYs2);S}2%M@|p6h-^wgEpL?fD6Z@)aLzG3Udn%N~D<_xYL5Y zH!026CEBcs^R6qBmQkYyN^d(S$TZcIJ&7=%E>oDMW~@72gtf;CDTi$5qs*Exs)`hb zFzcv6+mfcCi}T9;e-&sofR7k*s>!O6TCorzR)VW!mojXii?YFg{G4urCd|_Q`?hq2 zA<_o_F7S1NocrH(453~s(CRvMei(f|yjYwi>MS!B%7iOwmfTAX*nUN(|8Bqm3iMI4 zcAT*6=HIyor|tT5fga)KXcQMcMsUSv6j$Z5DIGlN=#g9O^xY0+$1m&=?y-}2pCWA) zJ*Otzv6dX^xM?_1svjoIs%hX|Q)R&6EMX@#?HRv=6zPm7@Rz*ljK_%1eS(C-CrB%n za)#3=qC}UMPoug*Ce)cidBr5kD9;sT6R4zASCu_Nb@>#Lb`sT;?RvJ&mE&lu7(r*{ zFuE%H(N*4yjxti(gZ7ec3UxPH3pJT;%9k{YPE=8%%hEb1o1Mr{>Of9HJF?=tkQvv7 z^f-z#Nsgm9$M+#QVE`$KgGf&7M{06E(o*}8nck1QtU=^u_92hLnwQ#*+~iK4L(bEI zEXr{@Wjrma6`9fPifpM_t>;5$$H+{IZAhoACq}d(HoODTkzI(4>`~$z5hZOe1-ci( zVciI%Fb9QpalHpo+#eUMNqq*zJ3o=|`iqh~P@2+7X{N9%+EA9*g0lE#ltfdYLn+X{ z^@wt>LYP~XCeU`JcxYJydy8T`FfW1iePUJu^LwSF0_OLtVPRDZE8BWCo1)aecJ$~F zf%e9#bqlOqt3{&?R#A=(R{jMF_iBnZk!4eXHjPdjEJUs~R!S6FUsfooHc~uHSuD@S zYqet+_Wc!+Y(to3c_|ArEf^xbVqFB5t&PwmTB6m4Nb4+LFV#Q(I!IBmJ{HTQB8aLA zBBl1UGI`5%WLf1^r+%2Wt9cXr zTTEW_7YP@k<_%S%)T_B{l;)GceuA;C;PESY4q=6=M3PSVT}ahXQD>3oZOQm2W%-|5 z;wjE?_)qd5!WOcX+lUDCGN}W;JsHa=(93tFV$J?6$W*Z#j})nCX1AX%#m@5;Y8u)@ zd=QC1pQJ#aF>6re>m>So%|ROkc-65@Ro1xW+J!qrMp-|QDR2Emock%v18@!-fNR7M z+#-kJ86zX?g%Nnhk@zvVb6rN$yGx2j^aPw`WPR)yyb>l5kTQjU)M*5!i}(^sZ%E!8 z!en9`BMbHOvj6cT9*o+_d#KSE@&@Kjanh?Zl7q9J_qU-yoO5 zEYh6a@D@3ZS{`&E#jCjOJxV&>quj`ara)Iyplb&{DS>Vr`h+G5bo1CZHLy?(;Cu8P zJ=4FcL4R8MhLp7XO$o0i&LYKsVC>DG7^fWn9b>{9?Kr^@=>>^41|rJ&P0HpGi71;2 z^n&OL6ki^%$uNbRGXB3u6Xc~65y@25qjyG$gyeU8#>BGDqyZBu_Li8dn9 zTIG)Kl<417b&mPccREr_+9-QBU-FKW_I*Q9k0#RvUD`w}BGj3!TIstqsoWu`84cee zvF9^Xh;qqyeu3c9X9y~Jjv#)1v}p8m_~$-{pNMppM84+Wkv_`@IUnj2 zXURd92=fDp97l=lQhLKDR7Chr;HW}!ku|jCTmIVeoj}Sm_nT3k6?}~ug;=J&R!FX> zKvBLQPPtI>{0MhKr*J2Pt;k0ZB_v>6MV)W@QK0?!zT_4~<)+`Tn(^zl&Kr?lL}=8pydDaUaMf!FmnqQ~-TQIDtq&K8M546_ai5xeNg~BkaYNEFlnh&U z;jCpR&RU2>6HAKl{Wi!1PDd%fBEUz@TXD>+6(`84`;xD$@8kMWndOcGCLH1N;d`wr zE#nYH`GBNm+>$7>)E{18H^pto%!{*0s>NRgno?}Szi&&!zY%FQ z((!LwxlP9qX@h?gDBFTa^uOsCLcLU?jeKZRdDg0M@b_?pzaLc_x|E%cauvDXc23e@ z>agRYKIQkdX6(7ug8lsaNT=o)|Nh5qdU1mCe9Ey8=bR|et|HK5IPX4;OFooncI?#W zEnq@P^tHfoTw|y420M$AyMH%=pLOYcMza$dJ;m=8zh5%_d;DYACp>`@zjLlB?7*fx zWheF-+$Ac_&aFHf;z$yo1Eswy6#zG@OJHRI^08ADgi2zqKq&|5o*-kJgQR1qc9edsDB#l2`R>{00z z4Y^&Y%IZQ{x=a(^jlAS8WG7Ob6MB#q--{HIL~%}_IL9XpA}*f94RU!1aa@m0=tm6Y zIy$-=(NSH9j_5#CcspXEIw`j#q7&g^Z3qi(MMMY*Zf7g`)9n=HP7MliWKbKz16vUq 
z*p7gJHu(6p!pFZ2et{hn<}L(>_8=&@2LU87xSJx~i|~kEL`8KYA+{G8iG7szZWK|P zi*#u&P3b^+N;}HAT+Z#qiESv3Ye8XTBeFvpknCHl1Ul5U5&<#>(X-!d=I zTwcD$-vklqh4mF8+$x$avP_}o{*@xlYeckVU6*Vn$3c|&V1*&d22xtjRJd126nha1 z0xjtw{}LXrSh9WD+E5~CBqGt8P^*EC3PY&1!HY{-zAi?wjQbZPT4SLGwU$G@AkvaI zous9A=sDC%q?JUcV3mk8S#2WJiv(JPnm3z;+SEpywP?G6l-n~{TGLvh*%}g=7Kzpc zn#&@|^T>X6jE0)(T#!6x5olhQsX$8vT1(wX#((RP=KtCpkN>1FD|z0=mi+fN$}{&X zfu=k!+ev}morbj(=#7W+)U>i&PZU$4i?Q=ug^EHSx>AEvH|ufwZavPKH{r5%t2Wb= z%5m8_2qi%o zj}Vyl2tjF-V~R*n#+-^S2W3A&MD`q_a~>licMcJGvuaSF8XU+QVq_tum@*t)^qfNc z9I?gE5l0eAUm}sjmA*hM1v$3-IpQhFi4`x9Qt=XLGJ|3DD`eHbRid0r@|)hHNYXS& zF(s?0|l3OSDLTWGIS5)F({q=#Srt_d_56p5zL4oX^w zNU=nV=S5Z|*GSV4H2IZdgh7h2f>N%5e*s^|_nQ}Lbq*~;ZHV-b=+&dpk{>M+ty+mj z&rzsn$;_XW=s(nCHmy@y1&>z#K3h~R=|5D3jz)G)8rVrOrG8w}J$^^+s6IWoO81a7 z52N(mS2ZIdm(| zJ96Eg(rnv>(-i1aR%|KGXUJLePMp5qfuodPnY`(+iX3Z_CFvFiDZ__lM!VaTuUk#n zM-EWL4ED>^(btXWvg9&r?7B>8qqvE{Nh>n9?R*Wko|l>Ps<7#d3~tkA(%X2t3L8#V zsEMki*1j?El+@riX5~{QLUlx(4S8NE1Gi1&Sy;ZGlB~v~$iP5Nm`%l5$+KPw<1f-o zG+8Fwxh!y5NiLr~ru@6eug?F)XOv>Ah9=P5zD48Tw(|Al-?s@|{}%$SBGJ5#zX-Ja zEb}QF#d;lRiO%zP$KT;5$rYDUewQfC^83Hqh}{(D-Pc>N`<6`a-HwCi?KmuHHT)bL zv+u!i%JV7bew=d|qa8=u4g>xXMo3HGcMQ@w0e0V3MEfN4OnI!Y3)tlApUDCp?8y>SGmkc29o_&&+4=%93#|Pt>zO%HE|EQKE~VAc1W> zJHd%;GuSCkD}Ic4K11S*pCP&UDN;)wBdhc=@c-GpKY~8e z-!P2+xPbCbG|mC&Wq zGo;L1BIP+Qu8)%3k7$Z>cuYUSBKr|eLMgC;VciG_?V{v%!#}W-(%TLX%CDDy2i$$z z6mGuFY@6Zf*QW52*0%+oK27lQX;yf8X}A+tuNJs?x53r718#op@C@vNPjDZ60%dyj zKKO_9BP4>?BG(bogSgl}WTy5*a1tWrIFgHBw7YgmL_%}Us6*dn~@#Xi1ffZ zBzjjP#p%37W;a5I5)!2s}*6v>>z~qu#?b(NEQ7a zpui9RD}nZ=6dM>Kt)Yb3M4SzgmTjhJ+qj>s;xSqzJCIPGDb*`QqF3v1S=*!PZzR#)~l-=*ZWp_4$3KQS7BiYKWFAy)>sJq&Wjh3_yCDBWx(mJcQE}789gn6MBnf?*R{=Y%eL>401>6B=RW@{-TN|1L^ zo_|OVSuD^ihzt}okjfg%cuqs0mv3jsfc$e?5(PRLKPi&9o`C<}PO09(<)0H(4)jVM zCwb7TD94{YP?}4htCB_0B`;O^x>zO3FAzsr zmZ)-k=~E<>y;KvdBv6>OL4hxkUh@(;^{=rbCIKcjj03tGoSm<_(7^U*hSO@F62YxSTd-+7R7I80F;p5+q@z+|4$1oWZ;hh)R1)*KYk=7#8#`@o3M8}8! zM}htq$UsM(r9v$d-}~gZ`G~j(b&rTNI~qn+5LIGN)vBggQ=mJh|5UVco0jT9p%$rD z{HY;n9}|C|X+qLIepiDJYfVx;Br2^^Jfw!Sn$hqx@+HlKTzeKMo(APzv zJq9VyeK_qxiFO{qxrY=YdoDZl;;cQf?WQ<);nag}9J8Vw+c z6pNtb{vD=~Wz$IWkNCf~P^yWsu3jVP0Egc+ofcBfU;Q;0NkR|`#l;;!n-8dy3E7t)nWy5O> zS3UU|_2Fk)K3n{Z-3g&AhfTvQd`6`tSw!(O86zD~N!OTxecUYUweza4dn+v*8m(z`kj2@Jv_o5)B2U&^TNKfcik!MNMh^L%JM0F!9 zst=*jg9wPAz=rm~FSwgR+euOGgu5R_*QWz6UTtvjXoamu3vAt6U`K3S*t)jD-i`a* zTH)x{LYfs0u1)jH4|%MkOCwvZ^Bnf>ZLs(3fW3D&?Dbh89fAhn6g&vG&|!Fm4#6+1 zA3@>$h)ozka%vAU)BBLiwlK9zMWXYQ+L0IEhQio(6vVV6Q%d(q9qix+#QWDFRwB^e zHHh%8RRZnlSP3`BD!4n>!riqFK0d7o^lwFEXeSb52aps$h}48Zq$CU>DRBfzNu!8P zoWws?Q`bG;r(^AI ztqRBLeKLqJ51Wq`V%y15>^M`7-4`mc?@A30U9ZEbyA8M?k!RaB+;i=LrKD*14ZtB} z7%mYbDjg#zc>>|-Q;5!sQmP{;)smVa(dcmAEJF+C5Xl>7 zbm3FPNd&r*=c|5=)SB0-nnq63TjV#tLm|bvsDt9%`GLaxNzG7LL%FIW^%O0s`)peK zS>-m5N_xguw2ps8hc3+BlHWWnlc;=0pDI^pl&YhQ8sbce9HWemlZjW9W)WxdBT=5B zB>xDJXHB3rnO+d+KPk|9BwD0bqR`KmNHVWOW<6Y(*oD_;AlGSnEHM@6ArbSx;fLaE zut=a6OT9$BRo=9;dRm88{X+&d3Q`$FO6!SC%km%So~2+*bXp&9s9{v{&?;8(|uCK^$|B6hR6;LKU zY5Ii3`VVTVY$3MlJ)$e$LZV16QE6THhi9k!l_>M@p z4 z^bFym8>QK00B0!AXPt*|_TeB-+YjJ`T`!K?^x%YuvqdM4ns?w3Md~0qq^DdQx-GMu zQj#gq`))N-oEz1IO(MqzBFMY1YLYC2{Vf$#drC z@j>hyY$?n336CkvPn0k_QFL zv2!ak5GF}grSexuEPsu762`8fmZS^q*x|i(gHCld&LuPU>viY-Q#djh-Mg|qOAU&cL zsbMWh32i}KV56FiG1^Ziim5}Cj|g-fLOkmb;86>IuSWQKHzGj9nVkknTS-YCMshNR zIe8Q*X=6xEn?!8t6oQiub&3z`US}hW-p`y|z zQEZ7wFRaV5i1Y#yr8dwcT8~Imc2xwMNLq*tU{vrm%T@>cOBMuMgqhoT4h>oVt3;c! 
zSg0jEMDnIZq^(E4>s|-9wjXHP}!jT2eg7vh@p5X+xYBt^Y`%jrA>y_nBhkRhzPC zzp#zVN@$meG$q)WQBlpRXd=r$5@?yiTqIghDII+MN=mdL zYbAZ+H8QwOklbTM-CG|28tFAMfbS(Th%Bd6zT^#1BFwz`$pjcuwkYQ@{Ih1^M|`tp z2wO=9QG*JfA%-`~c#&|vUg|rmfq-rAQA}|z>!LXKh&X?yIDbanz*jU3eM6H#$&%Rz zHNK%?^s5r*mWeND)5W=G`a60l&Al_fqMxES_~bW=>hBm<@+$erx;Rrb$H@eld`)z5 zo_hCp{BH$XlW0w#|CrB*)&$yQ)6t=$QP0B{DPbgigeo-q|1zg zjUSQH_yI|j==l_nN^Rhw%9k#B&WG^}1WAr`j!b4ERXHBRo!=3c@TK>H);-Mq!!6M4qfI%ZaT7=m=Lh_>x zRqmiLTT-AcV+mW?CRob9$0MhC?6fK&Ch~ko25<#U^1UQ?nzHUaj4N(~xJY?E@6?Yo z4t+S~(2tY$eKB#+d9M&sgXw@)Uw8--H231OK@6876R38I8yOUsFbn&nIXI>~#luu7+xQ%= z8P6%o&*72v0-ia-OMaK0!8eEZ{=Ao}G@W1G3wRT$^Bh_97O|!85mWkB335#7E5w(* zMjWL%x{T{Y>cLB0_?XIf?EJn*SlN4o@-?BQ?+{WZ$Q1Z*5L59M@gz>fyW$n{YhIwa z=_&f!W--)0h0)F_Om<9QqGbeQ%~~{ifCAl1f$pY2w-@)LF|P+znLQ|xfq#i2os{P| zDN9Fjj1_tAMX01`gml3tup1u!-Ej5mg0pug9(s1b!Mz=Ju5EbW+(z+jg@r>i%*b8J z@*TS-+_ohT8gbj2SP2bWHmINH4%hGSxLZ8_reNQKTaInG<|xR!dacK;&89zMN9NjOSQJ30JK4(-E2W$4{$-h4sZkEtNwIxUMiHItV{; z*_34z_<592i$pJGVPK-EOiM&sLk1-NM2Y@seY7spk|V7RI#j5r^j{;>E0jb_I!DqE zS-vHau$6&`I+}1(m<1DgmON`ykzOYIDbNOsL|WvRH!jmjUls-+{#c+5iB{4ruhRv3 z4PU>OVyvRd3JNlhUArd>Yxi=UtlXQ96??e6FB@z2
MfM%AH#hj&Ptl~ z0gQSMwk&Uv<7M!wf!#VqQTy^fk9rr$%`wYN>H$4jqv{k^cl4!e7 z5;g*d$Wge)Ou&l|OwYIpcqdK4FHHs>KIX&qDZ=xgA-d=VrG-+WkbKFP+@}4@H>!lf zO`=Plb04L<@+IOco+FMVQg{-pDB)GFDC5t`3pG$lM*7F`JVFc~XmMrF)qutX9+O=8 z0%=t{*l8T2eZ+T#B>H#C^Y4`B-%(3(t{MJLw8*m|&{A!qY~U+O`@f>3?RQ@@2qZ zyW})~Kx)G~q)-SGC30Lv$))^7RJ=eWKU2~AKsOQSP<{qOgyQE2;O8%ht*nO^OJr3W zuqK3+y+J4mDSwS1e)dAjUm=|545y4oR=!avnWjXGJR6Z|L88wht`c$9qRv`GIE%8M zLxGk`9#YvOpAuX^X*STJ%!@HnF(k4qWb!;2JWmEiSxDo#g$zn_I;B~V!E?z?pH-Au zq&cxcDphC`QN8u4R442?R zJPa66X&V-Lg^lYD9k^`Uj`P;7IBnj96L)1$UIPx_kU)osbFIodmR#hWl0G5QEa?;! z=bt6rLYL+($3<|9lqAn{I3HWcW}OX(^RVu4u447U96lTK7P0OS#ad_m5;hzz;B!k8 zX{od&NUGw-#Ig&=ZF+Fh zz6WO>_TqwbFRoCc?|2Tv!n+?2eEVSUD^)AH;p*3;@{m1)`ryOPhhK0X0)l%HN{J4S z&?GuCx{vfzo(B;dKZLl%VZ_A`s;F~X#so4m$B~^if$U6@KB>sc5XRNM%&al)A4N{~ zDDrbhQJ6D=B0<)xD%0TCwqxh6URn8bZUl!QwwQ@9a~#squMsuI<_j2wv-%d z>l#=RBSLLuErS%BU`5HcvZ>=S_1sV4wr+wsv9O_l+w=9dO|a(cZRNdOo8j!z2q%Yn zI9gZ3fztipZaK_}*_~3D-z-a~jw2`1aP)W@ z4pXK@sE-^=$Kj*0o`Hi$3EM+Q$k9yfKbnQTM>28fR6b5!D#f|0>Wh*C?*}jPrgmf!hHS?0}DH0Lu}#_~-f&^nta z(wpKGE4L&nkrttr$n<=g$F^jwRJqa%GCg0xgRG-S3nJ8pEE`A)h_F(k*-Hdk+W$Qw z;5s`f&^sy33K43NZ(XECqW9)0iI((^ReLp7?=wm7ph$~E8yF&OD$qYdq*`OKG%NYl zQ4+07v#B_5I>ejV5=5XkNL7(TQtFS_An+XPP2u_0AJ^DGY1eYM1#M8IpbcOYq1L2Y zZdis~D`cPKQmb5V%C!--79v#j)sNPF%gzPxT&5-Z4jptAz zeYFlp@3*Qlc$XZzaMPs+x7~Vi+p`CEeEVS@FbYe4a4myIaX)Z~56K}|2amuuWRy}p z42OtOI7$g2$#G1WhFbz3u!*y9PnzNK6x`w`;YQi^NSK6s;v;w@KZ0lKj5hi{{V{y_ zK=w(O+{tOK&%jTrfMm=efXhK#4$hiW`ITY0j}b2aHZEXWsQnu`yx@tF^4LeDcKj&C<9`z9&RM9|kZ4m(1$rrq<#}luhZcQa5a{oVBhX5ozbH!jzM!b*GYYys zBClPlYkWjT%X?j*-yyM1&(*DxiVf^+QL17q*@-KEi71IUQ<9ZH7d}G}J9$BcltXs- zgaCfF0`i~2KmUo!yH)U*K)xoJ9k(#bbU2Bq5P_y-N|aj{Xwx(f5oi^8raVguMV4NI zE?XkU-IQpBmcFq#!YtCP#WE0Jiu`hBV2nRGnfAc{X@Bs;!g^7|=x4&R)ol-S4c zOrPWTcM6WN<8X+Xgk6M4Gw%%)W;2mv%JNOGLEP{rp8dG))`#2f{kUbcM7CXLd&7gr zcn#sEH}M_DT}t%5kV%+@@gW&Gi3f4BuuFIhr<7-K&v*f^tQWkF7x2rKoak2wD0+?H zlGo}#2N^Z5P{!|kZOa?9wLM2)$1HkVXVB9;i>}5QG}q0bu6`Ev6y)ZXXJ~JIj`pUf zXs1B8H9baS?KJAEr%*#gp3BN-P(*nyD49i0;S^GHrx2gOju?^8W@yu-D7ErC@-hC{m)=AEQW9pp`@`ftKfn_DquJ z&5lbA*eRu0uQg-ujaKZv(}w+&=);zsIQF0mr|o)h-k}GVo%lC*>cc(PKA3y-;DKj1 zCAtqzDjMAbXBo&BK#30SM_@=lf-j?77>e^r))30F z2T+;ahqA0*6lM0IFs&O!sa+K6E)*qqQKY+&7vDvh?xIY0A|t9DsS#~R32#MGXfqNi z(eXjei1cejjBg7fJR1<^B@*3$K=*q1xz@qowO$cG{9NkcO__Fgsv#6>=Q_3$scwK1 zadv5fvs(+rwhfLht#ELmJUcI-B>G`1Z0s9hYYa}L49md82X+m3;LwZ*jud6e@dF1Q zW7`Z{it__W+jt;(&l25k;<}cHZEvrolsMY*Tvk1I+W_7q=*N$uKc-`?I#Xa8@9 z$~>kbZD8a~OWKFQ{A+$r!HTWiwk=g_m#|#e_V=v#nXmgB<|SK2vbE^+%H4Y2^lsi9 z^xSD9PkJp`cR(tHXeIU5nG8Zy6+U#Cl@fVBWf}@~98qX~J0>-uy+P`JlVyIe=U8P?U!p zAB@*M`f!C0iEAEw81ln?*K-JWCCcQ}ueXEzuyeaanF_dizxJ0_V^)h?@nUw0>Ifc}jm47Kq z-D-(0#`3|RB9UlG>u7$bQbI~7TIIc(MAr^})dnw8q6O0dcv2Na(m1qK4o#+2s)ooi z>7`8fu0QJz)lItp|;|K4eh?~y{0PHuRwsz}7wz9o|HOqniwK`E3}1WL5X zGetQdPa>O7;hi}L@2tlPPj=Qkx$c|>GRGGW{I>1r*8Rmk2GD)B(zK#oNC`phcWz zP@Wo?M}bYJ&@KqHHc(JYy;vZNBAsbUmqe=rjFcAm`wD82m7-xG0mrxdf(eZ{#K7oB<(7wn~Ga0jkBcH{a( z-ZP|jup@=)A&;X_U3ZiF$sjJf58$!~?=k*kxE(lw+Z5@$5{-_T#e?|Aa7=m%my{>) zOnV9`br-?ENqo^uBvbIy*(u7ce2$9RXJ~4AhGq(KTjMl38Ya+EKZ&-68MINHTSTCh zM9-j6gjpiZq`v+!w>?Hp%^WH#r%_QkgW~dOinGY`9P&z@AhTc=@f5X))CmM9j>9i* z9G;OB=MX7THv${qAy~4rU`9c_&7bX>T_-MCx8t;V3y$-CdgNvU4qdOqfvb|LQH@>a zD;GtaMVz;uD4{g-IU~8uMz1eK}to$3+ z9LS$%^?ng=iA-zd!`AaTw@&7IPy$^FHMfBX^vMb>c%uh$Byh9!d>yu3sK@q8jo5Lu z3EQtVVdqWC^xak*Fl)n6iw>N$?!tN7K3t|u-{SA)9zP@ZUAtlK)}?a9t-ZQo>(h;g zzPdyQ4Zx2Q?N5mgj2u7+XiA^G8vhH;BsoAynlLqd2!8 zMOi&4&+b80b}uS3yHSzO^|T%oC3c`7z61FrH>Mq#QLRW1Z$?^p3z8|)36$tq|0YEH zH6hZc0pTRrvmSwNbqI2kwgCa|4HW7Ic#AYUQ=*9ovDAP8SXOU_fM>&q7O$oPkC339PwdS!BahAMi5obG! 
z^g|x+Y)^6L`a=q|6NTKxz8WsJm3U}VNr^5YWq5G61lG5TVR54ncdz8&*2OH`yp)4$ zZMSKDcdV0 z9YossNH&irt0kJPN4lj|%Var^Gi4dy^Cz-Q);IGwQ`z2AN2+=hU@cjvi?pdsE14xKB24DXO>*a@dhEGgkK^~-aM89Km*ih?yy=}M=N!7! z@>xC<&+&nHp8GC3_TsWk)bg+kmmDQ>%?GA^hw3?AqiB~8RCa9FkGlyAwuw&cUvCWsOp{s_*|kKh(ZkxrBwfEvX%4uaHprO4X>&Y{C`iN|B7jZx^q?}`p2|UtlQ^6 z{#47Rw5kZTM59Hbe^)e3{-$VT2SyX~3Z-ZxraS6zDG$=r1UvI18n{M)|rgs&t(i zJSY;)j#75(FUV|>@^yOE4U$BWPN;iJQIyE)D@0JDCHfpHHHu~MTc(zd;E_5FcXrMM zxAbXvWX>vq_NT0gKnE7SL@)(9oB|zDM#-dXO7vL-I<{65Q$wJ&^bIZgYzVX=!&xHH zCM=|5@L2vlhA>N&4k>eIx~%0>e?zK5>KZRZowZVJBJLVSBwVGGup^OF_fCiFyk5!O zmOO5`{z!>7vr`a8aSqCVfq>i>@J@dM_v9Hkv-9e}@1JGpn5r9pU(z!8{k`GdgR5>m zxb8|}bn3A%TCl?Bb^=tayTwidmFa&Z4q< z4z-lvCdzUv#ksY90u6OjsH>eqeeEMO)J{{9r%_uwgX-E@RMpL)rv4eKNoCDblvO@P zQTZJ5OJ|WIb)NHQm7FE<{t}b=2!V+c@QxXU8$11uL4&aM*Cs@{$Dd!s@TznctlMzb zq7|piG;uzBqX7r5)nVV2I#rfy&!rmdI9Gw~XDQ8R%CPOUw#~p>qrBP9J$RiF|i z=pxS}J@s(IDXyQX)=~~B(_1bzVB6J3{49gjDbo9Ix8l%UnFy={XD#^|;OF2vpARy5 z%w6YB{_eVP-@OYKQa70*?daPJSN}eE1`ohHlp-B9gdhrZSp29e6Bv?6QYH~7@H-ov zK7*L7X(Z>)s1%z5%CSgsLGdJViYAa-^a#0yk0{L)YYMcGTRe+A3UnR?x{$|~l}@3u zWD+%{lc*{hM@8W%DoJ_a04fUlP*d24+WcPB=JudEyBk%RU8qd$LPc^HN<^gN+L068 zimb>MWJa`5qMKDTIxeUQF#(N;3ZOvy)+5BL4#6IE2yt&jAPMkjgfB(f(}mLPQVS2t zv&giUQ$0MLnkdvwaC2=|0`213hKDZAct{a;;5J+Cv!hJgQ;r=d#g5Lhza36ebD9!u zFCzW02@Y=Uu;Xj(-8u>e>z$r)E{U&XvczDH=6+9c~Y62_@RT ziV|H8d#f@ySd_!TtQ>ZCOJQ@T2v#=>V17Lhx3A{n=9OIBxRi(M7jtp>LLM%i&&3tC zS1;yMuJdq_+b>?u!+9Qaj!1j{QUR{rs=$qVHMniwfLj&~xM@Rq<~`*WJJ+}6e&8{L z>+EFTU?=;I4?FH&qxi=P%JV9bXB{QbCWtWq9i|e!g3`Q<>%wx8YLVy-BGp=o#|jZ? zwXBKs@{N+}5vR#Cg;otZ)DhX0DjsT`%kya;BHFT#uhCjT7v@cT-NtyH^T#aPC@Cci z%UW8ANVfsyoDwapQc}%zk!Z^FDoXT9L!x(PVdV}<^I-3qtfow_+0CB#9vR%o8-bvg z%3Hf%>Q#$O^Cl#FS)Pw%>msaLU7AfXqR@s!3+st`Ezc(cEvR$o5otAv3?=&S zP-Xgz!H~RZnWCzI7>HcU&5P~&6O>=F;Ur~QkY)WCkz-Z%&lGu|!@Ra5MObsBgp?}Q z9_7vQSQ)nRX1M)gHTFv_(z`75 z3zbuys0J~LNWVdLlgyqd`Ou$G)n*_r~!*Q zCflWS-co@!mFT4cZJO#~)Z6~wAky4LITn5-&8D;wk!MYywRHm{65Yb@HUNlm5#{fkO#Nst_8U7VvTr4*c`K)ggi;S2tZ&)~_UMoDv#tY-}35L;(U|Rc*ThwYDrU2;>_>4Z99=xo2~8~k#sPT=nJ|;3$iS7ErXV& z1Ad7Tec6d3?b1gfAHa35Vchf?#clsln1xJGq9{@l&EP+GG(e{PSto$ z&!dE8KSF%g6k^h+5S~1Vkc0^}8H#7DF#IEyr!inAfj8U=ig6i}G8 zNb$d7X_R^Ke$&_IV-?S5%4)vH0(>?sc{UmUO;!Iz<#WYILtHD*7(Q?0znmK=*6SsR zbFKy(&(~6>>#_ZE19nLjsq1`R+-Xsri;H|dTopmGZ^v!^9`Cwz!kiLq#ow(B1=@}R z?G)4pkFX*5MvfvNau^{o!w8KZr$F=X!p}e;J5mAZkJa1+k?i0k<*`FmIERu_{=Lg) zQC>cy^4E)04*MfT5&!N5B{S?G%^{~`7MV(@XHdk?MP&s$SLKxGvT-tjYEo4)gxZop zG?oscxnuxMMZKuY>qbp>2WrwgQJvO>^5jkwC$u9!woQ|0i9|;X4Q^5b z9qC_>NS}IydrLY;6M{WOpc~*vd??X^pB|MCaBGA=McK!_8D1XE@bG9+l`&k1GjVcn z#Y5sGb*5cKo<*XY;Ygu&c5jCpah3Yhu3YDNoY+33NIUuTz%_6Pu0ca^4;X-#cP~76 zoVyz(-Ie0ZW8En09vV%W>W)a%L>?=6~Wi@A)1bTh236y3NR#TLhGB42U zCH*5zv7F1xdAzhMDA5|>MDnU5RjZg!@zByd<|TRwBFLu8^T;-NjpSJK^(!gP!U~b+ z4cyMwKt%cPFqLb;Gitt0~k5Yj$bXJXX#RK4cGEuSYFq z_oQ~UF$v6%(WF@-*ea^6N3&&lJ!RYw=k+{~V7hFe)yUQ)dVcmrIj4ax)pCPUHz|Qq zZJ=gm6ZG2`$~dV~OqPoPZlk~N2*oX3g~ zigZ~16GTy_r5Z>wB`3Z1Ez;{`VBs$)q-020z0!`4DDV1=%APN%qL5Yf=?s2B&9Icv z(-4^xv6JYi)SGT(EA^_4$x759M4~2SnbAt%8MUNM(?5(#9;V2^M8(s^45@8AMon!_ z$rkC>G0r#7{IukMs7Sb))sZ6q|5TtCMVw6pT0uEB5NS4DZ|8M%JpNN5%clHDq78wT z2(*;5(|*_A^_d92sZ z6{K=OpckrfP*N$ZM)X(eMN2eT)qrl*Cr{z$FO#ynz$eJecrtlQI;CILY5v~`G{sp# z0Z&ozJw&AW`b4Qk-TVm&Y~$)bszHNtjdHz`zpVyGmcCKdECTXh!aM7!n!v;*NsIJK zIXTOaahM0n1QzciadAY zJP~=8+*sj~gQRx!sT|qsE?QpnO>a%2Z~2emZtxf^!^Ysq@4P2F&wfc02&F_tranS+ z#w2319wCl0omeo3=s_kF2aV&&TpYyW=O@^9cIQRuJ{_=!Zvjw2}H5rUGZ5s=F7sEnOSX9t%89mq~@ z3_G}K{JR%Yj>{;}6_pg}s>di|n_n@Dg7P^Ol|N-C>jiRTusN3#OP?XB^f|K2pP;mA z4%O^r)mJ@2WA!66R86A3d=!o4BWNifLQ82snu~i;pWlVr+z!-ccA_@DTScNv6FX6u 
zprvwTMYSP=0-YAxf|QWf`JCw30Lh7NM3f9JBq0>(K(BfPde$qE4$=o3hH{&U;nfZ=uQrNzD__^51l-%JRZRrr zAJl`O@F9e-LlP<#PlCD-;LGEDDA9b4yH6)vWniaIH{88D)xwPKnQbokg zr5^4QA$O>Qr)@QytxDlyQ3faTQaqqQKe$swN?=J*zJI$EcWxEo?%fiY-!Frec{%LN zE8s{REURE|SqVFf3fNM_?QH8*&bF0(Gw#{9;I=~>Zp-(M!hD6_IpLyJD^6Rt;3V%c zrzqP0ShYS7BGCp*C3>+u%euBr1bPMcuUJQMCd<}^kRS?lu#Txr>+-CKr0AMTv@z)L zuPl>j^=3_$7a@7oeBFu-f~-@B*Yhr(!%jt`6)9J*1b&7O6q0jKWq z=4jT4^Y@!@!J-M5tebJ!wwb-hHr$XNCFS|H3{0dz-{yl@awhM1^s<#@jT0cr00{WGp*>QX!m_YMgJ#M z4M=_JZ>Sskjs{9y^Vn}{Fk&mkuZ;pIW#o*s4@vo0h)Vya1|F)xh&m$3L(c>e=07p? ze1YK?(sDb|s(<`FMyWlKY01ep%IX81cL@aJME z%LbjihR(+#(S*xFyFhu?1X}W>C5=M_`gh8+NVKGJP@)Y)rX{6Agqk!+jx^WnCcdj_ zzGae?8hw_+s^RabP*Z#BgA2Q*tlekiwtrMpd8f;yC=wa2l}ZhiUWr1NyhOM}q1oAz zPFFzooQ5ptK1Oiv6L!R&sa$C(br&Y3@5=ZZ8H8B*nj%hd)&~%30xhNO-XMk@wOGo! zRJTy&;V98+aurH+GCyk)8CEkG%ET%Xp(e&8E7A(aY=$W+B}1ah+Emo36zJ5JubM!! z6^Tx1(NZ@OdEWS@4~XN>8pEG8l0R!CuOXb$98y71rZfkYQ=Y z2^)oH)FeD&r{Iw=4Ugm*xTlcRX?U{3>m?G+j&3*wI;rRha>^d7|3nm2K0zMEIKAvC zQp%rEn4cq~;w94gJxG>PmFx)1c$pY}zY{6Z8D$i23UpJ=B-(2y&{i{!ma0)SR}3qG zZWe(q=|e+d59$lLP@mI-y6kRLr+1(#tpjDr?I?%Lkgk09?x1b8*T&$EF-tjV+& zh1OHeib%=z=~A;Ldi%(1i0x`(mr(yUg!{E3GN2uifgOkr=|X%|7ZPK7kR0Eyh>z|@ zR49)NYEhHN_y+JifxYn5XK3^Y;Ie-=yk&MpnPpLAn&R%^&i8lYwNR#go$BCiR|PMd zD!5x!z@CyU0}k!2Dit=?)v%RPcn+=q+mQn9Py=_q-px*;+A{dD z7M3=3Fte62dri1y)qtz_>u~9w(1>$pbd0<8IC-xb$Ju%P$4Vv88Y|WXK&`K*OjCjl zjMR>$Xb0y?kvjmI>v7 z4Usm`W^NRj{?{0iZOZy3RYlejqvo|BX&0+iBtTo9hue&(fx6+zO;B!_a#Q4uav5)| zKk*mHmgGVHQ0Trh7S$4 zWG^2g5`8&(vtC6OPVhs1=64!mA$%N;d@w$Y9)WYr7~J9~ zRDQHq@)Z2kwBs`frwm0E$V_Q6V;U)Yp^!-+A}K;rl_9a}6_Tr8A*GfyyhLi_Yh*OO zMP}1GWH-MaNcy?bfL1{enu$nhZ!(DImjIed#7WIxRCEYSC#K$fpae zq*th1X+0t>Jo$|xE&NGQ28CHLB>E49Cen)q+rWrm8v?D>rCz8cvLMmgbu2`nDaT7h zc`-bO7*nRpph=;JQv4%&pZqybH#-MikNG+UU&m|f;%klcjy7GOB_CSlL=zF|W?o+l zuXO>DYb`ph@}+r9Bijc4Uh5}*NA1|}s1}JH{T(vRci8~{Hk4>poul)kibCg5ptC5j z84VOzN~Jap^;^VHCZj0ClI|me2}25=Dr8wjr$w+!wRDdNepVtRcUng(Z$z>)rYdlV zOe>Mrw^ehQoiwQz9mV&Gp~OZ@li2hHNsV71(^V%leCF|=5L^F|=l=!aRhldZQ4#`5N!e@omb`)|h1rwe1Mh-o z@XCD(ZwiNdx>UB9gKfeL%qb@p6le4B3ET@F!`*-pHR;6-uVGwu>&Io6UR-dZ^g4D@ ze7kU&;w!n%2AAwQDV^Q8B%;jat6aXq9q>kz&b>zDns9F7dS&xL*_KE+Xgbq(t*v z+%H_>{o$H(AMUzI={w1{9#rM;%>9S(fC6k6JPMnTA%&FFl>bU;t)zMla=T0hC1Wav zV8Q!^Ie(UW{8{cu4y9{9u1Z-oryg}JO6YmbXCAcUv?WEE_pX!on{YzX)NVKO{?>>? 
zlE=(@or*ADmD>8%*m0>!rMhi9U#Zw~R;p7-IZ`Q2S^*iPEo`7PuRmT2sb(R_fB01T z9K~5!&HLR1`p zNQxmPdNU!_T92c*_&cUVOCp#F|_p`)-5 zpM;&{!N<+OE^!w2DYJN(HV4;?CveYv0?#b@PtjAv6+c5t=~GJbQ>2zss7sz8p5O0i zBKLe5Tpq{oMRdt)c8mpn|49VDBe9h4g7O)()=i_geiB`^<7lm>Kvxc^<_lqR)Nq}xys+lsvC7UV=VBRjGMS&_}i2ya4q zm_(_$j%9|D8A z5kLWspd=?mbRj*a8#%FED2VGsfm92L<9eJ_4(UX$40ufFL=KlTVx$CLE8_V65rHiT z4QN%fD+UJjATYEaej$DE;(Pe|cXE3d{QbJrbmiWjt?>11hOb8xd|fHg4{P8{5%#vL zfy;v$INR01iGu9xTnks1CU|*M!OElX>rZl zf`vsDZr!cG%{!I2a!TF1Y%0u) z;Qo~ogjM((vr`Pf2<)|rn`lNBmbtqne$uj)aO7FMbf ze3CzHpygB>tdV?bQd&BG4kt`Zi(J!D6gDphuJ?x~y)%IU2ht&b+byw2L=i{yu(UN8=}UIF{|m zz;g0Wb~t|8%?=1}&@y4p`h$7caI^>;WpwzN3jBPw8oT+>*nO!QyZ8{vcGK zqmBYCJwy5TTnjExo-bLqs+qS$ps(@GROqq7=r8HBV-8(7JT}q^H zQlf<$t~}nkQ{@`obmFp$9M`7?FWvL*hZP^DmVpC!z=x)N_^6f}9XqBZ+Bane0W#Pq zYYt(#PY|J}bjVc0YPw-*3m+q#Z3NpW-aMmuJ!4vMJYj6l0X#sv1aHS>gHuyDtvrHn?5j6t2a%#q@YTgg`{`%Q!-UV z`l*O8bTly*L6%6g2Il#ZK>sxwP3E-Lh)Qc>t)+AH%>03F6;;+SM0qJo#o1JpP3e0I zoj=jT>kvewMV@=OO-VFgucdYTH9xwQ*QN2>Jlej+RXt>Kmc~baP@+YkMV^0ER1V7& z-(OX!J1vdllQy|Z+edbUG>J}cc&}Dtn8dk~avX00{ntui#tof^sZ^`~p7tFW{B;0v@@~;hysx zE?LjuMA3Md@feQ$UN|I8!!e#B5;KJdQB#!WN!$(_$DQC2+@?6+@RhQ1gSf)Z?G=${ zimVJIzWA_*0!=8K7p01ZV=slc4_BS~lu%#cF*0CSxaQQ0Ydq!#_p7L~+9vx%SS9zF z;%qPH(sHP!Qpb6TXj7;!@?4jcNXr02iF)f1Y0rLD0q71z+RS%|NIRgU`o78awGwHjR0?Y( zjqadCb0xAXDHF!Mn(%p{1v(627SYX~m)?K!-u=(rne+Ta7D=<**LCC`|4-iU4gQ~Gotx(47P@;F7tHD10 z9uHq{#1X0QYu131)-CGa@#pwCxaQKw=Rm(!GdzHQSAHhV*;z6Vm6C>!U=ceFtAtrR zNMgq?^$8sKxpvfXmsBh%Wm-6kM3Pke1c@b2`Q3i0CQJ%0VnHBCAuHA1wBfnHJR=~wG1wl zh;#}`ZbwB@JIWHATWL}U zN)m)l6esdHigX?YIxD;dsq$Vyjfkhr#|H7-!JP=9poau?At+Eo1lrfH1K!^4@DU`X z!-Ep-E)uPkqb)C08-=okRHoxjFxhLQY*VrMcG3=>f!5D z0}s0@I7`I(emU;nE5)r_rMPjU99OPVps!G#uax7=)pDG>Rf$XVjH`DWaqdpN3?lTS z{A#RRFZdHfswu}5WJ&EXT1oLR(miC`@--B1k!V4eWmADRmFRia36euid1kBfs5eNS z^a9H%)xt_H|3!?~a~oN)j>nM|N~E=Yvb?~;Yh-yT#y$gM#>JJAR%9 z#2=AJsn#lbEMqH}md#tJLH$=u^Qn!TYJ=4~xXx{Bd1G0}n~Ld_<%T?)uFng!lI(@l zkiSEs*?&RmTT`r6nLY}&k@}&c%e+}h{4WQj8CQ3>8mm$SpL zf|9&!M;exGPgVT%bBaQimy>^P=Q>$Iu~v1z_hhP^y!A&4v4szapU+gO5#+lrRAM&; zT9C3#$FA4nI6u&*%$iiqQ<<*z@&iqvukhw51B@hR^4h~rKFB(iJYRRFD7#A3xm(He z9nWrsM4}b$g!}GFl3!fpu|>6bh|`;RrL2) z^gcGyIz)iAD72|SkGvFVHWg`Yo5A9J#%uL_X^O3pH?72)@~Y)ZOH0w!rC3-RT{abG zDcxr(%Kf}XQx@b|;4!@RUSf!JFOLz4Rw6C%JYBj_3!VHONZQAIst2#p$d@)`Tg#bV zD1Rp@9g@aTJ@Q>k;~4o36+i(uuIr7>+QKUa1rUz3!$ZyV?L0Bi(Vn9Q0hg$KmY~S zKkqqP!gZGY4maYO z$^Ge+X`I2=K@ueUe0pIs<2~XkJLYmdm-xPtYkiLIbLK%ePRaxy<{fJ0)x&pNaPYRwqSu6dHyW_# zS_5`dn0H-~2(y&*)FztxnbIc{Pigg!E3o-Yxnk2PEyBFTf)NYi>ih;?CA*2sZ;pRXM{u!4tSGwZS8%VG%t8%h(xM$BRfmhE4Kg z*rh&&qg0H^CBK(``Rr`wy@C(%BC;&`{~_ETPvI`8 zm_c>bBh;3Uqpo}mjb$S`W2i43LtTkRZP6$hKs6=0x^NJ+g+r(*7*HbJkk^gIoK7@m zb)Ye`6Ah9poz{jLigj(edBe z)WXlH67II;aIz|gtywYd-_ZvcUMs=1E2Wg^3Y@=8iKa}Szg3Q__iAwcu9nvEkCkhE zv0{x6q+KNvy+ou}tfffnviz?ETD9v%mNk~~b;9y>+$ZTF^ZN|B)+BoQS}l*-7>sBj zdDX(o^%AAlgn0oidaX&gzDegbWn(a3=CPxa|kPVPLb!ID9%4gl$l6$ zc?Ex-6+fqAnFura=hmcoe%h9-lZ53`&QR6-PQ|KS=~%Tl6YCGsKdTGbm zGVHljjeUIB9J)q%zFDsZ5T2$$OZ54QHO1M6;%qCqgp_HAc0Ncu6q25CgYv8*&u(3m zhM7kX?t97hK~?5X{yl3MG(vfnSrNzJAQd{I z#^4$^29JaZ_#{740v(h&ry|jzLbgoAGD~ruMsVg7w@U>N3N~-L0o?B=In;cOPue73 zJBfhwNnNsM5Ro^B7#WnPCTe-EBs#NR1o{`1LtWJI5hY!pQP%Sr<$a$~)h`u3zMyvK zi&iR+A}!TDS|)!(t3L3sWBR+6BR#7PG!)4-1lou&izJV}`rkA1Qj=-ZR1ibRwFtGy zv?kYm0)8EIinKo6w}`Y%1+Pu_t<74POQFt^ z0fsHVAieQD(kRm+)F}&k%`L0xjZP{z^@#A``F#7K$v(#2(M! 
zL)rDtdSZfBQ(Bh2vPsrcco<|phI_^w+|uXZO7V3~r2r?*z&`#FY!jwo6F*Jiol@9P zf(>jb4z_W#u!-gVm?>COh^?XxxE?hH%g8BMNW_lqePR|NJc4-yr8#^O_d~~F7BY@| zl;=Bv6RKp~4c`%5_Zh}jFHNAYQes4)C57WU#pb#b*D1@FDAAW)Da{m}t0FtDgShNM z!KUDfA>j+?P?ZOX=PmoAf91+mSSz> zOkd#Zg^Lo&b~500u1h+IL@@;^vw525J^7#m$F16N+@c*v@3-U7y*3=W+p5G_>fNgW z(n^?XvE!14%+M-P=B?){6^qg`PL*QA$r8o-6Phe92(w6|AOb1kEN!tGgR)AOX490q zMbf-aqP!xiBB&B+7J((ZD7GTEDpD+xYd`^}B>&f6BsqgpOn9#sWZQr8+4UbkXDDRZ zl>ZQTj35&IA3BCm8`v54%125o<+csQNo^>IZ$TcVHYc_n*)i?N zpv=l_hsh)_%94 zH-dt@;1}2q|9}nz1$H7dNYX|+5gpQw_^>vlMs*-FwhMWQJt#^sW^(L6p~!o3D=L$l zQ5M&P+=xab1=J(Ps}7N#weWYUpg@C82> z$h0XU*G6kZrhmlJTRHUTp7dH@c=LvRQg)ER(XkZkW~I|TdS5!g$_na4=Jqh06- zrFk4KG2?KHlN{&?cqQvKqD7puwHXT|^PV9p|2ZP_pK$pxB68*st|HG`^f^SJTnA;& z@^xCITEsbw;v7}*1hGYuU-uLVl;`9MJpx_(8rco+DA4ba-|`+sQq`kNO5{ZrbJhySBo_NIWNXzf5Yf29`jm9 zmbpz{XNa_sL%opZu@IFu#Mlt!r7VcECdLDl^8Xng&-WAhD9|!fo|sS(2!^~(2z(vNA2ius2TYc)oKPqqe_RAz%xqU z>FFJ++}&s7w0}f)+b>$4w5lKd0qIS@sQS?|2}`o%BiFn}Y_$v;q(oP06+C1xVMqz( znH{PCel}#*LO*`aWTrlmXdgbT<)JMTvv^UGJw=qIG#o|QIc*LPQ)XeGI0ZYA<=98C zk*IJKg*RNpb`)mp6qzlThDZjv$K|_W8fM&g zFLVNTLnd%1XbiUl##EJyn|`D70)5SU2-iI|i56s%lUwdX?Cf&8+aRvEYm=T_;r8pE zax6uM%d$=CIA3z5OiP|I5s|)3`IZuON}wsalD2V9Npla*TTyr|I&g*(b(*4k+KM8r z+IF0@Y{N;mr_?qc^FYs2);S}2%M@|p6h-^wgEpL?fD6Z@)aLzG3Udn%N~D<_xYL5Y zH!026CEBcs^R6qBmQkYyN^d(S$TZcIJ&7=%E>oDMW~@72gtf;CDTi$5qs*Exs)`hb zFzcv6+mfcCi}T9;e-&sofR7k*s>!O6TCorzR)VW!mojXii?YFg{G4urCd|_Q`?hq2 zA<_o_F7S1NocrH(453~s(CRvMei(f|yjYwi>MS!B%7iOwmfTAX*nUN(|8Bqm3iMI4 zcAT*6=HIyor|tT5fga)KXcQMcMsUSv6j$Z5DIGlN=#g9O^xY0+$1m&=?y-}2pCWA) zJ*Otzv6dX^xM?_1svjoIs%hX|Q)R&6EMX@#?HRv=6zPm7@Rz*ljK_%1eS(C-CrB%n za)#3=qC}UMPoug*Ce)cidBr5kD9;sT6R4zASCu_Nb@>#Lb`sT;?RvJ&mE&lu7(r*{ zFuE%H(N*4yjxti(gZ7ec3UxPH3pJT;%9k{YPE=8%%hEb1o1Mr{>Of9HJF?=tkQvv7 z^f-z#Nsgm9$M+#QVE`$KgGf&7M{06E(o*}8nck1QtU=^u_92hLnwQ#*+~iK4L(bEI zEXr{@Wjrma6`9fPifpM_t>;5$$H+{IZAhoACq}d(HoODTkzI(4>`~$z5hZOe1-ci( zVciI%Fb9QpalHpo+#eUMNqq*zJ3o=|`iqh~P@2+7X{N9%+EA9*g0lE#ltfdYLn+X{ z^@wt>LYP~XCeU`JcxYJydy8T`FfW1iePUJu^LwSF0_OLtVPRDZE8BWCo1)aecJ$~F zf%e9#bqlOqt3{&?R#A=(R{jMF_iBnZk!4eXHjPdjEJUs~R!S6FUsfooHc~uHSuD@S zYqet+_Wc!+Y(to3c_|ArEf^xbVqFB5t&PwmTB6m4Nb4+LFV#Q(I!IBmJ{HTQB8aLA zBBl1UGI`5%WLf1^r+%2Wt9cXr zTTEW_7YP@k<_%S%)T_B{l;)GceuA;C;PESY4q=6=M3PSVT}ahXQD>3oZOQm2W%-|5 z;wjE?_)qd5!WOcX+lUDCGN}W;JsHa=(93tFV$J?6$W*Z#j})nCX1AX%#m@5;Y8u)@ zd=QC1pQJ#aF>6re>m>So%|ROkc-65@Ro1xW+J!qrMp-|QDR2Emock%v18@!-fNR7M z+#-kJ86zX?g%Nnhk@zvVb6rN$yGx2j^aPw`WPR)yyb>l5kTQjU)M*5!i}(^sZ%E!8 z!en9`BMbHOvj6cT9*o+_d#KSE@&@Kjanh?Zl7q9J_qU-yoO5 zEYh6a@D@3ZS{`&E#jCjOJxV&>quj`ara)Iyplb&{DS>Vr`h+G5bo1CZHLy?(;Cu8P zJ=4FcL4R8MhLp7XO$o0i&LYKsVC>DG7^fWn9b>{9?Kr^@=>>^41|rJ&P0HpGi71;2 z^n&OL6ki^%$uNbRGXB3u6Xc~65y@25qjyG$gyeU8#>BGDqyZBu_Li8dn9 zTIG)Kl<417b&mPccREr_+9-QBU-FKW_I*Q9k0#RvUD`w}BGj3!TIstqsoWu`84cee zvF9^Xh;qqyeu3c9X9y~Jjv#)1v}p8m_~$-{pNMppM84+Wkv_`@IUnj2 zXURd92=fDp97l=lQhLKDR7Chr;HW}!ku|jCTmIVeoj}Sm_nT3k6?}~ug;=J&R!FX> zKvBLQPPtI>{0MhKr*J2Pt;k0ZB_v>6MV)W@QK0?!zT_4~<)+`Tn(^zl&Kr?lL}=8pydDaUaMf!FmnqQ~-TQIDtq&K8M546_ai5xeNg~BkaYNEFlnh&U z;jCpR&RU2>6HAKl{Wi!1PDd%fBEUz@TXD>+6(`84`;xD$@8kMWndOcGCLH1N;d`wr zE#nYH`GBNm+>$7>)E{18H^pto%!{*0s>NRgno?}Szi&&!zY%FQ z((!LwxlP9qX@h?gDBFTa^uOsCLcLU?jeKZRdDg0M@b_?pzaLc_x|E%cauvDXc23e@ z>agRYKIQkdX6(7ug8lsaNT=o)|Nh5qdU1mCe9Ey8=bR|et|HK5IPX4;OFooncI?#W zEnq@P^tHfoTw|y420M$AyMH%=pLOYcMza$dJ;m=8zh5%_d;DYACp>`@zjLlB?7*fx zWheF-+$Ac_&aFHf;z$yo1Eswy6#zG@OJHRI^08ADgi2zqKq&|5o*-kJgQR1qc9edsDB#l2`R>{00z z4Y^&Y%IZQ{x=a(^jlAS8WG7Ob6MB#q--{HIL~%}_IL9XpA}*f94RU!1aa@m0=tm6Y zIy$-=(NSH9j_5#CcspXEIw`j#q7&g^Z3qi(MMMY*Zf7g`)9n=HP7MliWKbKz16vUq 
z*p7gJHu(6p!pFZ2et{hn<}L(>_8=&@2LU87xSJx~i|~kEL`8KYA+{G8iG7szZWK|P zi*#u&P3b^+N;}HAT+Z#qiESv3Ye8XTBeFvpknCHl1Ul5U5&<#>(X-!d=I zTwcD$-vklqh4mF8+$x$avP_}o{*@xlYeckVU6*Vn$3c|&V1*&d22xtjRJd126nha1 z0xjtw{}LXrSh9WD+E5~CBqGt8P^*EC3PY&1!HY{-zAi?wjQbZPT4SLGwU$G@AkvaI zous9A=sDC%q?JUcV3mk8S#2WJiv(JPnm3z;+SEpywP?G6l-n~{TGLvh*%}g=7Kzpc zn#&@|^T>X6jE0)(T#!6x5olhQsX$8vT1(wX#((RP=KtCpkN>1FD|z0=mi+fN$}{&X zfu=k!+ev}morbj(=#7W+)U>i&PZU$4i?Q=ug^EHSx>AEvH|ufwZavPKH{r5%t2Wb= z%5m8_2qi%o zj}Vyl2tjF-V~R*n#+-^S2W3A&MD`q_a~>licMcJGvuaSF8XU+QVq_tum@*t)^qfNc z9I?gE5l0eAUm}sjmA*hM1v$3-IpQhFi4`x9Qt=XLGJ|3DD`eHbRid0r@|)hHNYXS& zF(s?0|l3OSDLTWGIS5)F({q=#Srt_d_56p5zL4oX^w zNU=nV=S5Z|*GSV4H2IZdgh7h2f>N%5e*s^|_nQ}Lbq*~;ZHV-b=+&dpk{>M+ty+mj z&rzsn$;_XW=s(nCHmy@y1&>z#K3h~R=|5D3jz)G)8rVrOrG8w}J$^^+s6IWoO81a7 z52N(mS2ZIdm(| zJ96Eg(rnv>(-i1aR%|KGXUJLePMp5qfuodPnY`(+iX3Z_CFvFiDZ__lM!VaTuUk#n zM-EWL4ED>^(btXWvg9&r?7B>8qqvE{Nh>n9?R*Wko|l>Ps<7#d3~tkA(%X2t3L8#V zsEMki*1j?El+@riX5~{QLUlx(4S8NE1Gi1&Sy;ZGlB~v~$iP5Nm`%l5$+KPw<1f-o zG+8Fwxh!y5NiLr~ru@6eug?F)XOv>Ah9=P5zD48Tw(|Al-?s@|{}%$SBGJ5#zX-Ja zEb}QF#d;lRiO%zP$KT;5$rYDUewQfC^83Hqh}{(D-Pc>N`<6`a-HwCi?KmuHHT)bL zv+u!i%JV7bew=d|qa8=u4g>xXMo3HGcMQ@w0e0V3MEfN4OnI!Y3)tlApUDCp?8y>SGmkc29o_&&+4=%93#|Pt>zO%HE|EQKE~VAc1W> zJHd%;GuSCkD}Ic4K11S*pCP&UDN;)wBdhc=@c-GpKY~8e z-!P2+xPbCbG|mC&Wq zGo;L1BIP+Qu8)%3k7$Z>cuYUSBKr|eLMgC;VciG_?V{v%!#}W-(%TLX%CDDy2i$$z z6mGuFY@6Zf*QW52*0%+oK27lQX;yf8X}A+tuNJs?x53r718#op@C@vNPjDZ60%dyj zKKO_9BP4>?BG(bogSgl}WTy5*a1tWrIFgHBw7YgmL_%}Us6*dn~@#Xi1ffZ zBzjjP#p%37W;a5I5)!2s}*6v>>z~qu#?b(NEQ7a zpui9RD}nZ=6dM>Kt)Yb3M4SzgmTjhJ+qj>s;xSqzJCIPGDb*`QqF3v1S=*!PZzR#)~l-=*ZWp_4$3KQS7BiYKWFAy)>sJq&Wjh3_yCDBWx(mJcQE}789gn6MBnf?*R{=Y%eL>401>6B=RW@{-TN|1L^ zo_|OVSuD^ihzt}okjfg%cuqs0mv3jsfc$e?5(PRLKPi&9o`C<}PO09(<)0H(4)jVM zCwb7TD94{YP?}4htCB_0B`;O^x>zO3FAzsr zmZ)-k=~E<>y;KvdBv6>OL4hxkUh@(;^{=rbCIKcjj03tGoSm<_(7^U*hSO@F62YxSTd-+7R7I80F;p5+q@z+|4$1oWZ;hh)R1)*KYk=7#8#`@o3M8}8! zM}htq$UsM(r9v$d-}~gZ`G~j(b&rTNI~qn+5LIGN)vBggQ=mJh|5UVco0jT9p%$rD z{HY;n9}|C|X+qLIepiDJYfVx;Br2^^Jfw!Sn$hqx@+HlKTzeKMo(APzv zJq9VyeK_qxiFO{qxrY=YdoDZl;;cQf?WQ<);nag}9J8Vw+c z6pNtb{vD=~Wz$IWkNCf~P^yWsu3jVP0Egc+ofcBfU;Q;0NkR|`#l;;!n-8dy3E7t)nWy5O> zS3UU|_2Fk)K3n{Z-3g&AhfTvQd`6`tSw!(O86zD~N!OTxecUYUweza4dn+v*8m(z`kj2@Jv_o5)B2U&^TNKfcik!MNMh^L%JM0F!9 zst=*jg9wPAz=rm~FSwgR+euOGgu5R_*QWz6UTtvjXoamu3vAt6U`K3S*t)jD-i`a* zTH)x{LYfs0u1)jH4|%MkOCwvZ^Bnf>ZLs(3fW3D&?Dbh89fAhn6g&vG&|!Fm4#6+1 zA3@>$h)ozka%vAU)BBLiwlK9zMWXYQ+L0IEhQio(6vVV6Q%d(q9qix+#QWDFRwB^e zHHh%8RRZnlSP3`BD!4n>!riqFK0d7o^lwFEXeSb52aps$h}48Zq$CU>DRBfzNu!8P zoWws?Q`bG;r(^AI ztqRBLeKLqJ51Wq`V%y15>^M`7-4`mc?@A30U9ZEbyA8M?k!RaB+;i=LrKD*14ZtB} z7%mYbDjg#zc>>|-Q;5!sQmP{;)smVa(dcmAEJF+C5Xl>7 zbm3FPNd&r*=c|5=)SB0-nnq63TjV#tLm|bvsDt9%`GLaxNzG7LL%FIW^%O0s`)peK zS>-m5N_xguw2ps8hc3+BlHWWnlc;=0pDI^pl&YhQ8sbce9HWemlZjW9W)WxdBT=5B zB>xDJXHB3rnO+d+KPk|9BwD0bqR`KmNHVWOW<6Y(*oD_;AlGSnEHM@6ArbSx;fLaE zut=a6OT9$BRo=9;dRm88{X+&d3Q`$FO6!SC%km%So~2+*bXp&9s9{v{&?;8(|uCK^$|B6hR6;LKU zY5Ii3`VVTVY$3MlJ)$e$LZV16QE6THhi9k!l_>M@p z4 z^bFym8>QK00B0!AXPt*|_TeB-+YjJ`T`!K?^x%YuvqdM4ns?w3Md~0qq^DdQx-GMu zQj#gq`))N-oEz1IO(MqzBFMY1YLYC2{Vf$#drC z@j>hyY$?n336CkvPn0k_QFL zv2!ak5GF}grSexuEPsu762`8fmZS^q*x|i(gHCld&LuPU>viY-Q#djh-Mg|qOAU&cL zsbMWh32i}KV56FiG1^Ziim5}Cj|g-fLOkmb;86>IuSWQKHzGj9nVkknTS-YCMshNR zIe8Q*X=6xEn?!8t6oQiub&3z`US}hW-p`y|z zQEZ7wFRaV5i1Y#yr8dwcT8~Imc2xwMNLq*tU{vrm%T@>cOBMuMgqhoT4h>oVt3;c! 
zSg0jEMDnIZq^(E4>s|-9wjXHP}!jT2eg7vh@p5X+xYBt^Y`%jrA>y_nBhkRhzPC zzp#zVN@$meG$q)WQBlpRXd=r$5@?yiTqIghDII+MN=mdL zYbAZ+H8QwOklbTM-CG|28tFAMfbS(Th%Bd6zT^#1BFwz`$pjcuwkYQ@{Ih1^M|`tp z2wO=9QG*JfA%-`~c#&|vUg|rmfq-rAQA}|z>!LXKh&X?yIDbanz*jU3eM6H#$&%Rz zHNK%?^s5r*mWeND)5W=G`a60l&Al_fqMxES_~bW=>hBm<@+$erx;Rrb$H@eld`)z5 zo_hCp{BH$XlW0w#|CrB*)&$yQ)6t=$QP0B{DPbgigeo-q|1zg zjUSQH_yI|j==l_nN^Rhw%9k#B&WG^}1WAr`j!b4ERXHBRo!=3c@TK>H);-Mq!!6M4qfI%ZaT7=m=Lh_>x zRqmiLTT-AcV+mW?CRob9$0MhC?6fK&Ch~ko25<#U^1UQ?nzHUaj4N(~xJY?E@6?Yo z4t+S~(2tY$eKB#+d9M&sgXw@)Uw8--H231OK@6876R38I8yOUsFbn&nIXI>~#luu7+xQ%= z8P6%o&*72v0-ia-OMaK0!8eEZ{=Ao}G@W1G3wRT$^Bh_97O|!85mWkB335#7E5w(* zMjWL%x{T{Y>cLB0_?XIf?EJn*SlN4o@-?BQ?+{WZ$Q1Z*5L59M@gz>fyW$n{YhIwa z=_&f!W--)0h0)F_Om<9QqGbeQ%~~{ifCAl1f$pY2w-@)LF|P+znLQ|xfq#i2os{P| zDN9Fjj1_tAMX01`gml3tup1u!-Ej5mg0pug9(s1b!Mz=Ju5EbW+(z+jg@r>i%*b8J z@*TS-+_ohT8gbj2SP2bWHmINH4%hGSxLZ8_reNQKTaInG<|xR!dacK;&89zMN9NjOSQJ30JK4(-E2W$4{$-h4sZkEtNwIxUMiHItV{; z*_34z_<592i$pJGVPK-EOiM&sLk1-NM2Y@seY7spk|V7RI#j5r^j{;>E0jb_I!DqE zS-vHau$6&`I+}1(m<1DgmON`ykzOYIDbNOsL|WvRH!jmjUls-+{#c+5iB{4ruhRv3 z4PU>OVyvRd3JNlhUArd>Yxi=UtlXQ96??e6FB@z2
{C`iN|B7jZx^q?}`p2|UtlQ^6 z{#47Rw5kZTM59Hbe^)e3{-$VT2SyX~3Z-ZxraS6zDG$=r1UvI18n{M)|rgs&t(i zJSY;)j#75(FUV|>@^yOE4U$BWPN;iJQIyE)D@0JDCHfpHHHu~MTc(zd;E_5FcXrMM zxAbXvWX>vq_NT0gKnE7SL@)(9oB|zDM#-dXO7vL-I<{65Q$wJ&^bIZgYzVX=!&xHH zCM=|5@L2vlhA>N&4k>eIx~%0>e?zK5>KZRZowZVJBJLVSBwVGGup^OF_fCiFyk5!O zmOO5`{z!>7vr`a8aSqCVfq>i>@J@dM_v9Hkv-9e}@1JGpn5r9pU(z!8{k`GdgR5>m zxb8|}bn3A%TCl?Bb^=tayTwidmFa&Z4q< z4z-lvCdzUv#ksY90u6OjsH>eqeeEMO)J{{9r%_uwgX-E@RMpL)rv4eKNoCDblvO@P zQTZJ5OJ|WIb)NHQm7FE<{t}b=2!V+c@QxXU8$11uL4&aM*Cs@{$Dd!s@TznctlMzb zq7|piG;uzBqX7r5)nVV2I#rfy&!rmdI9Gw~XDQ8R%CPOUw#~p>qrBP9J$RiF|i z=pxS}J@s(IDXyQX)=~~B(_1bzVB6J3{49gjDbo9Ix8l%UnFy={XD#^|;OF2vpARy5 z%w6YB{_eVP-@OYKQa70*?daPJSN}eE1`ohHlp-B9gdhrZSp29e6Bv?6QYH~7@H-ov zK7*L7X(Z>)s1%z5%CSgsLGdJViYAa-^a#0yk0{L)YYMcGTRe+A3UnR?x{$|~l}@3u zWD+%{lc*{hM@8W%DoJ_a04fUlP*d24+WcPB=JudEyBk%RU8qd$LPc^HN<^gN+L068 zimb>MWJa`5qMKDTIxeUQF#(N;3ZOvy)+5BL4#6IE2yt&jAPMkjgfB(f(}mLPQVS2t zv&giUQ$0MLnkdvwaC2=|0`213hKDZAct{a;;5J+Cv!hJgQ;r=d#g5Lhza36ebD9!u zFCzW02@Y=Uu;Xj(-8u>e>z$r)E{U&XvczDH=6+9c~Y62_@RT ziV|H8d#f@ySd_!TtQ>ZCOJQ@T2v#=>V17Lhx3A{n=9OIBxRi(M7jtp>LLM%i&&3tC zS1;yMuJdq_+b>?u!+9Qaj!1j{QUR{rs=$qVHMniwfLj&~xM@Rq<~`*WJJ+}6e&8{L z>+EFTU?=;I4?FH&qxi=P%JV9bXB{QbCWtWq9i|e!g3`Q<>%wx8YLVy-BGp=o#|jZ? zwXBKs@{N+}5vR#Cg;otZ)DhX0DjsT`%kya;BHFT#uhCjT7v@cT-NtyH^T#aPC@Cci z%UW8ANVfsyoDwapQc}%zk!Z^FDoXT9L!x(PVdV}<^I-3qtfow_+0CB#9vR%o8-bvg z%3Hf%>Q#$O^Cl#FS)Pw%>msaLU7AfXqR@s!3+st`Ezc(cEvR$o5otAv3?=&S zP-Xgz!H~RZnWCzI7>HcU&5P~&6O>=F;Ur~QkY)WCkz-Z%&lGu|!@Ra5MObsBgp?}Q z9_7vQSQ)nRX1M)gHTFv_(z`75 z3zbuys0J~LNWVdLlgyqd`Ou$G)n*_r~!*Q zCflWS-co@!mFT4cZJO#~)Z6~wAky4LITn5-&8D;wk!MYywRHm{65Yb@HUNlm5#{fkO#Nst_8U7VvTr4*c`K)ggi;S2tZ&)~_UMoDv#tY-}35L;(U|Rc*ThwYDrU2;>_>4Z99=xo2~8~k#sPT=nJ|;3$iS7ErXV& z1Ad7Tec6d3?b1gfAHa35Vchf?#clsln1xJGq9{@l&EP+GG(e{PSto$ z&!dE8KSF%g6k^h+5S~1Vkc0^}8H#7DF#IEyr!inAfj8U=ig6i}G8 zNb$d7X_R^Ke$&_IV-?S5%4)vH0(>?sc{UmUO;!Iz<#WYILtHD*7(Q?0znmK=*6SsR zbFKy(&(~6>>#_ZE19nLjsq1`R+-Xsri;H|dTopmGZ^v!^9`Cwz!kiLq#ow(B1=@}R z?G)4pkFX*5MvfvNau^{o!w8KZr$F=X!p}e;J5mAZkJa1+k?i0k<*`FmIERu_{=Lg) zQC>cy^4E)04*MfT5&!N5B{S?G%^{~`7MV(@XHdk?MP&s$SLKxGvT-tjYEo4)gxZop zG?oscxnuxMMZKuY>qbp>2WrwgQJvO>^5jkwC$u9!woQ|0i9|;X4Q^5b z9qC_>NS}IydrLY;6M{WOpc~*vd??X^pB|MCaBGA=McK!_8D1XE@bG9+l`&k1GjVcn z#Y5sGb*5cKo<*XY;Ygu&c5jCpah3Yhu3YDNoY+33NIUuTz%_6Pu0ca^4;X-#cP~76 zoVyz(-Ie0ZW8En09vV%W>W)a%L>?=6~Wi@A)1bTh236y3NR#TLhGB42U zCH*5zv7F1xdAzhMDA5|>MDnU5RjZg!@zByd<|TRwBFLu8^T;-NjpSJK^(!gP!U~b+ z4cyMwKt%cPFqLb;Gitt0~k5Yj$bXJXX#RK4cGEuSYFq z_oQ~UF$v6%(WF@-*ea^6N3&&lJ!RYw=k+{~V7hFe)yUQ)dVcmrIj4ax)pCPUHz|Qq zZJ=gm6ZG2`$~dV~OqPoPZlk~N2*oX3g~ zigZ~16GTy_r5Z>wB`3Z1Ez;{`VBs$)q-020z0!`4DDV1=%APN%qL5Yf=?s2B&9Icv z(-4^xv6JYi)SGT(EA^_4$x759M4~2SnbAt%8MUNM(?5(#9;V2^M8(s^45@8AMon!_ z$rkC>G0r#7{IukMs7Sb))sZ6q|5TtCMVw6pT0uEB5NS4DZ|8M%JpNN5%clHDq78wT z2(*;5(|*_A^_d92sZ z6{K=OpckrfP*N$ZM)X(eMN2eT)qrl*Cr{z$FO#ynz$eJecrtlQI;CILY5v~`G{sp# z0Z&ozJw&AW`b4Qk-TVm&Y~$)bszHNtjdHz`zpVyGmcCKdECTXh!aM7!n!v;*NsIJK zIXTOaahM0n1QzciadAY zJP~=8+*sj~gQRx!sT|qsE?QpnO>a%2Z~2emZtxf^!^Ysq@4P2F&wfc02&F_tranS+ z#w2319wCl0omeo3=s_kF2aV&&TpYyW=O@^9cIQRuJ{_=!Zvjw2}H5rUGZ5s=F7sEnOSX9t%89mq~@ z3_G}K{JR%Yj>{;}6_pg}s>di|n_n@Dg7P^Ol|N-C>jiRTusN3#OP?XB^f|K2pP;mA z4%O^r)mJ@2WA!66R86A3d=!o4BWNifLQ82snu~i;pWlVr+z!-ccA_@DTScNv6FX6u zprvwTMYSP=0-YAxf|QWf`JCw30Lh7NM3f9JBq0>(K(BfPde$qE4$=o3hH{&U;nfZ=uQrNzD__^51l-%JRZRrr zAJl`O@F9e-LlP<#PlCD-;LGEDDA9b4yH6)vWniaIH{88D)xwPKnQbokg zr5^4QA$O>Qr)@QytxDlyQ3faTQaqqQKe$swN?=J*zJI$EcWxEo?%fiY-!Frec{%LN zE8s{REURE|SqVFf3fNM_?QH8*&bF0(Gw#{9;I=~>Zp-(M!hD6_IpLyJD^6Rt;3V%c 
zrzqP0ShYS7BGCp*C3>+u%euBr1bPMcuUJQMCd<}^kRS?lu#Txr>+-CKr0AMTv@z)L zuPl>j^=3_$7a@7oeBFu-f~-@B*Yhr(!%jt`6)9J*1b&7O6q0jKWq z=4jT4^Y@!@!J-M5tebJ!wwb-hHr$XNCFS|H3{0dz-{yl@awhM1^s<#@jT0cr00{WGp*>QX!m_YMgJ#M z4M=_JZ>Sskjs{9y^Vn}{Fk&mkuZ;pIW#o*s4@vo0h)Vya1|F)xh&m$3L(c>e=07p? ze1YK?(sDb|s(<`FMyWlKY01ep%IX81cL@aJME z%LbjihR(+#(S*xFyFhu?1X}W>C5=M_`gh8+NVKGJP@)Y)rX{6Agqk!+jx^WnCcdj_ zzGae?8hw_+s^RabP*Z#BgA2Q*tlekiwtrMpd8f;yC=wa2l}ZhiUWr1NyhOM}q1oAz zPFFzooQ5ptK1Oiv6L!R&sa$C(br&Y3@5=ZZ8H8B*nj%hd)&~%30xhNO-XMk@wOGo! zRJTy&;V98+aurH+GCyk)8CEkG%ET%Xp(e&8E7A(aY=$W+B}1ah+Emo36zJ5JubM!! z6^Tx1(NZ@OdEWS@4~XN>8pEG8l0R!CuOXb$98y71rZfkYQ=Y z2^)oH)FeD&r{Iw=4Ugm*xTlcRX?U{3>m?G+j&3*wI;rRha>^d7|3nm2K0zMEIKAvC zQp%rEn4cq~;w94gJxG>PmFx)1c$pY}zY{6Z8D$i23UpJ=B-(2y&{i{!ma0)SR}3qG zZWe(q=|e+d59$lLP@mI-y6kRLr+1(#tpjDr?I?%Lkgk09?x1b8*T&$EF-tjV+& zh1OHeib%=z=~A;Ldi%(1i0x`(mr(yUg!{E3GN2uifgOkr=|X%|7ZPK7kR0Eyh>z|@ zR49)NYEhHN_y+JifxYn5XK3^Y;Ie-=yk&MpnPpLAn&R%^&i8lYwNR#go$BCiR|PMd zD!5x!z@CyU0}k!2Dit=?)v%RPcn+=q+mQn9Py=_q-px*;+A{dD z7M3=3Fte62dri1y)qtz_>u~9w(1>$pbd0<8IC-xb$Ju%P$4Vv88Y|WXK&`K*OjCjl zjMR>$Xb0y?kvjmI>v7 z4Usm`W^NRj{?{0iZOZy3RYlejqvo|BX&0+iBtTo9hue&(fx6+zO;B!_a#Q4uav5)| zKk*mHmgGVHQ0Trh7S$4 zWG^2g5`8&(vtC6OPVhs1=64!mA$%N;d@w$Y9)WYr7~J9~ zRDQHq@)Z2kwBs`frwm0E$V_Q6V;U)Yp^!-+A}K;rl_9a}6_Tr8A*GfyyhLi_Yh*OO zMP}1GWH-MaNcy?bfL1{enu$nhZ!(DImjIed#7WIxRCEYSC#K$fpae zq*th1X+0t>Jo$|xE&NGQ28CHLB>E49Cen)q+rWrm8v?D>rCz8cvLMmgbu2`nDaT7h zc`-bO7*nRpph=;JQv4%&pZqybH#-MikNG+UU&m|f;%klcjy7GOB_CSlL=zF|W?o+l zuXO>DYb`ph@}+r9Bijc4Uh5}*NA1|}s1}JH{T(vRci8~{Hk4>poul)kibCg5ptC5j z84VOzN~Jap^;^VHCZj0ClI|me2}25=Dr8wjr$w+!wRDdNepVtRcUng(Z$z>)rYdlV zOe>Mrw^ehQoiwQz9mV&Gp~OZ@li2hHNsV71(^V%leCF|=5L^F|=l=!aRhldZQ4#`5N!e@omb`)|h1rwe1Mh-o z@XCD(ZwiNdx>UB9gKfeL%qb@p6le4B3ET@F!`*-pHR;6-uVGwu>&Io6UR-dZ^g4D@ ze7kU&;w!n%2AAwQDV^Q8B%;jat6aXq9q>kz&b>zDns9F7dS&xL*_KE+Xgbq(t*v z+%H_>{o$H(AMUzI={w1{9#rM;%>9S(fC6k6JPMnTA%&FFl>bU;t)zMla=T0hC1Wav zV8Q!^Ie(UW{8{cu4y9{9u1Z-oryg}JO6YmbXCAcUv?WEE_pX!on{YzX)NVKO{?>>? 
zlE=(@or*ADmD>8%*m0>!rMhi9U#Zw~R;p7-IZ`Q2S^*iPEo`7PuRmT2sb(R_fB01T z9K~5!&HLR1`p zNQxmPdNU!_T92c*_&cUVOCp#F|_p`)-5 zpM;&{!N<+OE^!w2DYJN(HV4;?CveYv0?#b@PtjAv6+c5t=~GJbQ>2zss7sz8p5O0i zBKLe5Tpq{oMRdt)c8mpn|49VDBe9h4g7O)()=i_geiB`^<7lm>Kvxc^<_lqR)Nq}xys+lsvC7UV=VBRjGMS&_}i2ya4q zm_(_$j%9|D8A z5kLWspd=?mbRj*a8#%FED2VGsfm92L<9eJ_4(UX$40ufFL=KlTVx$CLE8_V65rHiT z4QN%fD+UJjATYEaej$DE;(Pe|cXE3d{QbJrbmiWjt?>11hOb8xd|fHg4{P8{5%#vL zfy;v$INR01iGu9xTnks1CU|*M!OElX>rZl zf`vsDZr!cG%{!I2a!TF1Y%0u) z;Qo~ogjM((vr`Pf2<)|rn`lNBmbtqne$uj)aO7FMbf ze3CzHpygB>tdV?bQd&BG4kt`Zi(J!D6gDphuJ?x~y)%IU2ht&b+byw2L=i{yu(UN8=}UIF{|m zz;g0Wb~t|8%?=1}&@y4p`h$7caI^>;WpwzN3jBPw8oT+>*nO!QyZ8{vcGK zqmBYCJwy5TTnjExo-bLqs+qS$ps(@GROqq7=r8HBV-8(7JT}q^H zQlf<$t~}nkQ{@`obmFp$9M`7?FWvL*hZP^DmVpC!z=x)N_^6f}9XqBZ+Bane0W#Pq zYYt(#PY|J}bjVc0YPw-*3m+q#Z3NpW-aMmuJ!4vMJYj6l0X#sv1aHS>gHuyDtvrHn?5j6t2a%#q@YTgg`{`%Q!-UV z`l*O8bTly*L6%6g2Il#ZK>sxwP3E-Lh)Qc>t)+AH%>03F6;;+SM0qJo#o1JpP3e0I zoj=jT>kvewMV@=OO-VFgucdYTH9xwQ*QN2>Jlej+RXt>Kmc~baP@+YkMV^0ER1V7& z-(OX!J1vdllQy|Z+edbUG>J}cc&}Dtn8dk~avX00{ntui#tof^sZ^`~p7tFW{B;0v@@~;hysx zE?LjuMA3Md@feQ$UN|I8!!e#B5;KJdQB#!WN!$(_$DQC2+@?6+@RhQ1gSf)Z?G=${ zimVJIzWA_*0!=8K7p01ZV=slc4_BS~lu%#cF*0CSxaQQ0Ydq!#_p7L~+9vx%SS9zF z;%qPH(sHP!Qpb6TXj7;!@?4jcNXr02iF)f1Y0rLD0q71z+RS%|NIRgU`o78awGwHjR0?Y( zjqadCb0xAXDHF!Mn(%p{1v(627SYX~m)?K!-u=(rne+Ta7D=<**LCC`|4-iU4gQ~Gotx(47P@;F7tHD10 z9uHq{#1X0QYu131)-CGa@#pwCxaQKw=Rm(!GdzHQSAHhV*;z6Vm6C>!U=ceFtAtrR zNMgq?^$8sKxpvfXmsBh%Wm-6kM3Pke1c@b2`Q3i0CQJ%0VnHBCAuHA1wBfnHJR=~wG1wl zh;#}`ZbwB@JIWHATWL}U zN)m)l6esdHigX?YIxD;dsq$Vyjfkhr#|H7-!JP=9poau?At+Eo1lrfH1K!^4@DU`X z!-Ep-E)uPkqb)C08-=okRHoxjFxhLQY*VrMcG3=>f!5D z0}s0@I7`I(emU;nE5)r_rMPjU99OPVps!G#uax7=)pDG>Rf$XVjH`DWaqdpN3?lTS z{A#RRFZdHfswu}5WJ&EXT1oLR(miC`@--B1k!V4eWmADRmFRia36euid1kBfs5eNS z^a9H%)xt_H|3!?~a~oN)j>nM|N~E=Yvb?~;Yh-yT#y$gM#>JJAR%9 z#2=AJsn#lbEMqH}md#tJLH$=u^Qn!TYJ=4~xXx{Bd1G0}n~Ld_<%T?)uFng!lI(@l zkiSEs*?&RmTT`r6nLY}&k@}&c%e+}h{4WQj8CQ3>8mm$SpL zf|9&!M;exGPgVT%bBaQimy>^P=Q>$Iu~v1z_hhP^y!A&4v4szapU+gO5#+lrRAM&; zT9C3#$FA4nI6u&*%$iiqQ<<*z@&iqvukhw51B@hR^4h~rKFB(iJYRRFD7#A3xm(He z9nWrsM4}b$g!}GFl3!fpu|>6bh|`;RrL2) z^gcGyIz)iAD72|SkGvFVHWg`Yo5A9J#%uL_X^O3pH?72)@~Y)ZOH0w!rC3-RT{abG zDcxr(%Kf}XQx@b|;4!@RUSf!JFOLz4Rw6C%JYBj_3!VHONZQAIst2#p$d@)`Tg#bV zD1Rp@9g@aTJ@Q>k;~4o36+i(uuIr7>+QKUa1rUz3!$ZyV?L0Bi(Vn9Q0hg$KmY~S zKkqqP!gZGY4maYO z$^Ge+X`I2=K@ueUe0pIs<2~XkJLYmdm-xPtYkiLIbLK%ePRaxy<{fJ0)x&pNaPYRwqSu6dHyW_# zS_5`dn0H-~2(y&*)FztxnbIc{Pigg!E3o-Yxnk2PEyBFTf)NYi>ih;?CA*2sZ;pRXM{u!4tSGwZS8%VG%t8%h(xM$BRfmhE4Kg z*rh&&qg0H^CBK(``Rr`wy@C(%BC;&`{~_ETPvI`8 zm_c>bBh;3Uqpo}mjb$S`W2i43LtTkRZP6$hKs6=0x^NJ+g+r(*7*HbJkk^gIoK7@m zb)Ye`6Ah9poz{jLigj(edBe z)WXlH67II;aIz|gtywYd-_ZvcUMs=1E2Wg^3Y@=8iKa}Szg3Q__iAwcu9nvEkCkhE zv0{x6q+KNvy+ou}tfffnviz?ETD9v%mNk~~b;9y>+$ZTF^ZN|B)+BoQS}l*-7>sBj zdDX(o^%AAlgn0oidaX&gzDegbWn(a3=CPxa|kPVPLb!ID9%4gl$l6$ zc?Ex-6+fqAnFura=hmcoe%h9-lZ53`&QR6-PQ|KS=~%Tl6YCGsKdTGbm zGVHljjeUIB9J)q%zFDsZ5T2$$OZ54QHO1M6;%qCqgp_HAc0Ncu6q25CgYv8*&u(3m zhM7kX?t97hK~?5X{yl3MG(vfnSrNzJAQd{I z#^4$^29JaZ_#{740v(h&ry|jzLbgoAGD~ruMsVg7w@U>N3N~-L0o?B=In;cOPue73 zJBfhwNnNsM5Ro^B7#WnPCTe-EBs#NR1o{`1LtWJI5hY!pQP%Sr<$a$~)h`u3zMyvK zi&iR+A}!TDS|)!(t3L3sWBR+6BR#7PG!)4-1lou&izJV}`rkA1Qj=-ZR1ibRwFtGy zv?kYm0)8EIinKo6w}`Y%1+Pu_t<74POQFt^ z0fsHVAieQD(kRm+)F}&k%`L0xjZP{z^@#A``F#7K$v(#2(M! 
zL)rDtdSZfBQ(Bh2vPsrcco<|phI_^w+|uXZO7V3~r2r?*z&`#FY!jwo6F*Jiol@9P zf(>jb4z_W#u!-gVm?>COh^?XxxE?hH%g8BMNW_lqePR|NJc4-yr8#^O_d~~F7BY@| zl;=Bv6RKp~4c`%5_Zh}jFHNAYQes4)C57WU#pb#b*D1@FDAAW)Da{m}t0FtDgShNM z!KUDfA>j+?P?ZOX=PmoAf91+mSSz> zOkd#Zg^Lo&b~500u1h+IL@@;^vw525J^7#m$F16N+@c*v@3-U7y*3=W+p5G_>fNgW z(n^?XvE!14%+M-P=B?){6^qg`PL*QA$r8o-6Phe92(w6|AOb1kEN!tGgR)AOX490q zMbf-aqP!xiBB&B+7J((ZD7GTEDpD+xYd`^}B>&f6BsqgpOn9#sWZQr8+4UbkXDDRZ zl>ZQTj35&IA3BCm8`v54%125o<+csQNo^>IZ$TcVHYc_n*)i?N zpv=l_hsh)_%94 zH-dt@;1}2q|9}nz1$H7dNYX|+5gpQw_^>vlMs*-FwhMWQJt#^sW^(L6p~!o3D=L$l zQ5M&P+=xab1=J(Ps}7N#weWYUpg@C82> z$h0XU*G6kZrhmlJTRHUTp7dH@c=LvRQg)ER(XkZkW~I|TdS5!g$_na4=Jqh06- zrFk4KG2?KHlN{&?cqQvKqD7puwHXT|^PV9p|2ZP_pK$pxB68*st|HG`^f^SJTnA;& z@^xCITEsbw;v7}*1hGYuU-uLVl;`9MJpx_(8rco+DA4ba-|`+sQq`kNO5{ZrbJhySBo_NIWNXzf5Yf29`jm9 zmbpz{XNa_sL%opZu@IFu#Mlt!r7VcECdLDl^8Xng&-WAhD9|!fo|sS(2!^~(2z(vNA2ius2TYc)oKPqqe_RAz%xqU z>FFJ++}&s7w0}f)+b>$4w5lKd0qIS@sQS?|2}`o%BiFn}Y_$v;q(oP06+C1xVMqz( znH{PCel}#*LO*`aWTrlmXdgbT<)JMTvv^UGJw=qIG#o|QIc*LPQ)XeGI0ZYA<=98C zk*IJKg*RNpb`)mp6qzlThDZjv$K|_W8fM&g zFLVNTLnd%1XbiUl##EJyn|`D70)5SU2-iI|i56s%lUwdX?Cf&8+aRvEYm=T_;r8pE zax6uM%d$=CIA3z5OiP|I5s|)3`IZuON}wsalD2V9Npla*TTyr|I&g*(b(*4k+KM8r z+IF0@Y{N;mr_?qc^FYs2);S}2%M@|p6h-^wgEpL?fD6Z@)aLzG3Udn%N~D<_xYL5Y zH!026CEBcs^R6qBmQkYyN^d(S$TZcIJ&7=%E>oDMW~@72gtf;CDTi$5qs*Exs)`hb zFzcv6+mfcCi}T9;e-&sofR7k*s>!O6TCorzR)VW!mojXii?YFg{G4urCd|_Q`?hq2 zA<_o_F7S1NocrH(453~s(CRvMei(f|yjYwi>MS!B%7iOwmfTAX*nUN(|8Bqm3iMI4 zcAT*6=HIyor|tT5fga)KXcQMcMsUSv6j$Z5DIGlN=#g9O^xY0+$1m&=?y-}2pCWA) zJ*Otzv6dX^xM?_1svjoIs%hX|Q)R&6EMX@#?HRv=6zPm7@Rz*ljK_%1eS(C-CrB%n za)#3=qC}UMPoug*Ce)cidBr5kD9;sT6R4zASCu_Nb@>#Lb`sT;?RvJ&mE&lu7(r*{ zFuE%H(N*4yjxti(gZ7ec3UxPH3pJT;%9k{YPE=8%%hEb1o1Mr{>Of9HJF?=tkQvv7 z^f-z#Nsgm9$M+#QVE`$KgGf&7M{06E(o*}8nck1QtU=^u_92hLnwQ#*+~iK4L(bEI zEXr{@Wjrma6`9fPifpM_t>;5$$H+{IZAhoACq}d(HoODTkzI(4>`~$z5hZOe1-ci( zVciI%Fb9QpalHpo+#eUMNqq*zJ3o=|`iqh~P@2+7X{N9%+EA9*g0lE#ltfdYLn+X{ z^@wt>LYP~XCeU`JcxYJydy8T`FfW1iePUJu^LwSF0_OLtVPRDZE8BWCo1)aecJ$~F zf%e9#bqlOqt3{&?R#A=(R{jMF_iBnZk!4eXHjPdjEJUs~R!S6FUsfooHc~uHSuD@S zYqet+_Wc!+Y(to3c_|ArEf^xbVqFB5t&PwmTB6m4Nb4+LFV#Q(I!IBmJ{HTQB8aLA zBBl1UGI`5%WLf1^r+%2Wt9cXr zTTEW_7YP@k<_%S%)T_B{l;)GceuA;C;PESY4q=6=M3PSVT}ahXQD>3oZOQm2W%-|5 z;wjE?_)qd5!WOcX+lUDCGN}W;JsHa=(93tFV$J?6$W*Z#j})nCX1AX%#m@5;Y8u)@ zd=QC1pQJ#aF>6re>m>So%|ROkc-65@Ro1xW+J!qrMp-|QDR2Emock%v18@!-fNR7M z+#-kJ86zX?g%Nnhk@zvVb6rN$yGx2j^aPw`WPR)yyb>l5kTQjU)M*5!i}(^sZ%E!8 z!en9`BMbHOvj6cT9*o+_d#KSE@&@Kjanh?Zl7q9J_qU-yoO5 zEYh6a@D@3ZS{`&E#jCjOJxV&>quj`ara)Iyplb&{DS>Vr`h+G5bo1CZHLy?(;Cu8P zJ=4FcL4R8MhLp7XO$o0i&LYKsVC>DG7^fWn9b>{9?Kr^@=>>^41|rJ&P0HpGi71;2 z^n&OL6ki^%$uNbRGXB3u6Xc~65y@25qjyG$gyeU8#>BGDqyZBu_Li8dn9 zTIG)Kl<417b&mPccREr_+9-QBU-FKW_I*Q9k0#RvUD`w}BGj3!TIstqsoWu`84cee zvF9^Xh;qqyeu3c9X9y~Jjv#)1v}p8m_~$-{pNMppM84+Wkv_`@IUnj2 zXURd92=fDp97l=lQhLKDR7Chr;HW}!ku|jCTmIVeoj}Sm_nT3k6?}~ug;=J&R!FX> zKvBLQPPtI>{0MhKr*J2Pt;k0ZB_v>6MV)W@QK0?!zT_4~<)+`Tn(^zl&Kr?lL}=8pydDaUaMf!FmnqQ~-TQIDtq&K8M546_ai5xeNg~BkaYNEFlnh&U z;jCpR&RU2>6HAKl{Wi!1PDd%fBEUz@TXD>+6(`84`;xD$@8kMWndOcGCLH1N;d`wr zE#nYH`GBNm+>$7>)E{18H^pto%!{*0s>NRgno?}Szi&&!zY%FQ z((!LwxlP9qX@h?gDBFTa^uOsCLcLU?jeKZRdDg0M@b_?pzaLc_x|E%cauvDXc23e@ z>agRYKIQkdX6(7ug8lsaNT=o)|Nh5qdU1mCe9Ey8=bR|et|HK5IPX4;OFooncI?#W zEnq@P^tHfoTw|y420M$AyMH%=pLOYcMza$dJ;m=8zh5%_d;DYACp>`@zjLlB?7*fx zWheF-+$Ac_&aFHf;z$yo1Eswy6#zG@OJHRI^08ADgi2zqKq&|5o*-kJgQR1qc9edsDB#l2`R>{00z z4Y^&Y%IZQ{x=a(^jlAS8WG7Ob6MB#q--{HIL~%}_IL9XpA}*f94RU!1aa@m0=tm6Y zIy$-=(NSH9j_5#CcspXEIw`j#q7&g^Z3qi(MMMY*Zf7g`)9n=HP7MliWKbKz16vUq 
z*p7gJHu(6p!pFZ2et{hn<}L(>_8=&@2LU87xSJx~i|~kEL`8KYA+{G8iG7szZWK|P zi*#u&P3b^+N;}HAT+Z#qiESv3Ye8XTBeFvpknCHl1Ul5U5&<#>(X-!d=I zTwcD$-vklqh4mF8+$x$avP_}o{*@xlYeckVU6*Vn$3c|&V1*&d22xtjRJd126nha1 z0xjtw{}LXrSh9WD+E5~CBqGt8P^*EC3PY&1!HY{-zAi?wjQbZPT4SLGwU$G@AkvaI zous9A=sDC%q?JUcV3mk8S#2WJiv(JPnm3z;+SEpywP?G6l-n~{TGLvh*%}g=7Kzpc zn#&@|^T>X6jE0)(T#!6x5olhQsX$8vT1(wX#((RP=KtCpkN>1FD|z0=mi+fN$}{&X zfu=k!+ev}morbj(=#7W+)U>i&PZU$4i?Q=ug^EHSx>AEvH|ufwZavPKH{r5%t2Wb= z%5m8_2qi%o zj}Vyl2tjF-V~R*n#+-^S2W3A&MD`q_a~>licMcJGvuaSF8XU+QVq_tum@*t)^qfNc z9I?gE5l0eAUm}sjmA*hM1v$3-IpQhFi4`x9Qt=XLGJ|3DD`eHbRid0r@|)hHNYXS& zF(s?0|l3OSDLTWGIS5)F({q=#Srt_d_56p5zL4oX^w zNU=nV=S5Z|*GSV4H2IZdgh7h2f>N%5e*s^|_nQ}Lbq*~;ZHV-b=+&dpk{>M+ty+mj z&rzsn$;_XW=s(nCHmy@y1&>z#K3h~R=|5D3jz)G)8rVrOrG8w}J$^^+s6IWoO81a7 z52N(mS2ZIdm(| zJ96Eg(rnv>(-i1aR%|KGXUJLePMp5qfuodPnY`(+iX3Z_CFvFiDZ__lM!VaTuUk#n zM-EWL4ED>^(btXWvg9&r?7B>8qqvE{Nh>n9?R*Wko|l>Ps<7#d3~tkA(%X2t3L8#V zsEMki*1j?El+@riX5~{QLUlx(4S8NE1Gi1&Sy;ZGlB~v~$iP5Nm`%l5$+KPw<1f-o zG+8Fwxh!y5NiLr~ru@6eug?F)XOv>Ah9=P5zD48Tw(|Al-?s@|{}%$SBGJ5#zX-Ja zEb}QF#d;lRiO%zP$KT;5$rYDUewQfC^83Hqh}{(D-Pc>N`<6`a-HwCi?KmuHHT)bL zv+u!i%JV7bew=d|qa8=u4g>xXMo3HGcMQ@w0e0V3MEfN4OnI!Y3)tlApUDCp?8y>SGmkc29o_&&+4=%93#|Pt>zO%HE|EQKE~VAc1W> zJHd%;GuSCkD}Ic4K11S*pCP&UDN;)wBdhc=@c-GpKY~8e z-!P2+xPbCbG|mC&Wq zGo;L1BIP+Qu8)%3k7$Z>cuYUSBKr|eLMgC;VciG_?V{v%!#}W-(%TLX%CDDy2i$$z z6mGuFY@6Zf*QW52*0%+oK27lQX;yf8X}A+tuNJs?x53r718#op@C@vNPjDZ60%dyj zKKO_9BP4>?BG(bogSgl}WTy5*a1tWrIFgHBw7YgmL_%}Us6*dn~@#Xi1ffZ zBzjjP#p%37W;a5I5)!2s}*6v>>z~qu#?b(NEQ7a zpui9RD}nZ=6dM>Kt)Yb3M4SzgmTjhJ+qj>s;xSqzJCIPGDb*`QqF3v1S=*!PZzR#)~l-=*ZWp_4$3KQS7BiYKWFAy)>sJq&Wjh3_yCDBWx(mJcQE}789gn6MBnf?*R{=Y%eL>401>6B=RW@{-TN|1L^ zo_|OVSuD^ihzt}okjfg%cuqs0mv3jsfc$e?5(PRLKPi&9o`C<}PO09(<)0H(4)jVM zCwb7TD94{YP?}4htCB_0B`;O^x>zO3FAzsr zmZ)-k=~E<>y;KvdBv6>OL4hxkUh@(;^{=rbCIKcjj03tGoSm<_(7^U*hSO@F62YxSTd-+7R7I80F;p5+q@z+|4$1oWZ;hh)R1)*KYk=7#8#`@o3M8}8! zM}htq$UsM(r9v$d-}~gZ`G~j(b&rTNI~qn+5LIGN)vBggQ=mJh|5UVco0jT9p%$rD z{HY;n9}|C|X+qLIepiDJYfVx;Br2^^Jfw!Sn$hqx@+HlKTzeKMo(APzv zJq9VyeK_qxiFO{qxrY=YdoDZl;;cQf?WQ<);nag}9J8Vw+c z6pNtb{vD=~Wz$IWkNCf~P^yWsu3jVP0Egc+ofcBfU;Q;0NkR|`#l;;!n-8dy3E7t)nWy5O> zS3UU|_2Fk)K3n{Z-3g&AhfTvQd`6`tSw!(O86zD~N!OTxecUYUweza4dn+v*8m(z`kj2@Jv_o5)B2U&^TNKfcik!MNMh^L%JM0F!9 zst=*jg9wPAz=rm~FSwgR+euOGgu5R_*QWz6UTtvjXoamu3vAt6U`K3S*t)jD-i`a* zTH)x{LYfs0u1)jH4|%MkOCwvZ^Bnf>ZLs(3fW3D&?Dbh89fAhn6g&vG&|!Fm4#6+1 zA3@>$h)ozka%vAU)BBLiwlK9zMWXYQ+L0IEhQio(6vVV6Q%d(q9qix+#QWDFRwB^e zHHh%8RRZnlSP3`BD!4n>!riqFK0d7o^lwFEXeSb52aps$h}48Zq$CU>DRBfzNu!8P zoWws?Q`bG;r(^AI ztqRBLeKLqJ51Wq`V%y15>^M`7-4`mc?@A30U9ZEbyA8M?k!RaB+;i=LrKD*14ZtB} z7%mYbDjg#zc>>|-Q;5!sQmP{;)smVa(dcmAEJF+C5Xl>7 zbm3FPNd&r*=c|5=)SB0-nnq63TjV#tLm|bvsDt9%`GLaxNzG7LL%FIW^%O0s`)peK zS>-m5N_xguw2ps8hc3+BlHWWnlc;=0pDI^pl&YhQ8sbce9HWemlZjW9W)WxdBT=5B zB>xDJXHB3rnO+d+KPk|9BwD0bqR`KmNHVWOW<6Y(*oD_;AlGSnEHM@6ArbSx;fLaE zut=a6OT9$BRo=9;dRm88{X+&d3Q`$FO6!SC%km%So~2+*bXp&9s9{v{&?;8(|uCK^$|B6hR6;LKU zY5Ii3`VVTVY$3MlJ)$e$LZV16QE6THhi9k!l_>M@p z4 z^bFym8>QK00B0!AXPt*|_TeB-+YjJ`T`!K?^x%YuvqdM4ns?w3Md~0qq^DdQx-GMu zQj#gq`))N-oEz1IO(MqzBFMY1YLYC2{Vf$#drC z@j>hyY$?n336CkvPn0k_QFL zv2!ak5GF}grSexuEPsu762`8fmZS^q*x|i(gHCld&LuPU>viY-Q#djh-Mg|qOAU&cL zsbMWh32i}KV56FiG1^Ziim5}Cj|g-fLOkmb;86>IuSWQKHzGj9nVkknTS-YCMshNR zIe8Q*X=6xEn?!8t6oQiub&3z`US}hW-p`y|z zQEZ7wFRaV5i1Y#yr8dwcT8~Imc2xwMNLq*tU{vrm%T@>cOBMuMgqhoT4h>oVt3;c! 
zSg0jEMDnIZq^(E4>s|-9wjXHP}!jT2eg7vh@p5X+xYBt^Y`%jrA>y_nBhkRhzPC zzp#zVN@$meG$q)WQBlpRXd=r$5@?yiTqIghDII+MN=mdL zYbAZ+H8QwOklbTM-CG|28tFAMfbS(Th%Bd6zT^#1BFwz`$pjcuwkYQ@{Ih1^M|`tp z2wO=9QG*JfA%-`~c#&|vUg|rmfq-rAQA}|z>!LXKh&X?yIDbanz*jU3eM6H#$&%Rz zHNK%?^s5r*mWeND)5W=G`a60l&Al_fqMxES_~bW=>hBm<@+$erx;Rrb$H@eld`)z5 zo_hCp{BH$XlW0w#|CrB*)&$yQ)6t=$QP0B{DPbgigeo-q|1zg zjUSQH_yI|j==l_nN^Rhw%9k#B&WG^}1WAr`j!b4ERXHBRo!=3c@TK>H);-Mq!!6M4qfI%ZaT7=m=Lh_>x zRqmiLTT-AcV+mW?CRob9$0MhC?6fK&Ch~ko25<#U^1UQ?nzHUaj4N(~xJY?E@6?Yo z4t+S~(2tY$eKB#+d9M&sgXw@)Uw8--H231OK@6876R38I8yOUsFbn&nIXI>~#luu7+xQ%= z8P6%o&*72v0-ia-OMaK0!8eEZ{=Ao}G@W1G3wRT$^Bh_97O|!85mWkB335#7E5w(* zMjWL%x{T{Y>cLB0_?XIf?EJn*SlN4o@-?BQ?+{WZ$Q1Z*5L59M@gz>fyW$n{YhIwa z=_&f!W--)0h0)F_Om<9QqGbeQ%~~{ifCAl1f$pY2w-@)LF|P+znLQ|xfq#i2os{P| zDN9Fjj1_tAMX01`gml3tup1u!-Ej5mg0pug9(s1b!Mz=Ju5EbW+(z+jg@r>i%*b8J z@*TS-+_ohT8gbj2SP2bWHmINH4%hGSxLZ8_reNQKTaInG<|xR!dacK;&89zMN9NjOSQJ30JK4(-E2W$4{$-h4sZkEtNwIxUMiHItV{; z*_34z_<592i$pJGVPK-EOiM&sLk1-NM2Y@seY7spk|V7RI#j5r^j{;>E0jb_I!DqE zS-vHau$6&`I+}1(m<1DgmON`ykzOYIDbNOsL|WvRH!jmjUls-+{#c+5iB{4ruhRv3 z4PU>OVyvRd3JNlhUArd>Yxi=UtlXQ96??e6FB@z2