diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml
index dcbcd74..da5c7f3 100644
--- a/.github/workflows/build.yaml
+++ b/.github/workflows/build.yaml
@@ -15,7 +15,9 @@ jobs:
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          python -m pip install -r requirements.txt pyinstaller
+          python -m pip install .[dev] pyinstaller
+      - name: Lint code đŸĻ„
+        run: tox -e lint
       - name: Build binary đŸ”ĸ
         run: pyinstaller "main.spec"
       - name: Run conversion ↩ī¸
@@ -24,9 +26,10 @@
           .\dist\ms_teams_parser.exe -f ".\forensicsim-data\john_doe_v_1_4_00_11161\IndexedDB\https_teams.microsoft.com_0.indexeddb.leveldb" -o "john_doe.json"
       - name: Test calling script 📞
         run: |
-          python utils/dump_leveldb.py --help
-          python utils/dump_localstorage.py --help
-          python utils/dump_sessionstorage.py --help
+          python tools/main.py --help
+          python tools/dump_leveldb.py --help
+          python tools/dump_localstorage.py --help
+          python tools/dump_sessionstorage.py --help
           # python utils/populate_teams.py --help
           # python utils/populate_teams_2.py --help
           # python utils/populate_skype.py --help
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index ac2edaa..e71e2c7 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -16,8 +16,8 @@ jobs:
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          python -m pip install -r requirements.txt pyinstaller
-      - name: Build binary 🚧
+          python -m pip install . pyinstaller
+      - name: Build binary
         run: pyinstaller "main.spec"
       - name: Zip files 🗜ī¸
         run: |
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 7557b1f..88fb5be 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -17,5 +17,8 @@ repos:
   - repo: https://github.com/astral-sh/ruff-pre-commit
     rev: v0.1.11
     hooks:
+      - id: ruff
+        args:
+          - --fix
       - id: ruff-format
         exclude: "^(export|populationdata|testdata)"
diff --git a/README.md b/README.md
index 22c47b8..7cb8a2b 100644
--- a/README.md
+++ b/README.md
@@ -121,7 +121,6 @@ as following. Simply specify the path to the database and where you want to outp
 usage: dump_leveldb.py [-h] -f FILEPATH -o OUTPUTPATH
 dump_leveldb.py: error: the following arguments are required: -f/--filepath, -o/--outputpath
-
 ```
 
 ---
 
@@ -131,16 +130,16 @@ dump_leveldb.py: error: the following arguments are required: -f/--filepath, -o/
 
 A wee script for populating *Skype for Desktop* in a lab environment. The script can be used like this:
 
-```
-utils\populate_skype.py -a 0 -f conversation.json
+```bash
+tools\populate_skype.py -a 0 -f conversation.json
 ```
 
 ## populate_teams.py
 
 A wee script for populating *Microsoft Teams* in a lab environment. The script can be used like this:
 
-```
-utils\populate_teams.py -a 0 -f conversation.json
+```bash
+tools\populate_teams.py -a 0 -f conversation.json
 ```
 
 ---
@@ -148,7 +147,6 @@
 
 This repository comes with two datasets that allow reproducing the findings of this work. The `testdata` folder contains the *LevelDB* databases that have been extracted from two test clients. These can be used for benchmarking without having to perform a (lengthy) data population. The `populationdata` contains *JSON* files of the communication that has been populated into the testing environment. These can be used to reproduce the experiment from scratch.
 However, for a rerun, it will be essential to adjust the dates to future dates, as the populator script relies on sufficient breaks between the individual messages.
-
 ---
 
 # Acknowledgements & Thanks
diff --git a/main.spec b/main.spec
index 564da30..8241e05 100644
--- a/main.spec
+++ b/main.spec
@@ -3,7 +3,7 @@
 
 block_cipher = None
 
-a = Analysis(['utils\\main.py'],
+a = Analysis(['tools\\main.py'],
              binaries=[],
              datas=[],
              hiddenimports=[],
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..31e963c
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,114 @@
+[build-system]
+requires = ["setuptools >= 61.0"]
+build-backend = "setuptools.build_meta"
+
+
+[project]
+name = "forensicsim"
+description = "A forensic open-source parser module for Autopsy that allows extracting the messages, comments, posts, contacts, calendar entries and reactions from a Microsoft Teams IndexedDB LevelDB database."
+readme = "README.md"
+license = {file = "LICENSE.md"}
+requires-python = ">=3.9"
+authors = [
+    { name = "Alexander Bilz", email = "github@markusbilz.com" },
+    { name = "Markus Bilz", email = "github@markusbilz.com" }
+]
+dependencies = [
+    "beautifulsoup4~=4.9.3",
+    "click~=8.0.1",
+    "chromedb @ git+https://github.com/karelze/ccl_chrome_indexeddb@master",
+    "pause~=0.3",
+    "pyautogui~=0.9.54",
+    "pywinauto~=0.6.8"
+]
+
+dynamic = ["version"]
+
+[tool.setuptools.dynamic]
+version = {attr = "forensicsim.__version__"}
+
+[project.urls]
+"Homepage" = "https://forensics.im/"
+"Bug Tracker" = "https://github.com/lxndrblz/forensicsim/issues"
+
+[project.optional-dependencies]
+dev = [
+    "build",
+    "pre-commit",
+    "ruff",
+    "tox",
+]
+
+
+[tool.ruff]
+
+target-version = "py39"
+
+# See rules: https://beta.ruff.rs/docs/rules/
+select = [
+    "C", # flake8-comprehensions
+    "F", # pyflakes
+    "FURB", # refurb
+    "I", # isort
+    "PIE", # misc lints
+    "PT", # pytest
+    "PGH", # pygrep
+    "RUF", # ruff-specific rules
+    "UP", # pyupgrade
+    "SIM", # flake8-simplify
+]
+
+include = ["*.py", "*.pyi", "**/pyproject.toml"]
+
+ignore = [
+    "C901", # too complex
+    "E501", # line too long, handled by the formatter
+    "D206", # indent with white space
+    "W191", # tab indentation
+]
+
+[tool.ruff.lint]
+preview = true
+# exclude = ["tools/**.py"]
+
+[tool.ruff.format]
+preview = true
+
+[tool.ruff.isort]
+known-first-party = ["forensicsim"]
+section-order = ["future", "standard-library", "third-party", "first-party", "local-folder"]
+
+[tool.tox]
+legacy_tox_ini = """
+
+[tox]
+envlist = format, lint, pre-commit
+skipsdist = True
+isolated_build = True
+
+[testenv]
+deps = .[dev]
+
+# Cleanup tasks
+[testenv:clean]
+commands =
+    sh -c "rm -rf build cover dist .hypothesis .mypy_cache .pytest_cache site"
+
+# Auto Formatting
+[testenv:format]
+commands =
+    python -m ruff src tests --fix
+    python -m ruff format src
+
+# Syntax Checks
+[testenv:lint]
+commands =
+    python -m ruff --output-format=github src
+    python -m ruff format src --check
+
+# Pre-Commit
+[testenv:pre-commit]
+commands =
+    python -m pre-commit run --all-files --show-diff-on-failure
+
+"""
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index 0dff851..0000000
--- a/requirements.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-beautifulsoup4~=4.12.2
-chardet~=5.2.0
-click~=8.1.7
-chromedb @ git+https://github.com/karelze/ccl_chrome_indexeddb@master
-colorama~=0.4.6
-pause~=0.3
-pyautogui~=0.9.54
-pywinauto~=0.6.8
diff --git a/src/forensicsim/__init__.py b/src/forensicsim/__init__.py
new file mode 100644
index 0000000..43a1e95
--- /dev/null
+++ b/src/forensicsim/__init__.py
@@ -0,0 +1 @@
+__version__ = "0.5.3"
diff --git a/utils/shared.py b/src/forensicsim/backend.py
similarity index 94%
rename from utils/shared.py
rename to src/forensicsim/backend.py
index 1f8a082..06ec4ff 100644
--- a/utils/shared.py
+++ b/src/forensicsim/backend.py
@@ -29,10 +29,10 @@
 from chromedb import (
     ccl_blink_value_deserializer,
     ccl_chromium_indexeddb,
-    ccl_v8_value_deserializer,
-    ccl_leveldb,
     ccl_chromium_localstorage,
     ccl_chromium_sessionstorage,
+    ccl_leveldb,
+    ccl_v8_value_deserializer,
 )
 from chromedb.ccl_chromium_indexeddb import (
     DatabaseMetadataType,
@@ -77,19 +77,18 @@ def fetch_data(self):
             if (
                 record.key.startswith(b"\x00\x00\x00\x00")
                 and record.state == ccl_leveldb.KeyState.Live
+            ) and (
+                record.key not in global_metadata_raw
+                or global_metadata_raw[record.key].seq < record.seq
             ):
-                if (
-                    record.key not in global_metadata_raw
-                    or global_metadata_raw[record.key].seq < record.seq
-                ):
-                    global_metadata_raw[record.key] = record
+                global_metadata_raw[record.key] = record
 
         # Convert the raw metadata to a nice GlobalMetadata Object
         global_metadata = ccl_chromium_indexeddb.GlobalMetadata(global_metadata_raw)
 
         # Loop through the database IDs
         for db_id in global_metadata.db_ids:
-            if None == db_id.dbid_no:
+            if db_id.dbid_no is None:
                 continue
 
             if db_id.dbid_no > 0x7F:
@@ -130,9 +129,11 @@ def fetch_data(self):
                 meta_type = record.key[len(prefix_objectstore) + len(varint_raw)]
 
-                old_version = objectstore_metadata_raw.get(
-                    (db_id.dbid_no, objstore_id, meta_type)
-                )
+                old_version = objectstore_metadata_raw.get((
+                    db_id.dbid_no,
+                    objstore_id,
+                    meta_type,
+                ))
 
                 if old_version is None or old_version.seq < record.seq:
                     objectstore_metadata_raw[
@@ -160,7 +161,7 @@ def iterate_records(self, do_not_filter=False):
         # Loop through the databases and object stores based on their ids
         for global_id in self.global_metadata.db_ids:
             # print(f"Processing database: {global_id.name}")
-            if None == global_id.dbid_no:
+            if global_id.dbid_no is None:
                 print(f"WARNING: Skipping database {global_id.name}")
                 continue
@@ -188,7 +189,7 @@ def iterate_records(self, do_not_filter=False):
                     if record.value == b"":
                         continue
                     (
-                        value_version,
+                        _value_version,
                         varint_raw,
                     ) = ccl_chromium_indexeddb.le_varint_from_bytes(
                         record.value
                     )
@@ -201,7 +202,7 @@ def iterate_records(self, do_not_filter=False):
                     val_idx += 1
                     (
-                        blink_version,
+                        _blink_version,
                         varint_raw,
                     ) = ccl_chromium_indexeddb.le_varint_from_bytes(
                         record.value[val_idx:]
                     )
@@ -226,7 +227,7 @@ def iterate_records(self, do_not_filter=False):
                             "state": record.state,
                             "seq": record.seq,
                         }
-                    except Exception as e:
+                    except Exception:
                         # TODO Some proper error handling wouldn't hurt
                         continue
         # print(f"{datastore} {global_id.name} {records_per_object_store}")
@@ -280,7 +281,7 @@ def write_results_to_json(data, outputpath):
             json.dump(
                 data, f, indent=4, sort_keys=True, default=str, ensure_ascii=False
             )
-    except EnvironmentError as e:
+    except OSError as e:
         print(e)
@@ -290,5 +291,5 @@ def parse_json():
         with open("teams.json") as json_file:
             data = json.load(json_file)
             return data
-    except EnvironmentError as e:
+    except OSError as e:
         print(e)
diff --git a/utils/consts.py b/src/forensicsim/consts.py
similarity index 100%
rename from utils/consts.py
rename to src/forensicsim/consts.py
diff --git a/utils/main.py b/src/forensicsim/parser.py
similarity index 93%
rename from utils/main.py
rename to src/forensicsim/parser.py
index ef42c54..a27e6de 100644
--- a/utils/main.py
+++ b/src/forensicsim/parser.py
@@ -26,11 +26,9 @@
 from datetime import datetime
 from pathlib import Path
 
-import click
 from bs4 import BeautifulSoup
 
-from shared import parse_db, write_results_to_json
-from consts import XTRACT_HEADER
+from forensicsim.backend import parse_db, write_results_to_json
 
 MESSAGE_TYPES = {
     "messages": {
@@ -253,7 +251,7 @@ def parse_reply_chain(reply_chains):
             # Other types include ThreadActivity/TopicUpdate and ThreadActivity/AddMember
             # -> ThreadActivity/TopicUpdate occurs for meeting updates
             # -> ThreadActivity/AddMember occurs when someone gets added to a chat
-        except UnicodeDecodeError or KeyError or NameError as e:
+        except (UnicodeDecodeError, KeyError, NameError):
             print(
                 "Could not decode the following item in the reply chain (output is not deduplicated)."
             )
@@ -273,19 +271,18 @@ def parse_conversations(conversations):
         # Include file origin for records
         x["origin_file"] = conversation["origin_file"]
         # Make first at sure that the conversation has a cachedDeduplicationKey
-        if "lastMessage" in conversation["value"]:
+        if "lastMessage" in conversation["value"]:  # noqa: SIM102
             if (
                 hasattr(conversation["value"]["lastMessage"], "keys")
-                and "cachedDeduplicationKey"
-                in conversation["value"]["lastMessage"].keys()
+                and "cachedDeduplicationKey" in conversation["value"]["lastMessage"]
             ):
                 x["cachedDeduplicationKey"] = conversation["value"]["lastMessage"][
                     "cachedDeduplicationKey"
                 ]
         # we are only interested in meetings for now
-        if x["type"] == "Meeting":
+        if x["type"] == "Meeting":  # noqa: SIM102
             # assign the type for further processing as the object store might not be sufficient
-            if "threadProperties" in x:
+            if "threadProperties" in x:  # noqa: SIM102
                 if "meeting" in x["threadProperties"]:
                     x["threadProperties"]["meeting"] = decode_and_loads(
                         x["threadProperties"]["meeting"]
                     )
@@ -345,29 +342,3 @@ def process_db(input_path: Path, output_path: Path):
     extracted_values = parse_db(input_path)
     parsed_records = parse_records(extracted_values)
     write_results_to_json(parsed_records, output_path)
-
-
-@click.command()
-@click.option(
-    "-f",
-    "--filepath",
-    type=click.Path(
-        exists=True, readable=True, writable=False, dir_okay=True, path_type=Path
-    ),
-    required=True,
-    help="File path to the IndexedDB.",
-)
-@click.option(
-    "-o",
-    "--outputpath",
-    type=click.Path(writable=True, path_type=Path),
-    required=True,
-    help="File path to the processed output.",
-)
-def process_cmd(filepath, outputpath):
-    click.echo(XTRACT_HEADER)
-    process_db(filepath, outputpath)
-
-
-if __name__ == "__main__":
-    process_cmd()
diff --git a/Forensicsim_Parser.py b/tools/Forensicsim_Parser.py
similarity index 94%
rename from Forensicsim_Parser.py
rename to tools/Forensicsim_Parser.py
index 4723119..04a54bf 100644
--- a/Forensicsim_Parser.py
+++ b/tools/Forensicsim_Parser.py
@@ -42,39 +42,36 @@
 from java.lang import ProcessBuilder
 from java.util import ArrayList
 from java.util.logging import Level
-from org.sleuthkit.autopsy.casemodule import Case
-from org.sleuthkit.autopsy.casemodule import NoCurrentCaseException
-from org.sleuthkit.autopsy.coreutils import ExecUtil
-from org.sleuthkit.autopsy.coreutils import Logger
-from org.sleuthkit.autopsy.coreutils import PlatformUtil
+from org.sleuthkit.autopsy.casemodule import Case, NoCurrentCaseException
+from org.sleuthkit.autopsy.coreutils import ExecUtil, Logger, PlatformUtil
 from org.sleuthkit.autopsy.datamodel import ContentUtils
-from org.sleuthkit.autopsy.ingest import DataSourceIngestModule
-from org.sleuthkit.autopsy.ingest import DataSourceIngestModuleProcessTerminator
-from org.sleuthkit.autopsy.ingest import IngestMessage
-from org.sleuthkit.autopsy.ingest import IngestModule
-from org.sleuthkit.autopsy.ingest import IngestModuleFactoryAdapter
-from org.sleuthkit.autopsy.ingest import IngestServices
+from org.sleuthkit.autopsy.ingest import (
+    DataSourceIngestModule,
+    DataSourceIngestModuleProcessTerminator,
+    IngestMessage,
+    IngestModule,
+    IngestModuleFactoryAdapter,
+    IngestServices,
+)
 from org.sleuthkit.autopsy.ingest.IngestModule import IngestModuleException
-from org.sleuthkit.datamodel import BlackboardArtifact
-from org.sleuthkit.datamodel import BlackboardAttribute
-from org.sleuthkit.datamodel import CommunicationsManager
-from org.sleuthkit.datamodel import TskCoreException
-from org.sleuthkit.datamodel import TskData
+from org.sleuthkit.datamodel import (
+    BlackboardArtifact,
+    BlackboardAttribute,
+    CommunicationsManager,
+    TskCoreException,
+    TskData,
+)
 from org.sleuthkit.datamodel.Blackboard import BlackboardException
 from org.sleuthkit.datamodel.blackboardutils import CommunicationArtifactsHelper
-from org.sleuthkit.datamodel.blackboardutils.CommunicationArtifactsHelper import (
-    CallMediaType,
+from org.sleuthkit.datamodel.blackboardutils.attributes import MessageAttachments
+from org.sleuthkit.datamodel.blackboardutils.attributes.MessageAttachments import (
+    URLAttachment,
 )
 from org.sleuthkit.datamodel.blackboardutils.CommunicationArtifactsHelper import (
+    CallMediaType,
     CommunicationDirection,
-)
-from org.sleuthkit.datamodel.blackboardutils.CommunicationArtifactsHelper import (
     MessageReadStatus,
 )
-from org.sleuthkit.datamodel.blackboardutils.attributes import MessageAttachments
-from org.sleuthkit.datamodel.blackboardutils.attributes.MessageAttachments import (
-    URLAttachment,
-)
 
 # Common Prefix Shared for all artefacts
 ARTIFACT_PREFIX = "Microsoft Teams"
@@ -213,11 +210,11 @@ def _parse_databases(self, content, progress_bar):
             os.makedirs(temp_path_to_content)
             self.log(
                 Level.INFO,
-                "Created temporary directory: {}.".format(temp_path_to_content),
+                f"Created temporary directory: {temp_path_to_content}.",
             )
         except OSError:
             raise IngestModuleException(
-                "Could not create directory: {}.".format(temp_path_to_content)
+                f"Could not create directory: {temp_path_to_content}."
             )
 
         # At first extract the desired artefacts to our newly created temp directory
@@ -246,10 +243,10 @@ def _extract(self, content, path):
                 elif child.isDir():
                     os.mkdir(child_path)
                     self._extract(child, child_path)
-            self.log(Level.INFO, "Successfully extracted to {}".format(path))
+            self.log(Level.INFO, f"Successfully extracted to {path}")
         except OSError:
             raise IngestModuleException(
-                "Could not extract files to directory: {}.".format(path)
+                f"Could not extract files to directory: {path}."
             )
 
     def _analyze(self, content, path, progress_bar):
@@ -523,14 +520,6 @@ def parse_messages(self, messages, helper, teams_leveldb_file_path):
             message_text = message["content"]
             # Group by the conversationId, these can be direct messages, but also posts
             thread_id = message["conversationId"]
-            # Additional Attributes
-            message_date_time_edited = 0
-            message_date_time_deleted = 0
-
-            if "edittime" in message["properties"]:
-                message_date_time_edited = int(message["properties"]["edittime"])
-            if "deletetime" in message["properties"]:
-                message_date_time_edited = int(message["properties"]["deletetime"])
 
             additional_attributes = ArrayList()
             additional_attributes.add(
@@ -703,7 +692,7 @@ def get_level_db_file(self, content, filepath):
         dir_name = os.path.join(content.getParentPath(), content.getName())
         results = file_manager.findFiles(data_source, filename, dir_name)
         if results.isEmpty():
-            self.log(Level.INFO, "Unable to locate {}".format(filename))
+            self.log(Level.INFO, f"Unable to locate {filename}")
             return
         db_file = results.get(
             0
         )
@@ -781,9 +770,7 @@ def process(self, data_source, progress_bar):
 
         self.log(
             Level.INFO,
-            "Found {} {} directories to process.".format(
-                directories_to_process, directory
-            ),
+            f"Found {directories_to_process} {directory} directories to process.",
         )
 
         for i, content in enumerate(all_ms_teams_leveldbs):
diff --git a/utils/dump_leveldb.py b/tools/dump_leveldb.py
similarity index 95%
rename from utils/dump_leveldb.py
rename to tools/dump_leveldb.py
index cc986ab..bdcb814 100644
--- a/utils/dump_leveldb.py
+++ b/tools/dump_leveldb.py
@@ -26,8 +26,8 @@
 
 import click
 
-from consts import DUMP_HEADER
-from shared import parse_db, write_results_to_json
+from forensicsim.backend import parse_db, write_results_to_json
+from forensicsim.consts import DUMP_HEADER
 
 
 def process_db(input_path, output_path):
diff --git a/utils/dump_localstorage.py b/tools/dump_localstorage.py
similarity index 94%
rename from utils/dump_localstorage.py
rename to tools/dump_localstorage.py
index 7c89a73..02bc320 100644
--- a/utils/dump_localstorage.py
+++ b/tools/dump_localstorage.py
@@ -26,8 +26,8 @@
 
 import click
 
-from shared import parse_localstorage, write_results_to_json
-from consts import DUMP_HEADER
+from forensicsim.backend import parse_localstorage, write_results_to_json
+from forensicsim.consts import DUMP_HEADER
 
 
 def process_db(filepath: Path, output_path: Path):
diff --git a/utils/dump_sessionstorage.py b/tools/dump_sessionstorage.py
similarity index 93%
rename from utils/dump_sessionstorage.py
rename to tools/dump_sessionstorage.py
index 14a7154..418cbee 100644
--- a/utils/dump_sessionstorage.py
+++ b/tools/dump_sessionstorage.py
@@ -26,8 +26,8 @@
 
 import click
 
-from consts import DUMP_HEADER
-from shared import parse_sessionstorage, write_results_to_json
+from forensicsim.backend import parse_sessionstorage, write_results_to_json
+from forensicsim.consts import DUMP_HEADER
 
 
 def process_db(input_path: Path, output_path: Path):
diff --git a/tools/main.py b/tools/main.py
new file mode 100644
index 0000000..10bda5a
--- /dev/null
+++ b/tools/main.py
@@ -0,0 +1,66 @@
+"""
+MIT License
+
+Copyright (c) 2021 Alexander Bilz
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+"""
+
+from pathlib import Path
+
+import click
+
+from forensicsim.backend import parse_db, write_results_to_json
+from forensicsim.consts import XTRACT_HEADER
+from forensicsim.parser import parse_records
+
+
+def process_db(input_path: Path, output_path: Path):
+    if not input_path.parts[-1].endswith(".leveldb"):
+        raise ValueError(f"Expected a leveldb folder. Path: {input_path}")
+
+    extracted_values = parse_db(input_path)
+    parsed_records = parse_records(extracted_values)
+    write_results_to_json(parsed_records, output_path)
+
+
+@click.command()
+@click.option(
+    "-f",
+    "--filepath",
+    type=click.Path(
+        exists=True, readable=True, writable=False, dir_okay=True, path_type=Path
+    ),
+    required=True,
+    help="File path to the IndexedDB.",
+)
+@click.option(
+    "-o",
+    "--outputpath",
+    type=click.Path(writable=True, path_type=Path),
+    required=True,
+    help="File path to the processed output.",
+)
+def process_cmd(filepath, outputpath):
+    click.echo(XTRACT_HEADER)
+    process_db(filepath, outputpath)
+
+
+if __name__ == "__main__":
+    process_cmd()
diff --git a/utils/populate_skype.py b/tools/populate_skype.py
similarity index 99%
rename from utils/populate_skype.py
rename to tools/populate_skype.py
index fd0e086..83fbce4 100644
--- a/utils/populate_skype.py
+++ b/tools/populate_skype.py
@@ -5,11 +5,10 @@
 
 import click
 import pause
-
 from pywinauto import Desktop, keyboard
 from pywinauto.application import Application
 
-from consts import UTIL_HEADER
+from forensicsim.consts import UTIL_HEADER
 
 logging.basicConfig(
     format="%(asctime)s %(message)s",
diff --git a/utils/populate_teams.py b/tools/populate_teams.py
similarity index 99%
rename from utils/populate_teams.py
rename to tools/populate_teams.py
index 85cf8bd..c00d7a3 100644
--- a/utils/populate_teams.py
+++ b/tools/populate_teams.py
@@ -9,7 +9,8 @@
 import pause
 import pyautogui
 from pywinauto import keyboard
-from consts import UTIL_HEADER
+
+from forensicsim.consts import UTIL_HEADER
 
 # Avoid the default link as it would update Teams on startup
 os.startfile("C:/Users/forensics/AppData/Local/Microsoft/Teams/current/Teams.exe")
diff --git a/utils/populate_teams_2.py b/tools/populate_teams_2.py
similarity index 99%
rename from utils/populate_teams_2.py
rename to tools/populate_teams_2.py
index e6120dd..0a06103 100644
--- a/utils/populate_teams_2.py
+++ b/tools/populate_teams_2.py
@@ -7,10 +7,10 @@
 import click
 import pause
 import pyautogui
-
 from pywinauto import keyboard
 
-from consts import UTIL_HEADER
+from forensicsim.consts import UTIL_HEADER
+
 # Teams could be started from script, but requires change owner permissions. Better to launch Teams 2.0 first and
 # then set the focus to the application.
 # os.startfile("C:/Program Files/WindowsApps/MicrosoftTeams_21197.1103.908.5982_x64__8wekyb3d8bbwe/msteams.exe")