Commit a0a76e9
WIP: Fix docker
CBroz1 committed Jul 19, 2022
1 parent 698105e
Showing 12 changed files with 100 additions and 76 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -4,6 +4,7 @@ Observes [Semantic Versioning](https://semver.org/spec/v2.0.0.html) standard and

## 0.1.1 - Unreleased
- Added - Integration tests
- Changed - Dataset for didactic notebooks

## 0.1.0 - 2022-05-10
- Added - Process.py script
24 changes: 10 additions & 14 deletions docker/Dockerfile.test
@@ -12,23 +12,19 @@ WORKDIR /main/workflow-deeplabcut
RUN pip install --no-deps "element-interface@git+https://github.com/datajoint/element-interface"
RUN pip install --no-deps "djarchive-client@git+https://github.com/datajoint/djarchive-client"

# Always move local to temp, if they exist - conditional install in setup.sh
COPY --chown=anaconda:anaconda ./element-lab/ \
./element-animal/ \
./element-session/ \
./element-event/ \
./element-interface/ \
./element-deeplabcut/ \
./workflow-deeplabcut/ \
/main/
# COPY --chown=anaconda:anaconda . /main/workflow-deeplabcut/
# Always move local - conditional install in setup.sh
COPY --chown=anaconda:anaconda ./element-lab/ /main/element-lab/
COPY --chown=anaconda:anaconda ./element-animal/ /main/element-animal/
COPY --chown=anaconda:anaconda ./element-session/ /main/element-session/
COPY --chown=anaconda:anaconda ./element-event/ /main/element-event/
COPY --chown=anaconda:anaconda ./element-interface/ /main/element-interface/
COPY --chown=anaconda:anaconda ./element-deeplabcut/ /main/element-deeplabcut/
COPY --chown=anaconda:anaconda ./workflow-deeplabcut/ /main/workflow-deeplabcut/

# Conditional install - local-all, local-dlc, or git
COPY --chown=anaconda:anaconda ./workflow-deeplabcut/docker/setup.sh /main/
# RUN chmod 755 /main/.env
# RUN dotenv -f /main/.env -q always set
COPY --chown=anaconda:anaconda ./workflow-deeplabcut/docker/.env /main/
RUN chmod 755 /main/setup.sh
RUN chmod 755 /main/.env
RUN /main/setup.sh

RUN rm -f ./dj_local_conf.json
# RUN pip install -r ./requirements_test.txt
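
With the conditional-install files copied in, this image is typically exercised through the compose file below. A hedged sketch of building and running the test stack, assuming docker-compose-test.yaml builds this Dockerfile and that docker/.env holds the variables it documents; paths and flags are illustrative, not taken from the repo:

    cd workflow-deeplabcut
    docker compose -f docker/docker-compose-test.yaml --env-file docker/.env up --build
    # When the tests finish, tear the stack down and drop its volumes
    docker compose -f docker/docker-compose-test.yaml down --volumes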
2 changes: 2 additions & 0 deletions docker/apt_requirements.txt
@@ -1,2 +1,4 @@
git
libgl1
ffmpeg
locales-all
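
This file is mounted into the container as /tmp/apt_requirements.txt by the compose file below. A hedged sketch of how such a list is commonly consumed inside a Debian/Ubuntu image; the exact install step used by this image is not shown in this diff:

    apt-get update && xargs -a /tmp/apt_requirements.txt apt-get install -y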
19 changes: 10 additions & 9 deletions docker/docker-compose-test.yaml
@@ -1,6 +1,6 @@
# .env file:
# .env file. Take care that VS Code's Black formatter does not add spaces around '='
# COMPOSE_PROJECT_NAME='dlc'
# TEST_DATA_DIR=<local parent directory of workflow_dlc_data{1,2}>
# TEST_DATA_DIR=<local dir for from_top_tracking>
# GITHUB_USERNAME=datajoint
# INSTALL_OPTION=local-all, local-dlc, or git
# TEST_CMD="pytest" # pytest --dj-{verbose,teardown} False # options
@@ -48,15 +48,16 @@ services:
eval ${TEST_CMD}
tail -f /dev/null
volumes:
- ${TEST_DATA_DIR}:/main/test_data
# - ./workflow-deeplabcut/apt_requirements.txt:/tmp/apt_requirements.txt
- ./element-lab:/main/element-lab
- ./element-animal:/main/element-animal
- ./element-session:/main/element-session
- ./element-deeplabcut:/main/element-deeplabcut
- ./workflow-deeplabcut:/main/workflow-deeplabcut
- ${TEST_DATA_DIR}:/main/test_data/
- ../../workflow-deeplabcut/docker/apt_requirements.txt:/tmp/apt_requirements.txt
- ../../element-lab:/main/element-lab
- ../../element-animal:/main/element-animal
- ../../element-session:/main/element-session
- ../../element-deeplabcut:/main/element-deeplabcut
- ../../workflow-deeplabcut:/main/workflow-deeplabcut
depends_on:
db:
condition: service_healthy

networks:
deeplabcut:
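
The commented variables at the top of this compose file imply a docker/.env along these lines. A hedged example with placeholder values; only the variable names and the 'dlc'/'datajoint' defaults come from the comments above:

    COMPOSE_PROJECT_NAME='dlc'
    TEST_DATA_DIR=/path/to/parent/of/from_top_tracking
    GITHUB_USERNAME=datajoint
    INSTALL_OPTION=local-all
    TEST_CMD="pytest --dj-verbose False --dj-teardown False"

Keep assignments free of spaces around '=' so that both compose and setup.sh can parse the file.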
13 changes: 13 additions & 0 deletions docker/setup.sh
@@ -1,5 +1,8 @@
#! /bin/bash
export $(grep -v '^#' /main/.env | xargs)

echo "INSALL OPTION:" $INSTALL_OPTION
cd /main/
# all local installs, mapped from host
if [ "$INSTALL_OPTION" == "local-all" ]; then
for f in lab animal session event deeplabcut; do
@@ -22,3 +25,13 @@ else
pip install git+https://github.com/${GITHUB_USERNAME}/workflow-deeplabcut.git
fi
fi

# If test cmd contains pytest, install
if [[ "$TEST_CMD" == *pytest* ]]; then
pip install pytest
pip install pytest-cov
fi

# additional installs for running DLC
pip install torch
pip install ffmpeg
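
The export $(grep -v '^#' /main/.env | xargs) line at the top of this script exports every non-comment KEY=VALUE pair from the .env file. A hedged, standalone sketch of that idiom; the file path and values are illustrative:

    printf 'INSTALL_OPTION=local-all\nGITHUB_USERNAME=datajoint\n' > /tmp/example.env
    export $(grep -v '^#' /tmp/example.env | xargs)  # split into KEY=VALUE words and export each
    echo "$INSTALL_OPTION"                           # prints: local-all

Note that this word-splitting approach only handles simple values; entries whose values contain spaces, such as a multi-word TEST_CMD, need quoting-aware loading.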
2 changes: 0 additions & 2 deletions requirements_test.txt

This file was deleted.

4 changes: 4 additions & 0 deletions setup.cfg
@@ -2,6 +2,10 @@
max-line-length = 88
exclude = notebooks

[tool.black]
line-length = 88
exclude = \.env

[tool:pytest]
minversion = 6.2
# addopts = -sv -p no:warnings
84 changes: 43 additions & 41 deletions tests/conftest.py
@@ -1,6 +1,7 @@
import os
import sys
import pytest
import logging
from pathlib import Path
from contextlib import nullcontext
from element_deeplabcut.model import str_to_bool
@@ -74,9 +75,14 @@ def setup(request):
test_user_data_dir = Path(request.config.getoption("--dj-datadir"))
test_user_data_dir.mkdir(exist_ok=True)

if not verbose:
logging.getLogger("deeplabcut").setLevel(logging.CRITICAL)
logging.getLogger("torch").setLevel(logging.CRITICAL)
logging.getLogger("tensorflow").setLevel(logging.CRITICAL)

verbose_context = nullcontext() if verbose else QuietStdOut()

yield verbose_context
yield verbose_context, verbose


# ------------------ GENERAL FUNCTION ------------------
@@ -135,37 +141,27 @@ def dj_config():
return


@pytest.fixture()
def test_data(dj_config):
@pytest.fixture(scope="session")
def test_data(setup, dj_config):
"""Load demo data. Try local path. Try DJArchive w/either os environ or config"""
try:
test_data_dir = find_full_path(get_dlc_root_data_dir(), test_data_project)
except FileNotFoundError:
from workflow_deeplabcut.load_demo_data import download_djarchive_dlc_data

download_djarchive_dlc_data(get_dlc_root_data_dir()[0])

else: # if local version, check for training-dataset dir and full project path
from deeplabcut.utils.auxiliaryfunctions import read_config
from workflow_deeplabcut.load_demo_data import (
download_djarchive_dlc_data,
setup_bare_project,
shorten_video,
)

training_dataset_exists = (test_data_dir / "training-datasets").exists()
project_path_in_config = (
True
if read_config(test_data_dir / "config.yaml").get("project_path", False)
else False
)
verbose_context, _ = setup
try:
_ = find_full_path(get_dlc_root_data_dir(), test_data_project)

if training_dataset_exists and project_path_in_config: # skip project setup
return
except FileNotFoundError:
with verbose_context:
download_djarchive_dlc_data(target_directory="/main/test_data/")

with verbose_context: # Setup - expand relative paths, make a shorter video
from workflow_deeplabcut.load_demo_data import setup_bare_project, shorten_video

setup_bare_project(project=test_data_project)
shorten_video(vid_path=inference_vid)

return


@pytest.fixture(scope="session")
def pipeline(setup):
@@ -195,7 +191,7 @@ def pipeline(setup):


@pytest.fixture(scope="session")
def ingest_csvs(setup, pipeline):
def ingest_csvs(setup, test_data, pipeline):
"""For each input, generates csv in test_user_data_dir and ingests in schema"""
# CSV as list of 3: relevant insert func, filename, content
all_csvs = [
@@ -225,8 +221,6 @@ def ingest_csvs(setup, pipeline):
+ "trainingsetindex,filter_type,track_method,"
+ "scorer_legacy,maxiters",
f"0,{test_data_project},{test_data_project}/config.yaml,1,0,,,False,5",
"1,OpenField,openfield-Pranav-2018-10-30/config.yaml,1,0,,,False,5",
"2,Reaching,Reaching-Mackenzie-2018-08-30/config.yaml,1,0,,,False,5",
],
],
[ # 3
@@ -264,7 +258,7 @@
],
]

# When not tearing down, and if there's already data in last table, can skip insert
# If data in last table, presume didn't tear down last time, skip insert
if len(pipeline["model"].Model()) == 0:
for csv_info in all_csvs:
csv_path = test_user_data_dir / csv_info[1]
@@ -322,14 +316,30 @@ def pose_estim_task(pipeline, ingest_csvs):


@pytest.fixture()
def pose_output_path(setup, pose_estim_task):
"""Run model.PoseEstimation populate. Return expected output dir."""
def revert_checkpoint(setup):
"""Reverts checkpoint to included downloaded well-trained model"""
from workflow_deeplabcut.load_demo_data import revert_checkpoint_file

verbose_context = setup
_, device = pose_estim_task
revert_checkpoint_file()

revert_checkpoint_file() # ensures checkpoint has access to well-trained model

@pytest.fixture()
def run_pose_estim(
setup, pipeline, pose_estim_task, populate_settings, revert_checkpoint
):
"""Run pose estimation"""

verbose_context, _ = setup
with verbose_context:
pipeline["model"].PoseEstimation.populate(**populate_settings)


@pytest.fixture()
def pose_output_path(setup, pose_estim_task, run_pose_estim):
"""Run model.PoseEstimation populate. Return expected output dir."""

verbose_context, _ = setup
_, device = pose_estim_task

output_path = find_full_path(
get_dlc_root_data_dir(),
@@ -347,14 +357,6 @@ def pose_output_path(setup, pose_estim_task):
results_file.unlink()


@pytest.fixture()
def run_pose_estim(setup, pipeline, pose_estim_task, populate_settings):
"""Run pose estimation"""
verbose_context = setup
with verbose_context:
pipeline["model"].PoseEstimation.populate(**populate_settings)


@pytest.fixture()
def get_trajectory(pipeline, pose_estim_task, run_pose_estim):
"""Run model.PoseEstimation.get_trajectory for sample task, return pandas df"""
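
The fixtures above read custom command-line options (--dj-datadir here, --dj-verbose and --dj-teardown per the compose comments). A hedged example of invoking the suite directly inside the workflow container, assuming those options are registered in conftest.py; the datadir path is illustrative:

    pytest -sv --dj-verbose False --dj-teardown False --dj-datadir /tmp/dj_test_data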
2 changes: 1 addition & 1 deletion tests/test_ingest.py
@@ -16,7 +16,7 @@ def test_ingest(pipeline, ingest_csvs):
table_lengths = [
(subject.Subject(), 1, "subject6"),
(session.Session(), 2, datetime.datetime(2021, 6, 1, 13, 33, 33)),
(train.TrainingParamSet(), 3, "from_top_tracking"),
(train.TrainingParamSet(), 1, "from_top_tracking"),
(train.VideoSet(), 3, 0),
(
train.VideoSet.File(),
18 changes: 13 additions & 5 deletions tests/test_populate.py
@@ -1,19 +1,25 @@
"""Run each populate command - for computed/imported tables
"""

import logging
from .conftest import find_full_path, get_dlc_root_data_dir

from time import time
import pytest
import logging


def test_training(setup, test_data, pipeline, populate_settings, training_task):
verbose_context = setup
verbose_context, verbose = setup
train = pipeline["train"]

# Run training
with verbose_context:
train.ModelTraining.populate(**populate_settings)

if not verbose: # train command in DLC resets logger
logging.getLogger("deeplabcut").setLevel(logging.WARNING)

project_path = find_full_path(
get_dlc_root_data_dir(), train.TrainingTask.fetch("project_path", limit=1)[0]
)
@@ -28,7 +34,7 @@ def test_training(setup, test_data, pipeline, populate_settings, training_task):


def test_record_info(setup, test_data, pipeline, populate_settings, ingest_csvs):
verbose_context = setup
verbose_context, _ = setup
model = pipeline["model"]

# Run recording info populate
@@ -41,9 +47,11 @@ def test_record_info(setup, test_data, pipeline, populate_settings, ingest_csvs)
assert fps == 60, f"Test video fps didn't match 60: {fps}"


def test_model_eval(setup, test_data, pipeline, populate_settings, ingest_csvs):
def test_model_eval(
setup, test_data, pipeline, populate_settings, ingest_csvs, revert_checkpoint
):
"""Test model evaluation"""
verbose_context = setup
verbose_context, _ = setup
model = pipeline["model"]

# Run model evaluation
Expand All @@ -63,7 +71,7 @@ def test_model_eval(setup, test_data, pipeline, populate_settings, ingest_csvs):
assert time() == pytest.approx(eval_time, 1e4), f"Eval result is old: {eval_file}"


def test_pose_estim(setup, test_data, pipeline, populate_settings, pose_output_path):
def test_pose_estim(setup, test_data, pipeline, pose_output_path):
"""Test pose estimation"""
output_path = pose_output_path

3 changes: 1 addition & 2 deletions workflow_deeplabcut/ingest.py
@@ -33,7 +33,7 @@ def ingest_sessions(
csvs = [session_csv_path, session_csv_path, session_csv_path]
tables = [session.Session(), session.SessionDirectory(), session.SessionNote()]

ingest_csv_to_table(csvs, tables, skip_duplicates=skip_duplicates)
ingest_csv_to_table(csvs, tables, skip_duplicates=skip_duplicates, verbose=verbose)


def ingest_train_params(config_params_csv_path, skip_duplicates=True, verbose=True):
@@ -90,7 +90,6 @@ def ingest_model_vids(model_video_csv_path, skip_duplicates=True, verbose=False)
def ingest_model(model_model_csv_path, skip_duplicates=True, verbose=False):
"""Use provided CSV to insert into model.Model table"""
# NOTE: not included in ingest_dlc_items because not yet included in notebooks
import datajoint as dj

with open(model_model_csv_path, newline="") as f:
data = list(csv.DictReader(f, delimiter=","))
4 changes: 2 additions & 2 deletions workflow_deeplabcut/load_demo_data.py
@@ -6,12 +6,12 @@
import os


def download_djarchive_dlc_data(target_directory="/tmp/workflow_dlc_data/"):
def download_djarchive_dlc_data(target_directory="/tmp/test_data/"):
"""Download DLC demo data from djarchive"""
import djarchive_client

client = djarchive_client.client()
os.makedirs(target_directory)
os.makedirs(target_directory, exist_ok=True)

client.download(
"workflow-dlc-data", target_directory=target_directory, revision="v1"
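
A hedged usage sketch for fetching the demo dataset outside the container, using only the function shown above; it assumes djarchive_client is installed and configured with access to the workflow-dlc-data archive:

    python -c "from workflow_deeplabcut.load_demo_data import download_djarchive_dlc_data as dl; dl(target_directory='/tmp/test_data/')"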
