diff --git a/.github/workflows/score_new_plugins.yml b/.github/workflows/score_new_plugins.yml index 8e4c8aec9..1f4c6a176 100644 --- a/.github/workflows/score_new_plugins.yml +++ b/.github/workflows/score_new_plugins.yml @@ -32,10 +32,10 @@ jobs: with: fetch-depth: 0 - - name: Set up Python 3.7 + - name: Set up Python 3.11 uses: actions/setup-python@v4 with: - python-version: 3.7 + python-version: 3.11 - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@v1 @@ -176,10 +176,10 @@ jobs: - name: Check out repository code uses: actions/checkout@v4 - - name: Set up Python 3.7 + - name: Set up Python 3.11 uses: actions/setup-python@v4 with: - python-version: 3.7 + python-version: 3.11 - name: Build project run: | diff --git a/.readthedocs.yml b/.readthedocs.yml index 229a16285..ecc53316a 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -3,7 +3,7 @@ version: 2 build: os: "ubuntu-20.04" tools: - python: "3.7" + python: "3.11" python: install: diff --git a/.travis.yml b/.travis.yml index 69e9e9b03..75196cb31 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,5 +1,6 @@ version: ~> 1.0 language: python +dist: jammy env: global: - PYTEST_SETTINGS="not requires_gpu and not memory_intense and not slow and not travis_slow" @@ -9,7 +10,7 @@ env: - WEB_SUBMISSION="False" before_install: - pip install --upgrade pip -- pip install setuptools==60.5.0 +- pip install setuptools - pip install pytest # download large files - pip install awscli @@ -31,18 +32,18 @@ import: jobs: include: - - name: 3.7 public - python: '3.7.13' - - name: 3.7 private + - name: 3.11 public + python: '3.11' + - name: 3.11 private if: fork = false - python: '3.7.13' + python: '3.11' env: - PRIVATE_ACCESS=1 - secure: f1rWEwrslh7qa2g/QlKs001sGC3uaOxZNQSfNOPj+TMCqEo2c6OzImC4hyz+WqCyc6N/lFT4yYo2RhvaqStHMRmu/+9aZmuH05Bb0KQpfzNFA+yGa/U5WR3/4u6KRvDAeNEi9drT2LuacTyGbldmQsquujK0jrPpFWpe7zUUKv0zb0lJf0zcjeSrZlDXLlgD6DCqow7OqHRvW04dPZVy1OArRwtPV6DJ6Rqo1MqFQGHJ806VPlXhSoydb7a58dhGajqPjomdmZjhd3wS6Lv6uetTE/VVb4EP4e7n0qfZIx/TpnWG0SR44pcP7OCNARWYANsAivzxnQ0shyXnIzOo8ZcPYiPpt/5D53i5idTBxXyuDaHGQvgwuY5XLZzznEedBgZa4OvjxAXlLEQjdVDfSsZeYaV9gyFkeTlLnK1zvWi0US38eF2Qtm3Sx3D/5TtBKK2n38tyK5gg/XvJNycaXvIl7iVcnI2ifpqD1mUWI6C9j9Tk19/XEpWkwaFi91+0LZF1GhjBu8o3G5Np4RIOKXi3TIHkpbMM5mf11T6Bm9LvEMq1h8bgRQigEbeJF8CbUOSVFv+AaXsggGjQhuwdyvy2JZo+tO1nfhi+kW3XrDGPsz1R7Wfqduyn7UUh5OiFymeZwKseYKnwU47KyCqDwrq5Mnx1MlSidnVmPriadR4= - secure: WE7FPwy07VzJTKAd2xwZdBhtmh8jk7ojwk4B2rIcBQu0vwUXc1MgO8tBLD7s08lBedBjqZiLZEW31uPMEyWNysouDt16a5gm2d149LR7flI3MOifBtxINfJuC3eOEG65bPgN/bYEsIpLKnu3469d5nxZkK7xsjbWTxHGoUpLvVPsmHY2ZM5/jftybs7fI0do4NMG2XffKfZbiFb447Ao3xeQeEfW6IkJllzgGnlG9FJATFidrbwDNdmzAnvPEnDoKAf7ZvhPV0x9yR5V6P4Ck5hxl8mlPdBa1cRMO8s/1ag1c7YJ3AF9ZlwcwqTiGsT8DHTVRxSz4nFHJTMlrm9j84u7WzLZJBhPgF0UeLN3AQgiAZ3c2TFDvjQWeHVuSPkV5GrKlfhSvR82s9yPEdHQxxwYymBbAr6rJR4NtXTyZX0vg8NRKHssZKLSafs/D/pt9xXspqu8HAHc+mS0lCips79XptSr5BEsioil3D2io3tbzrGugpTeJ7oEA787vKn2Cm4XmhyQ0UBhvwsPZ351l27wZYuNV07o9Ik83hN/w4o2v899QQ/zbX42Iy8ZUCWOPX7MV7+TA7SMxru3qx7HL5hDM8kTetxbLB6Ckr+JOdX8L2Fb5L3TVDpsvfv0ebXgwaQR/ez8/7bcXmBqcERApHDz73HaMXUap+iDR4FLdXE= - AWS_DEFAULT_REGION=us-east-1 - stage: "Automerge check" - python: '3.7.13' + python: '3.11' install: - pip install --no-cache-dir torch torchvision --default-timeout=1000 --retries=5 - pip install --no-cache-dir -e ".[test]" diff --git a/README.md b/README.md index eae4d140f..e0552605e 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,7 @@ To contribute, please [send in a pull request](https://github.com/brain-score/vi ## Local 
installation -You will need Python = 3.7 and pip >= 18.1. +You will need Python = 3.11 and pip >= 18.1. `pip install git+https://github.com/brain-score/vision` diff --git a/brainscore_vision/benchmark_helpers/__init__.py b/brainscore_vision/benchmark_helpers/__init__.py index eb36e50ca..7eb506115 100644 --- a/brainscore_vision/benchmark_helpers/__init__.py +++ b/brainscore_vision/benchmark_helpers/__init__.py @@ -1,6 +1,7 @@ from typing import Union import numpy as np +import hashlib from brainio.assemblies import NeuroidAssembly, DataAssembly from brainscore_core import Score @@ -18,6 +19,13 @@ def __init__(self, features: Union[DataAssembly, dict], visual_degrees): self.features = features self._visual_degrees = visual_degrees + @property + def identifier(self) -> str: + # serialize the features to a string and create hash + features_data = str(self.features) + features_hash = hashlib.md5(features_data.encode('utf-8')).hexdigest() + return f"precomputed-{features_hash}" + def visual_degrees(self) -> int: return self._visual_degrees diff --git a/brainscore_vision/benchmark_helpers/test_helper.py b/brainscore_vision/benchmark_helpers/test_helper.py index 6e3ad4a03..57d6461f6 100644 --- a/brainscore_vision/benchmark_helpers/test_helper.py +++ b/brainscore_vision/benchmark_helpers/test_helper.py @@ -7,6 +7,7 @@ from brainio.assemblies import NeuroidAssembly, PropertyAssembly from brainscore_vision import load_benchmark from brainscore_vision.model_interface import BrainModel +from brainscore_vision.data_helpers import s3 from . import PrecomputedFeatures @@ -68,6 +69,8 @@ def run_test_properties(self, benchmark: str, files: dict, expected: float): for current_stimulus in stimulus_identifiers: stimulus_set = load_stimulus_set(current_stimulus) path = Path(__file__).parent / files[current_stimulus] + s3.download_file_if_not_exists(local_path=path, + bucket='brainscore-unittests', remote_filepath=f'tests/test_benchmarks/{files[current_stimulus]}') features = PropertyAssembly.from_files(path, stimulus_set_identifier=stimulus_set.identifier, stimulus_set=stimulus_set) diff --git a/brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py b/brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py index 14db4121f..1a2fbbfae 100644 --- a/brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py +++ b/brainscore_vision/benchmarks/coggan2024_behavior/benchmark.py @@ -75,9 +75,12 @@ def __call__(self, candidate: BrainModel) -> Score: data.model_prediction == data.object_class, dtype=int) # get correlation between model and human performance across conditions - performance = (data[data.visibility < 1] + performance = ( + data[data.visibility < 1] .groupby(['subject', 'occluder_type', 'occluder_color']) - .mean(['human_accuracy', 'model_accuracy'])).reset_index() + .mean(numeric_only=True) + .reset_index() + ) scores = performance.groupby('subject').apply( lambda df: np.corrcoef(df.human_accuracy, df.model_accuracy)[0, 1]) score = Score(np.mean(scores)) @@ -100,8 +103,9 @@ def get_noise_ceiling(performance: pd.DataFrame) -> Score: nc = [] for subject in performance.subject.unique(): performance_ind = performance[performance.subject == subject] - performance_grp = (performance[performance.subject != subject] - .groupby(['occluder_type', 'occluder_color']).mean()) + performance_grp = performance[performance.subject != subject] + numeric_cols = performance_grp.select_dtypes(include=np.number).columns + performance_grp = performance_grp.groupby(['occluder_type', 
'occluder_color'])[numeric_cols].mean() merged_df = performance_ind.merge( performance_grp, on=['occluder_type', 'occluder_color']) nc.append(np.corrcoef(merged_df.human_accuracy_x, merged_df.human_accuracy_y)[0, 1]) diff --git a/brainscore_vision/benchmarks/igustibagus2024/domain_transfer_analysis.py b/brainscore_vision/benchmarks/igustibagus2024/domain_transfer_analysis.py index 9a8c07713..da3d662f2 100644 --- a/brainscore_vision/benchmarks/igustibagus2024/domain_transfer_analysis.py +++ b/brainscore_vision/benchmarks/igustibagus2024/domain_transfer_analysis.py @@ -5,6 +5,8 @@ import pandas as pd from sklearn.linear_model import RidgeClassifierCV from sklearn.model_selection import train_test_split +from sklearn.pipeline import Pipeline +from sklearn.preprocessing import StandardScaler from tqdm import tqdm # import brain-score specific libraries @@ -89,7 +91,10 @@ def __call__(self, candidate: BrainModel) -> Score: def OOD_AnalysisBenchmark(): return _OOD_AnalysisBenchmark( - classifier=RidgeClassifierCV(alphas=[0.0001, 0.001, 0.01, 0.1, 1, 10], fit_intercept=True, normalize=True) + classifier=Pipeline([ + ('scaler', StandardScaler()), + ('classifier', RidgeClassifierCV(alphas=[0.0001, 0.001, 0.01, 0.1, 1, 10], fit_intercept=True)) + ]) ) diff --git a/brainscore_vision/benchmarks/kar2019/test.py b/brainscore_vision/benchmarks/kar2019/test.py index b0fece327..34c15b9a9 100644 --- a/brainscore_vision/benchmarks/kar2019/test.py +++ b/brainscore_vision/benchmarks/kar2019/test.py @@ -24,7 +24,7 @@ def test_Kar2019ost_cornet_s(): filename = 'cornet_s-kar2019.nc' filepath = Path(__file__).parent / filename s3.download_file_if_not_exists(local_path=filepath, - bucket='brainio-brainscore', remote_filepath=f'tests/test_benchmarks/{filename}') + bucket='brainscore-unittests', remote_filepath=f'tests/test_benchmarks/{filename}') precomputed_features = NeuroidAssembly.from_files( filepath, stimulus_set_identifier=benchmark._assembly.stimulus_set.identifier, diff --git a/brainscore_vision/benchmarks/majajhong2015/__init__.py b/brainscore_vision/benchmarks/majajhong2015/__init__.py index 24fe8651e..5ae8988fd 100644 --- a/brainscore_vision/benchmarks/majajhong2015/__init__.py +++ b/brainscore_vision/benchmarks/majajhong2015/__init__.py @@ -11,3 +11,8 @@ benchmark_registry['MajajHong2015public.V4-pls'] = MajajHongV4PublicBenchmark benchmark_registry['MajajHong2015public.IT-pls'] = MajajHongITPublicBenchmark + +# temporal +from .benchmark import MajajHongV4TemporalPublicBenchmark, MajajHongITTemporalPublicBenchmark +benchmark_registry['MajajHong2015public.V4-temporal-pls'] = lambda: MajajHongV4TemporalPublicBenchmark(time_interval=10) +benchmark_registry['MajajHong2015public.IT-temporal-pls'] = lambda: MajajHongITTemporalPublicBenchmark(time_interval=10) diff --git a/brainscore_vision/benchmarks/majajhong2015/benchmark.py b/brainscore_vision/benchmarks/majajhong2015/benchmark.py index 766f5c93f..5270ab7af 100644 --- a/brainscore_vision/benchmarks/majajhong2015/benchmark.py +++ b/brainscore_vision/benchmarks/majajhong2015/benchmark.py @@ -1,7 +1,8 @@ from brainscore_core import Metric from brainscore_vision import load_metric, Ceiling, load_ceiling, load_dataset -from brainscore_vision.benchmark_helpers.neural_common import NeuralBenchmark, average_repetition +from brainscore_vision.benchmark_helpers.neural_common import NeuralBenchmark, average_repetition, apply_keep_attrs +from brainscore_vision.model_helpers.brain_transformation.temporal import assembly_time_align VISUAL_DEGREES = 8 NUMBER_OF_TRIALS = 
50 @@ -20,13 +21,14 @@ eprint = {https://www.jneurosci.org/content/35/39/13402.full.pdf}, journal = {Journal of Neuroscience}}""" -pls_metric = lambda: load_metric('pls', crossvalidation_kwargs=dict(stratification_coord='object_name')) - +crossvalidation_kwargs = dict(stratification_coord='object_name') +pls_metric = lambda: load_metric('pls', crossvalidation_kwargs=crossvalidation_kwargs) +spantime_pls_metric = lambda: load_metric('spantime_pls', crossvalidation_kwargs=crossvalidation_kwargs) def _DicarloMajajHong2015Region(region: str, access: str, identifier_metric_suffix: str, - similarity_metric: Metric, ceiler: Ceiling): - assembly_repetition = load_assembly(average_repetitions=False, region=region, access=access) - assembly = load_assembly(average_repetitions=True, region=region, access=access) + similarity_metric: Metric, ceiler: Ceiling, time_interval: float = None): + assembly_repetition = load_assembly(average_repetitions=False, region=region, access=access, time_interval=time_interval) + assembly = load_assembly(average_repetitions=True, region=region, access=access, time_interval=time_interval) benchmark_identifier = f'MajajHong2015.{region}' + ('.public' if access == 'public' else '') return NeuralBenchmark(identifier=f'{benchmark_identifier}-{identifier_metric_suffix}', version=3, assembly=assembly, similarity_metric=similarity_metric, @@ -60,13 +62,35 @@ def MajajHongITPublicBenchmark(): ceiler=load_ceiling('internal_consistency')) -def load_assembly(average_repetitions, region, access='private'): - assembly = load_dataset(f'MajajHong2015.{access}') +def MajajHongV4TemporalPublicBenchmark(time_interval: float = None): + return _DicarloMajajHong2015Region(region='V4', access='public', identifier_metric_suffix='pls', + similarity_metric=spantime_pls_metric(), time_interval=time_interval, + ceiler=load_ceiling('internal_consistency_temporal')) + + +def MajajHongITTemporalPublicBenchmark(time_interval: float = None): + return _DicarloMajajHong2015Region(region='IT', access='public', identifier_metric_suffix='pls', + similarity_metric=spantime_pls_metric(), time_interval=time_interval, + ceiler=load_ceiling('internal_consistency_temporal')) + + +def load_assembly(average_repetitions: bool, region: str, access: str = 'private', time_interval: float = None): + temporal = time_interval is not None + if not temporal: + assembly = load_dataset(f'MajajHong2015.{access}') + assembly = assembly.squeeze("time_bin") + else: + assembly = load_dataset(f'MajajHong2015.temporal.{access}') + assembly = assembly.__class__(assembly) + target_time_bins = [ + (t, t+time_interval) for t in range(0, assembly.time_bin_end.max().item()-time_interval, time_interval) + ] + assembly = apply_keep_attrs(assembly, lambda assembly: assembly_time_align(assembly, target_time_bins)) + assembly = assembly.sel(region=region) assembly['region'] = 'neuroid', [region] * len(assembly['neuroid']) - assembly = assembly.squeeze("time_bin") assembly.load() - assembly = assembly.transpose('presentation', 'neuroid') + assembly = assembly.transpose('presentation', 'neuroid', ...) 
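+    # worked example (editorial note, hypothetical values, not in the original patch):
+    # with time_interval=10 and time_bin_end.max() == 70, the temporal branch above
+    # builds target_time_bins == [(0, 10), (10, 20), (20, 30), (30, 40), (40, 50), (50, 60)],
+    # onto which assembly_time_align then resamples the assembly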
if average_repetitions: assembly = average_repetition(assembly) return assembly diff --git a/brainscore_vision/benchmarks/rajalingham2018/test.py b/brainscore_vision/benchmarks/rajalingham2018/test.py index 7a7e96388..2ff9d38a0 100644 --- a/brainscore_vision/benchmarks/rajalingham2018/test.py +++ b/brainscore_vision/benchmarks/rajalingham2018/test.py @@ -7,7 +7,7 @@ from pytest import approx from brainio.assemblies import BehavioralAssembly -from brainscore_vision import benchmark_registry, load_benchmark, load_metric +from brainscore_vision import benchmark_registry, load_benchmark, load_metric, load_model from brainscore_vision.benchmark_helpers import PrecomputedFeatures from brainscore_vision.benchmark_helpers.test_helper import VisualDegreesTests, NumberOfTrialsTests from brainscore_vision.benchmarks.rajalingham2018 import DicarloRajalingham2018I2n @@ -115,44 +115,11 @@ class TestMetricScore: @pytest.mark.parametrize(['model', 'expected_score'], [ ('alexnet', .253), - ('resnet34', .37787), - ('resnet18', .3638), + ('resnet50_tutorial', 0.348), + ('pixels', 0.0139) ]) def test_model(self, model, expected_score): - class UnceiledBenchmark(_DicarloRajalingham2018): - def __init__(self): - metric = load_metric('i2n') - super(UnceiledBenchmark, self).__init__(metric=metric, metric_identifier='i2n') - - def __call__(self, candidate: BrainModel): - candidate.start_task(BrainModel.Task.probabilities, self._fitting_stimuli) - probabilities = candidate.look_at(self._assembly.stimulus_set) - score = self._metric(probabilities, self._assembly) - return score - - benchmark = UnceiledBenchmark() - # features - feature_responses = xr.load_dataarray(Path(__file__).parent / 'test_resources' / - f'identifier={model},stimuli_identifier=objectome-240.nc') - feature_responses['stimulus_id'] = 'stimulus_path', [os.path.splitext(os.path.basename(path))[0] - for path in feature_responses['stimulus_path'].values] - feature_responses = feature_responses.stack(presentation=['stimulus_path']) - assert len(np.unique(feature_responses['layer'])) == 1 # only penultimate layer - - class PrecomputedFeatures: - def __init__(self, precomputed_features): - self.features = precomputed_features - - def __call__(self, stimuli, layers): - np.testing.assert_array_equal(layers, ['behavioral-layer']) - self_stimulus_ids = self.features['stimulus_id'].values.tolist() - indices = [self_stimulus_ids.index(stimulus_id) for stimulus_id in stimuli['stimulus_id'].values] - features = self.features[{'presentation': indices}] - return features - - # evaluate candidate - transformation = ProbabilitiesMapping(identifier=f'TestI2N.{model}', - activations_model=PrecomputedFeatures(feature_responses), - layer='behavioral-layer') - score = benchmark(transformation) - assert score == approx(expected_score, abs=0.005), f"expected {expected_score}, but got {score}" + benchmark = load_benchmark('Rajalingham2018-i2n') + model = load_model(model) + score = benchmark(model) + assert score.raw == approx(expected_score, abs=0.005), f"expected {expected_score}, but got {score.raw}" diff --git a/brainscore_vision/benchmarks/rajalingham2020/test.py b/brainscore_vision/benchmarks/rajalingham2020/test.py index 6af813946..40b6226d5 100644 --- a/brainscore_vision/benchmarks/rajalingham2020/test.py +++ b/brainscore_vision/benchmarks/rajalingham2020/test.py @@ -35,5 +35,5 @@ def test_Rajalingham2020(benchmark, expected): filename = 'alexnet-rajalingham2020-features.12.nc' filepath = Path(__file__).parent / filename 
s3.download_file_if_not_exists(local_path=filepath, - bucket='brainio-brainscore', remote_filepath=f'tests/test_benchmarks/{filename}') + bucket='brainscore-unittests', remote_filepath=f'tests/test_benchmarks/{filename}') precomputed_test.run_test(benchmark=benchmark, precomputed_features_filepath=filepath, expected=expected) diff --git a/brainscore_vision/benchmarks/sanghavi2020/test.py b/brainscore_vision/benchmarks/sanghavi2020/test.py index b65f08f63..ac6fe79b3 100644 --- a/brainscore_vision/benchmarks/sanghavi2020/test.py +++ b/brainscore_vision/benchmarks/sanghavi2020/test.py @@ -66,7 +66,7 @@ def test_self_regression(benchmark, visual_degrees, expected): def test_model_features(benchmark, filename, expected): filepath = Path(__file__).parent / filename s3.download_file_if_not_exists(local_path=filepath, - bucket='brainio-brainscore', remote_filepath=f'tests/test_benchmarks/{filename}') + bucket='brainscore-unittests', remote_filepath=f'tests/test_benchmarks/{filename}') precomputed_test.run_test(benchmark=benchmark, precomputed_features_filepath=filepath, expected=expected) diff --git a/brainscore_vision/data/geirhos2021/test.py b/brainscore_vision/data/geirhos2021/test.py index bdc2052af..41762008d 100644 --- a/brainscore_vision/data/geirhos2021/test.py +++ b/brainscore_vision/data/geirhos2021/test.py @@ -62,7 +62,7 @@ def test_stimulus_set_assembly_alignment(self, identifier, field): full_name = f"Geirhos2021_{identifier}" assembly = load_dataset(full_name) assert assembly.stimulus_set is not None - assert assembly.stimulus_set.identifier == f"{full_name}" + assert assembly.stimulus_set.identifier == full_name assert set(assembly.stimulus_set[field]) == set(assembly[field].values) # test the number of subjects: @@ -236,7 +236,7 @@ def test_stimulus_set_exist(self, identifier): full_name = f"Geirhos2021_{identifier}" stimulus_set = load_stimulus_set(full_name) assert stimulus_set is not None - assert stimulus_set.identifier == full_name + assert stimulus_set.identifier == f"{full_name}" # test the number of images @pytest.mark.parametrize('identifier, num_images', [ diff --git a/brainscore_vision/data/scialom2024/test.py b/brainscore_vision/data/scialom2024/test.py index dbc38b3b3..657376d1a 100644 --- a/brainscore_vision/data/scialom2024/test.py +++ b/brainscore_vision/data/scialom2024/test.py @@ -258,7 +258,7 @@ def test_stimulus_set_exists(self, identifier): ]) def test_number_of_images(self, identifier, num_images): stimulus_set = load_stimulus_set(identifier) - assert len(np.unique(stimulus_set['image_id'].values)) == num_images + assert len(np.unique(stimulus_set['stimulus_id'].values)) == num_images # test assembly coords present in ALL 17 sets: @pytest.mark.parametrize('identifier', [ diff --git a/brainscore_vision/metric_helpers/temporal.py b/brainscore_vision/metric_helpers/temporal.py new file mode 100644 index 000000000..0c110b9f2 --- /dev/null +++ b/brainscore_vision/metric_helpers/temporal.py @@ -0,0 +1,119 @@ +import xarray as xr +import numpy as np + +from brainscore_vision.benchmark_helpers.neural_common import Score +from brainscore_vision.metric_helpers.transformations import standard_error_of_the_mean + +from .xarray_utils import apply_over_dims, recursive_op + + +# take the mean of scores (medians of single neuron scores) over presentations + + +def average_over_presentation(score: Score) -> Score: + raw = score + score = raw.mean('presentation') + score.attrs['raw'] = raw + return score + + +# PerOps is applied to every slice/chunk of the xarray along the specified
dimensions +class PerOps: + def __init__(self, callable, dims, check_coords=[]): + # for coordinate checking, they are supposed to be the same across assemblies + self.dims = dims + self.callable = callable + self.check_coords = check_coords + + def __call__(self, *asms): + for check_coord in self.check_coords: + asms = [asm.sortby(check_coord) for asm in asms] + for asm in asms[1:]: + assert (asm[check_coord].values == asms[0][check_coord].values).all() + ret = apply_over_dims(self.callable, *asms, dims=self.dims) + return ret + + +# SpanOps aggregates specified dimensions to one dimension +class SpanOps: + def __init__(self, callable, source_dims, aggregated_dim, resample=False): + # if resample, randomly choose samples from the aggregated dimension, + # whose size is the same as the assembly.sizes[aggregated_dim] + self.source_dims = source_dims + self.aggregated_dim = aggregated_dim + self.callable = callable + self.resample = resample + + def __call__(self, *asms): + asms = [self._stack(asm) for asm in asms] + return self.callable(*asms) + + def _stack(self, assembly): + assembly_type = type(assembly) + size = assembly.sizes[self.aggregated_dim] + assembly = xr.DataArray(assembly) # xarray cannot deal with stacking MultiIndex (pydata/xarray#1554) + assembly = assembly.reset_index(self.source_dims) + assembly = assembly.rename({dim:dim+"_" for dim in self.source_dims}) # suffix source dims to avoid coord name clashes before stacking them into the aggregated dim + assembly = assembly.stack({self.aggregated_dim : [dim+"_" for dim in self.source_dims]}) + if self.resample: + indices = np.random.randint(0, assembly.sizes[self.aggregated_dim], size) + assembly = assembly.isel({self.aggregated_dim: indices}) + return assembly_type(assembly) + +class PerTime(PerOps): + def __init__(self, callable, time_dim="time_bin", check_coord="time_bin_start", **kwargs): + self.time_dim = time_dim + super().__init__(callable, dims=[time_dim], check_coords=[check_coord], **kwargs) + +class PerPresentation(PerOps): + def __init__(self, callable, presentation_dim="presentation", check_coord="stimulus_id", **kwargs): + self.presentation_dim = presentation_dim + super().__init__(callable, dims=[presentation_dim], check_coords=[check_coord], **kwargs) + +class PerNeuroid(PerOps): + def __init__(self, callable, neuroid_dim="neuroid", check_coord="neuroid_id", **kwargs): + self.neuroid_dim = neuroid_dim + super().__init__(callable, dims=[neuroid_dim], check_coords=[check_coord], **kwargs) + +class SpanTime(SpanOps): + def __init__(self, callable, time_dim="time_bin", presentation_dim="presentation", resample=False): + self.time_dim = time_dim + self.presentation_dim = presentation_dim + source_dims = [self.time_dim, self.presentation_dim] + aggregated_dim = self.presentation_dim + super().__init__(callable, source_dims, aggregated_dim, resample=resample) + +class SpanTimeRegression: + """ + Fits a regression with weights shared across the time bins. + """ + + def __init__(self, regression): + self._regression = regression + + def fit(self, source, target): + assert (source['time_bin'].values == target['time_bin'].values).all() + SpanTime(self._regression.fit)(source, target) + + def predict(self, source): + return PerTime(self._regression.predict)(source) + +class PerTimeRegression: + """ + Fits a regression with different weights for each time bin.
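+    Fitting is deferred until predict: for each time bin, the regression is
+    fit on the stored training data and immediately used to predict on the
+    test source (see fit_predict below).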
+ """ + + def __init__(self, regression): + self._regression = regression + + def fit(self, source, target): + # Lazy fit until predict + assert (source['time_bin'].values == target['time_bin'].values).all() + self._train_source = source + self._train_target = target + + def predict(self, source): + def fit_predict(train_source, train_target, test_source): + self._regression.fit(train_source, train_target) + return self._regression.predict(test_source) + return PerTime(fit_predict)(self._train_source, self._train_target, source) \ No newline at end of file diff --git a/brainscore_vision/metric_helpers/xarray_utils.py b/brainscore_vision/metric_helpers/xarray_utils.py index ce67654ff..8998b6003 100644 --- a/brainscore_vision/metric_helpers/xarray_utils.py +++ b/brainscore_vision/metric_helpers/xarray_utils.py @@ -1,4 +1,5 @@ import numpy as np +import xarray as xr from brainio.assemblies import NeuroidAssembly, array_is_element, walk_coords from brainscore_vision.metric_helpers import Defaults @@ -90,3 +91,61 @@ def __call__(self, prediction, target): for coord, dims, values in walk_coords(target) if dims == neuroid_dims}, dims=neuroid_dims) return result + + +# ops that also applies to attrs (and attrs of attrs), which are xarrays +def recursive_op(*arrs, op=lambda x:x): + # the attrs structure of each arr must be the same + val = op(*arrs) + attrs = arrs[0].attrs + for attr in attrs: + attr_val = arrs[0].attrs[attr] + if isinstance(attr_val, xr.DataArray): + attr_arrs = [arr.attrs[attr] for arr in arrs] + attr_val = recursive_op(*attr_arrs, op=op) + val.attrs[attr] = attr_val + return val + + +# apply a callable to every slice of the xarray along the specified dimensions +def apply_over_dims(callable, *asms, dims, njobs=-1): + asms = [asm.transpose(*dims, ...) for asm in asms] + sizes = [asms[0].sizes[dim] for dim in dims] + + def apply_helper(sizes, dims, *asms): + xarr = [] + attrs = {} + size = sizes[0] + rsizes = sizes[1:] + dim = dims[0] + rdims = dims[1:] + + if len(sizes) == 1: + # parallel execution on the last applied dimension + from joblib import Parallel, delayed + results = Parallel(n_jobs=njobs)(delayed(callable)(*[asm.isel({dim:s}) for asm in asms]) for s in range(size)) + else: + results = [] + for s in range(size): + arr = apply_helper(rsizes, rdims, *[asm.isel({dim:s}) for asm in asms]) + results.append(arr) + + for arr in results: + if arr is not None: + for k,v in arr.attrs.items(): + assert isinstance(v, xr.DataArray) + attrs.setdefault(k, []).append(v.expand_dims(dim)) + xarr.append(arr) + + if not xarr: + return + else: + xarr = xr.concat(xarr, dim=dim) + attrs = {k: xr.concat(vs, dim=dim) for k,vs in attrs.items()} + xarr.coords[dim] = asms[0].coords[dim] + for k,v in attrs.items(): + attrs[k].coords[dim] = asms[0].coords[dim] + xarr.attrs[k] = attrs[k] + return xarr + + return apply_helper(sizes, dims, *asms) \ No newline at end of file diff --git a/brainscore_vision/metrics/accuracy_distance/metric.py b/brainscore_vision/metrics/accuracy_distance/metric.py index fb31a7280..eb47e3bba 100644 --- a/brainscore_vision/metrics/accuracy_distance/metric.py +++ b/brainscore_vision/metrics/accuracy_distance/metric.py @@ -10,17 +10,52 @@ class AccuracyDistance(Metric): """ - Computes the accuracy distance using the relative distance between the source and target accuracies, adjusted - for the maximum possible difference between the two accuracies. 
+ Computes the accuracy distance using the relative distance between the + source and target accuracies, adjusted for the maximum possible + difference between the two accuracies. By default, the distance is computed + from a single accuracy score on the entire BehavioralAssembly. However, + the distance can also be computed on a condition-wise basis using the + 'variables' argument. The advantage of the condition-wise approach is that + it can separate two models with identical overall accuracy if one exhibits a + more target-like pattern of performance across conditions. """ - def __call__(self, source: BehavioralAssembly, target: BehavioralAssembly) -> Score: + def __call__(self, source: BehavioralAssembly, target: + BehavioralAssembly, variables: tuple=()) -> Score: """Target should be the entire BehavioralAssembly, containing truth values.""" subjects = self.extract_subjects(target) subject_scores = [] for subject in subjects: subject_assembly = target.sel(subject=subject) - subject_score = self.compare_single_subject(source, subject_assembly) + + # compute single score across the entire dataset + if len(variables) == 0: + subject_score = self.compare_single_subject(source, subject_assembly) + + # compute scores for each condition, then average + else: + cond_scores = [] + + # get iterator across all combinations of variables + if len(variables) == 1: + conditions = set(subject_assembly[variables[0]].values) + conditions = [[c] for c in conditions] # to mimic itertools.product + else: + conditions = itertools.product( + *[set(subject_assembly[v].values) for v in variables]) + + # loop over conditions and compute scores + for cond in conditions: + indexers = {v: cond[i] for i, v in enumerate(variables)} + subject_cond_assembly = subject_assembly.sel(**indexers) + source_cond_assembly = source.sel(**indexers) + # to accommodate unbalanced designs, skip combinations of + # variables that don't exist in both assemblies + if len(subject_cond_assembly) and len(source_cond_assembly): + cond_scores.append(self.compare_single_subject( + source_cond_assembly, subject_cond_assembly)) + subject_score = Score(np.mean(cond_scores)) + + subject_score = subject_score.expand_dims('subject') subject_score['subject'] = 'subject', [subject] subject_scores.append(subject_score) diff --git a/brainscore_vision/metrics/accuracy_distance/test.py b/brainscore_vision/metrics/accuracy_distance/test.py index 2fc15b792..d6414b790 100644 --- a/brainscore_vision/metrics/accuracy_distance/test.py +++ b/brainscore_vision/metrics/accuracy_distance/test.py @@ -12,6 +12,20 @@ def test_score(): assert score == approx(0.74074074) +def test_score_single_variable(): + assembly = _make_data() + metric = load_metric('accuracy_distance') + score = metric(assembly.sel(subject='C'), assembly, ('condition',)) + assert score == approx(0.55555556) + + +def test_score_multi_variable(): + assembly = _make_data() + metric = load_metric('accuracy_distance') + score = metric(assembly.sel(subject='C'), assembly, ('condition','animacy')) + assert score == approx(0.55555556) + + def test_has_error(): assembly = _make_data() metric = load_metric('accuracy_distance') @@ -38,5 +52,6 @@ def _make_data(): coords={'stimulus_id': ('presentation', np.resize(np.arange(9), 9 * 3)), 'truth': ('presentation', np.resize(['dog', 'cat', 'chair'], 9 * 3)), 'condition': ('presentation', np.resize([1, 1, 1, 2, 2, 2, 3, 3, 3], 9 * 3)), + 'animacy': ('presentation', np.resize(['animate', 'animate', 'inanimate'], 9 * 3)), 'subject': ('presentation', ['A'] * 9 +
['B'] * 9 + ['C'] * 9)}, dims=['presentation']) diff --git a/brainscore_vision/metrics/internal_consistency/__init__.py b/brainscore_vision/metrics/internal_consistency/__init__.py index bd71776be..ae6a41ea6 100644 --- a/brainscore_vision/metrics/internal_consistency/__init__.py +++ b/brainscore_vision/metrics/internal_consistency/__init__.py @@ -1,4 +1,8 @@ from brainscore_vision import metric_registry from .ceiling import InternalConsistency +from brainscore_vision.metric_helpers.temporal import PerTime + + metric_registry['internal_consistency'] = InternalConsistency +metric_registry['internal_consistency_temporal'] = lambda *args, **kwargs: PerTime(InternalConsistency(*args, **kwargs)) \ No newline at end of file diff --git a/brainscore_vision/metrics/internal_consistency/test.py b/brainscore_vision/metrics/internal_consistency/test.py index 6ccd597c3..3c00657fb 100644 --- a/brainscore_vision/metrics/internal_consistency/test.py +++ b/brainscore_vision/metrics/internal_consistency/test.py @@ -19,7 +19,7 @@ def test_dummy_data(self): dims=['presentation', 'neuroid']) ceiler = load_ceiling('internal_consistency') ceiling = ceiler(data) - assert ceiling == 1 + assert ceiling.item() == approx(1, abs=1e-8) class TestSplitHalfConsistency: diff --git a/brainscore_vision/metrics/ost/metric.py b/brainscore_vision/metrics/ost/metric.py index 7093781e7..92f7eb9ed 100644 --- a/brainscore_vision/metrics/ost/metric.py +++ b/brainscore_vision/metrics/ost/metric.py @@ -63,7 +63,7 @@ def compute_osts(self, train_source, test_source, test_osts): break # stop early if threshold is already hit for every image # interpolate - predicted_osts = np.empty(len(test_osts), dtype=np.float) + predicted_osts = np.empty(len(test_osts), dtype=np.float64) predicted_osts[:] = np.nan for i, (last_ost, hit_ost) in enumerate(zip(last_osts, hit_osts)): if hit_ost is None: diff --git a/brainscore_vision/metrics/regression_correlation/__init__.py b/brainscore_vision/metrics/regression_correlation/__init__.py index 2f8019b3f..691e82685 100644 --- a/brainscore_vision/metrics/regression_correlation/__init__.py +++ b/brainscore_vision/metrics/regression_correlation/__init__.py @@ -11,6 +11,15 @@ metric_registry['linear_predictivity'] = lambda *args, **kwargs: CrossRegressedCorrelation( regression=linear_regression(), correlation=pearsonr_correlation(), *args, **kwargs) +# temporal metrics +from .metric import SpanTimeCrossRegressedCorrelation + +metric_registry['spantime_pls'] = lambda *args, **kwargs: SpanTimeCrossRegressedCorrelation( + regression=pls_regression(), correlation=pearsonr_correlation(), *args, **kwargs) +metric_registry['spantime_ridge'] = lambda *args, **kwargs: SpanTimeCrossRegressedCorrelation( + regression=ridge_regression(), correlation=pearsonr_correlation(), *args, **kwargs) + + BIBTEX = """@article{schrimpf2018brain, title={Brain-score: Which artificial neural network for object recognition is most brain-like?}, author={Schrimpf, Martin and Kubilius, Jonas and Hong, Ha and Majaj, Najib J and Rajalingham, Rishi and Issa, Elias B and Kar, Kohitij and Bashivan, Pouya and Prescott-Roy, Jonathan and Geiger, Franziska and others}, diff --git a/brainscore_vision/metrics/regression_correlation/metric.py b/brainscore_vision/metrics/regression_correlation/metric.py index 365f63868..a09ba03e0 100644 --- a/brainscore_vision/metrics/regression_correlation/metric.py +++ b/brainscore_vision/metrics/regression_correlation/metric.py @@ -8,6 +8,7 @@ from brainscore_core.metrics import Metric, Score from 
brainscore_vision.metric_helpers.transformations import CrossValidation from brainscore_vision.metric_helpers.xarray_utils import XarrayRegression, XarrayCorrelation +from brainscore_vision.metric_helpers.temporal import SpanTimeRegression, PerTime class CrossRegressedCorrelation(Metric): @@ -65,6 +66,15 @@ def predict(self, X): return Ypred +# make the CrossRegressedCorrelation consider time as a sample dimension +def SpanTimeCrossRegressedCorrelation(regression, correlation, *args, **kwargs): + return CrossRegressedCorrelation( + regression=SpanTimeRegression(regression), + correlation=PerTime(correlation), + *args, **kwargs + ) + + def pls_regression(regression_kwargs=None, xarray_kwargs=None): regression_defaults = dict(n_components=25, scale=False) regression_kwargs = {**regression_defaults, **(regression_kwargs or {})} diff --git a/brainscore_vision/model_helpers/activations/__init__.py b/brainscore_vision/model_helpers/activations/__init__.py index 10f514697..40a84e464 100644 --- a/brainscore_vision/model_helpers/activations/__init__.py +++ b/brainscore_vision/model_helpers/activations/__init__.py @@ -1,3 +1 @@ -from brainscore_vision.model_helpers.activations.keras import KerasWrapper, preprocess as preprocess_keras from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper, preprocess_images as preprocess_pytorch -from brainscore_vision.model_helpers.activations.tensorflow import TensorflowWrapper, TensorflowSlimWrapper diff --git a/brainscore_vision/model_helpers/activations/core.py b/brainscore_vision/model_helpers/activations/core.py index a9f537250..58f8baefc 100644 --- a/brainscore_vision/model_helpers/activations/core.py +++ b/brainscore_vision/model_helpers/activations/core.py @@ -348,7 +348,7 @@ def translate_images(self, images: List[Union[str, np.ndarray]], image_paths: Li """ Translate images according to selected microsaccades, if microsaccades are required. - :param images: A list of images (in the case of tensorflow models), or a list of arrays (non-tf models). + :param images: A list of arrays. :param image_paths: A list of image paths. Both `image_paths` and `images` are needed since while both tf and non-tf models preprocess images before this point, non-tf models' preprocessed images are fixed as arrays when fed into here.
As such, simply returning `image_paths` for @@ -519,14 +519,9 @@ def translate(image: np.array, shift: Tuple[float, float], image_shape: Tuple[in return translated_image @staticmethod - def get_image_with_shape(image: Union[str, np.ndarray]) -> Tuple[np.array, Tuple[int, int], bool]: - if isinstance(image, str): # tf models return strings after preprocessing - image = cv2.imread(image) - rows, cols, _ = image.shape # cv2 uses height, width, channels - image_is_channels_first = False - else: - _, rows, cols, = image.shape # pytorch and keras use channels, height, width - image_is_channels_first = True + def get_image_with_shape(image: np.ndarray) -> Tuple[np.array, Tuple[int, int], bool]: + _, rows, cols, = image.shape # pytorch uses channels, height, width + image_is_channels_first = True return image, (rows, cols), image_is_channels_first @staticmethod diff --git a/brainscore_vision/model_helpers/activations/keras.py b/brainscore_vision/model_helpers/activations/keras.py deleted file mode 100644 index 8d1acf4d7..000000000 --- a/brainscore_vision/model_helpers/activations/keras.py +++ /dev/null @@ -1,92 +0,0 @@ -from collections import OrderedDict - -import numpy as np - -from brainscore_vision.model_helpers.activations.core import ActivationsExtractorHelper - - -class KerasWrapper: - def __init__(self, model, preprocessing, identifier=None, *args, **kwargs): - """ - :param model: a keras model with a function `preprocess_input` - that will later be called on the loaded numpy image - """ - self._model = model - identifier = identifier or model.name - self._extractor = ActivationsExtractorHelper( - identifier=identifier, get_activations=self.get_activations, preprocessing=preprocessing, - *args, **kwargs) - self._extractor.insert_attrs(self) - - @property - def identifier(self): - return self._extractor.identifier - - @identifier.setter - def identifier(self, value): - self._extractor.identifier = value - - def __call__(self, *args, **kwargs): # cannot assign __call__ as attribute due to Python convention - return self._extractor(*args, **kwargs) - - def get_activations(self, images, layer_names): - from keras import backend as K - input_tensor = self._model.input - layers = [layer for layer in self._model.layers if layer.name in layer_names] - layers = sorted(layers, key=lambda layer: layer_names.index(layer.name)) - if 'logits' in layer_names: - layers.insert(layer_names.index('logits'), self._model.layers[-1]) - assert len(layers) == len(layer_names) - layer_out_tensors = [layer.output for layer in layers] - functor = K.function([input_tensor] + [K.learning_phase()], layer_out_tensors) # evaluate all tensors at once - layer_outputs = functor([images, 0.]) # 0 to signal testing phase - return OrderedDict([(layer_name, layer_output) for layer_name, layer_output in zip(layer_names, layer_outputs)]) - - def __repr__(self): - return repr(self._model) - - def graph(self): - import networkx as nx - g = nx.DiGraph() - for layer in self._model.layers: - g.add_node(layer.name, object=layer, type=type(layer)) - for outbound_node in layer._outbound_nodes: - g.add_edge(layer.name, outbound_node.outbound_layer.name) - return g - - -def load_images(image_filepaths, image_size): - images = [load_image(image_filepath) for image_filepath in image_filepaths] - images = [scale_image(image, image_size) for image in images] - return np.array(images) - - -def load_image(image_filepath): - try: # keras API before tensorflow 2.9.1 - from keras.preprocessing.image import load_img - from keras.preprocessing.image 
import img_to_array - except ImportError: - from tensorflow.keras.utils import load_img - from tensorflow.keras.utils import img_to_array - img = load_img(image_filepath) - x = img_to_array(img) - return x - - -def scale_image(img, image_size): - from PIL import Image - try: # keras API before tensorflow 2.9.1 - from keras.preprocessing.image import img_to_array - except ImportError: - from tensorflow.keras.utils import img_to_array - img = Image.fromarray(img.astype(np.uint8)) - img = img.resize((image_size, image_size)) - img = img_to_array(img) - return img - - -def preprocess(image_filepaths, image_size, *args, **kwargs): - # only a wrapper to avoid top-level keras imports - from keras.applications.imagenet_utils import preprocess_input - images = load_images(image_filepaths, image_size=image_size) - return preprocess_input(images, *args, **kwargs) diff --git a/brainscore_vision/model_helpers/activations/temporal/inputs/base.py b/brainscore_vision/model_helpers/activations/temporal/inputs/base.py index d656a86b7..c94ccd3d7 100644 --- a/brainscore_vision/model_helpers/activations/temporal/inputs/base.py +++ b/brainscore_vision/model_helpers/activations/temporal/inputs/base.py @@ -15,4 +15,3 @@ def is_video_path(path: Union[str, Path]) -> bool: def is_image_path(path: Union[str, Path]) -> bool: extension = path.split('.')[-1].lower() return extension in ['jpg', 'jpeg', 'png', 'bmp', 'tiff'] - \ No newline at end of file diff --git a/brainscore_vision/model_helpers/activations/tensorflow.py b/brainscore_vision/model_helpers/activations/tensorflow.py deleted file mode 100644 index d5e4864d5..000000000 --- a/brainscore_vision/model_helpers/activations/tensorflow.py +++ /dev/null @@ -1,71 +0,0 @@ -from collections import OrderedDict - -from brainscore_vision.model_helpers.activations.core import ActivationsExtractorHelper - - -class TensorflowWrapper: - def __init__(self, identifier, inputs, endpoints: dict, session, *args, **kwargs): - import tensorflow as tf - self._inputs = inputs - self._endpoints = endpoints - self._session = session or tf.compat.v1.Session() - self._extractor = ActivationsExtractorHelper(identifier=identifier, get_activations=self.get_activations, - preprocessing=None, *args, **kwargs) - self._extractor.insert_attrs(self) - - @property - def identifier(self): - return self._extractor.identifier - - @identifier.setter - def identifier(self, value): - self._extractor.identifier = value - - def __call__(self, *args, **kwargs): # cannot assign __call__ as attribute due to Python convention - return self._extractor(*args, **kwargs) - - def get_activations(self, images, layer_names): - layer_tensors = OrderedDict((layer, self._endpoints[ - layer if (layer != 'logits' or layer in self._endpoints) else next(reversed(self._endpoints))]) - for layer in layer_names) - layer_outputs = self._session.run(layer_tensors, feed_dict={self._inputs: images}) - return layer_outputs - - def graph(self): - import networkx as nx - g = nx.DiGraph() - for name, layer in self._endpoints.items(): - g.add_node(name, object=layer, type=type(layer)) - g.add_node("logits", object=self.logits, type=type(self.logits)) - return g - - -class TensorflowSlimWrapper(TensorflowWrapper): - def __init__(self, *args, labels_offset=1, **kwargs): - super(TensorflowSlimWrapper, self).__init__(*args, **kwargs) - self._labels_offset = labels_offset - - def get_activations(self, images, layer_names): - layer_outputs = super(TensorflowSlimWrapper, self).get_activations(images, layer_names) - if 'logits' in 
layer_outputs: - layer_outputs['logits'] = layer_outputs['logits'][:, self._labels_offset:] - return layer_outputs - - -def load_image(image_filepath): - import tensorflow as tf - image = tf.io.read_file(image_filepath) - image = tf.image.decode_png(image, channels=3) - return image - - -def resize_image(image, image_size): - import tensorflow as tf - image = tf.image.resize(image, (image_size, image_size)) - return image - - -def load_resize_image(image_path, image_size): - image = load_image(image_path) - image = resize_image(image, image_size) - return image diff --git a/brainscore_vision/models/bp_resnet50_julios/setup.py b/brainscore_vision/models/bp_resnet50_julios/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/bp_resnet50_julios/setup.py +++ b/brainscore_vision/models/bp_resnet50_julios/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/cornet_s_ynshah/setup.py b/brainscore_vision/models/cornet_s_ynshah/setup.py index 68362b48b..aa18ce8a3 100644 --- a/brainscore_vision/models/cornet_s_ynshah/setup.py +++ b/brainscore_vision/models/cornet_s_ynshah/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/dbp_resnet50_julios/setup.py b/brainscore_vision/models/dbp_resnet50_julios/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/dbp_resnet50_julios/setup.py +++ b/brainscore_vision/models/dbp_resnet50_julios/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_Vanilla/setup.py b/brainscore_vision/models/eBarlow_Vanilla/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_Vanilla/setup.py +++ b/brainscore_vision/models/eBarlow_Vanilla/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_Vanilla_1/setup.py b/brainscore_vision/models/eBarlow_Vanilla_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_Vanilla_1/setup.py +++ b/brainscore_vision/models/eBarlow_Vanilla_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_Vanilla_2/setup.py b/brainscore_vision/models/eBarlow_Vanilla_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_Vanilla_2/setup.py +++ b/brainscore_vision/models/eBarlow_Vanilla_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_augself_linear_1/setup.py b/brainscore_vision/models/eBarlow_augself_linear_1/setup.py index 421914cfb..c286567f5 100644 --- 
a/brainscore_vision/models/eBarlow_augself_linear_1/setup.py +++ b/brainscore_vision/models/eBarlow_augself_linear_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_augself_mlp_1/setup.py b/brainscore_vision/models/eBarlow_augself_mlp_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_augself_mlp_1/setup.py +++ b/brainscore_vision/models/eBarlow_augself_mlp_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_0001_1/setup.py b/brainscore_vision/models/eBarlow_lmda_0001_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_0001_1/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_0001_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_001_1/setup.py b/brainscore_vision/models/eBarlow_lmda_001_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_001_1/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_001_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_001_2/setup.py b/brainscore_vision/models/eBarlow_lmda_001_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_001_2/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_001_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_001_3/setup.py b/brainscore_vision/models/eBarlow_lmda_001_3/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_001_3/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_001_3/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_01/setup.py b/brainscore_vision/models/eBarlow_lmda_01/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_01/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_01/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_01_1/setup.py b/brainscore_vision/models/eBarlow_lmda_01_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_01_1/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_01_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python 
:: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_01_2/setup.py b/brainscore_vision/models/eBarlow_lmda_01_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_01_2/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_01_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_02_1/setup.py b/brainscore_vision/models/eBarlow_lmda_02_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_02_1/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_02_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_03_1/setup.py b/brainscore_vision/models/eBarlow_lmda_03_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_03_1/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_03_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_04_1/setup.py b/brainscore_vision/models/eBarlow_lmda_04_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_04_1/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_04_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eBarlow_lmda_05_1/setup.py b/brainscore_vision/models/eBarlow_lmda_05_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eBarlow_lmda_05_1/setup.py +++ b/brainscore_vision/models/eBarlow_lmda_05_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_Vanilla_1/setup.py b/brainscore_vision/models/eMMCR_Mom_Vanilla_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_Vanilla_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_Vanilla_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_Vanilla_2/setup.py b/brainscore_vision/models/eMMCR_Mom_Vanilla_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_Vanilla_2/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_Vanilla_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_0001_1/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_0001_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_0001_1/setup.py +++ 
b/brainscore_vision/models/eMMCR_Mom_lmda_0001_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_001_1/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_001_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_001_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_001_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_01_1/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_01_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_01_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_01_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_01_2/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_01_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_01_2/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_01_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_02_1/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_02_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_02_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_02_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_03_1/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_03_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_03_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_03_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_04_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Mom_lmda_05_1/setup.py b/brainscore_vision/models/eMMCR_Mom_lmda_05_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Mom_lmda_05_1/setup.py +++ b/brainscore_vision/models/eMMCR_Mom_lmda_05_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) 
diff --git a/brainscore_vision/models/eMMCR_Vanilla/setup.py b/brainscore_vision/models/eMMCR_Vanilla/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Vanilla/setup.py +++ b/brainscore_vision/models/eMMCR_Vanilla/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_VanillaV2/setup.py b/brainscore_vision/models/eMMCR_VanillaV2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_VanillaV2/setup.py +++ b/brainscore_vision/models/eMMCR_VanillaV2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Vanilla_1/setup.py b/brainscore_vision/models/eMMCR_Vanilla_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Vanilla_1/setup.py +++ b/brainscore_vision/models/eMMCR_Vanilla_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_Vanilla_2/setup.py b/brainscore_vision/models/eMMCR_Vanilla_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_Vanilla_2/setup.py +++ b/brainscore_vision/models/eMMCR_Vanilla_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_lmda_01/setup.py b/brainscore_vision/models/eMMCR_lmda_01/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_lmda_01/setup.py +++ b/brainscore_vision/models/eMMCR_lmda_01/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_lmda_01V2/setup.py b/brainscore_vision/models/eMMCR_lmda_01V2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_lmda_01V2/setup.py +++ b/brainscore_vision/models/eMMCR_lmda_01V2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_lmda_01_1/setup.py b/brainscore_vision/models/eMMCR_lmda_01_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_lmda_01_1/setup.py +++ b/brainscore_vision/models/eMMCR_lmda_01_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_lmda_01_2/setup.py b/brainscore_vision/models/eMMCR_lmda_01_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_lmda_01_2/setup.py +++ b/brainscore_vision/models/eMMCR_lmda_01_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: 
English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eMMCR_lmda_01_3/setup.py b/brainscore_vision/models/eMMCR_lmda_01_3/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eMMCR_lmda_01_3/setup.py +++ b/brainscore_vision/models/eMMCR_lmda_01_3/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_Vanilla_1/setup.py b/brainscore_vision/models/eSimCLR_Vanilla_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_Vanilla_1/setup.py +++ b/brainscore_vision/models/eSimCLR_Vanilla_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_Vanilla_2/setup.py b/brainscore_vision/models/eSimCLR_Vanilla_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_Vanilla_2/setup.py +++ b/brainscore_vision/models/eSimCLR_Vanilla_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_0001_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_0001_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_0001_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_0001_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_001_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_001_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_001_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_001_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_01_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_01_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_01_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_01_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_01_2/setup.py b/brainscore_vision/models/eSimCLR_lmda_01_2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_01_2/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_01_2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_02_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_02_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_02_1/setup.py +++ 
b/brainscore_vision/models/eSimCLR_lmda_02_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_02_1_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_02_1_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_02_1_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_02_1_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_03_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_03_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_03_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_03_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_04_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_04_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_04_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_04_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_04_1_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_04_1_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_04_1_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_04_1_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/eSimCLR_lmda_05_1/setup.py b/brainscore_vision/models/eSimCLR_lmda_05_1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/eSimCLR_lmda_05_1/setup.py +++ b/brainscore_vision/models/eSimCLR_lmda_05_1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py b/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py index 41c6ca79e..d3eaf9c94 100644 --- a/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py +++ b/brainscore_vision/models/effnetb1_cutmixpatch_SAM_robust32_avge6e8e9e10_manylayers_324x288/model.py @@ -1,7 +1,7 @@ import functools import torch -from brainscore_vision.model_helpers.activations import PytorchWrapper, KerasWrapper +from brainscore_vision.model_helpers.activations import PytorchWrapper from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images from brainscore_vision.model_helpers.s3 import load_weight_file from PIL import Image diff --git a/brainscore_vision/models/r50_tvpt/setup.py b/brainscore_vision/models/r50_tvpt/setup.py index 421914cfb..c286567f5 100644 
--- a/brainscore_vision/models/r50_tvpt/setup.py +++ b/brainscore_vision/models/r50_tvpt/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/resnet50_eMMCR_Vanilla/setup.py b/brainscore_vision/models/resnet50_eMMCR_Vanilla/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/resnet50_eMMCR_Vanilla/setup.py +++ b/brainscore_vision/models/resnet50_eMMCR_Vanilla/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/resnet50_eMMCR_VanillaV2/setup.py b/brainscore_vision/models/resnet50_eMMCR_VanillaV2/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/resnet50_eMMCR_VanillaV2/setup.py +++ b/brainscore_vision/models/resnet50_eMMCR_VanillaV2/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/setup.py b/brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/setup.py +++ b/brainscore_vision/models/resnet50_eMMCR_eqp10_lm1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/resnet50_julios/setup.py b/brainscore_vision/models/resnet50_julios/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/resnet50_julios/setup.py +++ b/brainscore_vision/models/resnet50_julios/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/temporal_model_AVID-CMA/__init__.py b/brainscore_vision/models/temporal_model_AVID_CMA/__init__.py similarity index 100% rename from brainscore_vision/models/temporal_model_AVID-CMA/__init__.py rename to brainscore_vision/models/temporal_model_AVID_CMA/__init__.py diff --git a/brainscore_vision/models/temporal_model_AVID-CMA/model.py b/brainscore_vision/models/temporal_model_AVID_CMA/model.py similarity index 94% rename from brainscore_vision/models/temporal_model_AVID-CMA/model.py rename to brainscore_vision/models/temporal_model_AVID_CMA/model.py index 60d91f690..a67eb3b43 100644 --- a/brainscore_vision/models/temporal_model_AVID-CMA/model.py +++ b/brainscore_vision/models/temporal_model_AVID_CMA/model.py @@ -29,7 +29,7 @@ def get_model(identifier): cfg_path = os.path.join(HOME, "configs/main/avid-cma/audioset/InstX-N1024-PosW-N64-Top32.yaml") weight_path = load_weight_file( bucket="brainscore-vision", - relative_path="temporal_model_AVID-CMA/AVID-CMA_Audioset_InstX-N1024-PosW-N64-Top32_checkpoint.pth.tar", + relative_path="temporal_model_AVID_CMA/AVID-CMA_Audioset_InstX-N1024-PosW-N64-Top32_checkpoint.pth.tar", version_id="jSaZgbUohM0ZeoEUUKZiLBo6iz_v8VvQ", sha1="9db5eba9aab6bdbb74025be57ab532df808fe3f6" ) @@ -38,7 +38,7 @@ def get_model(identifier): cfg_path = os.path.join(HOME, 
"configs/main/avid/kinetics/Cross-N1024.yaml") weight_path = load_weight_file( bucket="brainscore-vision", - relative_path="temporal_model_AVID-CMA/AVID_Kinetics_Cross-N1024_checkpoint.pth.tar", + relative_path="temporal_model_AVID_CMA/AVID_Kinetics_Cross-N1024_checkpoint.pth.tar", version_id="XyKt0UOUFsuuyrl6ZREivK8FadRPx34u", sha1="d3a04f856d29421ba8de37808593a3fad4d4794f" ) @@ -47,7 +47,7 @@ def get_model(identifier): cfg_path = os.path.join(HOME, "configs/main/avid/audioset/Cross-N1024.yaml") weight_path = load_weight_file( bucket="brainscore-vision", - relative_path="temporal_model_AVID-CMA/AVID_Audioset_Cross-N1024_checkpoint.pth.tar", + relative_path="temporal_model_AVID_CMA/AVID_Audioset_Cross-N1024_checkpoint.pth.tar", version_id="0Sxuhn8LsYXQC4FnPfJ7rw7uU6kDlKgc", sha1="b48d8428a1a2526ccca070f810333df18bfce5fd" ) diff --git a/brainscore_vision/models/temporal_model_AVID-CMA/requirements.txt b/brainscore_vision/models/temporal_model_AVID_CMA/requirements.txt similarity index 100% rename from brainscore_vision/models/temporal_model_AVID-CMA/requirements.txt rename to brainscore_vision/models/temporal_model_AVID_CMA/requirements.txt diff --git a/brainscore_vision/models/temporal_model_AVID-CMA/test.py b/brainscore_vision/models/temporal_model_AVID_CMA/test.py similarity index 100% rename from brainscore_vision/models/temporal_model_AVID-CMA/test.py rename to brainscore_vision/models/temporal_model_AVID_CMA/test.py diff --git a/brainscore_vision/models/temporal_model_GDT/model.py b/brainscore_vision/models/temporal_model_GDT/model.py index 624a5b29b..9a0c057c7 100644 --- a/brainscore_vision/models/temporal_model_GDT/model.py +++ b/brainscore_vision/models/temporal_model_GDT/model.py @@ -69,4 +69,4 @@ def get_model(identifier): # "base.fc": "C", # no fc } - return PytorchWrapper(identifier, model, transform_video, fps=30, layer_activation_format=layer_activation_format) \ No newline at end of file + return PytorchWrapper(identifier, model, transform_video, fps=30, layer_activation_format=layer_activation_format) diff --git a/brainscore_vision/models/temporal_model_VideoMAEv2/model.py b/brainscore_vision/models/temporal_model_VideoMAEv2/model.py index 7e785513e..355b8e8b2 100644 --- a/brainscore_vision/models/temporal_model_VideoMAEv2/model.py +++ b/brainscore_vision/models/temporal_model_VideoMAEv2/model.py @@ -54,7 +54,7 @@ def get_model(identifier): bucket="brainscore-vision", relative_path="temporal_model_VideoMAEv2/vit_g_hybrid_pt_1200e.pth", version_id="TxtkfbeMV105dzpzTwi0Kn5glnvQvIrq", - sha1="9048f2bc0b0c7ba4d0e5228f3a7c0bef4dbaca69", + sha1="9048f2bc0b0c7ba4d0e5228f3a7c0bef4dbaca69" ) num_blocks = 40 feature_map_size = 16 diff --git a/brainscore_vision/models/temporal_model_openstl/__init__.py b/brainscore_vision/models/temporal_model_openstl/__init__.py index 2b49cc845..9ea9b66b1 100644 --- a/brainscore_vision/models/temporal_model_openstl/__init__.py +++ b/brainscore_vision/models/temporal_model_openstl/__init__.py @@ -13,7 +13,6 @@ def commit_model(identifier): model_registry["ConvLSTM"] = lambda: commit_model("ConvLSTM") model_registry["PredRNN"] = lambda: commit_model("PredRNN") -# model_registry["PredNet"] = lambda: commit_model("PredNet") model_registry["SimVP"] = lambda: commit_model("SimVP") model_registry["TAU"] = lambda: commit_model("TAU") model_registry["MIM"] = lambda: commit_model("MIM") diff --git a/brainscore_vision/models/temporal_model_openstl/model.py b/brainscore_vision/models/temporal_model_openstl/model.py index aed3e0464..de5c93803 100644 --- 
a/brainscore_vision/models/temporal_model_openstl/model.py +++ b/brainscore_vision/models/temporal_model_openstl/model.py @@ -105,23 +105,6 @@ def process_output(layer, layer_name, inputs, output): kwargs = {} weight_name = "kitticaltech_predrnn_one_ep100.pth" - elif identifier == "PredNet": - layer_activation_format = { - **{f"layer{i}": "TCHW" for i in range(4)}, - "layer5": "TCHW" - } - - def process_output(layer, layer_name, inputs, output): - if layer_name.startswith("cell_list"): - h, c = output - return c - else: - return output - - wrapper_cls = LSTMWrapper - kwargs = {} - weight_name = "kitticaltech_prednet_one_ep100.pth" - elif identifier == "ConvLSTM": layer_activation_format = { **{f"cell_list.{i}": "TCHW" for i in range(4)}, @@ -220,4 +203,4 @@ def transform_video_simvp(video): return wrapper_cls(identifier, model, transform_video, fps=KITTI_FPS, layer_activation_format=layer_activation_format, - process_output=process_output, **kwargs) \ No newline at end of file + process_output=process_output, **kwargs) diff --git a/brainscore_vision/models/temporal_model_openstl/test.py b/brainscore_vision/models/temporal_model_openstl/test.py index 4d52b76ce..c4090a314 100644 --- a/brainscore_vision/models/temporal_model_openstl/test.py +++ b/brainscore_vision/models/temporal_model_openstl/test.py @@ -6,7 +6,6 @@ model_list = [ "ConvLSTM", "PredRNN", - "PredNet", "SimVP", "TAU", "MIM" @@ -17,4 +16,4 @@ @pytest.mark.parametrize("model_identifier", model_list) def test_load(model_identifier): model = load_model(model_identifier) - assert model is not None \ No newline at end of file + assert model is not None diff --git a/brainscore_vision/models/tv_efficientnet-b1/__init__.py b/brainscore_vision/models/tv_efficientnet_b1/__init__.py similarity index 100% rename from brainscore_vision/models/tv_efficientnet-b1/__init__.py rename to brainscore_vision/models/tv_efficientnet_b1/__init__.py diff --git a/brainscore_vision/models/tv_efficientnet-b1/model.py b/brainscore_vision/models/tv_efficientnet_b1/model.py similarity index 100% rename from brainscore_vision/models/tv_efficientnet-b1/model.py rename to brainscore_vision/models/tv_efficientnet_b1/model.py diff --git a/brainscore_vision/models/tv_efficientnet-b1/setup.py b/brainscore_vision/models/tv_efficientnet_b1/setup.py similarity index 92% rename from brainscore_vision/models/tv_efficientnet-b1/setup.py rename to brainscore_vision/models/tv_efficientnet_b1/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/tv_efficientnet-b1/setup.py +++ b/brainscore_vision/models/tv_efficientnet_b1/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/tv_efficientnet-b1/test.py b/brainscore_vision/models/tv_efficientnet_b1/test.py similarity index 100% rename from brainscore_vision/models/tv_efficientnet-b1/test.py rename to brainscore_vision/models/tv_efficientnet_b1/test.py diff --git a/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/setup.py b/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/setup.py +++ b/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240222/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: 
English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/setup.py b/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/setup.py index 421914cfb..c286567f5 100644 --- a/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/setup.py +++ b/brainscore_vision/models/yudixie_resnet50_imagenet1kpret_0_240312/setup.py @@ -19,7 +19,6 @@ 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.7', ], test_suite='tests', ) diff --git a/environment_lock.yml b/environment_lock.yml new file mode 100644 index 000000000..9847267d3 --- /dev/null +++ b/environment_lock.yml @@ -0,0 +1,182 @@ +# This environment_lock file is associated with the move to brainscore_vision 2.1.0. This lock includes all testing dependencies and dependencies +# from adjacent repositories. + +name: brainscore_env +channels: + - defaults + - conda-forge +dependencies: + - bzip2=1.0.8 + - ca-certificates=2024.7.2 + - libffi=3.4.4 + - ncurses=6.4 + - openssl=3.0.14 + - pip=24.2 + - python=3.11.9 + - readline=8.2 + - setuptools=72.1.0 + - sqlite=3.45.3 + - tk=8.6.14 + - wheel=0.43.0 + - xz=5.4.6 + - zlib=1.2.13 + - pip: + - anyio==4.4.0 + - appnope==0.1.4 + - argon2-cffi==23.1.0 + - argon2-cffi-bindings==21.2.0 + - arrow==1.3.0 + - asttokens==2.4.1 + - async-lru==2.0.4 + - attrs==24.2.0 + - babel==2.16.0 + - beautifulsoup4==4.12.3 + - bleach==6.1.0 + - boto3==1.35.3 + - botocore==1.35.3 + - brainio @ git+https://github.com/brain-score/brainio.git@main + - brainscore_core @ git+https://github.com/brain-score/core@main + - brainscore-vision @ git+https://github.com/brain-score/vision.git@main + - certifi==2024.7.4 + - cffi==1.17.0 + - cftime==1.6.4 + - charset-normalizer==3.3.2 + - click==8.1.7 + - cloudpickle==3.0.0 + - comm==0.2.2 + - contourpy==1.2.1 + - cycler==0.12.1 + - dask==2024.8.1 + - debugpy==1.8.5 + - decorator==5.1.1 + - defusedxml==0.7.1 + - entrypoints==0.4 + - eva-decord==0.6.1 + - executing==2.0.1 + - fastjsonschema==2.20.0 + - filelock==3.15.4 + - fire==0.6.0 + - fonttools==4.53.1 + - fqdn==1.5.1 + - fsspec==2024.6.1 + - gitdb==4.0.11 + - gitpython==3.1.43 + - h11==0.14.0 + - h5py==3.11.0 + - httpcore==1.0.5 + - httpx==0.27.0 + - idna==3.7 + - importlib-metadata==4.13.0 + - iniconfig==2.0.0 + - ipykernel==6.29.5 + - ipython==8.26.0 + - ipywidgets==8.1.5 + - isoduration==20.11.0 + - jedi==0.19.1 + - jinja2==3.1.4 + - jmespath==1.0.1 + - joblib==1.4.2 + - json5==0.9.25 + - jsonpointer==3.0.0 + - jsonschema==4.23.0 + - jsonschema-specifications==2023.12.1 + - jupyter==1.0.0 + - jupyter-client==8.6.2 + - jupyter-console==6.6.3 + - jupyter-core==5.7.2 + - jupyter-events==0.10.0 + - jupyter-lsp==2.2.5 + - jupyter-server==2.14.2 + - jupyter-server-terminals==0.5.3 + - jupyterlab==4.2.4 + - jupyterlab-pygments==0.3.0 + - jupyterlab-server==2.27.3 + - jupyterlab-widgets==3.0.13 + - kiwisolver==1.4.5 + - latexcodec==3.0.0 + - locket==1.0.0 + - markupsafe==2.1.5 + - matplotlib==3.9.2 + - matplotlib-inline==0.1.7 + - mistune==3.0.2 + - mpmath==1.3.0 + - nbclient==0.10.0 + - nbconvert==7.16.4 + - nbformat==5.10.4 + - nest-asyncio==1.6.0 + - netcdf4==1.7.1.post1 + - networkx==3.3 + - notebook==7.2.1 + - notebook-shim==0.2.4 + - numpy==1.26.4 + - opencv-python==4.10.0.84 + - overrides==7.7.0 + - packaging==24.1 + - pandas==2.2.2 + - pandocfilters==1.5.1 + - parso==0.8.4 + - partd==1.4.2 + - peewee==3.17.6 + - 
pexpect==4.9.0 + - pillow==10.4.0 + - platformdirs==4.2.2 + - pluggy==1.5.0 + - prometheus-client==0.20.0 + - prompt-toolkit==3.0.47 + - psutil==6.0.0 + - psycopg2-binary==2.9.9 + - ptyprocess==0.7.0 + - pure-eval==0.2.3 + - pybtex==0.24.0 + - pycparser==2.22 + - pygments==2.18.0 + - pyparsing==3.1.2 + - pytest==8.3.2 + - pytest-check==2.3.1 + - pytest-mock==3.14.0 + - pytest-timeout==2.3.1 + - python-dateutil==2.9.0.post0 + - python-json-logger==2.0.7 + - pytz==2024.1 + - pyyaml==6.0.2 + - pyzmq==26.2.0 + - qtconsole==5.5.2 + - qtpy==2.4.1 + - referencing==0.35.1 + - requests==2.32.3 + - result_caching @ git+https://github.com/brain-score/result_caching@master + - rfc3339-validator==0.1.4 + - rfc3986-validator==0.1.1 + - rpds-py==0.20.0 + - s3transfer==0.10.2 + - scikit-learn==1.5.1 + - scipy==1.14.1 + - send2trash==1.8.3 + - six==1.16.0 + - smmap==5.0.1 + - sniffio==1.3.1 + - soupsieve==2.6 + - stack-data==0.6.3 + - sympy==1.13.2 + - termcolor==2.4.0 + - terminado==0.18.1 + - threadpoolctl==3.5.0 + - tinycss2==1.3.0 + - toolz==0.12.1 + - torch==2.4.0 + - torchvision==0.19.0 + - tornado==6.4.1 + - tqdm==4.66.5 + - traitlets==5.14.3 + - types-python-dateutil==2.9.0.20240821 + - typing-extensions==4.12.2 + - tzdata==2024.1 + - uri-template==1.3.0 + - urllib3==2.2.2 + - wcwidth==0.2.13 + - webcolors==24.8.0 + - webencodings==0.5.1 + - websocket-client==1.8.0 + - widgetsnbextension==4.0.13 + - xarray==2022.3.0 + - zipp==3.20.0 diff --git a/pyproject.toml b/pyproject.toml index 3b28322e9..15b4de6d7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,17 +4,17 @@ [project] name = "brainscore_vision" -version = "2.0" +version = "2.1" description = "The Brain-Score library enables model comparisons to behavioral and neural experiments" authors = [] license = { 'file' = 'LICENSE' } readme = "README.md" -requires-python = ">=3.7" +requires-python = ">=3.11" dependencies = [ - "numpy>=1.17", - "brainscore_core @ git+https://github.com/brain-score/core", - "result_caching @ git+https://github.com/brain-score/result_caching", + "numpy<2", + "brainscore_core @ git+https://github.com/brain-score/core@main", + "result_caching @ git+https://github.com/brain-score/result_caching@master", "importlib-metadata<5", # workaround to https://github.com/brain-score/brainio/issues/28 "scikit-learn", # for metric_helpers/transformations.py cross-validation "scipy", # for benchmark_helpers/properties_common.py @@ -28,8 +28,8 @@ dependencies = [ "peewee", "psycopg2-binary", "networkx", - "decord", - "psutil" + "eva-decord", + "psutil", ] [project.optional-dependencies] @@ -40,9 +40,6 @@ test = [ "pytest-timeout", "torch", "torchvision", - "tensorflow==1.15", - "keras==2.3.1", - "protobuf<=3.20", # https://protobuf.dev/news/2022-05-06/#python-updates "matplotlib", # for examples "pytest-mock", ] diff --git a/tests/test_metric_helpers/test_temporal.py b/tests/test_metric_helpers/test_temporal.py new file mode 100644 index 000000000..64dffe8de --- /dev/null +++ b/tests/test_metric_helpers/test_temporal.py @@ -0,0 +1,80 @@ +import numpy as np +import scipy.stats +from pytest import approx +from sklearn.linear_model import LinearRegression + +from brainio.assemblies import NeuroidAssembly +from brainscore_vision.metric_helpers.xarray_utils import XarrayRegression, XarrayCorrelation +from brainscore_vision.metric_helpers.temporal import PerTime, SpanTime, PerTimeRegression, SpanTimeRegression + + +class TestMetricHelpers: + def test_pertime_ops(self): + jumbled_source = NeuroidAssembly(np.random.rand(500, 10, 20), + 
coords={'stimulus_id': ('presentation', list(reversed(range(500)))), + 'image_meta': ('presentation', [0] * 500), + 'neuroid_id': ('neuroid', list(reversed(range(10)))), + 'neuroid_meta': ('neuroid', [0] * 10), + 'time_bin_start': ('time_bin', np.arange(0, 400, 20)), + 'time_bin_end': ('time_bin', np.arange(20, 420, 20))}, + dims=['presentation', 'neuroid', 'time_bin']) + mean_neuroid = lambda arr: arr.mean('neuroid') + pertime_mean_neuroid = PerTime(mean_neuroid) + output = pertime_mean_neuroid(jumbled_source) + output = output.transpose('presentation', 'time_bin') + target = jumbled_source.transpose('presentation', 'time_bin', 'neuroid').mean('neuroid') + assert (output == approx(target)).all().item() + + def test_spantime_ops(self): + jumbled_source = NeuroidAssembly(np.random.rand(500, 10, 20), + coords={'stimulus_id': ('presentation', list(reversed(range(500)))), + 'image_meta': ('presentation', [0] * 500), + 'neuroid_id': ('neuroid', list(reversed(range(10)))), + 'neuroid_meta': ('neuroid', [0] * 10), + 'time_bin_start': ('time_bin', np.arange(0, 400, 20)), + 'time_bin_end': ('time_bin', np.arange(20, 420, 20))}, + dims=['presentation', 'neuroid', 'time_bin']) + mean_presentation = lambda arr: arr.mean("presentation") + spantime_mean_presentation = SpanTime(mean_presentation) + output = spantime_mean_presentation(jumbled_source) + output = output.transpose('neuroid') + target = jumbled_source.transpose('presentation', 'time_bin', 'neuroid').mean('presentation').mean('time_bin') + assert (output == approx(target)).all().item() + + def test_pertime_regression(self): + jumbled_source = NeuroidAssembly(np.random.rand(500, 10, 20), + coords={'stimulus_id': ('presentation', list(reversed(range(500)))), + 'image_meta': ('presentation', [0] * 500), + 'neuroid_id': ('neuroid', list(reversed(range(10)))), + 'neuroid_meta': ('neuroid', [0] * 10), + 'time_bin_start': ('time_bin', np.arange(0, 400, 20)), + 'time_bin_end': ('time_bin', np.arange(20, 420, 20))}, + dims=['presentation', 'neuroid', 'time_bin']) + target = jumbled_source.sortby(['stimulus_id', 'neuroid_id']) + pertime_regression = PerTimeRegression(XarrayRegression(LinearRegression())) + pertime_regression.fit(jumbled_source, target) + prediction = pertime_regression.predict(jumbled_source) + prediction = prediction.transpose(*target.dims) + # do not test for alignment of metadata - it is only important that the data is well-aligned with the metadata. + np.testing.assert_array_almost_equal(prediction.sortby(['stimulus_id', 'neuroid_id', 'time_bin']).values, + target.sortby(['stimulus_id', 'neuroid_id', 'time_bin']).values) + + + def test_spantime_regression(self): + jumbled_source = NeuroidAssembly(np.random.rand(500, 10, 20), + coords={'stimulus_id': ('presentation', list(reversed(range(500)))), + 'image_meta': ('presentation', [0] * 500), + 'neuroid_id': ('neuroid', list(reversed(range(10)))), + 'neuroid_meta': ('neuroid', [0] * 10), + 'time_bin_start': ('time_bin', np.arange(0, 400, 20)), + 'time_bin_end': ('time_bin', np.arange(20, 420, 20))}, + dims=['presentation', 'neuroid', 'time_bin']) + target = jumbled_source.sortby(['stimulus_id', 'neuroid_id']) + spantime_regression = SpanTimeRegression(XarrayRegression(LinearRegression())) + spantime_regression.fit(jumbled_source, target) + prediction = spantime_regression.predict(jumbled_source) + prediction = prediction.transpose(*target.dims) + # do not test for alignment of metadata - it is only important that the data is well-aligned with the metadata. 
+ np.testing.assert_array_almost_equal(prediction.sortby(['stimulus_id', 'neuroid_id', 'time_bin']).values, + target.sortby(['stimulus_id', 'neuroid_id', 'time_bin']).values) + diff --git a/tests/test_model_helpers/activations/test___init__.py b/tests/test_model_helpers/activations/test___init__.py index 99b36cb98..9bd012348 100644 --- a/tests/test_model_helpers/activations/test___init__.py +++ b/tests/test_model_helpers/activations/test___init__.py @@ -6,7 +6,7 @@ from pathlib import Path from brainio.stimuli import StimulusSet -from brainscore_vision.model_helpers.activations import KerasWrapper, PytorchWrapper, TensorflowSlimWrapper +from brainscore_vision.model_helpers.activations import PytorchWrapper from brainscore_vision.model_helpers.activations.core import flatten from brainscore_vision.model_helpers.activations.pca import LayerPCA @@ -93,74 +93,10 @@ def forward(self, x): return PytorchWrapper(model=MyTransformer(), preprocessing=preprocessing) -def keras_vgg19(): - import keras - from keras.applications.vgg19 import VGG19, preprocess_input - from brainscore_vision.model_helpers.activations.keras import load_images - keras.backend.clear_session() - preprocessing = lambda image_filepaths: preprocess_input(load_images(image_filepaths, image_size=224)) - return KerasWrapper(model=VGG19(), preprocessing=preprocessing) - - -def tfslim_custom(): - from brainscore_vision.model_helpers.activations.tensorflow import load_resize_image - import tensorflow as tf - slim = tf.contrib.slim - tf.compat.v1.reset_default_graph() - - image_size = 224 - placeholder = tf.compat.v1.placeholder(dtype=tf.string, shape=[64]) - preprocess = lambda image_path: load_resize_image(image_path, image_size) - preprocess = tf.map_fn(preprocess, placeholder, dtype=tf.float32) - - with tf.compat.v1.variable_scope('my_model', values=[preprocess]) as sc: - end_points_collection = sc.original_name_scope + '_end_points' - # Collect outputs for conv2d, fully_connected and max_pool2d. 
- with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d], - outputs_collections=[end_points_collection]): - net = slim.conv2d(preprocess, 64, [11, 11], 4, padding='VALID', scope='conv1') - net = slim.max_pool2d(net, [5, 5], 5, scope='pool1') - net = slim.max_pool2d(net, [3, 3], 2, scope='pool2') - net = slim.flatten(net, scope='flatten') - net = slim.fully_connected(net, 1000, scope='logits') - endpoints = slim.utils.convert_collection_to_dict(end_points_collection) - - session = tf.compat.v1.Session() - session.run(tf.compat.v1.initialize_all_variables()) - return TensorflowSlimWrapper(identifier='tf-custom', labels_offset=0, - endpoints=endpoints, inputs=placeholder, session=session) - - -def tfslim_vgg16(): - import tensorflow as tf - from nets import nets_factory - from preprocessing import vgg_preprocessing - from brainscore_vision.model_helpers.activations.tensorflow import load_resize_image - tf.compat.v1.reset_default_graph() - - image_size = 224 - placeholder = tf.compat.v1.placeholder(dtype=tf.string, shape=[64]) - preprocess_image = lambda image: vgg_preprocessing.preprocess_image( - image, image_size, image_size, resize_side_min=image_size) - preprocess = lambda image_path: preprocess_image(load_resize_image(image_path, image_size)) - preprocess = tf.map_fn(preprocess, placeholder, dtype=tf.float32) - - model_ctr = nets_factory.get_network_fn('vgg_16', num_classes=1001, is_training=False) - logits, endpoints = model_ctr(preprocess) - - session = tf.compat.v1.Session() - session.run(tf.compat.v1.initialize_all_variables()) - return TensorflowSlimWrapper(identifier='tf-vgg16', labels_offset=1, - endpoints=endpoints, inputs=placeholder, session=session) - - models_layers = [ pytest.param(pytorch_custom, ['linear', 'relu2']), pytest.param(pytorch_alexnet, ['features.12', 'classifier.5'], marks=pytest.mark.memory_intense), pytest.param(pytorch_transformer_substitute, ['relu1']), - pytest.param(keras_vgg19, ['block3_pool'], marks=pytest.mark.memory_intense), - pytest.param(tfslim_custom, ['my_model/pool2'], marks=pytest.mark.memory_intense), - pytest.param(tfslim_vgg16, ['vgg_16/pool5'], marks=pytest.mark.memory_intense), ] # exact microsaccades for pytorch_alexnet, grayscale.png, for 1 and 10 number_of_trials @@ -366,8 +302,6 @@ def test_exact_microsaccades(number_of_trials): @pytest.mark.memory_intense @pytest.mark.parametrize(["model_ctr", "internal_layers"], [ (pytorch_alexnet, ['features.12', 'classifier.5']), - (keras_vgg19, ['block3_pool']), - (tfslim_vgg16, ['vgg_16/pool5']), ]) def test_mixed_layer_logits(model_ctr, internal_layers): stimuli_paths = [os.path.join(os.path.dirname(__file__), 'rgb.jpg')] @@ -384,7 +318,6 @@ def test_mixed_layer_logits(model_ctr, internal_layers): @pytest.mark.parametrize(["model_ctr", "expected_identifier"], [ (pytorch_custom, 'MyModel'), (pytorch_alexnet, 'AlexNet'), - (keras_vgg19, 'vgg19'), ]) def test_infer_identifier(model_ctr, expected_identifier): model = model_ctr()
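With the Keras and TF-Slim fixtures deleted, `PytorchWrapper` is the only activations wrapper these tests exercise. For reference, a minimal sketch of the surviving PyTorch path, mirroring the `PytorchWrapper(model=..., preprocessing=...)` call used by the fixtures above — the toy module and the 224-pixel image size are illustrative assumptions, not part of the test suite:

```python
import functools

import torch
from brainscore_vision.model_helpers.activations import PytorchWrapper
from brainscore_vision.model_helpers.activations.pytorch import load_preprocess_images


class TinyModel(torch.nn.Module):
    """Illustrative stand-in for a real model; hypothetical, not from this repository."""

    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 2, kernel_size=3)
        self.relu = torch.nn.ReLU()
        self.linear = torch.nn.Linear(2, 1000)

    def forward(self, x):
        x = self.relu(self.conv(x))
        # pool to 1x1 and flatten so the linear layer sees a fixed-size vector
        x = torch.nn.functional.adaptive_avg_pool2d(x, 1).flatten(1)
        return self.linear(x)


# preprocessing turns image file paths into model-ready tensors
preprocessing = functools.partial(load_preprocess_images, image_size=224)
wrapper = PytorchWrapper(model=TinyModel(), preprocessing=preprocessing)
# wrapper(stimuli_paths, layers=['linear']) would then extract activations,
# as the parametrized tests above do for pytorch_custom's 'linear' / 'relu2' layers
```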