diff --git a/requirements_s3.txt b/requirements_s3.txt index 94266cab9..df0fc7af7 100644 --- a/requirements_s3.txt +++ b/requirements_s3.txt @@ -2,7 +2,7 @@ boto3==1.34.103 botocore==1.34.103 jmespath==1.0.1 python-dateutil==2.9.0.post0 -s3path==0.5.6 +s3path==0.5.7 s3transfer==0.10.1 six==1.16.0 smart-open==7.0.4 diff --git a/src/bandersnatch/tests/conftest.py b/src/bandersnatch/tests/conftest.py index d866c13e1..70036ae58 100644 --- a/src/bandersnatch/tests/conftest.py +++ b/src/bandersnatch/tests/conftest.py @@ -10,7 +10,13 @@ from _pytest.capture import CaptureFixture from _pytest.fixtures import FixtureRequest from _pytest.monkeypatch import MonkeyPatch -from s3path import PureS3Path, S3Path, accessor, register_configuration_parameter +from s3path import ( + PureS3Path, + S3Path, + accessor, + configuration_map, + register_configuration_parameter, +) if TYPE_CHECKING: from bandersnatch.master import Master @@ -202,7 +208,7 @@ def s3_mock(reset_configuration_cache: None) -> S3Path: new_bucket = S3Path("/test-bucket") new_bucket.mkdir(exist_ok=True) yield new_bucket - resource, _ = new_bucket._accessor.configuration_map.get_configuration(new_bucket) + resource, _ = configuration_map.get_configuration(new_bucket) bucket = resource.Bucket(new_bucket.bucket) for key in bucket.objects.all(): key.delete() diff --git a/src/bandersnatch_storage_plugins/s3.py b/src/bandersnatch_storage_plugins/s3.py index d25037a4d..94d5015d0 100644 --- a/src/bandersnatch_storage_plugins/s3.py +++ b/src/bandersnatch_storage_plugins/s3.py @@ -17,7 +17,7 @@ from botocore.client import Config from s3path import PureS3Path from s3path import S3Path as _S3Path -from s3path import register_configuration_parameter +from s3path import configuration_map, register_configuration_parameter if TYPE_CHECKING: from s3path.accessor import _S3DirEntry @@ -37,7 +37,7 @@ def mkdir( def glob(self, pattern: str) -> Iterator[S3Path]: bucket_name = self.bucket - resource, _ = self._accessor.configuration_map.get_configuration(self) + resource, _ = configuration_map.get_configuration(self) bucket = resource.Bucket(bucket_name) kwargs = { @@ -261,7 +261,7 @@ def copy_file(self, source: PATH_TYPES, dest: PATH_TYPES) -> None: dest = self.PATH_BACKEND(dest) if not self.exists(source): raise FileNotFoundError(source) - resource, _ = source._accessor.configuration_map.get_configuration(source) + resource, _ = configuration_map.get_configuration(source) client = resource.meta.client client.copy_object( Key=dest.key, @@ -422,7 +422,7 @@ def get_file_size(self, path: PATH_TYPES) -> int: def get_upload_time(self, path: PATH_TYPES) -> datetime.datetime: if not isinstance(path, self.PATH_BACKEND): path = self.create_path_backend(path) - resource, _ = path._accessor.configuration_map.get_configuration(path) + resource, _ = configuration_map.get_configuration(path) s3object = resource.Object(path.bucket, str(path.key)) ts = s3object.metadata.get(self.UPLOAD_TIME_METADATA_KEY, 0) if not isinstance(ts, int): @@ -432,7 +432,7 @@ def set_upload_time(self, path: PATH_TYPES, time: datetime.datetime) -> None: if not isinstance(path, self.PATH_BACKEND): path = self.create_path_backend(path) - resource, _ = path._accessor.configuration_map.get_configuration(path) + resource, _ = configuration_map.get_configuration(path) s3object = resource.Object(path.bucket, str(path.key)) s3object.metadata.update({self.UPLOAD_TIME_METADATA_KEY: str(time.timestamp())}) # s3 does not support editing metadata after upload, it can be done better.