diff --git a/.buildinfo b/.buildinfo index 6c81d8eb..4e9c0186 100644 --- a/.buildinfo +++ b/.buildinfo @@ -1,4 +1,4 @@ # Sphinx build info version 1 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: 72539cf72e77b2ad46b1a6b455720d47 +config: 5e475b822304872e97844f5ff081e1e4 tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/.doctrees/advanced.doctree b/.doctrees/advanced.doctree deleted file mode 100644 index 69ba4b0d..00000000 Binary files a/.doctrees/advanced.doctree and /dev/null differ diff --git a/.doctrees/advanced/custom_protocol.doctree b/.doctrees/advanced/custom_protocol.doctree deleted file mode 100644 index 861885aa..00000000 Binary files a/.doctrees/advanced/custom_protocol.doctree and /dev/null differ diff --git a/.doctrees/advanced/glob.doctree b/.doctrees/advanced/glob.doctree deleted file mode 100644 index ffa9e604..00000000 Binary files a/.doctrees/advanced/glob.doctree and /dev/null differ diff --git a/.doctrees/changelog.doctree b/.doctrees/changelog.doctree deleted file mode 100644 index 12b0fef8..00000000 Binary files a/.doctrees/changelog.doctree and /dev/null differ diff --git a/.doctrees/cli.doctree b/.doctrees/cli.doctree deleted file mode 100644 index dc6d0926..00000000 Binary files a/.doctrees/cli.doctree and /dev/null differ diff --git a/.doctrees/configuration.doctree b/.doctrees/configuration.doctree deleted file mode 100644 index 78aad997..00000000 Binary files a/.doctrees/configuration.doctree and /dev/null differ diff --git a/.doctrees/configuration/common.doctree b/.doctrees/configuration/common.doctree deleted file mode 100644 index 8ff51215..00000000 Binary files a/.doctrees/configuration/common.doctree and /dev/null differ diff --git a/.doctrees/configuration/hdfs.doctree b/.doctrees/configuration/hdfs.doctree deleted file mode 100644 index dd71706c..00000000 Binary files a/.doctrees/configuration/hdfs.doctree and /dev/null differ diff --git a/.doctrees/configuration/http.doctree b/.doctrees/configuration/http.doctree deleted file mode 100644 index 08013744..00000000 Binary files a/.doctrees/configuration/http.doctree and /dev/null differ diff --git a/.doctrees/configuration/s3.doctree b/.doctrees/configuration/s3.doctree deleted file mode 100644 index 4d59424e..00000000 Binary files a/.doctrees/configuration/s3.doctree and /dev/null differ diff --git a/.doctrees/configuration/sftp.doctree b/.doctrees/configuration/sftp.doctree deleted file mode 100644 index 3dfc7c09..00000000 Binary files a/.doctrees/configuration/sftp.doctree and /dev/null differ diff --git a/.doctrees/environment.pickle b/.doctrees/environment.pickle deleted file mode 100644 index 04367eaf..00000000 Binary files a/.doctrees/environment.pickle and /dev/null differ diff --git a/.doctrees/index.doctree b/.doctrees/index.doctree deleted file mode 100644 index 5c9d5c91..00000000 Binary files a/.doctrees/index.doctree and /dev/null differ diff --git a/.doctrees/megfile.doctree b/.doctrees/megfile.doctree deleted file mode 100644 index 392ac4e0..00000000 Binary files a/.doctrees/megfile.doctree and /dev/null differ diff --git a/.doctrees/megfile.fs.doctree b/.doctrees/megfile.fs.doctree deleted file mode 100644 index a5ad18ae..00000000 Binary files a/.doctrees/megfile.fs.doctree and /dev/null differ diff --git a/.doctrees/megfile.fs_path.doctree b/.doctrees/megfile.fs_path.doctree deleted file mode 100644 index 4c205903..00000000 Binary files a/.doctrees/megfile.fs_path.doctree and /dev/null differ diff --git 
a/.doctrees/megfile.hdfs.doctree b/.doctrees/megfile.hdfs.doctree deleted file mode 100644 index bb74ec9b..00000000 Binary files a/.doctrees/megfile.hdfs.doctree and /dev/null differ diff --git a/.doctrees/megfile.hdfs_path.doctree b/.doctrees/megfile.hdfs_path.doctree deleted file mode 100644 index dde86a77..00000000 Binary files a/.doctrees/megfile.hdfs_path.doctree and /dev/null differ diff --git a/.doctrees/megfile.http.doctree b/.doctrees/megfile.http.doctree deleted file mode 100644 index 6b1234de..00000000 Binary files a/.doctrees/megfile.http.doctree and /dev/null differ diff --git a/.doctrees/megfile.http_path.doctree b/.doctrees/megfile.http_path.doctree deleted file mode 100644 index 16024c49..00000000 Binary files a/.doctrees/megfile.http_path.doctree and /dev/null differ diff --git a/.doctrees/megfile.s3.doctree b/.doctrees/megfile.s3.doctree deleted file mode 100644 index 988b8ef3..00000000 Binary files a/.doctrees/megfile.s3.doctree and /dev/null differ diff --git a/.doctrees/megfile.s3_path.doctree b/.doctrees/megfile.s3_path.doctree deleted file mode 100644 index 0a2e1e0a..00000000 Binary files a/.doctrees/megfile.s3_path.doctree and /dev/null differ diff --git a/.doctrees/megfile.sftp.doctree b/.doctrees/megfile.sftp.doctree deleted file mode 100644 index 6cf36645..00000000 Binary files a/.doctrees/megfile.sftp.doctree and /dev/null differ diff --git a/.doctrees/megfile.sftp_path.doctree b/.doctrees/megfile.sftp_path.doctree deleted file mode 100644 index ad0fb427..00000000 Binary files a/.doctrees/megfile.sftp_path.doctree and /dev/null differ diff --git a/.doctrees/megfile.smart.doctree b/.doctrees/megfile.smart.doctree deleted file mode 100644 index 1618d9b2..00000000 Binary files a/.doctrees/megfile.smart.doctree and /dev/null differ diff --git a/.doctrees/megfile.smart_path.doctree b/.doctrees/megfile.smart_path.doctree deleted file mode 100644 index 4ddfaca7..00000000 Binary files a/.doctrees/megfile.smart_path.doctree and /dev/null differ diff --git a/.doctrees/megfile.stdio.doctree b/.doctrees/megfile.stdio.doctree deleted file mode 100644 index ffb46cfd..00000000 Binary files a/.doctrees/megfile.stdio.doctree and /dev/null differ diff --git a/.doctrees/megfile.stdio_path.doctree b/.doctrees/megfile.stdio_path.doctree deleted file mode 100644 index 113ba1b6..00000000 Binary files a/.doctrees/megfile.stdio_path.doctree and /dev/null differ diff --git a/.doctrees/path_format.doctree b/.doctrees/path_format.doctree deleted file mode 100644 index 4135499e..00000000 Binary files a/.doctrees/path_format.doctree and /dev/null differ diff --git a/.doctrees/readme.doctree b/.doctrees/readme.doctree deleted file mode 100644 index f0661b1c..00000000 Binary files a/.doctrees/readme.doctree and /dev/null differ diff --git a/_modules/index.html b/_modules/index.html new file mode 100644 index 00000000..6881be05 --- /dev/null +++ b/_modules/index.html @@ -0,0 +1,128 @@ + + + + + + Overview: module code — megfile documentation + + + + + + + + + + + + + + + + + +
+ + +
+ + +
+
+ + + + \ No newline at end of file diff --git a/_modules/megfile/fs.html b/_modules/megfile/fs.html new file mode 100644 index 00000000..b7f359e4 --- /dev/null +++ b/_modules/megfile/fs.html @@ -0,0 +1,503 @@ + + + + + + megfile.fs — megfile documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for megfile.fs

+from typing import BinaryIO, Callable, Iterator, List, Optional, Tuple
+
+from megfile.fs_path import FSPath, StatResult, _make_stat, fs_cwd, fs_glob, fs_glob_stat, fs_home, fs_iglob, fs_lstat, fs_makedirs, fs_move, fs_path_join, fs_readlink, fs_rename, fs_resolve, is_fs
+from megfile.interfaces import Access, FileEntry, PathLike, StatResult
+
+__all__ = [
+    'is_fs',
+    'StatResult',
+    'fs_path_join',
+    '_make_stat',
+    'fs_readlink',
+    'fs_cwd',
+    'fs_home',
+    'fs_iglob',
+    'fs_glob',
+    'fs_glob_stat',
+    'fs_rename',
+    'fs_resolve',
+    'fs_move',
+    'fs_makedirs',
+    'fs_lstat',
+    'fs_isabs',
+    'fs_abspath',
+    'fs_access',
+    'fs_exists',
+    'fs_getmtime',
+    'fs_getsize',
+    'fs_expanduser',
+    'fs_isdir',
+    'fs_isfile',
+    'fs_listdir',
+    'fs_load_from',
+    'fs_realpath',
+    'fs_relpath',
+    'fs_remove',
+    'fs_scan',
+    'fs_scan_stat',
+    'fs_scandir',
+    'fs_stat',
+    'fs_unlink',
+    'fs_walk',
+    'fs_getmd5',
+    'fs_copy',
+    'fs_sync',
+    'fs_symlink',
+    'fs_islink',
+    'fs_ismount',
+    'fs_save_as',
+]
+
+
+
[docs]def fs_isabs(path: PathLike) -> bool: + '''Test whether a path is absolute + + :param path: Given path + :returns: True if a path is absolute, else False + ''' + return FSPath(path).is_absolute()
+ + +
[docs]def fs_abspath(path: PathLike) -> str: + '''Return the absolute path of given path + + :param path: Given path + :returns: Absolute path of given path + ''' + return FSPath(path).abspath()
+ + +
[docs]def fs_access(path: PathLike, mode: Access = Access.READ) -> bool: + ''' + Test if path has the access permission described by mode, using ``os.access`` + + :param path: Given path + :param mode: access mode + :returns: True if path has the access permission described by mode, else False + ''' + return FSPath(path).access(mode)
+ + +
[docs]def fs_exists(path: PathLike, followlinks: bool = False) -> bool: + ''' + Test if the path exists + + .. note:: + + The difference between this function and ``os.path.exists`` is that this function regards a symlink as a file. + In other words, this function is equivalent to ``os.path.lexists`` + + :param path: Given path + :param followlinks: False to regard a symlink as a file, else True + :returns: True if the path exists, else False + + ''' + return FSPath(path).exists(followlinks)
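A minimal usage sketch of the lexists-style behavior described above; the paths here are hypothetical:

    import os
    from megfile.fs import fs_exists

    os.symlink('/no/such/target', '/tmp/broken_link')  # a broken symlink
    assert fs_exists('/tmp/broken_link')                         # the link itself exists
    assert not fs_exists('/tmp/broken_link', followlinks=True)   # its target does not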
+ + +
[docs]def fs_getmtime(path: PathLike, follow_symlinks: bool = False) -> float: + ''' + Get last-modified time of the file on the given path (in Unix timestamp format). + If the path is an existent directory, return the latest modified time of all files in it. + + :param path: Given path + :returns: last-modified time + ''' + return FSPath(path).getmtime(follow_symlinks)
+ + +
[docs]def fs_getsize(path: PathLike, follow_symlinks: bool = False) -> int: + ''' + Get file size on the given file path (in bytes). + If the path is a directory, return the sum of all file sizes in it, including files in subdirectories (if they exist). + The result excludes the size of the directory itself. In other words, return 0 bytes for an empty directory path. + + :param path: Given path + :returns: File size + + ''' + return FSPath(path).getsize(follow_symlinks)
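A directory's reported size is therefore the aggregate of the files beneath it; a short sketch with a hypothetical directory:

    from megfile.fs import fs_getmtime, fs_getsize

    total_bytes = fs_getsize('/tmp/data')    # sum of all file sizes under /tmp/data, 0 if empty
    latest_mtime = fs_getmtime('/tmp/data')  # newest mtime among the contained files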
+ + +
[docs]def fs_expanduser(path: PathLike): + '''Expand ~ and ~user constructions. If user or $HOME is unknown, + do nothing. + ''' + return FSPath(path).expanduser()
+ + +
[docs]def fs_isdir(path: PathLike, followlinks: bool = False) -> bool: + ''' + Test if a path is a directory + + .. note:: + + The difference between this function and ``os.path.isdir`` is that this function regards a symlink as a file + + :param path: Given path + :param followlinks: False to regard a symlink as a file, else True + :returns: True if the path is a directory, else False + + ''' + return FSPath(path).is_dir(followlinks)
+ + +
[docs]def fs_isfile(path: PathLike, followlinks: bool = False) -> bool: + ''' + Test if a path is a file + + .. note:: + + The difference between this function and ``os.path.isfile`` is that this function regards a symlink as a file + + :param path: Given path + :param followlinks: False to regard a symlink as a file, else True + :returns: True if the path is a file, else False + + ''' + return FSPath(path).is_file(followlinks)
+ + +
[docs]def fs_listdir(path: PathLike) -> List[str]: + ''' + Get all contents of given fs path. The result is in ascending alphabetical order. + + :param path: Given path + :returns: All contents in the path, in ascending alphabetical order + ''' + return FSPath(path).listdir()
+ + +
[docs]def fs_load_from(path: PathLike) -> BinaryIO: + '''Read all content on specified path and write into memory + + User should close the BinaryIO manually + + :param path: Given path + :returns: Binary stream + ''' + return FSPath(path).load()
+ + +
[docs]def fs_realpath(path: PathLike) -> str: + '''Return the real path of given path + + :param path: Given path + :returns: Real path of given path + ''' + return FSPath(path).realpath()
+ + +
[docs]def fs_relpath(path: PathLike, start: Optional[str] = None) -> str: + '''Return the relative path of given path + + :param path: Given path + :param start: Given start directory + :returns: Relative path from start + ''' + return FSPath(path).relpath(start)
+ + +
[docs]def fs_remove(path: PathLike, missing_ok: bool = False) -> None: + ''' + Remove the file or directory on fs + + :param path: Given path + :param missing_ok: if False and target file/directory not exists, raise FileNotFoundError + ''' + return FSPath(path).remove(missing_ok)
+ + +
[docs]def fs_scan(path: PathLike, missing_ok: bool = True, + followlinks: bool = False) -> Iterator[str]: + ''' + Iteratively traverse only files in given directory, in alphabetical order. + Every iteration on generator yields a path string. + + If path is a file path, yields the file only + If path is a non-existent path, return an empty generator + + :param path: Given path + :param missing_ok: If False and there's no file in the directory, raise FileNotFoundError + :returns: A file path generator + ''' + return FSPath(path).scan(missing_ok, followlinks)
+ + +
[docs]def fs_scan_stat( + path: PathLike, missing_ok: bool = True, + followlinks: bool = False) -> Iterator[FileEntry]: + ''' + Iteratively traverse only files in given directory, in alphabetical order. + Every iteration on generator yields a tuple of path string and file stat + + :param path: Given path + :param missing_ok: If False and there's no file in the directory, raise FileNotFoundError + :returns: A file path generator + ''' + return FSPath(path).scan_stat(missing_ok, followlinks)
+ + +
[docs]def fs_scandir(path: PathLike) -> Iterator[FileEntry]: + ''' + Get all contents of the given directory path. + + :param path: Given path + :returns: An iterator over all entries under the given path + ''' + return FSPath(path).scandir()
+ + +
[docs]def fs_stat(path: PathLike, follow_symlinks=True) -> StatResult: + ''' + Get StatResult of file on fs, including file size and mtime, referring to fs_getsize and fs_getmtime + + :param path: Given path + :returns: StatResult + ''' + return FSPath(path).stat(follow_symlinks)
+ + + + + +
[docs]def fs_walk(path: PathLike, followlinks: bool = False + ) -> Iterator[Tuple[str, List[str], List[str]]]: + ''' + Generate the file names in a directory tree by walking the tree top-down. + For each directory in the tree rooted at directory path (including path itself), + it yields a 3-tuple (root, dirs, files). + + root: a string of the current path + dirs: name list of subdirectories (excluding '.' and '..' if they exist) in 'root', sorted in ascending alphabetical order + files: name list of non-directory files (a link is regarded as a file) in 'root', sorted in ascending alphabetical order + + If the path does not exist, or is a file (a link is regarded as a file), return an empty generator + + .. note:: + + Be aware that setting ``followlinks`` to True can lead to infinite recursion if a link points to a parent directory of itself. fs_walk() does not keep track of the directories it has already visited. + + :param path: Given path + :param followlinks: False to regard a symlink as a file, else True + :returns: A 3-tuple generator + ''' + return FSPath(path).walk(followlinks)
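A short sketch of the top-down traversal described above, over a hypothetical directory:

    import os
    from megfile.fs import fs_walk

    for root, dirs, files in fs_walk('/tmp/project'):  # dirs and files arrive sorted
        for name in files:
            print(os.path.join(root, name))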
+ + +
[docs]def fs_getmd5( + path: PathLike, recalculate: bool = False, followlinks: bool = True): + ''' + Calculate the md5 value of the file + + :param path: Given path + :param recalculate: Ignore this parameter, just for compatibility + :param followlinks: Ignore this parameter, just for compatibility + :returns: md5 of file + ''' + return FSPath(path).md5(recalculate, followlinks)
+ + +
[docs]def fs_copy( + src_path: PathLike, + dst_path: PathLike, + callback: Optional[Callable[[int], None]] = None, + followlinks: bool = False, + overwrite: bool = True): + ''' File copy on file system + Copy content (excluding metadata) of the file on `src_path` to `dst_path`. `dst_path` must be a complete file name + + .. note :: + + The differences between this function and shutil.copyfile are: + + 1. If the parent directory of dst_path doesn't exist, create it + + 2. Allow a callback function, None by default. callback: Optional[Callable[[int], None]]; + + the int argument is the size (in bytes) of the data written, passed periodically + + 3. This function is not thread-safe + + :param src_path: Given path + :param dst_path: Target file path + :param callback: Called periodically during copy; the input parameter is the data size (in bytes) copied since the last call + :param followlinks: False to regard a symlink as a file, else True + :param overwrite: whether or not to overwrite the file when it exists, default is True + ''' + return FSPath(src_path).copy(dst_path, callback, followlinks, overwrite)
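A sketch of the callback contract: the callback receives the number of bytes written since its previous invocation (paths hypothetical):

    from megfile.fs import fs_copy

    copied = 0

    def report(nbytes: int) -> None:
        # invoked periodically with the bytes written since the last call
        global copied
        copied += nbytes

    fs_copy('/tmp/src.bin', '/tmp/out/dst.bin', callback=report)  # missing parent dirs are created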
+ + +
[docs]def fs_sync( + src_path: PathLike, + dst_path: PathLike, + followlinks: bool = False, + force: bool = False, + overwrite: bool = True) -> None: + '''Sync file or directory from src_path to dst_path, skipping files already identical at the destination unless forced. + + :param src_path: Given path + :param dst_path: Target file path + :param followlinks: False to regard a symlink as a file, else True + :param force: Sync files forcibly, without skipping identical files; takes priority over 'overwrite', default is False + :param overwrite: whether or not to overwrite the file when it exists, default is True + ''' + return FSPath(src_path).sync(dst_path, followlinks, force, overwrite)
+ + + + + + + + +
[docs]def fs_ismount(path: PathLike) -> bool: + '''Test whether a path is a mount point + + :param path: Given path + :returns: True if a path is a mount point, else False + ''' + return FSPath(path).is_mount()
+ + +
[docs]def fs_save_as(file_object: BinaryIO, path: PathLike): + '''Write the opened binary stream to path + If parent directory of path doesn't exist, it will be created. + + :param path: Given path + :param file_object: stream to be read + ''' + return FSPath(path).save(file_object)
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/megfile/fs_path.html b/_modules/megfile/fs_path.html new file mode 100644 index 00000000..785f7d85 --- /dev/null +++ b/_modules/megfile/fs_path.html @@ -0,0 +1,1126 @@ + + + + + + megfile.fs_path — megfile documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for megfile.fs_path

+import hashlib
+import io
+import os
+import pathlib
+import shutil
+from stat import S_ISDIR as stat_isdir
+from stat import S_ISLNK as stat_islnk
+from typing import IO, AnyStr, BinaryIO, Callable, Iterator, List, Optional, Tuple, Union
+
+from megfile.errors import _create_missing_ok_generator
+from megfile.interfaces import Access, ContextIterator, FileEntry, PathLike, StatResult
+from megfile.lib.compare import is_same_file
+from megfile.lib.glob import iglob
+from megfile.lib.url import get_url_scheme
+from megfile.utils import cachedproperty, calculate_md5
+
+from .interfaces import PathLike, URIPath
+from .lib.compat import fspath
+from .lib.joinpath import path_join
+from .smart_path import SmartPath
+
+__all__ = [
+    'FSPath',
+    'is_fs',
+    'StatResult',
+    'fs_path_join',
+    '_make_stat',
+    'fs_readlink',
+    'fs_cwd',
+    'fs_home',
+    'fs_iglob',
+    'fs_glob',
+    'fs_glob_stat',
+    'fs_rename',
+    'fs_resolve',
+    'fs_move',
+    'fs_makedirs',
+    'fs_lstat',
+]
+
+
+def _make_stat(stat: os.stat_result) -> StatResult:
+    return StatResult(
+        size=stat.st_size,
+        ctime=stat.st_ctime,
+        mtime=stat.st_mtime,
+        isdir=stat_isdir(stat.st_mode),
+        islnk=stat_islnk(stat.st_mode),
+        extra=stat,
+    )
+
+
+
[docs]def is_fs(path: Union["PathLike", int]) -> bool: + '''Test if a path is an fs path + + :param path: Path to be tested + :returns: True if a path is an fs path, else False + ''' + if isinstance(path, int): + return True + path = fspath(path) + scheme = get_url_scheme(path) + return scheme == '' or scheme == 'file'
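The scheme-based dispatch above implies, for example:

    from megfile.fs import is_fs

    assert is_fs('/data/test')           # no scheme: treated as a local fs path
    assert is_fs('file:///data/test')    # explicit 'file' scheme
    assert not is_fs('s3://bucket/key')  # any other scheme is not an fs path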
+ + +
[docs]def fs_path_join(path: PathLike, *other_paths: PathLike) -> str: + return path_join(fspath(path), *map(fspath, other_paths))
+ + + + + +
[docs]def fs_cwd() -> str: + '''Return current working directory + + :returns: Current working directory + ''' + return os.getcwd()
+ + +
[docs]def fs_home(): + '''Return the home directory + + :returns: Home directory path + ''' + return os.path.expanduser('~')
+ + +
[docs]def fs_iglob(path: PathLike, recursive: bool = True, + missing_ok: bool = True) -> Iterator[str]: + '''Return a path iterator in ascending alphabetical order, in which each path matches the glob pattern + + 1. If no path matches, return an empty iterator + Notice: ``glob.glob`` in the standard library returns ['a/'] instead of an empty list when the pathname is like `a/**`, recursive is True and directory 'a' doesn't exist. fs_glob behaves like ``glob.glob`` in the standard library under such circumstances. + 2. There is no guarantee that each path in the result is unique, which means: + Assume there exists a path `/a/b/c/b/d.txt`; + using a path pattern like `/**/b/**/*.txt` to glob, the path above will be returned twice + 3. `**` will match any matched file, directory, symlink and '' by default, when recursive is `True` + 4. fs_glob returns the same as glob.glob(pathname, recursive=True), in ascending alphabetical order. + 5. Hidden files (filename starts with '.') will not be found in the result + + :param recursive: If False, `**` will not search directories recursively + :param missing_ok: If False and the target path doesn't match any file, raise FileNotFoundError + :returns: An iterator of paths matching `pathname` + ''' + for path in _create_missing_ok_generator( + iglob(fspath(path), recursive=recursive), missing_ok, + FileNotFoundError('No match any file: %r' % path)): + yield path
+ + +
[docs]def fs_glob(path: PathLike, recursive: bool = True, + missing_ok: bool = True) -> List[str]: + '''Return a path list in ascending alphabetical order, in which each path matches the glob pattern + + 1. If no path matches, return an empty list + Notice: ``glob.glob`` in the standard library returns ['a/'] instead of an empty list when the pathname is like `a/**`, recursive is True and directory 'a' doesn't exist. fs_glob behaves like ``glob.glob`` in the standard library under such circumstances. + 2. There is no guarantee that each path in the result is unique, which means: + Assume there exists a path `/a/b/c/b/d.txt`; + using a path pattern like `/**/b/**/*.txt` to glob, the path above will be returned twice + 3. `**` will match any matched file, directory, symlink and '' by default, when recursive is `True` + 4. fs_glob returns the same as glob.glob(pathname, recursive=True), in ascending alphabetical order. + 5. Hidden files (filename starts with '.') will not be found in the result + + :param recursive: If False, `**` will not search directories recursively + :param missing_ok: If False and the target path doesn't match any file, raise FileNotFoundError + :returns: A list of paths matching `pathname` + ''' + return list(fs_iglob(path=path, recursive=recursive, missing_ok=missing_ok))
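A usage sketch of the glob rules above (the pattern is hypothetical); note that overlapping `**` segments may yield the same path more than once:

    from megfile.fs import fs_glob

    for path in fs_glob('/tmp/project/**/*.txt'):  # recursive by default, hidden files skipped
        print(path)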
+ + +
[docs]def fs_glob_stat( + path: PathLike, recursive: bool = True, + missing_ok: bool = True) -> Iterator[FileEntry]: + '''Return an iterator of tuples of path and file stat, in ascending alphabetical order, in which each path matches the glob pattern + + 1. If no path matches, return an empty iterator + Notice: ``glob.glob`` in the standard library returns ['a/'] instead of an empty list when the pathname is like `a/**`, recursive is True and directory 'a' doesn't exist. fs_glob behaves like ``glob.glob`` in the standard library under such circumstances. + 2. There is no guarantee that each path in the result is unique, which means: + Assume there exists a path `/a/b/c/b/d.txt`; + using a path pattern like `/**/b/**/*.txt` to glob, the path above will be returned twice + 3. `**` will match any matched file, directory, symlink and '' by default, when recursive is `True` + 4. fs_glob returns the same as glob.glob(pathname, recursive=True), in ascending alphabetical order. + 5. Hidden files (filename starts with '.') will not be found in the result + + :param recursive: If False, `**` will not search directories recursively + :param missing_ok: If False and the target path doesn't match any file, raise FileNotFoundError + :returns: An iterator of tuples of path and file stat, in which paths match `pathname` + ''' + for path in fs_iglob(path=path, recursive=recursive, missing_ok=missing_ok): + yield FileEntry( + os.path.basename(path), path, _make_stat(os.lstat(path)))
+ + +def _fs_rename_file( + src_path: PathLike, dst_path: PathLike, overwrite: bool = True) -> None: + ''' + rename file on fs + + :param src_path: Given path + :param dst_path: Given destination path + :param overwrite: whether or not overwrite file when exists + ''' + src_path, dst_path = fspath(src_path), fspath(dst_path) + + if not overwrite and os.path.exists(dst_path): + return + + dst_dir = os.path.dirname(dst_path) + if dst_dir and dst_dir != ".": + os.makedirs(dst_dir, exist_ok=True) + shutil.move(src_path, dst_path) + + +
[docs]def fs_rename( + src_path: PathLike, dst_path: PathLike, overwrite: bool = True) -> None: + ''' + rename file on fs + + :param src_path: Given path + :param dst_path: Given destination path + :param overwrite: whether or not overwrite file when exists + ''' + src_path, dst_path = fspath(src_path), fspath(dst_path) + if os.path.isfile(src_path): + return _fs_rename_file(src_path, dst_path, overwrite) + else: + os.makedirs(dst_path, exist_ok=True) + + with os.scandir(src_path) as entries: + for file_entry in entries: + src_file_path = file_entry.path + dst_file_path = dst_path + relative_path = os.path.relpath(src_file_path, start=src_path) + if relative_path and relative_path != '.': + dst_file_path = os.path.join(dst_file_path, relative_path) + if os.path.exists(dst_file_path) and file_entry.is_dir(): + fs_rename(src_file_path, dst_file_path, overwrite) + else: + _fs_rename_file(src_file_path, dst_file_path, overwrite) + + if os.path.isdir(src_path): + shutil.rmtree(src_path) + else: + os.remove(src_path)
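As the branch on os.path.isfile shows, renaming a directory merges it into an existing destination tree entry by entry and removes the source afterwards; a sketch with hypothetical paths:

    from megfile.fs import fs_rename

    # With overwrite=False, files already present under /tmp/dst_dir are kept;
    # everything else is moved over, then /tmp/src_dir is deleted.
    fs_rename('/tmp/src_dir', '/tmp/dst_dir', overwrite=False)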
+ + +
[docs]def fs_move( + src_path: PathLike, dst_path: PathLike, overwrite: bool = True) -> None: + ''' + move file on fs + + :param src_path: Given path + :param dst_path: Given destination path + :param overwrite: whether or not to overwrite the file when it exists + ''' + return fs_rename(src_path, dst_path, overwrite)
+ + +
[docs]def fs_resolve(path: PathLike) -> str: + '''Equal to fs_realpath, return the real path of given path + + :param path: Given path + :returns: Real path of given path + ''' + return FSPath(path).realpath()
+ + +
[docs]def fs_makedirs(path: PathLike, exist_ok: bool = False): + ''' + make a directory on fs, including parent directory + + If there exists a file on the path, raise FileExistsError + + :param path: Given path + :param exist_ok: If False and target directory exists, raise FileExistsError + :raises: FileExistsError + ''' + return FSPath(path).mkdir(parents=True, exist_ok=exist_ok)
+ + +
[docs]def fs_lstat(path: PathLike) -> StatResult: + ''' + Like Path.stat() but, if the path points to a symbolic link, return the symbolic link’s information rather than its target’s. + + :param path: Given path + :returns: StatResult + ''' + return FSPath(path).lstat()
+ + +
[docs]@SmartPath.register +class FSPath(URIPath): + """file protocol + e.g. file:///data/test/ or /data/test + """ + + protocol = "file" + + def __init__(self, path: Union["PathLike", int], *other_paths: "PathLike"): + if not isinstance(path, int): + if len(other_paths) > 0: + path = self.from_path(path).joinpath(*other_paths) + path = str(path) + self.path = path + + def __fspath__(self) -> str: + return os.path.normpath(self.path_without_protocol) + +
[docs] @cachedproperty + def root(self) -> str: + return pathlib.Path(self.path_without_protocol).root
+ +
[docs] @cachedproperty + def anchor(self) -> str: + return pathlib.Path(self.path_without_protocol).anchor
+ +
[docs] @cachedproperty + def drive(self) -> str: + return pathlib.Path(self.path_without_protocol).drive
+ +
[docs] @classmethod + def from_uri(cls, path: str) -> "FSPath": + return cls.from_path(path)
+ + @property + def path_with_protocol(self) -> Union[str, int]: + if isinstance(self.path, int): + return self.path + protocol_prefix = self.protocol + "://" + if self.path.startswith(protocol_prefix): + return self.path + return protocol_prefix + self.path + +
[docs] def is_absolute(self) -> bool: + '''Test whether a path is absolute + + :returns: True if a path is absolute, else False + ''' + return os.path.isabs(self.path_without_protocol)
+ +
[docs] def abspath(self) -> str: + '''Return the absolute path of given path + + :returns: Absolute path of given path + ''' + return fspath(os.path.abspath(self.path_without_protocol))
+ +
[docs] def access(self, mode: Access = Access.READ) -> bool: + ''' + Test if path has the access permission described by mode, using ``os.access`` + + :param mode: access mode + :returns: True if path has the access permission described by mode, else False + ''' + if not isinstance(mode, Access): + raise TypeError( + 'Unsupported mode: {} -- Mode should use one of the enums belonging to: {}' + .format(mode, ', '.join([str(a) for a in Access]))) + if mode == Access.READ: + return os.access(self.path_without_protocol, os.R_OK) + if mode == Access.WRITE: + return os.access(self.path_without_protocol, os.W_OK)
+ +
[docs] def exists(self, followlinks: bool = False) -> bool: + ''' + Test if the path exists + + .. note:: + + The difference between this function and ``os.path.exists`` is that this function regards a symlink as a file. + In other words, this function is equivalent to ``os.path.lexists`` + + :param followlinks: False to regard a symlink as a file, else True + :returns: True if the path exists, else False + + ''' + if followlinks: + return os.path.exists(self.path_without_protocol) + return os.path.lexists(self.path_without_protocol)
+ +
[docs] def getmtime(self, follow_symlinks: bool = False) -> float: + ''' + Get last-modified time of the file on the given path (in Unix timestamp format). + If the path is an existent directory, return the latest modified time of all files in it. + + :returns: last-modified time + ''' + return self.stat(follow_symlinks=follow_symlinks).mtime
+ +
[docs] def getsize(self, follow_symlinks: bool = False) -> int: + ''' + Get file size on the given file path (in bytes). + If the path is a directory, return the sum of all file sizes in it, including files in subdirectories (if they exist). + The result excludes the size of the directory itself. In other words, return 0 bytes for an empty directory path. + + :returns: File size + + ''' + return self.stat(follow_symlinks=follow_symlinks).size
+ +
[docs] def glob(self, pattern, recursive: bool = True, + missing_ok: bool = True) -> List['FSPath']: + '''Return a path list in ascending alphabetical order, in which each path matches the glob pattern + + 1. If no path matches, return an empty list + Notice: ``glob.glob`` in the standard library returns ['a/'] instead of an empty list when the pathname is like `a/**`, recursive is True and directory 'a' doesn't exist. fs_glob behaves like ``glob.glob`` in the standard library under such circumstances. + 2. There is no guarantee that each path in the result is unique, which means: + Assume there exists a path `/a/b/c/b/d.txt`; + using a path pattern like `/**/b/**/*.txt` to glob, the path above will be returned twice + 3. `**` will match any matched file, directory, symlink and '' by default, when recursive is `True` + 4. fs_glob returns the same as glob.glob(pathname, recursive=True), in ascending alphabetical order. + 5. Hidden files (filename starts with '.') will not be found in the result + + :param pattern: Glob the given relative pattern in the directory represented by this path + :param recursive: If False, `**` will not search directories recursively + :param missing_ok: If False and the target path doesn't match any file, raise FileNotFoundError + :returns: A list of paths matching `pathname` + ''' + return list( + self.iglob( + pattern=pattern, recursive=recursive, missing_ok=missing_ok))
+ +
[docs] def glob_stat( + self, pattern, recursive: bool = True, + missing_ok: bool = True) -> Iterator[FileEntry]: + '''Return an iterator of tuples of path and file stat, in ascending alphabetical order, in which each path matches the glob pattern + + 1. If no path matches, return an empty iterator + Notice: ``glob.glob`` in the standard library returns ['a/'] instead of an empty list when the pathname is like `a/**`, recursive is True and directory 'a' doesn't exist. fs_glob behaves like ``glob.glob`` in the standard library under such circumstances. + 2. There is no guarantee that each path in the result is unique, which means: + Assume there exists a path `/a/b/c/b/d.txt`; + using a path pattern like `/**/b/**/*.txt` to glob, the path above will be returned twice + 3. `**` will match any matched file, directory, symlink and '' by default, when recursive is `True` + 4. fs_glob returns the same as glob.glob(pathname, recursive=True), in ascending alphabetical order. + 5. Hidden files (filename starts with '.') will not be found in the result + + :param pattern: Glob the given relative pattern in the directory represented by this path + :param recursive: If False, `**` will not search directories recursively + :param missing_ok: If False and the target path doesn't match any file, raise FileNotFoundError + :returns: An iterator of tuples of path and file stat, in which paths match `pathname` + ''' + for path_obj in self.iglob(pattern=pattern, recursive=recursive, + missing_ok=missing_ok): + yield FileEntry( + path_obj.name, path_obj.path, + _make_stat(os.lstat(path_obj.path)))
+ +
[docs] def expanduser(self): + '''Expand ~ and ~user constructions. If user or $HOME is unknown, + do nothing. + ''' + return os.path.expanduser(self.path_without_protocol)
+ +
[docs] def iglob(self, pattern, recursive: bool = True, + missing_ok: bool = True) -> Iterator['FSPath']: + '''Return a path iterator in ascending alphabetical order, in which each path matches the glob pattern + + 1. If no path matches, return an empty iterator + Notice: ``glob.glob`` in the standard library returns ['a/'] instead of an empty list when the pathname is like `a/**`, recursive is True and directory 'a' doesn't exist. fs_glob behaves like ``glob.glob`` in the standard library under such circumstances. + 2. There is no guarantee that each path in the result is unique, which means: + Assume there exists a path `/a/b/c/b/d.txt`; + using a path pattern like `/**/b/**/*.txt` to glob, the path above will be returned twice + 3. `**` will match any matched file, directory, symlink and '' by default, when recursive is `True` + 4. fs_glob returns the same as glob.glob(pathname, recursive=True), in ascending alphabetical order. + 5. Hidden files (filename starts with '.') will not be found in the result + + :param pattern: Glob the given relative pattern in the directory represented by this path + :param recursive: If False, `**` will not search directories recursively + :param missing_ok: If False and the target path doesn't match any file, raise FileNotFoundError + :returns: An iterator of paths matching `pathname` + ''' + glob_path = self.path_without_protocol + if pattern: + glob_path = self.joinpath(pattern).path_without_protocol + for path in fs_iglob(glob_path, recursive=recursive, + missing_ok=missing_ok): + yield self.from_path(path)
+ +
[docs] def is_dir(self, followlinks: bool = False) -> bool: + ''' + Test if a path is a directory + + .. note:: + + The difference between this function and ``os.path.isdir`` is that this function regards a symlink as a file + + :param followlinks: False to regard a symlink as a file, else True + :returns: True if the path is a directory, else False + + ''' + if os.path.islink(self.path_without_protocol) and not followlinks: + return False + return os.path.isdir(self.path_without_protocol)
+ +
[docs] def is_file(self, followlinks: bool = False) -> bool: + ''' + Test if a path is a file + + .. note:: + + The difference between this function and ``os.path.isfile`` is that this function regards a symlink as a file + + :param followlinks: False to regard a symlink as a file, else True + :returns: True if the path is a file, else False + + ''' + if os.path.islink(self.path_without_protocol) and not followlinks: + return True + return os.path.isfile(self.path_without_protocol)
+ +
[docs] def listdir(self) -> List[str]: + ''' + Get all contents of given fs path. The result is in ascending alphabetical order. + + :returns: All contents in the path, in ascending alphabetical order + ''' + return sorted(os.listdir(self.path_without_protocol))
+ +
[docs] def iterdir(self) -> Iterator['FSPath']: + ''' + Get all contents of given fs path. The result is in ascending alphabetical order. + + :returns: All contents in the path, in ascending alphabetical order + ''' + for path in self.listdir(): + yield self.joinpath(path) # type: ignore
+ +
[docs] def load(self) -> BinaryIO: + '''Read all content on specified path and write into memory + + User should close the BinaryIO manually + + :returns: Binary stream + ''' + with open(self.path_without_protocol, 'rb') as f: + data = f.read() + return io.BytesIO(data)
+ +
[docs] def mkdir(self, mode=0o777, parents: bool = False, exist_ok: bool = False): + ''' + make a directory on fs, including parent directory + + If there exists a file on the path, raise FileExistsError + + :param mode: If mode is given, it is combined with the process’ umask value to determine the file mode and access flags. + :param parents: If parents is true, any missing parents of this path are created as needed; + If parents is false (the default), a missing parent raises FileNotFoundError. + :param exist_ok: If False and target directory exists, raise FileExistsError + :raises: FileExistsError + ''' + if exist_ok and self.path_without_protocol == '': + return + return pathlib.Path(self.path_without_protocol).mkdir( + mode=mode, parents=parents, exist_ok=exist_ok)
+ +
[docs] def realpath(self) -> str: + '''Return the real path of given path + + :returns: Real path of given path + ''' + return fspath(os.path.realpath(self.path_without_protocol))
+ +
[docs] def relpath(self, start: Optional[str] = None) -> str: + '''Return the relative path of given path + + :param start: Given start directory + :returns: Relative path from start + ''' + return fspath(os.path.relpath(self.path_without_protocol, start=start))
+ +
[docs] def rename(self, dst_path: PathLike, overwrite: bool = True) -> 'FSPath': + ''' + rename file on fs + + :param dst_path: Given destination path + :param overwrite: whether or not overwrite file when exists + ''' + fs_rename(self.path_without_protocol, dst_path, overwrite) + return self.from_path(dst_path)
+ +
[docs] def replace(self, dst_path: PathLike, overwrite: bool = True) -> 'FSPath': + ''' + move file on fs + + :param dst_path: Given destination path + :param overwrite: whether or not overwrite file when exists + ''' + return self.rename(dst_path=dst_path, overwrite=overwrite)
+ +
[docs] def remove(self, missing_ok: bool = False) -> None: + ''' + Remove the file or directory on fs + + :param missing_ok: if False and target file/directory not exists, raise FileNotFoundError + ''' + if missing_ok and not self.exists(): + return + if self.is_dir(): + shutil.rmtree(self.path_without_protocol) + else: + os.remove(self.path_without_protocol)
+ + def _scan(self, missing_ok: bool = True, + followlinks: bool = False) -> Iterator[str]: + if self.is_file(followlinks=followlinks): + path = fspath(self.path_without_protocol) + yield path + + for root, _, files in self.walk(followlinks=followlinks): + for filename in files: + yield os.path.join(root, filename) + +
[docs] def scan(self, missing_ok: bool = True, + followlinks: bool = False) -> Iterator[str]: + ''' + Iteratively traverse only files in given directory, in alphabetical order. + Every iteration on generator yields a path string. + + If path is a file path, yields the file only + If path is a non-existent path, return an empty generator + + :param missing_ok: If False and there's no file in the directory, raise FileNotFoundError + :returns: A file path generator + ''' + return _create_missing_ok_generator( + self._scan(followlinks=followlinks), missing_ok, + FileNotFoundError('No match any file in: %r' % self.path))
+ +
[docs] def scan_stat(self, missing_ok: bool = True, + followlinks: bool = False) -> Iterator[FileEntry]: + ''' + Iteratively traverse only files in given directory, in alphabetical order. + Every iteration on generator yields a tuple of path string and file stat + + :param missing_ok: If False and there's no file in the directory, raise FileNotFoundError + :returns: A file path generator + ''' + file_not_found = True + for path in self._scan(followlinks=followlinks): + yield FileEntry( + os.path.basename(path), path, _make_stat(os.lstat(path))) + file_not_found = False + if file_not_found: + if missing_ok: + return + raise FileNotFoundError( + 'No match any file in: %r' % self.path_without_protocol)
+ +
[docs] def scandir(self) -> Iterator[FileEntry]: + ''' + Get all contents of the given directory path. + + :returns: An iterator over all entries under the given path + ''' + + def create_generator(): + with os.scandir(self.path_without_protocol) as entries: + for entry in entries: + yield FileEntry( + entry.name, entry.path, + _make_stat(entry.stat(follow_symlinks=False))) + + return ContextIterator(create_generator())
+ +
[docs] def stat(self, follow_symlinks=True) -> StatResult: + ''' + Get StatResult of file on fs, including file size and mtime, referring to fs_getsize and fs_getmtime + + :returns: StatResult + ''' + if follow_symlinks: + result = _make_stat(os.stat(self.path_without_protocol)) + else: + result = _make_stat(os.lstat(self.path_without_protocol)) + + if result.islnk or not result.isdir: + return result + + size = 0 + ctime = result.ctime + mtime = result.mtime + for root, _, files in os.walk(self.path_without_protocol): + for filename in files: + canonical_path = os.path.join(root, filename) + stat = os.lstat(canonical_path) + size += stat.st_size + if ctime > stat.st_ctime: + ctime = stat.st_ctime + if mtime < stat.st_mtime: + mtime = stat.st_mtime + return result._replace(size=size, ctime=ctime, mtime=mtime)
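For a directory, the loop above aggregates over the whole tree: size sums every contained file, ctime keeps the oldest and mtime the newest timestamp found; a sketch with a hypothetical directory:

    from megfile.fs import fs_stat

    st = fs_stat('/tmp/data')
    print(st.isdir, st.size, st.mtime)  # size and mtime aggregated over the tree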
+ + + +
[docs] def walk(self, followlinks: bool = False + ) -> Iterator[Tuple[str, List[str], List[str]]]: + ''' + Generate the file names in a directory tree by walking the tree top-down. + For each directory in the tree rooted at directory path (including path itself), + it yields a 3-tuple (root, dirs, files). + + root: a string of the current path + dirs: name list of subdirectories (excluding '.' and '..' if they exist) in 'root', sorted in ascending alphabetical order + files: name list of non-directory files (a link is regarded as a file) in 'root', sorted in ascending alphabetical order + + If the path does not exist, or is a file (a link is regarded as a file), return an empty generator + + .. note:: + + Be aware that setting ``followlinks`` to True can lead to infinite recursion if a link points to a parent directory of itself. fs_walk() does not keep track of the directories it has already visited. + + :param followlinks: False to regard a symlink as a file, else True + :returns: A 3-tuple generator + ''' + if not self.exists(followlinks=followlinks): + return + + if self.is_file(followlinks=followlinks): + return + + path = os.path.normpath(self.path_without_protocol) + + stack = [path] + while stack: + root = stack.pop() + dirs, files = [], [] + for entry in os.scandir(root): + name = fspath(entry.name) + path = entry.path + if FSPath(path).is_file(followlinks=followlinks): + files.append(name) + elif FSPath(path).is_dir(followlinks=followlinks): + dirs.append(name) + + dirs = sorted(dirs) + files = sorted(files) + + yield root, dirs, files + + stack.extend( + (os.path.join(root, directory) for directory in reversed(dirs)))
+ +
[docs] def resolve(self, strict=False) -> 'FSPath': + '''Equal to fs_realpath + + :return: Return the canonical path of the specified filename, eliminating any symbolic links encountered in the path. + :rtype: FSPath + ''' + return self.from_path( + fspath( + pathlib.Path( + self.path_without_protocol).resolve(strict=strict)))
+ +
[docs] def md5(self, recalculate: bool = False, followlinks: bool = True): + ''' + Calculate the md5 value of the file + + :param recalculate: Ignore this parameter, just for compatibility + :param followlinks: Ignore this parameter, just for compatibility + :returns: md5 of file + ''' + if os.path.isdir(self.path_without_protocol): + hash_md5 = hashlib.md5() # nosec + for file_name in self.listdir(): + chunk = FSPath(self.path_without_protocol, file_name).md5( + recalculate=recalculate, followlinks=followlinks).encode() + hash_md5.update(chunk) + return hash_md5.hexdigest() + with open(self.path_without_protocol, 'rb') as src: # type: ignore + md5 = calculate_md5(src) + return md5
+ + def _copyfile( + self, + dst_path: PathLike, + callback: Optional[Callable[[int], None]] = None, + followlinks: bool = False): + + shutil.copy2( + self.path_without_protocol, dst_path, follow_symlinks=followlinks) + + # After python3.8, patch `shutil.copyfile` is not a good way, because `shutil.copy2` will not call it in some cases. + if callback: + callback(self.stat(follow_symlinks=followlinks).size) + +
[docs] def copy( + self, + dst_path: PathLike, + callback: Optional[Callable[[int], None]] = None, + followlinks: bool = False, + overwrite: bool = True): + ''' File copy on file system + Copy content (excluding metadata) of the file on `src_path` to `dst_path`. `dst_path` must be a complete file name + + .. note :: + + The differences between this function and shutil.copyfile are: + + 1. If the parent directory of dst_path doesn't exist, create it + + 2. Allow a callback function, None by default. callback: Optional[Callable[[int], None]]; + + the int argument is the size (in bytes) of the data written, passed periodically + + 3. This function is not thread-safe + + :param dst_path: Target file path + :param callback: Called periodically during copy; the input parameter is the data size (in bytes) copied since the last call + :param followlinks: False to regard a symlink as a file, else True + :param overwrite: whether or not to overwrite the file when it exists, default is True + ''' + if not overwrite and os.path.exists(dst_path): + return + + try: + self._copyfile(dst_path, callback=callback, followlinks=followlinks) + except FileNotFoundError as error: + # Prevent the dst_path directory from being created when src_path does not exist + if dst_path == error.filename: + FSPath(os.path.dirname(dst_path)).mkdir( + parents=True, exist_ok=True) + self._copyfile( + dst_path, callback=callback, followlinks=followlinks) + else: + raise
+ +
[docs] def sync( + self, + dst_path: PathLike, + followlinks: bool = False, + force: bool = False, + overwrite: bool = True) -> None: + '''Sync file or directory to dst_path, skipping files already identical at the destination unless forced. + + :param dst_path: Target file path + :param followlinks: False to regard a symlink as a file, else True + :param force: Sync files forcibly, without skipping identical files; takes priority over 'overwrite', default is False + :param overwrite: whether or not to overwrite the file when it exists, default is True + ''' + if self.is_dir(followlinks=followlinks): + + def ignore_same_file(src: str, names: List[str]) -> List[str]: + ignore_files = [] + for name in names: + dst_obj = self.from_path(dst_path).joinpath(name) + if force: + pass + elif not overwrite and dst_obj.exists(): + ignore_files.append(name) + elif dst_obj.exists() and is_same_file( + self.joinpath(name).stat(), dst_obj.stat(), 'copy'): + ignore_files.append(name) + return ignore_files + + shutil.copytree( + self.path_without_protocol, + dst_path, + ignore=ignore_same_file, + dirs_exist_ok=True) + else: + self.copy(dst_path, followlinks=followlinks, overwrite=overwrite)
+ + + + + + + +
[docs] def is_mount(self) -> bool: + '''Test whether a path is a mount point + + :returns: True if a path is a mount point, else False + ''' + return os.path.ismount(self.path_without_protocol)
+ +
[docs] def cwd(self) -> 'FSPath': + '''Return current working directory + + :returns: Current working directory + ''' + return self.from_path(fs_cwd())
+ +
[docs] def home(self): + '''Return the home directory + + :returns: Home directory path + ''' + return self.from_path(fs_home())
+ +
[docs] def joinpath(self, *other_paths: PathLike) -> "FSPath": + path = fspath(self) + if path == '.': + path = '' + return self.from_path(path_join(path, *map(fspath, other_paths)))
+ +
[docs] def save(self, file_object: BinaryIO): + '''Write the opened binary stream to path + If parent directory of path doesn't exist, it will be created. + + :param file_object: stream to be read + ''' + FSPath(os.path.dirname(self.path_without_protocol)).mkdir( + parents=True, exist_ok=True) + with open(self.path_without_protocol, 'wb') as output: + output.write(file_object.read())
+ +
[docs] def open( + self, + mode: str = 'r', + buffering=-1, + encoding=None, + errors=None, + newline=None, + closefd=True, + **kwargs) -> IO[AnyStr]: # pytype: disable=signature-mismatch + if not isinstance(self.path_without_protocol, int) and ('w' in mode or + 'x' in mode or + 'a' in mode): + FSPath(os.path.dirname(self.path_without_protocol)).mkdir( + parents=True, exist_ok=True) + return io.open( + self.path_without_protocol, + mode, + buffering=buffering, + encoding=encoding, + errors=errors, + newline=newline, + closefd=closefd)
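Because write modes create missing parent directories first, opening a path whose directory chain does not yet exist works; the path here is hypothetical:

    from megfile.fs_path import FSPath

    with FSPath('/tmp/new/nested/out.txt').open('w') as f:
        f.write('hello')  # /tmp/new/nested is created on demand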
+ +
[docs] @cachedproperty + def parts(self) -> Tuple[str]: + ''' + A tuple giving access to the path’s various components + ''' + return pathlib.Path(self.path_without_protocol).parts
+ +
[docs] def chmod(self, mode: int, *, follow_symlinks: bool = True): + ''' + Change the file mode and permissions, like os.chmod(). + + This method normally follows symlinks. + Some Unix flavours support changing permissions on the symlink itself; + on these platforms you may add the argument follow_symlinks=False, or use lchmod(). + ''' + return os.chmod( + path=self.path_without_protocol, + mode=mode, + follow_symlinks=follow_symlinks)
+ +
[docs] def group(self) -> str: + ''' + Return the name of the group owning the file. KeyError is raised if the file’s gid isn’t found in the system database. + ''' + return pathlib.Path(self.path_without_protocol).group()
+ +
[docs] def is_socket(self) -> bool: + ''' + Return True if the path points to a Unix socket (or a symbolic link pointing to a Unix socket), False if it points to another kind of file. + + False is also returned if the path doesn’t exist or is a broken symlink; other errors (such as permission errors) are propagated. + ''' + return pathlib.Path(self.path_without_protocol).is_socket()
+ +
[docs] def is_fifo(self) -> bool: + ''' + Return True if the path points to a FIFO (or a symbolic link pointing to a FIFO), False if it points to another kind of file. + + False is also returned if the path doesn’t exist or is a broken symlink; other errors (such as permission errors) are propagated. + ''' + return pathlib.Path(self.path_without_protocol).is_fifo()
+ +
[docs] def is_block_device(self) -> bool: + ''' + Return True if the path points to a block device (or a symbolic link pointing to a block device), False if it points to another kind of file. + + False is also returned if the path doesn’t exist or is a broken symlink; other errors (such as permission errors) are propagated. + ''' + return pathlib.Path(self.path_without_protocol).is_block_device()
+ +
[docs] def is_char_device(self) -> bool: + ''' + Return True if the path points to a character device (or a symbolic link pointing to a character device), False if it points to another kind of file. + + False is also returned if the path doesn’t exist or is a broken symlink; other errors (such as permission errors) are propagated. + ''' + return pathlib.Path(self.path_without_protocol).is_char_device()
+ +
[docs] def owner(self) -> str: + ''' + Return the name of the user owning the file. KeyError is raised if the file’s uid isn’t found in the system database. + ''' + return pathlib.Path(self.path_without_protocol).owner()
+ +
[docs] def absolute(self) -> 'FSPath': + ''' + Make the path absolute, without normalization or resolving symlinks. Returns a new path object + ''' + return self.from_path(os.path.abspath(self.path_without_protocol))
+ +
[docs] def rmdir(self): + ''' + Remove this directory. The directory must be empty. + ''' + return os.rmdir(self.path_without_protocol)
+ + + +
[docs] def utime(self, atime: Union[float, int], mtime: Union[float, int]): + """ + Set the access and modified times of the file specified by path. + + :param atime: a float or int representing the access time to be set. If it is set to None, the + access time is set to the current time. + :param mtime: a float or int representing the modified time to be set. If it is set to None, the + modified time is set to the current time. + :return: None + """ + return os.utime(self.path_without_protocol, times=(atime, mtime))
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/megfile/hdfs.html b/_modules/megfile/hdfs.html new file mode 100644 index 00000000..00ab5cef --- /dev/null +++ b/_modules/megfile/hdfs.html @@ -0,0 +1,380 @@ + + + + + + megfile.hdfs — megfile documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for megfile.hdfs

+from typing import IO, AnyStr, BinaryIO, Iterator, List, Optional, Tuple
+
+from megfile.hdfs_path import HdfsPath, hdfs_glob, hdfs_glob_stat, hdfs_iglob, hdfs_makedirs, is_hdfs
+from megfile.interfaces import FileEntry, PathLike, StatResult
+
+__all__ = [
+    'is_hdfs',
+    'hdfs_glob',
+    'hdfs_glob_stat',
+    'hdfs_iglob',
+    'hdfs_makedirs',
+    'hdfs_exists',
+    'hdfs_stat',
+    'hdfs_getmtime',
+    'hdfs_getsize',
+    'hdfs_isdir',
+    'hdfs_isfile',
+    'hdfs_listdir',
+    'hdfs_load_from',
+    'hdfs_move',
+    'hdfs_remove',
+    'hdfs_scan',
+    'hdfs_scan_stat',
+    'hdfs_scandir',
+    'hdfs_unlink',
+    'hdfs_walk',
+    'hdfs_getmd5',
+    'hdfs_save_as',
+    'hdfs_open',
+]
+
+
+
[docs]def hdfs_exists(path: PathLike, followlinks: bool = False) -> bool: + ''' + Test if path exists + + If the bucket of the path is not permitted to be read, return False + + :param path: Given path + :returns: True if path exists, else False + ''' + return HdfsPath(path).exists(followlinks)
+ + +
[docs]def hdfs_stat(path: PathLike, follow_symlinks=True) -> StatResult: + ''' + Get StatResult of path file, including file size and mtime, referring to hdfs_getsize and hdfs_getmtime + + If path is not an existent path, which means hdfs_exists(path) returns False, then raise FileNotFoundError + If attempting to get the StatResult of the complete hdfs, such as hdfs_dir_url == 'hdfs://', raise BucketNotFoundError + + :param path: Given path + :returns: StatResult + :raises: FileNotFoundError + ''' + return HdfsPath(path).stat(follow_symlinks)
+ + +
[docs]def hdfs_getmtime(path: PathLike, follow_symlinks: bool = False) -> float: + ''' + Get last-modified time of the file on the given path (in Unix timestamp format). + If the path is an existent directory, return the latest modified time of all files in it. The mtime of an empty directory is 1970-01-01 00:00:00 + + If path is not an existent path, which means hdfs_exists(path) returns False, then raise FileNotFoundError + + :param path: Given path + :returns: Last-modified time + :raises: FileNotFoundError + ''' + return HdfsPath(path).getmtime(follow_symlinks)
+ + +
[docs]def hdfs_getsize(path: PathLike, follow_symlinks: bool = False) -> int: + ''' + Get file size on the given path (in bytes). + If the path is a directory, return the sum of all file sizes in it, including files in subdirectories (if they exist). + The result excludes the size of the directory itself. In other words, return 0 bytes for an empty directory path. + + If path is not an existent path, which means hdfs_exists(path) returns False, then raise FileNotFoundError + + :param path: Given path + :returns: File size + :raises: FileNotFoundError + ''' + return HdfsPath(path).getsize(follow_symlinks)
+ + +
[docs]def hdfs_isdir(path: PathLike, followlinks: bool = False) -> bool: + ''' + Test if an hdfs url is a directory + Specific procedures are as follows: + If there exists a suffix such that ``os.path.join(path, suffix)`` is a file + If the url is an empty bucket or hdfs:// + + :param path: Given path + :param followlinks: whether followlinks is True or False, the result is the same, because hdfs symlinks do not support directories. + :returns: True if path is an hdfs directory, else False + ''' + return HdfsPath(path).is_dir(followlinks)
+ + +
[docs]def hdfs_isfile(path: PathLike, followlinks: bool = False) -> bool: + ''' + Test if a path is a file + + :param path: Given path + :returns: True if path is an hdfs file, else False + ''' + return HdfsPath(path).is_file(followlinks)
+ + +
[docs]def hdfs_listdir(path: PathLike, followlinks: bool = False) -> List[str]: + ''' + Get all contents of the given path. + + :param path: Given path + :returns: All contents that have the prefix of the given path. + :raises: FileNotFoundError, NotADirectoryError + ''' + return HdfsPath(path).listdir(followlinks)
+ + +
[docs]def hdfs_load_from(path: PathLike, followlinks: bool = False) -> BinaryIO: + '''Read all content in binary on specified path and write into memory + + User should close the BinaryIO manually + + :param path: Given path + :returns: BinaryIO + ''' + return HdfsPath(path).load(followlinks)
+ + +
[docs]def hdfs_move( + src_path: PathLike, dst_path: PathLike, overwrite: bool = True) -> None: + ''' + Move file/directory path from src_path to dst_path + + :param src_path: Given path + :param dst_path: Given destination path + ''' + return HdfsPath(src_path).move(dst_path, overwrite)
+ + +
[docs]def hdfs_remove(path: PathLike, missing_ok: bool = False) -> None: + ''' + Remove the file or directory on hdfs, `hdfs://` and `hdfs://bucket` are not permitted to remove + + :param path: Given path + :param missing_ok: if False and target file/directory not exists, raise FileNotFoundError + :raises: FileNotFoundError, UnsupportedError + ''' + return HdfsPath(path).remove(missing_ok)
+ + +
[docs]def hdfs_scan( + path: PathLike, missing_ok: bool = True, + followlinks: bool = False) -> Iterator[str]: + ''' + Iteratively traverse only files in the given hdfs directory. + Every iteration on the generator yields a path string. + + If path is a file path, yields the file only + If path is a non-existent path, return an empty generator + If path is a bucket path, return all file paths in the bucket + If path is an empty bucket, return an empty generator + If path doesn't contain any bucket, which is path == 'hdfs://', raise UnsupportedError. scan() on the complete hdfs is not supported in megfile + + :param path: Given path + :param missing_ok: If False and there's no file in the directory, raise FileNotFoundError + :raises: UnsupportedError + :returns: A file path generator + ''' + return HdfsPath(path).scan(missing_ok, followlinks)
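A sketch of iterating every file under a hypothetical prefix; hdfs_scan yields full path strings, unlike hdfs_listdir, which yields names:

    from megfile.hdfs import hdfs_scan

    for file_path in hdfs_scan('hdfs://user/logs/'):
        print(file_path)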
+ + +
[docs]def hdfs_scan_stat( + path: PathLike, missing_ok: bool = True, + followlinks: bool = False) -> Iterator[FileEntry]: + ''' + Iteratively traverse only files in given directory. + Every iteration on generator yields a tuple of path string and file stat + + :param path: Given path + :param missing_ok: If False and there's no file in the directory, raise FileNotFoundError + :raises: UnsupportedError + :returns: A file path generator + ''' + return HdfsPath(path).scan_stat(missing_ok, followlinks)
+ + +
[docs]def hdfs_scandir(path: PathLike, + followlinks: bool = False) -> Iterator[FileEntry]: + ''' + Get all contents of given path, the order of result is not guaranteed. + + :param path: Given path + :returns: All contents have prefix of path + :raises: FileNotFoundError, NotADirectoryError + ''' + return HdfsPath(path).scandir(followlinks)
+ + + + + +
[docs]def hdfs_walk(path: PathLike, followlinks: bool = False + ) -> Iterator[Tuple[str, List[str], List[str]]]: + ''' + Iteratively traverse the given hdfs directory, in top-down order. In other words, traverse the parent directory first; if subdirectories exist, traverse the subdirectories. + Every iteration on the generator yields a 3-tuple: (root, dirs, files) + + - root: Current hdfs path; + - dirs: Name list of subdirectories in current directory. + - files: Name list of files in current directory. + + If path is a file path, return an empty generator + If path is a non-existent path, return an empty generator + If path is a bucket path, the bucket will be the top directory, and will be returned at the first iteration of the generator + If path is an empty bucket, only yield one 3-tuple (note: hdfs doesn't have empty directories) + If path doesn't contain any bucket, which is path == 'hdfs://', raise UnsupportedError. walk() on the complete hdfs is not supported in megfile + + :param path: Given path + :param followlinks: whether followlinks is True or False, the result is the same, because hdfs does not support symlinks. + :returns: A 3-tuple generator + ''' + return HdfsPath(path).walk(followlinks)
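hdfs_walk mirrors the os.walk 3-tuple shape, so the usual idiom carries over; the path below is hypothetical:

    from megfile.hdfs import hdfs_walk

    for root, dirs, files in hdfs_walk('hdfs://user/data/'):
        for name in files:
            print('/'.join((root, name)))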
+ + +
[docs]def hdfs_getmd5( + path: PathLike, recalculate: bool = False, + followlinks: bool = False) -> str: + ''' + Get checksum of the file or dir. + + :param path: Given path + :param recalculate: Ignore this parameter, just for compatibility + :param followlinks: Ignore this parameter, just for compatibility + :returns: checksum + ''' + return HdfsPath(path).md5(recalculate, followlinks)
+ + +
[docs]def hdfs_save_as(file_object: BinaryIO, path: PathLike): + '''Write the opened binary stream to specified path, but the stream won't be closed + + :param path: Given path + :param file_object: Stream to be read + ''' + return HdfsPath(path).save(file_object)
+ + +
[docs]def hdfs_open( + path: PathLike, + mode: str = 'r', + *, + buffering: Optional[int] = None, + encoding: Optional[str] = None, + errors: Optional[str] = None, + **kwargs) -> IO[AnyStr]: # pytype: disable=signature-mismatch + return HdfsPath(path).open( + mode, buffering=buffering, encoding=encoding, errors=errors)
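hdfs_open carries no docstring, so a short sketch of the modes it accepts (per HdfsPath.open further below): 'r'/'rb' go through the prefetch reader, 'w'/'wb'/'a'/'ab' stream through the hdfs client, and '+' modes are rejected. The paths are hypothetical:

    from megfile.hdfs import hdfs_open

    with hdfs_open('hdfs://user/notes.txt', 'w') as f:
        f.write('hello\n')

    with hdfs_open('hdfs://user/notes.txt') as f:
        print(f.read())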
+
\ No newline at end of file diff --git a/_modules/megfile/hdfs_path.html b/_modules/megfile/hdfs_path.html new file mode 100644 index 00000000..e4ce788a --- /dev/null +++ b/_modules/megfile/hdfs_path.html @@ -0,0 +1,754 @@

Source code for megfile.hdfs_path

+import hashlib
+import io
+import os
+import sys
+from functools import lru_cache
+from typing import IO, AnyStr, BinaryIO, Iterator, List, Optional, Tuple
+
+from megfile.errors import _create_missing_ok_generator, raise_hdfs_error
+from megfile.interfaces import FileEntry, PathLike, StatResult, URIPath
+from megfile.lib.compat import fspath
+from megfile.lib.glob import FSFunc, iglob
+from megfile.lib.hdfs_prefetch_reader import HdfsPrefetchReader
+from megfile.lib.hdfs_tools import hdfs_api
+from megfile.lib.url import get_url_scheme
+from megfile.pathlike import PathLike, URIPath
+from megfile.smart_path import SmartPath
+from megfile.utils import _is_pickle, cachedproperty
+
+__all__ = [
+    'HdfsPath',
+    'is_hdfs',
+    'hdfs_glob',
+    'hdfs_glob_stat',
+    'hdfs_iglob',
+    'hdfs_makedirs',
+]
+
+HDFS_USER = "HDFS_USER"
+HDFS_URL = "HDFS_URL"
+HDFS_ROOT = "HDFS_ROOT"
+HDFS_TIMEOUT = "HDFS_TIMEOUT"
+HDFS_TOKEN = "HDFS_TOKEN"
+HDFS_CONFIG_PATH = "HDFS_CONFIG_PATH"
+MAX_RETRIES = 10
+DEFAULT_HDFS_TIMEOUT = 10
+
+
+
[docs]def is_hdfs(path: PathLike) -> bool: # pytype: disable=invalid-annotation + '''Test if a path is an hdfs path + + :param path: Path to be tested + :returns: True if a path is an hdfs path, else False + ''' + return fspath(path).startswith("hdfs://")
+ + +def get_hdfs_config(profile_name: Optional[str] = None): + env_profile = f"{profile_name.upper()}__" if profile_name else "" + config = { + 'user': os.getenv(f"{env_profile}{HDFS_USER}"), + 'url': os.getenv(f"{env_profile}{HDFS_URL}"), + 'root': os.getenv(f"{env_profile}{HDFS_ROOT}"), + 'timeout': DEFAULT_HDFS_TIMEOUT, + 'token': os.getenv(f"{env_profile}{HDFS_TOKEN}"), + } + timeout_env = f"{env_profile}{HDFS_TIMEOUT}" + if os.getenv(timeout_env): + config['timeout'] = int(os.getenv(timeout_env)) + + config_path = os.getenv(HDFS_CONFIG_PATH) or os.path.expanduser( + '~/.hdfscli.cfg') + if os.path.exists(config_path): + all_config = hdfs_api.config.Config(path=config_path) + if not profile_name: + if (all_config.has_section(all_config.global_section) and + all_config.has_option(all_config.global_section, + 'default.alias')): + profile_name = all_config.get( + all_config.global_section, 'default.alias') + for suffix in ('.alias', '_alias'): + section = '{}{}'.format(profile_name, suffix) + if all_config.has_section(section): + options = dict(all_config.items(section)) + for key, value in config.items(): + if not value and options.get(key): + config[key] = options[key] + break + + if config['url']: + return config + + raise hdfs_api.HdfsError( + 'Config error, please set environments or use "megfile config hdfs ..."' + ) + + +@lru_cache() +def get_hdfs_client(profile_name: Optional[str] = None): + if not hdfs_api: # pragma: no cover + raise ImportError( + "hdfs not found, please `pip install 'megfile[hdfs]'`") + + config = get_hdfs_config(profile_name) + if config['token']: + config.pop('user', None) + return hdfs_api.TokenClient(**config) + config.pop('token', None) + return hdfs_api.InsecureClient(**config) + + +
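get_hdfs_config resolves settings from the environment first, then from ~/.hdfscli.cfg (or HDFS_CONFIG_PATH). A sketch with hypothetical values; note the '<PROFILE>__' prefix used for named profiles, which pair with 'hdfs+<profile>://' paths as parsed in HdfsPath.__init__ below:

    import os

    os.environ['HDFS_URL'] = 'http://namenode:50070'   # hypothetical WebHDFS endpoint
    os.environ['HDFS_USER'] = 'hadoop'
    os.environ['HDFS_TIMEOUT'] = '30'

    # Named profile, addressed via hdfs+backup:// paths
    os.environ['BACKUP__HDFS_URL'] = 'http://backup-namenode:50070'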
[docs]def hdfs_glob( + path: PathLike, + recursive: bool = True, + missing_ok: bool = True, +) -> List[str]: + '''Return hdfs path list in ascending alphabetical order, in which path matches glob pattern + Notes: Only glob within a bucket. If trying to match the bucket with wildcard characters, raise UnsupportedError + + :param recursive: If False, `**` will not search directories recursively + :param missing_ok: If False and target path doesn't match any file, raise FileNotFoundError + :raises: UnsupportedError, when the bucket part contains wildcard characters + :returns: A list containing paths that match `path` + ''' + return list(hdfs_iglob(path, recursive=recursive, missing_ok=missing_ok))
+ + +
[docs]def hdfs_glob_stat( + path: PathLike, recursive: bool = True, + missing_ok: bool = True) -> Iterator[FileEntry]: + '''Return a generator containing tuples of path and file stat, in ascending alphabetical order, in which path matches glob pattern + Notes: Only glob within a bucket. If trying to match the bucket with wildcard characters, raise UnsupportedError + + :param recursive: If False, `**` will not search directories recursively + :param missing_ok: If False and target path doesn't match any file, raise FileNotFoundError + :raises: UnsupportedError, when the bucket part contains wildcard characters + :returns: A generator containing tuples of path and file stat, in which paths match `path` + ''' + return HdfsPath(path).glob_stat( + pattern="", recursive=recursive, missing_ok=missing_ok)
+ + +
[docs]def hdfs_iglob( + path: PathLike, + recursive: bool = True, + missing_ok: bool = True, +) -> Iterator[str]: + '''Return hdfs path iterator in ascending alphabetical order, in which path matches glob pattern + Notes: Only glob within a bucket. If trying to match the bucket with wildcard characters, raise UnsupportedError + + :param recursive: If False, `**` will not search directories recursively + :param missing_ok: If False and target path doesn't match any file, raise FileNotFoundError + :raises: UnsupportedError, when the bucket part contains wildcard characters + :returns: An iterator containing paths that match `path` + ''' + for path_obj in HdfsPath(path).iglob(pattern="", recursive=recursive, + missing_ok=missing_ok): + yield path_obj.path_with_protocol
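A glob sketch; the pattern is hypothetical, and with recursive=True (the default) `**` matches across directory levels:

    from megfile.hdfs import hdfs_glob, hdfs_iglob

    for path in hdfs_iglob('hdfs://user/logs/**/*.json'):
        print(path)

    csv_files = hdfs_glob('hdfs://user/*.csv')  # same matches, as a list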
+ + +
[docs]def hdfs_makedirs(path: PathLike, exist_ok: bool = False): + ''' + Create an hdfs directory, including any missing parent directories. + + :param path: Given path + :param exist_ok: If False and the target directory exists, raise FileExistsError + :raises: FileExistsError + ''' + return HdfsPath(path).mkdir(parents=True, exist_ok=exist_ok)
+ + +
[docs]@SmartPath.register +class HdfsPath(URIPath): + protocol = "hdfs" + + def __init__(self, path: PathLike, *other_paths: PathLike): + super().__init__(path, *other_paths) + protocol = get_url_scheme(self.path) + self._protocol_with_profile = self.protocol + self._profile_name = None + if protocol.startswith('hdfs+'): + self._protocol_with_profile = protocol + self._profile_name = protocol[5:] + + @property + def _client(self): + return get_hdfs_client(profile_name=self._profile_name) + +
[docs] @cachedproperty + def path_with_protocol(self) -> str: + '''Return path with protocol, like hdfs://path''' + path = self.path + protocol_prefix = self._protocol_with_profile + "://" + if path.startswith(protocol_prefix): + return path + return protocol_prefix + path.lstrip('/')
+ +
[docs] @cachedproperty + def path_without_protocol(self) -> str: + '''Return path without protocol, example: if path is hdfs://path, return path''' + path = self.path + protocol_prefix = self._protocol_with_profile + "://" + if path.startswith(protocol_prefix): + path = path[len(protocol_prefix):] + return path
+ +
[docs] @cachedproperty + def parts(self) -> Tuple[str]: + '''A tuple giving access to the path’s various components''' + parts = [f"{self._protocol_with_profile}://"] + path = self.path_without_protocol + path = path.lstrip('/') + if path != '': + parts.extend(path.split('/')) + return tuple(parts)
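A sketch of how the three properties above relate, using a hypothetical path:

    from megfile.hdfs_path import HdfsPath

    p = HdfsPath('hdfs://data/2024/file.txt')
    print(p.path_with_protocol)     # hdfs://data/2024/file.txt
    print(p.path_without_protocol)  # data/2024/file.txt
    print(p.parts)                  # ('hdfs://', 'data', '2024', 'file.txt')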
+ +
[docs] def exists(self, followlinks: bool = False) -> bool: + ''' + Test if path exists + + If the bucket of the path is not permitted to read, return False + + :returns: True if path exists, else False + ''' + return bool( + self._client.status(self.path_without_protocol, strict=False))
+ +
[docs] def stat(self, follow_symlinks=True) -> StatResult: + ''' + Get StatResult of the file on the given path, including file size and mtime, referring to hdfs_getsize and hdfs_getmtime + + If path is not an existent path, which means hdfs_exists(path) returns False, then raise FileNotFoundError + If attempting to get the StatResult of the complete hdfs, such as hdfs_dir_url == 'hdfs://', raise BucketNotFoundError + + :returns: StatResult + :raises: FileNotFoundError + ''' + with raise_hdfs_error(self.path_with_protocol): + stat_data = self._client.status(self.path_without_protocol) + return StatResult( + size=stat_data['length'], + mtime=stat_data['modificationTime'] / 1000, + isdir=stat_data['type'] == 'DIRECTORY', + islnk=False, + extra=stat_data, + )
+ +
[docs] def getmtime(self, follow_symlinks: bool = False) -> float: + ''' + Get last-modified time of the file on the given path (in Unix timestamp format). + If the path is an existent directory, return the latest modified time of all files in it. The mtime of an empty directory is 1970-01-01 00:00:00 + + If path is not an existent path, which means hdfs_exists(path) returns False, then raise FileNotFoundError + + :returns: Last-modified time + :raises: FileNotFoundError + ''' + return self.stat(follow_symlinks=follow_symlinks).mtime
+ +
[docs] def getsize(self, follow_symlinks: bool = False) -> int: + ''' + Get file size on the given path (in bytes). + If the path is a directory, return the sum of all file sizes in it, including files in subdirectories (if any). + The result excludes the size of the directory itself. In other words, return 0 Byte on an empty directory path. + + If path is not an existent path, which means hdfs_exists(path) returns False, then raise FileNotFoundError + + :returns: File size + :raises: FileNotFoundError + ''' + return self.stat(follow_symlinks=follow_symlinks).size
+ +
[docs] def glob( + self, + pattern, + recursive: bool = True, + missing_ok: bool = True, + ) -> List['HdfsPath']: + '''Return hdfs path list, in which path matches glob pattern + Notes: Only glob within a bucket. If trying to match the bucket with wildcard characters, raise UnsupportedError + + :param pattern: Glob the given relative pattern in the directory represented by this path + :param recursive: If False, `**` will not search directories recursively + :param missing_ok: If False and target path doesn't match any file, raise FileNotFoundError + :raises: UnsupportedError, when the bucket part contains wildcard characters + :returns: A list containing paths that match `hdfs_pathname` + ''' + return list( + self.iglob( + pattern=pattern, recursive=recursive, missing_ok=missing_ok))
+ +
[docs] def glob_stat( + self, pattern, recursive: bool = True, + missing_ok: bool = True) -> Iterator[FileEntry]: + '''Return a generator containing tuples of path and file stat, in which path matches glob pattern + Notes: Only glob within a bucket. If trying to match the bucket with wildcard characters, raise UnsupportedError + + :param pattern: Glob the given relative pattern in the directory represented by this path + :param recursive: If False, `**` will not search directories recursively + :param missing_ok: If False and target path doesn't match any file, raise FileNotFoundError + :raises: UnsupportedError, when the bucket part contains wildcard characters + :returns: A generator containing tuples of path and file stat, in which paths match `hdfs_pathname` + ''' + for path_obj in self.iglob(pattern=pattern, recursive=recursive, + missing_ok=missing_ok): + yield FileEntry(path_obj.name, path_obj.path, path_obj.stat())
+ +
[docs] def iglob( + self, + pattern, + recursive: bool = True, + missing_ok: bool = True, + ) -> Iterator['HdfsPath']: + '''Return hdfs path iterator, in which path matches glob pattern + Notes: Only glob in bucket. If trying to match bucket with wildcard characters, raise UnsupportedError + + :param pattern: Glob the given relative pattern in the directory represented by this path + :param recursive: If False, `**` will not search directory recursively + :param missing_ok: If False and target path doesn't match any file, raise FileNotFoundError + :raises: UnsupportedError, when bucket part contains wildcard characters + :returns: An iterator contains paths match `hdfs_pathname` + ''' + glob_path = self.path_with_protocol + if pattern: + glob_path = self.joinpath(pattern).path_with_protocol + + def _scandir(dirname: str) -> Iterator[Tuple[str, bool]]: + for entry in self.from_path(dirname).scandir(): + yield entry.name, entry.is_dir() + + def _exist(path: PathLike, followlinks: bool = False): + return self.from_path(path).exists(followlinks=followlinks) + + def _is_dir(path: PathLike, followlinks: bool = False): + return self.from_path(path).is_dir(followlinks=followlinks) + + fs_func = FSFunc(_exist, _is_dir, _scandir) + for real_path in _create_missing_ok_generator( + iglob(fspath(glob_path), recursive=recursive, + fs=fs_func), missing_ok, + FileNotFoundError('No match any file: %r' % glob_path)): + yield self.from_path(real_path)
+ +
[docs] def is_dir(self, followlinks: bool = False) -> bool: + ''' + Test if an hdfs url is a directory + Specific procedures are as follows: + If there exists a suffix, of which ``os.path.join(path, suffix)`` is a file + If the url is an empty bucket or hdfs:// + + :param followlinks: whether followlinks is True or False, the result is the same, because hdfs symlinks do not support directories. + :returns: True if path is an hdfs directory, else False + ''' + return self.stat().is_dir()
+ +
[docs] def is_file(self, followlinks: bool = False) -> bool: + ''' + Test if a path is a file + + :returns: True if path is an hdfs file, else False + ''' + return self.stat().is_file()
+ +
[docs] def listdir(self, followlinks: bool = False) -> List[str]: + ''' + Get all contents of the given path. + + :returns: All contents that have the prefix of the given path. + :raises: FileNotFoundError, NotADirectoryError + ''' + if not self.is_dir(): + raise NotADirectoryError('Not a directory: %r' % self.path) + with raise_hdfs_error(self.path_with_protocol): + return self._client.list(self.path_without_protocol)
+ +
[docs] def iterdir(self, followlinks: bool = False) -> Iterator['HdfsPath']: + ''' + Get all contents of given path. + + :returns: All contents have prefix of path. + :raises: FileNotFoundError, NotADirectoryError + ''' + for filename in self.listdir(followlinks=followlinks): + yield self.joinpath(filename) # pytype: disable=bad-return-type
+ +
[docs] def load(self, followlinks: bool = False) -> BinaryIO: + '''Read all content in binary on specified path and write into memory + + User should close the BinaryIO manually + + :returns: BinaryIO + ''' + + buffer = io.BytesIO() + with self.open('rb') as f: + buffer.write(f.read()) + buffer.seek(0) + return buffer
+ +
[docs] def mkdir(self, mode=0o777, parents: bool = False, exist_ok: bool = False): + ''' + Create an hdfs directory. + + :param mode: Octal permission to set on the newly created directory. + These permissions will only be set on directories that do not already exist. + :param parents: parents is ignored, only kept for compatibility with pathlib.Path + :param exist_ok: If False and the target directory exists, raise FileExistsError + :raises: BucketNotFoundError, FileExistsError + ''' + if not exist_ok and self.exists(): + raise FileExistsError('File exists: %r' % self.path) + with raise_hdfs_error(self.path_with_protocol): + self._client.makedirs(self.path_without_protocol, permission=mode)
+ +
[docs] def rename(self, dst_path: PathLike, overwrite: bool = True) -> 'HdfsPath': + ''' + Move hdfs file path from src_path to dst_path + + :param dst_path: Given destination path + :param overwrite: whether or not overwrite file when exists + ''' + dst_path = self.from_path(dst_path) + if self.is_dir(): + for filename in self.iterdir(): + self.joinpath(filename).rename(dst_path.joinpath(filename)) # pytype: disable=attribute-error + else: + if overwrite: + dst_path.remove(missing_ok=True) + if overwrite or not dst_path.exists(): + with raise_hdfs_error(self.path_with_protocol): + self._client.rename( + self.path_without_protocol, + dst_path.path_without_protocol) + self.remove(missing_ok=True) + return dst_path
+ +
[docs] def move(self, dst_path: PathLike, overwrite: bool = True) -> None: + ''' + Move file/directory path from src_path to dst_path + + :param dst_path: Given destination path + ''' + self.rename(dst_path=dst_path, overwrite=overwrite)
+ +
[docs] def remove(self, missing_ok: bool = False) -> None: + ''' + Remove the file or directory on hdfs, `hdfs://` and `hdfs://bucket` are not permitted to remove + + :param missing_ok: if False and target file/directory not exists, raise FileNotFoundError + :raises: FileNotFoundError, UnsupportedError + ''' + try: + with raise_hdfs_error(self.path_with_protocol): + self._client.delete(self.path_without_protocol, recursive=True) + except Exception as e: + if not missing_ok or not isinstance(e, FileNotFoundError): + raise
+ +
[docs] def scan(self, missing_ok: bool = True, + followlinks: bool = False) -> Iterator[str]: + ''' + Iteratively traverse only files in the given hdfs directory. + Every iteration on the generator yields a path string. + + If path is a file path, yields the file only + If path is a non-existent path, return an empty generator + If path is a bucket path, return all file paths in the bucket + If path is an empty bucket, return an empty generator + If path doesn't contain any bucket, which is path == 'hdfs://', raise UnsupportedError. scan() on the complete hdfs is not supported in megfile + + :param missing_ok: If False and there's no file in the directory, raise FileNotFoundError + :raises: UnsupportedError + :returns: A file path generator + ''' + for file_entry in self.scan_stat(missing_ok=missing_ok, + followlinks=followlinks): + yield file_entry.path
+ +
[docs] def scan_stat(self, missing_ok: bool = True, + followlinks: bool = False) -> Iterator[FileEntry]: + ''' + Iteratively traverse only files in given directory. + Every iteration on generator yields a tuple of path string and file stat + + :param missing_ok: If False and there's no file in the directory, raise FileNotFoundError + :raises: UnsupportedError + :returns: A file path generator + ''' + with raise_hdfs_error(self.path_with_protocol): + for (root, + _root_status), _dir_infos, file_infos in self._client.walk( + self.path_without_protocol, status=True, + ignore_missing=missing_ok): + for filename, stat_data in file_infos: + yield FileEntry( + name=filename, + path=self.from_path( + f"{self._protocol_with_profile}://{root.lstrip('/')}" + ).joinpath(filename).path_with_protocol, + stat=StatResult( + size=stat_data['length'], + mtime=stat_data['modificationTime'] / 1000, + isdir=False, + islnk=False, + extra=stat_data, + ))
+ +
[docs] def scandir(self, followlinks: bool = False) -> Iterator[FileEntry]: + ''' + Get all contents of given path, the order of result is not guaranteed. + + :returns: All contents have prefix of path + :raises: FileNotFoundError, NotADirectoryError + ''' + with raise_hdfs_error(self.path_with_protocol): + for filename, stat_data in self._client.list( + self.path_without_protocol, status=True): + yield FileEntry( + name=filename, + path=self.joinpath(filename).path_with_protocol, + stat=StatResult( + size=stat_data['length'], + mtime=stat_data['modificationTime'] / 1000, + isdir=stat_data['type'] == 'DIRECTORY', + islnk=False, + extra=stat_data, + ))
+ + + +
[docs] def walk(self, followlinks: bool = False + ) -> Iterator[Tuple[str, List[str], List[str]]]: + ''' + Iteratively traverse the given hdfs directory, in top-down order. In other words, traverse the parent directory first; if subdirectories exist, traverse the subdirectories. + Every iteration on the generator yields a 3-tuple: (root, dirs, files) + + - root: Current hdfs path; + - dirs: Name list of subdirectories in current directory. + - files: Name list of files in current directory. + + If path is a file path, return an empty generator + If path is a non-existent path, return an empty generator + If path is a bucket path, the bucket will be the top directory, and will be returned at the first iteration of the generator + If path is an empty bucket, only yield one 3-tuple (note: hdfs doesn't have empty directories) + If path doesn't contain any bucket, which is path == 'hdfs://', raise UnsupportedError. walk() on the complete hdfs is not supported in megfile + + :param followlinks: whether followlinks is True or False, the result is the same, because hdfs does not support symlinks. + :returns: A 3-tuple generator + ''' + with raise_hdfs_error(self.path_with_protocol): + for path, dirs, files in self._client.walk( + self.path_without_protocol, ignore_missing=True, + allow_dir_changes=True): + yield f"{self._protocol_with_profile}://{path.lstrip('/')}", dirs, files
+ +
[docs] def md5(self, recalculate: bool = False, followlinks: bool = False) -> str: + ''' + Get checksum of the file or dir. + + :param recalculate: Ignore this parameter, just for compatibility + :param followlinks: Ignore this parameter, just for compatibility + :returns: checksum + ''' + if self.is_dir(followlinks=followlinks): + hash_md5 = hashlib.md5() # nosec + for file_name in self.listdir(): + chunk = self.joinpath(file_name).md5( # pytype: disable=attribute-error + recalculate=recalculate).encode() + hash_md5.update(chunk) + return hash_md5.hexdigest() + with raise_hdfs_error(self.path_with_protocol): + return self._client.checksum(self.path_without_protocol)['bytes']
+ +
[docs] def save(self, file_object: BinaryIO): + '''Write the opened binary stream to specified path, but the stream won't be closed + + :param file_object: Stream to be read + ''' + with raise_hdfs_error(self.path_with_protocol): + self._client.write( + self.path_without_protocol, overwrite=True, data=file_object)
+ +
[docs] def open( + self, + mode: str = 'r', + *, + buffering: Optional[int] = None, + encoding: Optional[str] = None, + errors: Optional[str] = None, + **kwargs) -> IO[AnyStr]: # pytype: disable=signature-mismatch + if '+' in mode: + raise ValueError('unacceptable mode: %r' % mode) + + if 'b' in mode: + encoding = None + elif not encoding: + encoding = sys.getdefaultencoding() + + with raise_hdfs_error(self.path_with_protocol): + if mode in ('r', 'rb'): + keys = [ + 'block_size', 'block_capacity', 'block_forward', + 'max_retries', 'max_workers' + ] + input_kwargs = {} + for key in keys: + if key in kwargs: + input_kwargs[key] = kwargs[key] + file_obj = HdfsPrefetchReader( + hdfs_path=self.path_without_protocol, + client=self._client, + profile_name=self._profile_name, + **input_kwargs) + if _is_pickle(file_obj): # pytype: disable=wrong-arg-types + file_obj = io.BufferedReader(file_obj) # pytype: disable=wrong-arg-types + if 'b' not in mode: + file_obj = io.TextIOWrapper( + file_obj, encoding=encoding, errors=errors) # pytype: disable=wrong-arg-types + file_obj.mode = mode + return file_obj # pytype: disable=bad-return-type + elif mode in ('w', 'wb'): + return self._client.write( # pytype: disable=bad-return-type + self.path_without_protocol, + overwrite=True, + buffersize=buffering, + encoding=encoding) + elif mode in ('a', 'ab'): + return self._client.write( # pytype: disable=bad-return-type + self.path_without_protocol, + append=True, + buffersize=buffering, + encoding=encoding) + raise ValueError('unacceptable mode: %r' % mode)
+ +
[docs] def absolute(self) -> 'HdfsPath': + ''' + Make the path absolute, without normalization or resolving symlinks. Returns a new path object + ''' + with raise_hdfs_error(self.path_with_protocol): + real_path = self._client.resolve(self.path_without_protocol) + return self.from_path( + f"{self._protocol_with_profile}:///{real_path.lstrip('/')}")
+
\ No newline at end of file diff --git a/_modules/megfile/http.html b/_modules/megfile/http.html new file mode 100644 index 00000000..e4dd60bf --- /dev/null +++ b/_modules/megfile/http.html @@ -0,0 +1,174 @@

Source code for megfile.http

+from megfile.http_path import HttpPath, HttpsPath, get_http_session, http_open, is_http
+from megfile.interfaces import PathLike, StatResult
+
+__all__ = [
+    'get_http_session',
+    'is_http',
+    'http_open',
+    'http_stat',
+    'http_getsize',
+    'http_getmtime',
+    'http_exists',
+]
+
+
+
[docs]def http_stat(path: PathLike, follow_symlinks=True) -> StatResult: + ''' + Get StatResult of http_url response, including size and mtime, referring to http_getsize and http_getmtime + + :param path: Given path + :param follow_symlinks: Ignore this parameter, just for compatibility + :returns: StatResult + :raises: HttpPermissionError, HttpFileNotFoundError + ''' + return HttpPath(path).stat(follow_symlinks)
+ + +
[docs]def http_getsize(path: PathLike, follow_symlinks: bool = False) -> int: + ''' + Get file size on the given http_url path. + + If the http response headers don't include Content-Length, return None + + :param path: Given path + :param follow_symlinks: Ignore this parameter, just for compatibility + :returns: File size (in bytes) + :raises: HttpPermissionError, HttpFileNotFoundError + ''' + return HttpPath(path).getsize(follow_symlinks)
+ + +
[docs]def http_getmtime(path: PathLike, follow_symlinks: bool = False) -> float: + ''' + Get Last-Modified time of the http request on the given http_url path. + + If the http response headers don't include Last-Modified, return None + + :param path: Given path + :param follow_symlinks: Ignore this parameter, just for compatibility + :returns: Last-Modified time (in Unix timestamp format) + :raises: HttpPermissionError, HttpFileNotFoundError + ''' + return HttpPath(path).getmtime(follow_symlinks)
+ + +
[docs]def http_exists(path: PathLike, followlinks: bool = False) -> bool: + """Test if http path exists + + :param path: Given path + :param followlinks: ignore this parameter, just for compatibility + :type followlinks: bool, optional + :return: return True if exists + :rtype: bool + """ + return HttpPath(path).exists(followlinks)
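A sketch combining the helpers above; the URL is hypothetical, and getsize/getmtime may return None when the server omits the relevant header:

    from megfile.http import http_exists, http_getmtime, http_getsize

    url = 'https://example.com/model.bin'
    if http_exists(url):
        print(http_getsize(url), http_getmtime(url))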
+
\ No newline at end of file diff --git a/_modules/megfile/http_path.html b/_modules/megfile/http_path.html new file mode 100644 index 00000000..f3fbec0b --- /dev/null +++ b/_modules/megfile/http_path.html @@ -0,0 +1,557 @@

Source code for megfile.http_path

+import time
+from copy import deepcopy
+from functools import partial
+from io import BufferedReader, BytesIO
+from logging import getLogger as get_logger
+from threading import Lock
+from typing import Iterable, Iterator, Optional, Tuple, Union
+
+import requests
+from urllib3 import HTTPResponse
+
+from megfile.config import DEFAULT_BLOCK_SIZE, HTTP_MAX_RETRY_TIMES
+from megfile.errors import http_should_retry, patch_method, translate_http_error
+from megfile.interfaces import PathLike, Readable, StatResult, URIPath
+from megfile.lib.compat import fspath
+from megfile.lib.http_prefetch_reader import DEFAULT_TIMEOUT, HttpPrefetchReader
+from megfile.lib.s3_buffered_writer import DEFAULT_MAX_BUFFER_SIZE
+from megfile.lib.url import get_url_scheme
+from megfile.pathlike import PathLike
+from megfile.smart_path import SmartPath
+from megfile.utils import _is_pickle, binary_open
+
+__all__ = [
+    'HttpPath',
+    'HttpsPath',
+    'get_http_session',
+    'is_http',
+    'http_open',
+]
+
+_logger = get_logger(__name__)
+max_retries = HTTP_MAX_RETRY_TIMES
+
+
+
[docs]def get_http_session( + timeout: Optional[Union[int, Tuple[int, int]]] = DEFAULT_TIMEOUT, + status_forcelist: Iterable[int] = (500, 502, 503, 504) +) -> requests.Session: + session = requests.Session() + + def after_callback(response, *args, **kwargs): + if response.status_code in status_forcelist: + response.raise_for_status() + return response + + def before_callback(method, url, **kwargs): + _logger.debug( + 'send http request: %s %r, with parameters: %s', method, url, + kwargs) + + def retry_callback( + error, + method, + url, + params=None, + data=None, + headers=None, + cookies=None, + files=None, + auth=None, + timeout=None, + allow_redirects=True, + proxies=None, + hooks=None, + stream=None, + verify=None, + cert=None, + json=None, + **kwargs, + ): + if data and hasattr(data, 'seek'): + data.seek(0) + elif isinstance(data, Iterator): + _logger.warning(f'Can not retry http request with iterator data') + raise + if files: + + def seek_or_reopen(file_object): + if isinstance(file_object, (str, bytes)): + return file_object + elif hasattr(file_object, 'seek'): + file_object.seek(0) + return file_object + elif hasattr(file_object, 'name'): + with SmartPath(file_object.name).open('rb') as f: + return BytesIO(f.read()) + else: + _logger.warning( + f'Can not retry http request, because the file object is not seekable and unsupport "name"' + ) + raise + + for key, file_info in files.items(): + if hasattr(file_info, 'seek'): + file_info.seek(0) + elif isinstance(file_info, + (tuple, list)) and len(file_info) >= 2: + file_info = list(file_info) + if isinstance(file_info[1], + (tuple, list)) and len(file_info[1]) >= 2: + file_info[1] = list(file_info[1]) + file_info[1] = seek_or_reopen(file_info[1]) + else: + file_info[1] = seek_or_reopen(file_info[1]) + files[key] = file_info + + session.request = patch_method( + partial(session.request, timeout=timeout), + max_retries=max_retries, + should_retry=http_should_retry, + before_callback=before_callback, + after_callback=after_callback, + retry_callback=retry_callback, + ) + return session
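A sketch of using the session returned above: request() has been patched to retry (re-seeking any seekable body first) and to raise on the listed status codes. The URL is hypothetical:

    from megfile.http_path import get_http_session

    session = get_http_session(timeout=(5, 30),
                               status_forcelist=(500, 502, 503, 504))
    resp = session.get('https://example.com/data.bin', stream=True)
    resp.raise_for_status()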
+ + +
[docs]def is_http(path: PathLike) -> bool: + '''http scheme definition: http(s)://domain/path + + :param path: Path to be tested + :returns: True if path is http url, else False + ''' + + path = fspath(path) + if not isinstance(path, str) or not (path.startswith('http://') or + path.startswith('https://')): + return False + + scheme = get_url_scheme(path) + return scheme == 'http' or scheme == 'https'
+ + +
[docs]def http_open( + path: PathLike, + mode: str = 'rb', + *, + encoding: Optional[str] = None, + errors: Optional[str] = None, + max_concurrency: Optional[int] = None, + max_buffer_size: int = DEFAULT_MAX_BUFFER_SIZE, + forward_ratio: Optional[float] = None, + block_size: int = DEFAULT_BLOCK_SIZE, + **kwargs) -> Union[BufferedReader, HttpPrefetchReader]: + '''Open a BytesIO to read binary data of given http(s) url + + .. note :: + + Essentially, it reads data of http(s) url to memory by requests, and then return BytesIO to user. + + :param path: Given path + :param mode: Only supports 'rb' mode now + :param encoding: encoding is the name of the encoding used to decode or encode the file. This should only be used in text mode. + :param errors: errors is an optional string that specifies how encoding and decoding errors are to be handled—this cannot be used in binary mode. + :param max_concurrency: Max download thread number, None by default + :param max_buffer_size: Max cached buffer size in memory, 128MB by default + :param block_size: Size of single block, 8MB by default. Each block will be uploaded or downloaded by single thread. + :return: BytesIO initialized with http(s) data + ''' + return HttpPath(path).open( + mode, + encoding=encoding, + errors=errors, + max_concurrency=max_concurrency, + max_buffer_size=max_buffer_size, + forward_ratio=forward_ratio, + block_size=block_size)
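A read sketch; the URL is hypothetical, and only 'rb' is accepted. Whether you get a BufferedReader or an HttpPrefetchReader depends on the response headers, as HttpPath.open below shows:

    from megfile.http import http_open

    with http_open('https://example.com/archive.tar.gz', 'rb') as f:
        head = f.read(512)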
+ + +
[docs]@SmartPath.register +class HttpPath(URIPath): + + protocol = "http" + + def __init__(self, path: PathLike, *other_paths: PathLike): + super().__init__(path, *other_paths) + + if fspath(path).startswith('https://'): + self.protocol = 'https' + self.request_kwargs = {} + +
[docs] @binary_open + def open( + self, + mode: str = 'rb', + *, + max_concurrency: Optional[int] = None, + max_buffer_size: int = DEFAULT_MAX_BUFFER_SIZE, + forward_ratio: Optional[float] = None, + block_size: int = DEFAULT_BLOCK_SIZE, + **kwargs) -> Union[BufferedReader, HttpPrefetchReader]: + '''Open a BytesIO to read binary data of given http(s) url + + .. note :: + + Essentially, it reads data of http(s) url to memory by requests, and then return BytesIO to user. + + :param mode: Only supports 'rb' mode now + :param encoding: encoding is the name of the encoding used to decode or encode the file. This should only be used in text mode. + :param errors: errors is an optional string that specifies how encoding and decoding errors are to be handled—this cannot be used in binary mode. + :param max_concurrency: Max download thread number, None by default + :param max_buffer_size: Max cached buffer size in memory, 128MB by default + :param block_size: Size of single block, 8MB by default. Each block will be uploaded or downloaded by single thread. + :return: BytesIO initialized with http(s) data + ''' + if mode not in ('rb',): + raise ValueError('unacceptable mode: %r' % mode) + + response = None + request_kwargs = deepcopy(self.request_kwargs) + timeout = request_kwargs.pop('timeout', DEFAULT_TIMEOUT) + stream = request_kwargs.pop('stream', True) + try: + response = get_http_session( + timeout=timeout, + status_forcelist=(), + ).get( + self.path_with_protocol, stream=stream, **request_kwargs) + response.raise_for_status() + except Exception as error: + if response: + response.close() + raise translate_http_error(error, self.path_with_protocol) + + content_size = int(response.headers['Content-Length']) + if (response.headers.get('Accept-Ranges') == 'bytes' and + content_size >= block_size * 2 and + not response.headers.get('Content-Encoding')): + response.close() + + block_capacity = max_buffer_size // block_size + if forward_ratio is None: + block_forward = None + else: + block_forward = max(int(block_capacity * forward_ratio), 1) + + reader = HttpPrefetchReader( + self, + content_size=content_size, + max_retries=max_retries, + max_workers=max_concurrency, + block_capacity=block_capacity, + block_forward=block_forward, + block_size=block_size, + ) + if _is_pickle(reader): # pytype: disable=wrong-arg-types + reader = BufferedReader(reader) # pytype: disable=wrong-arg-types + return reader + + response.raw.name = self.path_with_protocol + # TODO: When python version must bigger than 3.10, use urllib3>=2.0.0 instead of 'Response' + # response.raw.auto_close = False + # response.raw.decode_content = True + # return BufferedReader(response.raw) + return BufferedReader(Response(response.raw)) # pytype: disable=wrong-arg-types
+ +
[docs] def stat(self, follow_symlinks=True) -> StatResult: + ''' + Get StatResult of http_url response, including size and mtime, referring to http_getsize and http_getmtime + + :param follow_symlinks: Ignore this parameter, just for compatibility + :returns: StatResult + :raises: HttpPermissionError, HttpFileNotFoundError + ''' + + request_kwargs = deepcopy(self.request_kwargs) + timeout = request_kwargs.pop('timeout', DEFAULT_TIMEOUT) + stream = request_kwargs.pop('stream', True) + + try: + with get_http_session(timeout=timeout, status_forcelist=()).get( + self.path_with_protocol, stream=stream, + **request_kwargs) as response: + response.raise_for_status() + headers = response.headers + except Exception as error: + raise translate_http_error(error, self.path_with_protocol) + + size = headers.get('Content-Length') + if size: + size = int(size) + + last_modified = headers.get('Last-Modified') + if last_modified: + last_modified = time.mktime( + time.strptime(last_modified, "%a, %d %b %Y %H:%M:%S %Z")) + + return StatResult( # pyre-ignore[20] + size=size, mtime=last_modified, isdir=False, + islnk=False, extra=headers)
+ +
[docs] def getsize(self, follow_symlinks: bool = False) -> int: + ''' + Get file size on the given http_url path. + + If the http response headers don't include Content-Length, return None + + :param follow_symlinks: Ignore this parameter, just for compatibility + :returns: File size (in bytes) + :raises: HttpPermissionError, HttpFileNotFoundError + ''' + return self.stat().size
+ +
[docs] def getmtime(self, follow_symlinks: bool = False) -> float: + ''' + Get Last-Modified time of the http request on the given http_url path. + + If the http response headers don't include Last-Modified, return None + + :param follow_symlinks: Ignore this parameter, just for compatibility + :returns: Last-Modified time (in Unix timestamp format) + :raises: HttpPermissionError, HttpFileNotFoundError + ''' + return self.stat().mtime
+ +
[docs] def exists(self, followlinks: bool = False) -> bool: + """Test if http path exists + + :param followlinks: ignore this parameter, just for compatibility + :type followlinks: bool, optional + :return: return True if exists + :rtype: bool + """ + request_kwargs = deepcopy(self.request_kwargs) + timeout = request_kwargs.pop('timeout', DEFAULT_TIMEOUT) + stream = request_kwargs.pop('stream', True) + + try: + with get_http_session(timeout=timeout, status_forcelist=()).get( + self.path_with_protocol, stream=stream, + **request_kwargs) as response: + if response.status_code == 404: + return False + return True + except requests.exceptions.ConnectionError: + return False
+ + +
[docs]@SmartPath.register +class HttpsPath(HttpPath): + + protocol = "https"
+ + +class Response(Readable): + + def __init__(self, raw: HTTPResponse) -> None: + super().__init__() + + raw.auto_close = False + self._block_size = 128 * 2**10 # 128KB + self._raw = raw + self._offset = 0 + self._buffer = BytesIO() + self._lock = Lock() + + @property + def name(self): + return self._raw.name + + @property + def mode(self): + return 'rb' + + def tell(self) -> int: + return self._offset + + def _clear_buffer(self) -> None: + self._buffer.seek(0) + self._buffer.truncate() + + def read(self, size: Optional[int] = None) -> bytes: + if size == 0: + return b'' + if size is not None and size < 0: + size = None + + with self._lock: + while not size or self._buffer.tell() < size: + data = self._raw.read(self._block_size, decode_content=True) + if not data: + break + self._buffer.write(data) + self._buffer.seek(0) + content = self._buffer.read(size) + residue = self._buffer.read() + self._clear_buffer() + if residue: + self._buffer.write(residue) + self._offset += len(content) + return content + + def readline(self, size: Optional[int] = None) -> bytes: + if size == 0: + return b'' + if size is not None and size < 0: + size = None + + with self._lock: + self._buffer.seek(0) + buffer = self._buffer.read() + self._clear_buffer() + if b'\n' in buffer: + content = buffer[:buffer.index(b'\n') + 1] + if size: + content = content[:size] + self._buffer.write(buffer[len(content):]) + elif size and len(buffer) >= size: + content = buffer[:size] + self._buffer.write(buffer[size:]) + else: + content = None + self._buffer.write(buffer) + while True: + if size and self._buffer.tell() >= size: + break + data = self._raw.read(self._block_size, decode_content=True) + if not data: + break + elif b"\n" in data: + last_content, residue = data.split(b"\n", 1) + self._buffer.write(last_content) + self._buffer.write(b"\n") + self._buffer.seek(0) + content = self._buffer.read() + self._clear_buffer() + if size and len(content) > size: + self._buffer.write(content[size:]) + content = content[:size] + if residue: + self._buffer.write(residue) + break + else: + self._buffer.write(data) + + if content is None: + self._buffer.seek(0) + content = self._buffer.read(size) + residue = self._buffer.read() + self._clear_buffer() + if residue: + self._buffer.write(residue) + self._offset += len(content) + return content + + def _close(self) -> None: + return self._raw.close() +
+ +
\ No newline at end of file diff --git a/_modules/megfile/lib/s3_buffered_writer.html b/_modules/megfile/lib/s3_buffered_writer.html new file mode 100644 index 00000000..a73f39b5 --- /dev/null +++ b/_modules/megfile/lib/s3_buffered_writer.html @@ -0,0 +1,331 @@

Source code for megfile.lib.s3_buffered_writer

+from collections import OrderedDict
+from concurrent.futures import FIRST_COMPLETED, ThreadPoolExecutor, wait
+from io import BytesIO
+from logging import getLogger as get_logger
+from threading import Lock
+from typing import NamedTuple, Optional
+
+from megfile.config import BACKOFF_FACTOR, BACKOFF_INITIAL, DEFAULT_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE, DEFAULT_MAX_BUFFER_SIZE, GLOBAL_MAX_WORKERS
+from megfile.errors import raise_s3_error
+from megfile.interfaces import Writable
+from megfile.utils import get_human_size, process_local
+
+_logger = get_logger(__name__)
+'''
+class PartResult(NamedTuple):
+
+    etag: str
+    part_number: int
+    content_size: int
+
+in Python 3.6+
+'''
+
+_PartResult = NamedTuple(
+    'PartResult', [('etag', str), ('part_number', int), ('content_size', int)])
+
+
+class PartResult(_PartResult):
+
+    def asdict(self):
+        return {
+            'PartNumber': self.part_number,
+            'ETag': self.etag,
+        }
+
+
+
[docs]class S3BufferedWriter(Writable): + + def __init__( + self, + bucket: str, + key: str, + *, + s3_client, + block_size: int = DEFAULT_BLOCK_SIZE, + max_block_size: int = DEFAULT_MAX_BLOCK_SIZE, + max_buffer_size: int = DEFAULT_MAX_BUFFER_SIZE, + max_workers: Optional[int] = None, + profile_name: Optional[str] = None): + + self._bucket = bucket + self._key = key + self._client = s3_client + self._profile_name = profile_name + + self._block_size = block_size + self._max_block_size = max_block_size + self._max_buffer_size = max_buffer_size + self._total_buffer_size = 0 + self._offset = 0 + self.__content_size = 0 + self._backoff_size = BACKOFF_INITIAL + self._buffer = BytesIO() + + self._futures = OrderedDict() + self._is_global_executor = False + if max_workers is None: + self._executor = process_local( + 'S3BufferedWriter.executor', + ThreadPoolExecutor, + max_workers=GLOBAL_MAX_WORKERS) + self._is_global_executor = True + else: + self._executor = ThreadPoolExecutor(max_workers=max_workers) + + self._part_number = 0 + self.__upload_id = None + self.__upload_id_lock = Lock() + + _logger.debug('open file: %r, mode: %s' % (self.name, self.mode)) + + @property + def name(self) -> str: + return 's3%s://%s/%s' % ( + f"+{self._profile_name}" if self._profile_name else "", + self._bucket, self._key) + + @property + def mode(self) -> str: + return 'wb' + +
[docs] def tell(self) -> int: + return self._offset
+ + @property + def _content_size(self) -> int: + return self.__content_size + + @_content_size.setter + def _content_size(self, value: int): + if value > self._backoff_size: + _logger.debug( + 'writing file: %r, current size: %s' % + (self.name, get_human_size(value))) + while value > self._backoff_size: + self._backoff_size *= BACKOFF_FACTOR + self.__content_size = value + + @property + def _is_multipart(self) -> bool: + return len(self._futures) > 0 + + @property + def _upload_id(self) -> str: + with self.__upload_id_lock: + if self.__upload_id is None: + with raise_s3_error(self.name): + self.__upload_id = self._client.create_multipart_upload( + Bucket=self._bucket, + Key=self._key, + )['UploadId'] + return self.__upload_id + + @property + def _buffer_size(self): + return self._total_buffer_size - sum( + future.result().content_size + for future in self._futures.values() + if future.done()) + + @property + def _uploading_futures(self): + return [ + future for future in self._futures.values() if not future.done() + ] + + @property + def _multipart_upload(self): + return { + 'Parts': [ + future.result().asdict() + for _, future in sorted(self._futures.items()) + ], + } + + def _upload_buffer(self, part_number, content): + with raise_s3_error(self.name): + return PartResult( + self._client.upload_part( + Bucket=self._bucket, + Key=self._key, + UploadId=self._upload_id, + PartNumber=part_number, + Body=content, + )['ETag'], part_number, len(content)) + + def _submit_upload_buffer(self, part_number, content): + self._futures[part_number] = self._executor.submit( + self._upload_buffer, part_number, content) + self._total_buffer_size += len(content) + while self._buffer_size > self._max_buffer_size: + wait(self._uploading_futures, return_when=FIRST_COMPLETED) + + def _submit_upload_content(self, content: bytes): + # s3 part needs at least 5MB, so we need to divide content into equal-size parts, and give last part more size + # e.g. 257MB can be divided into 2 parts, 128MB and 129MB + offset = 0 + while len(content) - offset - self._max_block_size > self._block_size: + self._part_number += 1 + offset_stop = offset + self._max_block_size + self._submit_upload_buffer( + self._part_number, content[offset:offset_stop]) + offset = offset_stop + self._part_number += 1 + self._submit_upload_buffer(self._part_number, content[offset:]) + + def _submit_futures(self): + content = self._buffer.getvalue() + if len(content) == 0: + return + self._buffer = BytesIO() + self._submit_upload_content(content) + +
[docs] def write(self, data: bytes) -> int: + if self.closed: + raise IOError('file already closed: %r' % self.name) + + result = self._buffer.write(data) + if self._buffer.tell() >= self._block_size: + self._submit_futures() + self._offset += result + self._content_size = self._offset + return result
+ + def _shutdown(self): + if not self._is_global_executor: + self._executor.shutdown() + + def _close(self): + _logger.debug('close file: %r' % self.name) + + if not self._is_multipart: + with raise_s3_error(self.name): + self._client.put_object( + Bucket=self._bucket, + Key=self._key, + Body=self._buffer.getvalue()) + self._shutdown() + return + + self._submit_futures() + + with raise_s3_error(self.name): + self._client.complete_multipart_upload( + Bucket=self._bucket, + Key=self._key, + MultipartUpload=self._multipart_upload, + UploadId=self._upload_id, + ) + + self._shutdown()
+
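The part-splitting comment in _submit_upload_content is easiest to verify with concrete numbers. A standalone sketch, assuming hypothetical 8MB/128MB values for block_size and max_block_size (the real defaults come from megfile.config):

    MB = 2 ** 20
    block_size, max_block_size = 8 * MB, 128 * MB

    def split_parts(total: int):
        parts, offset = [], 0
        # Cut max_block_size parts while enough remains, then give the
        # remainder to the last part so no part falls below the minimum
        while total - offset - max_block_size > block_size:
            parts.append(max_block_size)
            offset += max_block_size
        parts.append(total - offset)
        return parts

    print([p // MB for p in split_parts(257 * MB)])  # [128, 129]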
\ No newline at end of file diff --git a/_modules/megfile/lib/s3_limited_seekable_writer.html b/_modules/megfile/lib/s3_limited_seekable_writer.html new file mode 100644 index 00000000..0101b38d --- /dev/null +++ b/_modules/megfile/lib/s3_limited_seekable_writer.html @@ -0,0 +1,277 @@

Source code for megfile.lib.s3_limited_seekable_writer

+import os
+from io import BytesIO
+from logging import getLogger as get_logger
+from typing import Optional
+
+from megfile.config import DEFAULT_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE, DEFAULT_MAX_BUFFER_SIZE
+from megfile.errors import raise_s3_error
+from megfile.interfaces import Seekable
+from megfile.lib.s3_buffered_writer import S3BufferedWriter
+
+_logger = get_logger(__name__)
+
+
+
[docs]class S3LimitedSeekableWriter(Seekable, S3BufferedWriter): + ''' For file format like msgpack and mp4, it's a pain that you need to write + header before writing the data. So it's kind of hard to make streaming write + to unseekable file system like s3. In this case, we will try to keep the first + and last parts of data in memory, so we can come back to head again and write + the header at the last second. + ''' + + def __init__( + self, + bucket: str, + key: str, + *, + s3_client, + block_size: int = DEFAULT_BLOCK_SIZE, + head_block_size: Optional[int] = None, + tail_block_size: Optional[int] = None, + max_block_size: int = DEFAULT_MAX_BLOCK_SIZE, + max_buffer_size: int = DEFAULT_MAX_BUFFER_SIZE, + max_workers: Optional[int] = None, + profile_name: Optional[str] = None): + + super().__init__( + bucket, + key, + s3_client=s3_client, + block_size=block_size, + max_block_size=max_block_size, + max_buffer_size=max_buffer_size, + max_workers=max_workers, + profile_name=profile_name) + + self._head_block_size = head_block_size or block_size + self._tail_block_size = tail_block_size or block_size + self._head_buffer = BytesIO() + + @property + def _head_size(self) -> int: + return len(self._head_buffer.getvalue()) + + @property + def _tail_size(self) -> int: + return len(self._buffer.getvalue()) + + @property + def _tail_offset(self) -> int: + return self._content_size - self._tail_size + +
[docs] def seek(self, offset: int, whence: int = os.SEEK_SET) -> int: + if self.closed: + raise IOError('file already closed: %r' % self.name) + + if whence == os.SEEK_SET: + target_offset = offset + elif whence == os.SEEK_CUR: + target_offset = self._offset + offset + elif whence == os.SEEK_END: + target_offset = self._content_size + offset + + if target_offset < self._head_block_size: + self._head_buffer.seek(target_offset) + elif target_offset >= self._tail_offset: + self._buffer.seek(target_offset - self._tail_offset) + else: + raise OSError( + 'Can only seek inside of head, or seek to tail, target offset: %d' + % target_offset) + + self._offset = target_offset + return self._offset
+ +
[docs] def write(self, data: bytes) -> int: + if self.closed: + raise IOError('file already closed: %r' % self.name) + + if self._head_size != self._head_block_size: # no tail part yet + self._write_to_head(data) + elif self._offset < self._head_block_size: # tail part already created + self._write_to_head_after_tail_part_created(data) + elif self._offset >= self._tail_offset: + self._write_to_tail(data) + else: + raise OSError( + 'Can only write inside of head, or write to tail, current offset: %d' + % self._offset) + return len(data)
+ + def _write_to_head(self, data: bytes): + if self._offset + len(data) <= self._head_block_size: + self._head_buffer.write(data) + self._content_size = self._offset = self._head_size + if self._content_size == self._head_block_size: + self._part_number += 1 + else: # head part exceeded + offset = self._head_block_size - self._offset + self._head_buffer.write(data[:offset]) + self._content_size = self._offset = self._head_size + self._part_number += 1 + self._write_to_tail(data[offset:]) + + def _write_to_head_after_tail_part_created(self, data: bytes): + if self._offset + len(data) > self._head_block_size: + raise Exception( + 'Head part overflow, %d bytes left but try to write %d bytes' % + (self._head_block_size - self._offset, len(data))) + self._head_buffer.write(data) + self._offset += len(data) + + def _write_to_tail(self, data: bytes): + self._buffer.write(data) + if self._buffer.tell() >= self._block_size + self._tail_block_size: + self._submit_futures() + self._offset += len(data) + if self._offset > self._content_size: + self._content_size = self._offset + + def _submit_futures(self): + content = self._buffer.getvalue() + if len(content) == 0: + return + offset = len(content) - self._tail_block_size + self._buffer = BytesIO(content[offset:]) + self._buffer.seek(0, os.SEEK_END) + self._submit_upload_content(content[:offset]) + + def _close(self): + _logger.debug('close file: %r' % self.name) + + if not self._is_multipart: + with raise_s3_error(self.name): + self._client.put_object( + Bucket=self._bucket, + Key=self._key, + Body=self._head_buffer.getvalue() + self._buffer.getvalue()) + self._shutdown() + return + + self._submit_upload_buffer(1, self._head_buffer.getvalue()) + self._head_buffer = BytesIO() # clean memory + + content = self._buffer.getvalue() + if len(content) > 0: + self._submit_upload_content(content) + self._buffer = BytesIO() # clean memory + + with raise_s3_error(self.name): + self._client.complete_multipart_upload( + Bucket=self._bucket, + Key=self._key, + MultipartUpload=self._multipart_upload, + UploadId=self._upload_id, + ) + + self._shutdown()
+
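A usage sketch of the head/tail seek rules above: bytes written first land in the head block, seek() may only target the head or the tail, and close() flushes everything. The bucket, key, boto3 client, and the assumption that the writer exposes the standard file-object close() are hypothetical stand-ins:

    import boto3
    from megfile.lib.s3_limited_seekable_writer import S3LimitedSeekableWriter

    client = boto3.client('s3')
    writer = S3LimitedSeekableWriter('mybucket', 'video.mp4', s3_client=client)
    writer.write(b'\x00' * 8)       # placeholder header, inside the head block
    writer.write(b'...payload...')  # streamed body
    writer.seek(0)                  # allowed: target is within the head block
    writer.write(b'HDR0')           # rewrite the reserved header bytes
    writer.close()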
+ +
+
+ +
+
+
+
\ No newline at end of file
diff --git a/_modules/megfile/lib/s3_prefetch_reader.html b/_modules/megfile/lib/s3_prefetch_reader.html
new file mode 100644
index 00000000..cb1c1525
--- /dev/null
+++ b/_modules/megfile/lib/s3_prefetch_reader.html
@@ -0,0 +1,221 @@
+megfile.lib.s3_prefetch_reader — megfile documentation

Source code for megfile.lib.s3_prefetch_reader

+import os
+from concurrent.futures import Future
+from io import BytesIO
+from typing import Optional
+
+from megfile.config import BACKOFF_FACTOR, BACKOFF_INITIAL, DEFAULT_BLOCK_CAPACITY, DEFAULT_BLOCK_SIZE, GLOBAL_MAX_WORKERS, NEWLINE, S3_MAX_RETRY_TIMES
+from megfile.errors import S3FileChangedError, S3InvalidRangeError, patch_method, raise_s3_error, s3_should_retry
+from megfile.lib.base_prefetch_reader import BasePrefetchReader, LRUCacheFutureManager
+
+__all__ = [
+    'DEFAULT_BLOCK_CAPACITY',
+    'DEFAULT_BLOCK_SIZE',
+    'GLOBAL_MAX_WORKERS',
+    'BACKOFF_INITIAL',
+    'BACKOFF_FACTOR',
+    'NEWLINE',
+    'S3PrefetchReader',
+    'LRUCacheFutureManager',
+]
+
+
+
[docs]class S3PrefetchReader(BasePrefetchReader):
+    '''
+    Reader for fast reading of s3 content. It divides the file content into equal
+    parts of block_size bytes, and uses an LRU cache to keep at most block_capacity
+    blocks in memory. open(), seek() and read() trigger a prefetch, which caches
+    block_forward blocks of data starting from the current offset (the position
+    after reading, if the calling function is read).
+    '''
+
+    def __init__(
+            self,
+            bucket: str,
+            key: str,
+            *,
+            s3_client,
+            block_size: int = DEFAULT_BLOCK_SIZE,
+            block_capacity: int = DEFAULT_BLOCK_CAPACITY,
+            block_forward: Optional[int] = None,
+            max_retries: int = S3_MAX_RETRY_TIMES,
+            max_workers: Optional[int] = None,
+            profile_name: Optional[str] = None):
+
+        self._bucket = bucket
+        self._key = key
+        self._client = s3_client
+        self._profile_name = profile_name
+
+        super().__init__(
+            block_size=block_size,
+            block_capacity=block_capacity,
+            block_forward=block_forward,
+            max_retries=max_retries,
+            max_workers=max_workers)
+
+    def _get_content_size(self):
+        try:
+            start, end = 0, self._block_size - 1
+            first_index_response = self._fetch_response(start=start, end=end)
+            content_size = int(
+                first_index_response['ContentRange'].split('/')[-1])
+        except S3InvalidRangeError:
+            # usually happens when reading an empty file
+            # TODO: use minio to test empty files: https://hub.docker.com/r/minio/minio
+            first_index_response = self._fetch_response()
+            content_size = int(first_index_response['ContentLength'])
+
+        first_future = Future()
+        first_future.set_result(first_index_response['Body'])
+        self._insert_futures(index=0, future=first_future)
+        self._content_etag = first_index_response['ETag']
+        self._content_info = first_index_response
+        return content_size
+
+    @property
+    def name(self) -> str:
+        return 's3%s://%s/%s' % (
+            f"+{self._profile_name}" if self._profile_name else "",
+            self._bucket, self._key)
+
+    def _fetch_response(
+            self, start: Optional[int] = None,
+            end: Optional[int] = None) -> dict:
+
+        def fetch_response() -> dict:
+            if start is None or end is None:
+                return self._client.get_object(
+                    Bucket=self._bucket, Key=self._key)
+
+            range_str = f'bytes={start}-{end}'
+            response = self._client.get_object(
+                Bucket=self._bucket, Key=self._key, Range=range_str)
+            response['Body'] = BytesIO(response['Body'].read())
+            return response
+
+        fetch_response = patch_method(
+            fetch_response,
+            max_retries=self._max_retries,
+            should_retry=s3_should_retry)
+
+        with raise_s3_error(self.name):
+            return fetch_response()
+
+    def _fetch_buffer(self, index: int) -> BytesIO:
+        start, end = index * self._block_size, (
+            index + 1) * self._block_size - 1
+        response = self._fetch_response(start=start, end=end)
+        etag = response.get('ETag', None)
+        if etag is not None and etag != self._content_etag:  # pytype: disable=attribute-error
+            raise S3FileChangedError(
+                'File changed: %r, etag before: %s, after: %s' %
+                (self.name, self._content_info, response))  # pytype: disable=attribute-error
+
+        return response['Body']
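A sketch of driving the reader directly (bucket, key and sizes are placeholders; in practice it is usually constructed for you via s3_prefetch_open, re-exported later in this diff):

# Hypothetical sketch, not part of the module source.
from megfile.lib.s3_prefetch_reader import S3PrefetchReader
from megfile.s3_path import get_s3_client

reader = S3PrefetchReader(
    'my-bucket', 'data/big.bin',  # assumed bucket/key
    s3_client=get_s3_client(),
    block_size=8 * 2 ** 20,       # 8 MiB per block
    block_capacity=16)            # at most 16 blocks cached (LRU)

header = reader.read(1024)        # reads block 0, prefetches blocks ahead
reader.seek(100 * 2 ** 20)        # a far seek schedules new block downloads
chunk = reader.read(4096)         # least-recently-used blocks get evicted
reader.close()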
\ No newline at end of file
diff --git a/_modules/megfile/lib/s3_share_cache_reader.html b/_modules/megfile/lib/s3_share_cache_reader.html
new file mode 100644
index 00000000..e1bc0f3b
--- /dev/null
+++ b/_modules/megfile/lib/s3_share_cache_reader.html
@@ -0,0 +1,210 @@
+megfile.lib.s3_share_cache_reader — megfile documentation

Source code for megfile.lib.s3_share_cache_reader

+from collections import Counter
+from concurrent.futures import Future
+from logging import getLogger as get_logger
+from typing import Optional
+
+from megfile.config import DEFAULT_BLOCK_CAPACITY, DEFAULT_BLOCK_SIZE, S3_MAX_RETRY_TIMES
+from megfile.lib.s3_prefetch_reader import LRUCacheFutureManager, S3PrefetchReader
+from megfile.utils import thread_local
+
+_logger = get_logger(__name__)
+
+
+
[docs]class S3ShareCacheReader(S3PrefetchReader):
+    '''
+    Reader for fast reading of s3 content. It divides the file content into equal
+    parts of block_size bytes, and uses an LRU cache to keep at most block_capacity
+    blocks in memory. open(), seek() and read() trigger a prefetch, which caches
+    block_forward blocks of data starting from the current offset (the position
+    after reading, if the calling function is read).
+    '''
+
+    def __init__(
+            self,
+            bucket: str,
+            key: str,
+            *,
+            s3_client,
+            block_size: int = DEFAULT_BLOCK_SIZE,
+            block_capacity: int = DEFAULT_BLOCK_CAPACITY,
+            block_forward: Optional[int] = None,
+            max_retries: int = S3_MAX_RETRY_TIMES,
+            cache_key: str = 'lru',
+            max_workers: Optional[int] = None,
+            profile_name: Optional[str] = None):
+
+        self._cache_key = cache_key
+
+        super().__init__(
+            bucket,
+            key,
+            s3_client=s3_client,
+            block_size=block_size,
+            block_capacity=block_capacity,
+            block_forward=block_forward,
+            max_retries=max_retries,
+            max_workers=max_workers,
+            profile_name=profile_name,
+        )
+
+    def _get_futures(self):
+        futures = thread_local(
+            'S3ShareCacheReader.' + self._cache_key, ShareCacheFutureManager)
+        futures.register(self.name)
+        return futures
+
+    def _seek_buffer(self, index: int, offset: int = 0):
+        # The corresponding block has probably not been downloaded yet when
+        # seeking to a new position, so record the offset first and apply it
+        # when the block is accessed
+        self._cached_offset = offset
+        self._block_index = index
+
+    def _submit_future(self, index: int):
+        if index < 0 or index >= self._block_stop:
+            return
+        self._futures.submit(
+            self._executor, (self.name, index), self._fetch_buffer, index)
+
+    def _insert_futures(self, index: int, future: Future):
+        self._futures[(self.name, index)] = future
+
+    def _fetch_future_result(self, index: int):
+        return self._futures.result((self.name, index))
+
+    def _cleanup_futures(self):
+        self._futures.cleanup(DEFAULT_BLOCK_CAPACITY)
+
+    def _close(self):
+        _logger.debug('close file: %r' % self.name)
+
+        if not self._is_global_executor:
+            self._executor.shutdown()
+        self._futures.unregister(self.name)  # pytype: disable=attribute-error
+
+
+class ShareCacheFutureManager(LRUCacheFutureManager):
+
+    def __init__(self):
+        super().__init__()
+        self._references = Counter()
+
+    def register(self, key):
+        self._references[key] += 1
+
+    def unregister(self, key):
+        self._references[key] -= 1
+        if self._references[key] == 0:
+            self._references.pop(key)
+            for key_tuple in list(self):
+                if key_tuple[0] != key:
+                    continue
+                future = self.pop(key_tuple)
+                if not future.done():
+                    future.cancel()  # pragma: no cover
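The difference from S3PrefetchReader above is that futures live in a per-thread ShareCacheFutureManager (via thread_local) keyed by (name, index), so several readers of the same object in one thread share downloaded blocks. A hypothetical sketch:

# Hypothetical sketch, not part of the module source.
from megfile.lib.s3_share_cache_reader import S3ShareCacheReader
from megfile.s3_path import get_s3_client

client = get_s3_client()
r1 = S3ShareCacheReader('my-bucket', 'data/big.bin', s3_client=client)
r2 = S3ShareCacheReader('my-bucket', 'data/big.bin', s3_client=client)

r1.read(1024)   # downloads block 0, cached under (name, 0)
r2.read(1024)   # same thread, same name: served from the shared cache
r1.close()      # unregister() decrements this file's reference count
r2.close()      # last close drops and cancels the file's pending futures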
\ No newline at end of file
diff --git a/_modules/megfile/pathlike.html b/_modules/megfile/pathlike.html
new file mode 100644
index 00000000..b6c067b3
--- /dev/null
+++ b/_modules/megfile/pathlike.html
@@ -0,0 +1,1015 @@
+megfile.pathlike — megfile documentation

Source code for megfile.pathlike

+import os
+import stat
+from collections.abc import Sequence
+from enum import Enum
+from functools import wraps
+from typing import IO, Any, AnyStr, BinaryIO, Callable, Iterator, List, NamedTuple, Optional, Tuple, Union
+
+from megfile.lib.compat import PathLike as _PathLike
+from megfile.lib.compat import fspath
+from megfile.lib.fnmatch import _compile_pattern
+from megfile.lib.joinpath import uri_join
+from megfile.utils import cachedproperty, classproperty
+
+# Python 3.5+ compatible
+'''
+class StatResult(NamedTuple):
+
+    size: int = 0
+    ctime: float = 0.0
+    mtime: float = 0.0
+    isdir: bool = False
+    islnk: bool = False
+    extra: Any = None  # raw stat info
+
+in Python 3.6+
+'''
+
+_StatResult = NamedTuple(
+    'StatResult', [
+        ('size', int), ('ctime', float), ('mtime', float), ('isdir', bool),
+        ('islnk', bool), ('extra', Any)
+    ])
+_StatResult.__new__.__defaults__ = (0, 0.0, 0.0, False, False, None)
+
+
+class Access(Enum):
+    READ = 1
+    WRITE = 2
+
+
+
[docs]class StatResult(_StatResult): + +
[docs] def is_file(self) -> bool: + return not self.isdir or self.islnk
+ +
[docs] def is_dir(self) -> bool: + return self.isdir and not self.islnk
+ + + + @property + def st_mode(self) -> int: + ''' + File mode: file type and file mode bits (permissions). + Only support fs. + ''' + if self.extra and hasattr(self.extra, 'st_mode'): + return self.extra.st_mode + if self.is_symlink(): + return stat.S_IFLNK + elif self.is_dir(): + return stat.S_IFDIR + return stat.S_IFREG + + @property + def st_ino(self) -> int: + ''' + Platform dependent, but if non-zero, uniquely identifies the file for a given value of st_dev. Typically: + + the inode number on Unix, + the file index on Windows, + the decimal of etag on oss. + ''' + if self.extra: + if hasattr(self.extra, 'st_ino'): + return self.extra.st_ino + elif isinstance(self.extra, dict) and self.extra.get('ETag'): + return int(self.extra['ETag'][1:-1], 16) + return 0 + + @property + def st_dev(self) -> int: + ''' + Identifier of the device on which this file resides. + ''' + if self.extra: + if hasattr(self.extra, 'st_dev'): + return self.extra.st_dev + return 0 + + @property + def st_nlink(self) -> int: + ''' + Number of hard links. + Only support fs. + ''' + if self.extra and hasattr(self.extra, 'st_nlink'): + return self.extra.st_nlink + return 0 + + @property + def st_uid(self) -> int: + ''' + User identifier of the file owner. + Only support fs. + ''' + if self.extra and hasattr(self.extra, 'st_uid'): + return self.extra.st_uid + return 0 + + @property + def st_gid(self) -> int: + ''' + Group identifier of the file owner. + Only support fs. + ''' + if self.extra and hasattr(self.extra, 'st_gid'): + return self.extra.st_gid + return 0 + + @property + def st_size(self) -> int: + ''' + Size of the file in bytes. + ''' + if self.extra and hasattr(self.extra, 'st_size'): + return self.extra.st_size + return self.size + + @property + def st_atime(self) -> float: + ''' + Time of most recent access expressed in seconds. + Only support fs. + ''' + if self.extra and hasattr(self.extra, 'st_atime'): + return self.extra.st_atime + return 0.0 + + @property + def st_mtime(self) -> float: + ''' + Time of most recent content modification expressed in seconds. + ''' + if self.extra and hasattr(self.extra, 'st_mtime'): + return self.extra.st_mtime + return self.mtime + + @property + def st_ctime(self) -> float: + ''' + Platform dependent: + + the time of most recent metadata change on Unix, + the time of creation on Windows, expressed in seconds, + the time of file created on oss; if is dir, return the latest ctime of the files in dir. + ''' + if self.extra and hasattr(self.extra, 'st_ctime'): + return self.extra.st_ctime + return self.ctime + + @property + def st_atime_ns(self) -> int: + ''' + Time of most recent access expressed in nanoseconds as an integer. + Only support fs. + ''' + if self.extra and hasattr(self.extra, 'st_atime_ns'): + return self.extra.st_atime_ns + return 0 + + @property + def st_mtime_ns(self) -> int: + ''' + Time of most recent content modification expressed in nanoseconds as an integer. + Only support fs. + ''' + if self.extra and hasattr(self.extra, 'st_mtime_ns'): + return self.extra.st_mtime_ns + return 0 + + @property + def st_ctime_ns(self) -> int: + ''' + Platform dependent: + + the time of most recent metadata change on Unix, + the time of creation on Windows, expressed in nanoseconds as an integer. + + Only support fs. + ''' + if self.extra and hasattr(self.extra, 'st_ctime_ns'): + return self.extra.st_ctime_ns + return 0
+ + +''' +class FileEntry(NamedTuple): + + name: str + stat: StatResult + +in Python 3.6+ +''' + +_FileEntry = NamedTuple( + 'FileEntry', [('name', str), ('path', str), ('stat', StatResult)]) + + +class FileEntry(_FileEntry): + + def inode(self) -> Optional[Union[int, str]]: + return self.stat.st_ino + + def is_file(self) -> bool: + return self.stat.is_file() + + def is_dir(self) -> bool: + return self.stat.is_dir() + + def is_symlink(self) -> bool: + return self.stat.is_symlink() + + +def method_not_implemented(func): + + @wraps(func) + def wrapper(self, *args, **kwargs): + raise NotImplementedError( + 'method %r not implemented: %r' % (func.__name__, self)) + + return wrapper + + +class BasePath: + + def __init__(self, path: "PathLike"): + self.path = str(path) + + def __str__(self) -> str: + return self.path + + def __repr__(self) -> str: + return '%s(%r)' % (self.__class__.__name__, str(self)) + + def __bytes__(self) -> bytes: + return str(self).encode() + + def __fspath__(self) -> str: + return self.path + + def __hash__(self) -> int: + return hash(fspath(self)) + + def __eq__(self, other_path: "BasePath") -> bool: + return fspath(self) == fspath(other_path) + + # pytype: disable=bad-return-type + + @method_not_implemented + def is_dir(self, followlinks: bool = False) -> bool: # type: ignore + """Return True if the path points to a directory.""" + + @method_not_implemented + def is_file(self, followlinks: bool = False) -> bool: # type: ignore + """Return True if the path points to a regular file.""" + + def is_symlink(self) -> bool: + return False + + @method_not_implemented + def access(self, mode: Access) -> bool: # type: ignore + """Return True if the path has access permission described by mode.""" + + @method_not_implemented + def exists(self, followlinks: bool = False) -> bool: # type: ignore + """Whether the path points to an existing file or directory.""" + + # listdir or iterdir? 
+ @method_not_implemented + def listdir(self) -> List[str]: # type: ignore + """Return the names of the entries in the directory the path points to.""" + + @method_not_implemented + def scandir(self) -> Iterator[FileEntry]: # type: ignore + """Return an iterator of FileEntry objects corresponding to the entries in the directory.""" + + @method_not_implemented + def getsize(self, follow_symlinks: bool = True) -> int: # type: ignore + """Return the size, in bytes.""" + + @method_not_implemented + def getmtime(self, follow_symlinks: bool = True) -> float: # type: ignore + """Return the time of last modification.""" + + @method_not_implemented + def stat(self, follow_symlinks=True) -> StatResult: # type: ignore + """Get the status of the path.""" + + @method_not_implemented + def remove(self, missing_ok: bool = False) -> None: + """Remove (delete) the file.""" + + @method_not_implemented + def unlink(self, missing_ok: bool = False) -> None: + """Remove (delete) the file.""" + + @method_not_implemented + def mkdir( + self, mode=0o777, parents: bool = False, + exist_ok: bool = False) -> None: + """Create a directory.""" + + @method_not_implemented + def rmdir(self) -> None: + """Remove (delete) the directory.""" + + @method_not_implemented + def open(self, mode: str = 'r', **kwargs) -> IO[AnyStr]: # type: ignore + """Open the file with mode.""" + + @method_not_implemented + def walk(self, followlinks: bool = False + ) -> Iterator[Tuple[str, List[str], List[str]]]: # type: ignore + """Generate the file names in a directory tree by walking the tree.""" + + @method_not_implemented + def scan(self, missing_ok: bool = True, + followlinks: bool = False) -> Iterator[str]: # type: ignore + """Iterate through the files in the directory.""" + + @method_not_implemented + def scan_stat(self, missing_ok: bool = True, followlinks: bool = False + ) -> Iterator[FileEntry]: # type: ignore + """Iterate through the files in the directory, with file stat.""" + + @method_not_implemented + def glob(self, pattern, recursive: bool = True, + missing_ok: bool = True) -> List['BasePath']: # type: ignore + """Return files whose paths match the glob pattern.""" + + @method_not_implemented + def iglob(self, pattern, recursive: bool = True, + missing_ok: bool = True) -> Iterator['BasePath']: # type: ignore + """Return an iterator of files whose paths match the glob pattern.""" + + @method_not_implemented + def glob_stat( + self, pattern, recursive: bool = True, + missing_ok: bool = True) -> Iterator[FileEntry]: # type: ignore + """Return an iterator of files with stat whose paths match the glob pattern.""" + + @method_not_implemented + def load(self) -> BinaryIO: # type: ignore + """Read all content in binary.""" + + @method_not_implemented + def save(self, file_object: BinaryIO): + """Write the opened binary stream to the path.""" + + @method_not_implemented + def joinpath(self, *other_paths: "PathLike") -> 'BasePath': # type: ignore + """Join one or more paths.""" + + @method_not_implemented + def abspath(self): # type: ignore + """Return a normalized absolutized version of the path.""" + + @method_not_implemented + def realpath(self): # type: ignore + """Return the canonical path of the path.""" + + @method_not_implemented + def relpath(self, start=None): # type: ignore + """Return the relative path.""" + + @method_not_implemented + def is_absolute(self) -> bool: # type: ignore + """Return True if the path is an absolute pathname.""" + + @method_not_implemented + def is_mount(self) -> bool: # type: ignore + """Return True if 
the path is a mount point.""" + + @method_not_implemented + def resolve(self): # type: ignore + """Alias of realpath.""" + + def touch(self): + with self.open('w'): + pass + + # will be deleted in next version + def is_link(self) -> bool: + return self.is_symlink() + + def makedirs(self, exist_ok: bool = False) -> None: + ''' + Recursive directory creation function. Like mkdir(), but makes all intermediate-level directories needed to contain the leaf directory. + ''' + self.mkdir(parents=True, exist_ok=exist_ok) + + # pytype: enable=bad-return-type + + +PathLike = Union[str, BasePath, _PathLike] + + +class BaseURIPath(BasePath): + + # ##### + # Backwards compatible API, will be removed in megfile 1.0 + @classmethod + def get_protocol(self) -> Optional[str]: + pass # pragma: no cover + + @classproperty + def protocol(cls) -> str: + return cls.get_protocol() + + def make_uri(self) -> str: + return self.path_with_protocol + + def as_uri(self) -> str: + return self.make_uri() + + # ##### + + @cachedproperty + def path_with_protocol(self) -> str: + '''Return path with protocol, like file:///root, s3://bucket/key''' + path = self.path + protocol_prefix = self.protocol + "://" + if path.startswith(protocol_prefix): + return path + return protocol_prefix + path.lstrip('/') + + @cachedproperty + def path_without_protocol(self) -> str: + '''Return path without protocol, example: if path is s3://bucket/key, return bucket/key''' + path = self.path + protocol_prefix = self.protocol + "://" + if path.startswith(protocol_prefix): + path = path[len(protocol_prefix):] + return path + + def as_posix(self) -> str: + '''Return a string representation of the path with forward slashes (/)''' + return self.path_with_protocol + + @classmethod + def from_path(cls, path) -> "BaseURIPath": + """Return new instance of this class + + :param path: new path + :return: new instance of new path + :rtype: BaseURIPath + """ + return cls(path) + + @classmethod + def from_uri(cls, path: str) -> "BaseURIPath": + protocol_prefix = cls.protocol + "://" + if path[:len(protocol_prefix)] != protocol_prefix: + raise ValueError( + "protocol not match, expected: %r, got: %r" % + (cls.protocol, path)) + return cls.from_path(path[len(protocol_prefix):]) + + def __fspath__(self) -> str: + return self.as_uri() + + def __lt__(self, other_path: "BaseURIPath") -> bool: + if not isinstance(other_path, BaseURIPath): + raise TypeError("%r is not 'URIPath'" % other_path) + if self.protocol != other_path.protocol: + raise TypeError( + "'<' not supported between instances of %r and %r" % + (type(self), type(other_path))) + return fspath(self) < fspath(other_path) + + def __le__(self, other_path: "BaseURIPath") -> bool: + if not isinstance(other_path, BaseURIPath): + raise TypeError("%r is not 'URIPath'" % other_path) + if self.protocol != other_path.protocol: + raise TypeError( + "'<=' not supported between instances of %r and %r" % + (type(self), type(other_path))) + return str(self) <= str(other_path) + + def __gt__(self, other_path: "BaseURIPath") -> bool: + if not isinstance(other_path, BaseURIPath): + raise TypeError("%r is not 'URIPath'" % other_path) + if self.protocol != other_path.protocol: + raise TypeError( + "'>' not supported between instances of %r and %r" % + (type(self), type(other_path))) + return str(self) > str(other_path) + + def __ge__(self, other_path: "BaseURIPath") -> bool: + if not isinstance(other_path, BaseURIPath): + raise TypeError("%r is not 'URIPath'" % other_path) + if self.protocol != other_path.protocol: + raise 
TypeError( + "'>=' not supported between instances of %r and %r" % + (type(self), type(other_path))) + return str(self) >= str(other_path) + + @classproperty + def drive(self) -> str: + return '' + + @classproperty + def root(self) -> str: + return self.protocol + '://' + + @classproperty + def anchor(self) -> str: + return self.root + + +class URIPath(BaseURIPath): + + def __init__(self, path: "PathLike", *other_paths: "PathLike"): + if len(other_paths) > 0: + path = self.from_path(path).joinpath(*other_paths) + self.path = str(path) + + def __truediv__(self, other_path: PathLike) -> "BaseURIPath": + if isinstance(other_path, BaseURIPath): + if self.protocol != other_path.protocol: + raise TypeError( + "'/' not supported between instances of %r and %r" % + (type(self), type(other_path))) + elif not isinstance(other_path, str): + raise TypeError("%r is not 'str' nor 'URIPath'" % other_path) + return self.joinpath(other_path) + + def joinpath(self, *other_paths: PathLike) -> "BaseURIPath": + '''Calling this method is equivalent to combining the path with each of the other arguments in turn''' + return self.from_path(uri_join(str(self), *map(str, other_paths))) + + @cachedproperty + def parts(self) -> Tuple[str]: + '''A tuple giving access to the path’s various components''' + parts = [self.root] + path = self.path_without_protocol + path = path.lstrip('/') + if path != '': + parts.extend(path.split('/')) + return tuple(parts) + + @cachedproperty + def parents(self) -> "URIPathParents": + '''An immutable sequence providing access to the logical ancestors of the path''' + return URIPathParents(self) + + @cachedproperty + def parent(self) -> "BaseURIPath": + '''The logical parent of the path''' + if self.path_without_protocol == "/": + return self + elif len(self.parents) > 0: + return self.parents[0] + return self.from_path("") + + @cachedproperty + def name(self) -> str: + '''A string representing the final path component, excluding the drive and root''' + parts = self.parts + if len(parts) == 1 and parts[0] == self.protocol + "://": + return '' + return parts[-1] + + @cachedproperty + def suffix(self) -> str: + '''The file extension of the final component''' + name = self.name + i = name.rfind('.') + if 0 < i < len(name) - 1: + return name[i:] + return '' + + @cachedproperty + def suffixes(self) -> List[str]: + '''A list of the path’s file extensions''' + name = self.name + if name.endswith('.'): + return [] + name = name.lstrip('.') + return ['.' + suffix for suffix in name.split('.')[1:]] + + @cachedproperty + def stem(self) -> str: + '''The final path component, without its suffix''' + name = self.name + i = name.rfind('.') + if 0 < i < len(name) - 1: + return name[:i] + return name + + def is_reserved(self) -> bool: + return False + + def match(self, pattern) -> bool: + '''Match this path against the provided glob-style pattern. Return True if matching is successful, False otherwise''' + match = _compile_pattern(pattern) + for index in range(len(self.parts), 0, -1): + path = '/'.join(self.parts[index:]) + if match(path) is not None: + return True + return match(self.path_with_protocol) is not None + + def is_relative_to(self, *other) -> bool: + + try: + self.relative_to(*other) + return True + except Exception: + return False + + def relative_to(self, *other) -> "BaseURIPath": + ''' + Compute a version of this path relative to the path represented by other. + If it’s impossible, ValueError is raised. 
+ ''' + if not other: + raise TypeError("need at least one argument") + + other_path = self.from_path(other[0]) + if len(other) > 0: + other_path = other_path.joinpath(*other[1:]) + other_path = other_path.path_with_protocol + path = self.path_with_protocol + + if path.startswith(other_path): + relative = path[len(other_path):] + relative = relative.lstrip('/') + return type(self)(relative) + else: + raise ValueError("%r does not start with %r" % (path, other)) + + def with_name(self, name) -> "BaseURIPath": + '''Return a new path with the name changed''' + path = str(self) + raw_name = self.name + return self.from_path(path[:len(path) - len(raw_name)] + name) + + def with_stem(self, stem) -> "BaseURIPath": + '''Return a new path with the stem changed''' + return self.with_name("".join([stem, self.suffix])) + + def with_suffix(self, suffix) -> "BaseURIPath": + '''Return a new path with the suffix changed''' + path = str(self) + raw_suffix = self.suffix + return self.from_path(path[:len(path) - len(raw_suffix)] + suffix) + + def is_absolute(self) -> bool: + return True + + def is_mount(self) -> bool: + '''Test whether a path is a mount point + + :returns: True if a path is a mount point, else False + ''' + return False + + def is_socket(self) -> bool: + ''' + Return True if the path points to a Unix socket (or a symbolic link pointing to a Unix socket), False if it points to another kind of file. + + False is also returned if the path doesn’t exist or is a broken symlink; other errors (such as permission errors) are propagated. + ''' + return False + + def is_fifo(self) -> bool: + ''' + Return True if the path points to a FIFO (or a symbolic link pointing to a FIFO), False if it points to another kind of file. + + False is also returned if the path doesn’t exist or is a broken symlink; other errors (such as permission errors) are propagated. + ''' + return False + + def is_block_device(self) -> bool: + ''' + Return True if the path points to a block device (or a symbolic link pointing to a block device), False if it points to another kind of file. + + False is also returned if the path doesn’t exist or is a broken symlink; other errors (such as permission errors) are propagated. + ''' + return False + + def is_char_device(self) -> bool: + ''' + Return True if the path points to a character device (or a symbolic link pointing to a character device), False if it points to another kind of file. + + False is also returned if the path doesn’t exist or is a broken symlink; other errors (such as permission errors) are propagated. + ''' + return False + + def abspath(self) -> str: + """Return a normalized absolutized version of the path.""" + return self.path_with_protocol + + def realpath(self) -> str: + """Return the canonical path of the path.""" + return self.path_with_protocol + + def resolve(self): + """Alias of realpath.""" + return self.path_with_protocol + + def chmod(self, mode: int, *, follow_symlinks: bool = True): + raise NotImplementedError(f"'chmod' is unsupported on '{type(self)}'") + + def lchmod(self, mode: int): + ''' + Like chmod() but, if the path points to a symbolic link, the symbolic link’s mode is changed rather than its target’s. 
+ ''' + return self.chmod(mode=mode, follow_symlinks=False) + + def read_bytes(self) -> bytes: + '''Return the binary contents of the pointed-to file as a bytes object''' + with self.open(mode='rb') as f: + return f.read() + + def read_text(self) -> str: + '''Return the decoded contents of the pointed-to file as a string''' + with self.open(mode='r') as f: + return f.read() + + def rename(self, dst_path: PathLike, overwrite: bool = True) -> 'URIPath': + ''' + Rename the file + + :param dst_path: Given destination path + :param overwrite: whether to overwrite the file if it exists + ''' + raise NotImplementedError(f"'rename' is unsupported on '{type(self)}'") + + def replace(self, dst_path: PathLike, overwrite: bool = True) -> 'URIPath': + ''' + Move the file + + :param dst_path: Given destination path + :param overwrite: whether to overwrite the file if it exists + ''' + return self.rename(dst_path=dst_path, overwrite=overwrite) + + def rglob(self, pattern) -> List['URIPath']: + ''' + This is like calling Path.glob() with “**/” added in front of the given relative pattern + ''' + if not pattern: + pattern = "" + pattern = '**/' + pattern.lstrip('/') + return self.glob(pattern=pattern) + + def md5(self, recalculate: bool = False, followlinks: bool = False) -> str: + raise NotImplementedError(f"'md5' is unsupported on '{type(self)}'") + + def samefile(self, other_path) -> bool: + ''' + Return whether this path points to the same file + ''' + if hasattr(other_path, 'protocol'): + if other_path.protocol != self.protocol: + return False + + stat = self.stat() + if hasattr(other_path, 'stat'): + other_path_stat = other_path.stat() + else: + other_path_stat = self.from_path(other_path).stat() + + return stat.st_ino == other_path_stat.st_ino and stat.st_dev == other_path_stat.st_dev + + def symlink(self, dst_path: PathLike) -> None: + raise NotImplementedError(f"'symlink' is unsupported on '{type(self)}'") + + def symlink_to(self, target, target_is_directory=False): + ''' + Make this path a symbolic link to target. + symlink_to's arguments are the reverse of symlink's. + target_is_directory is ignored; it exists only for compatibility with pathlib.Path + ''' + return self.from_path( + target).symlink( # type: ignore + dst_path=self.path) + + def hardlink_to(self, target): + ''' + Make this path a hard link to the same file as target. + ''' + raise NotImplementedError( + f"'hardlink_to' is unsupported on '{type(self)}'") + + def write_bytes(self, data: bytes): + '''Open the file pointed to in bytes mode, write data to it, and close the file''' + with self.open(mode='wb') as f: + return f.write(data) + + def write_text(self, data: str, encoding=None, errors=None, newline=None): + ''' + Open the file pointed to in text mode, write data to it, and close the file. + The optional parameters have the same meaning as in open(). + ''' + with self.open(mode='w', encoding=encoding, errors=errors, + newline=newline) as f: + return f.write(data) + + def home(self): + '''Return the home directory + + returns: Home directory path + ''' + raise NotImplementedError(f"'home' is unsupported on '{type(self)}'") + + def group(self): + """ + Return the name of the group owning the file. + """ + raise NotImplementedError(f"'group' is unsupported on '{type(self)}'") + + def expanduser(self): + """ + Return a new path with expanded ~ and ~user constructs, as returned by os.path.expanduser(). + Only fs paths support this method. 
+ """ + raise NotImplementedError( + f"'expanduser' is unsupported on '{type(self)}'") + + def cwd(self) -> 'URIPath': + '''Return current working directory + + returns: Current working directory + ''' + raise NotImplementedError(f"'cwd' is unsupported on '{type(self)}'") + + def iterdir(self) -> Iterator['URIPath']: + ''' + Get all contents of given fs path. The result is in acsending alphabetical order. + + :returns: All contents have in the path in acsending alphabetical order + ''' + raise NotImplementedError(f"'iterdir' is unsupported on '{type(self)}'") + + def owner(self) -> str: + ''' + Return the name of the user owning the file. + ''' + raise NotImplementedError(f"'owner' is unsupported on '{type(self)}'") + + def absolute(self) -> 'URIPath': + ''' + Make the path absolute, without normalization or resolving symlinks. Returns a new path object + ''' + raise NotImplementedError( + f"'absolute' is unsupported on '{type(self)}'") + + def utime(self, atime: Union[float, int], mtime: Union[float, int]): + """ + Sets the access and modified times of the file specified by path to the specified values. + + :param atime: The access time to be set. + :type atime: Union[float, int] + :param mtime: The modification time to be set. + :type mtime: Union[float, int] + :raises NotImplementedError: Always raised, since the functionality is unsupported. + """ + raise NotImplementedError(f"'utime' is unsupported on '{type(self)}'") + + def lstat(self) -> StatResult: + '''Like stat() but, if the path points to a symbolic link, return the symbolic link’s information rather than its target’s.''' + return self.stat(follow_symlinks=False) + + +class URIPathParents(Sequence): + + def __init__(self, path): + # We don't store the instance to avoid reference cycles + self.cls = type(path) + parts = path.parts + if len(parts) > 0 and parts[0] == path.protocol + "://": + self.prefix = parts[0] + self.parts = parts[1:] + else: + self.prefix = '' + self.parts = parts + + def __len__(self): + return max(len(self.parts) - 1, 0) + + def __getitem__(self, idx): + if idx < 0 or idx > len(self): + raise IndexError(idx) + + if len(self.parts[:-idx - 1]) > 1: + other_path = os.path.join(*self.parts[:-idx - 1]) + elif len(self.parts[:-idx - 1]) == 1: + other_path = self.parts[:-idx - 1][0] + else: + other_path = "" + return self.cls(self.prefix + other_path) +
\ No newline at end of file
diff --git a/_modules/megfile/s3.html b/_modules/megfile/s3.html
new file mode 100644
index 00000000..605d7501
--- /dev/null
+++ b/_modules/megfile/s3.html
@@ -0,0 +1,482 @@
+megfile.s3 — megfile documentation

Source code for megfile.s3

+from typing import BinaryIO, Callable, Iterator, List, Optional, Tuple
+
+from megfile.interfaces import Access, FileEntry, PathLike, StatResult
+from megfile.s3_path import S3BufferedWriter, S3Cacher, S3LimitedSeekableWriter, S3Path, S3PrefetchReader, S3ShareCacheReader, get_endpoint_url, get_s3_client, get_s3_session, is_s3, parse_s3_url, s3_buffered_open, s3_cached_open, s3_concat, s3_download, s3_glob, s3_glob_stat, s3_iglob, s3_load_content, s3_lstat, s3_makedirs, s3_memory_open, s3_open, s3_path_join, s3_pipe_open, s3_prefetch_open, s3_readlink, s3_rename, s3_share_cache_open, s3_upload
+
+__all__ = [
+    'parse_s3_url',
+    'get_endpoint_url',
+    'get_s3_session',
+    'get_s3_client',
+    's3_path_join',
+    'is_s3',
+    's3_buffered_open',
+    's3_cached_open',
+    's3_memory_open',
+    's3_pipe_open',
+    's3_prefetch_open',
+    's3_share_cache_open',
+    's3_open',
+    'S3Cacher',
+    'S3BufferedWriter',
+    'S3LimitedSeekableWriter',
+    'S3PrefetchReader',
+    'S3ShareCacheReader',
+    's3_upload',
+    's3_download',
+    's3_load_content',
+    's3_readlink',
+    's3_glob',
+    's3_glob_stat',
+    's3_iglob',
+    's3_rename',
+    's3_makedirs',
+    's3_concat',
+    's3_lstat',
+    's3_access',
+    's3_exists',
+    's3_getmtime',
+    's3_getsize',
+    's3_isdir',
+    's3_isfile',
+    's3_listdir',
+    's3_load_from',
+    's3_hasbucket',
+    's3_move',
+    's3_remove',
+    's3_scan',
+    's3_scan_stat',
+    's3_scandir',
+    's3_stat',
+    's3_unlink',
+    's3_walk',
+    's3_getmd5',
+    's3_copy',
+    's3_sync',
+    's3_symlink',
+    's3_islink',
+    's3_save_as',
+]
+
+
+
[docs]def s3_access( + path: PathLike, mode: Access = Access.READ, + followlinks: bool = False) -> bool: + ''' + Test if path has access permission described by mode + + :param path: Given path + :param mode: access mode + :returns: bool, if the bucket of s3_url has read/write access. + ''' + return S3Path(path).access(mode, followlinks)
+ + +
[docs]def s3_exists(path: PathLike, followlinks: bool = False) -> bool: + ''' + Test if s3_url exists + + If the bucket of s3_url is not permitted to be read, return False + + :param path: Given path + :returns: True if s3_url exists, else False + ''' + return S3Path(path).exists(followlinks)
+ + +
[docs]def s3_getmtime(path: PathLike, follow_symlinks: bool = False) -> float: + ''' + Get last-modified time of the file on the given s3_url path (in Unix timestamp format). + If the path is an existent directory, return the latest modified time of all files in it. The mtime of an empty directory is 1970-01-01 00:00:00 + + If s3_url is not an existent path, which means s3_exists(s3_url) returns False, then raise S3FileNotFoundError + + :param path: Given path + :returns: Last-modified time + :raises: S3FileNotFoundError, UnsupportedError + ''' + return S3Path(path).getmtime(follow_symlinks)
+ + +
[docs]def s3_getsize(path: PathLike, follow_symlinks: bool = False) -> int: + ''' + Get file size on the given s3_url path (in bytes). + If the path is a directory, return the sum of all file sizes in it, including files in subdirectories (if any). + The result excludes the size of the directory itself. In other words, return 0 bytes for an empty directory path. + + If s3_url is not an existent path, which means s3_exists(s3_url) returns False, then raise S3FileNotFoundError + + :param path: Given path + :returns: File size + :raises: S3FileNotFoundError, UnsupportedError + ''' + return S3Path(path).getsize(follow_symlinks)
+ + +
[docs]def s3_isdir(path: PathLike, followlinks: bool = False) -> bool: + ''' + Test if an s3 url is a directory. The url is considered a directory if there exists a suffix such that ``os.path.join(s3_url, suffix)`` is a file, or if the url is a bucket (even an empty one) or s3:// itself + + :param path: Given path + :param followlinks: the result is the same whether followlinks is True or False, because s3 symlinks do not support directories + :returns: True if path is an s3 directory, else False + ''' + return S3Path(path).is_dir(followlinks)
+ + +
[docs]def s3_isfile(path: PathLike, followlinks: bool = False) -> bool: + ''' + Test if an s3_url is file + + :param path: Given path + :returns: True if path is s3 file, else False + ''' + return S3Path(path).is_file(followlinks)
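The three predicates above compose naturally; a small sketch against a hypothetical bucket layout:

# Hypothetical sketch of the predicates above.
from megfile.s3 import s3_exists, s3_isdir, s3_isfile

url = 's3://my-bucket/datasets/train'  # assumed layout
if s3_exists(url):
    if s3_isdir(url):
        print('a prefix with at least one object under it')
    elif s3_isfile(url):
        print('a regular object')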
+ + +
[docs]def s3_listdir(path: PathLike, followlinks: bool = False) -> List[str]: + ''' + Get all contents of the given s3_url. The result is in ascending alphabetical order. + + :param path: Given path + :returns: All contents with the prefix of s3_url, in ascending alphabetical order + :raises: S3FileNotFoundError, S3NotADirectoryError + ''' + return S3Path(path).listdir(followlinks)
+ + +
[docs]def s3_load_from(path: PathLike, followlinks: bool = False) -> BinaryIO: + '''Read all content in binary from the specified path into memory + + The user should close the BinaryIO manually + + :param path: Given path + :returns: BinaryIO + ''' + return S3Path(path).load(followlinks)
+ + +
[docs]def s3_hasbucket(path: PathLike) -> bool: + ''' + Test if the bucket of s3_url exists + + :param path: Given path + :returns: True if the bucket of s3_url exists, else False + ''' + return S3Path(path).hasbucket()
+ + +
[docs]def s3_move( + src_url: PathLike, dst_url: PathLike, overwrite: bool = True) -> None: + ''' + Move the file/directory from src_url to dst_url + + :param src_url: Given path + :param dst_url: Given destination path + :param overwrite: whether to overwrite the file if it exists + ''' + return S3Path(src_url).move(dst_url, overwrite)
+ + +
[docs]def s3_remove(path: PathLike, missing_ok: bool = False) -> None: + ''' + Remove the file or directory on s3; `s3://` and `s3://bucket` are not permitted to be removed + + :param path: Given path + :param missing_ok: if False and the target file/directory does not exist, raise S3FileNotFoundError + :raises: S3PermissionError, S3FileNotFoundError, UnsupportedError + ''' + return S3Path(path).remove(missing_ok)
+ + +
[docs]def s3_scan(path: PathLike, missing_ok: bool = True, + followlinks: bool = False) -> Iterator[str]: + ''' + Iteratively traverse only the files in the given s3 directory, in alphabetical order. + Every iteration on generator yields a path string. + + If s3_url is a file path, yield the file only + If s3_url is a non-existent path, return an empty generator + If s3_url is a bucket path, return all file paths in the bucket + If s3_url is an empty bucket, return an empty generator + If s3_url doesn't contain any bucket, i.e. s3_url == 's3://', raise UnsupportedError. scan() on complete s3 is not supported in megfile + + :param path: Given path + :param missing_ok: If False and there's no file in the directory, raise FileNotFoundError + :raises: UnsupportedError + :returns: A file path generator + ''' + return S3Path(path).scan(missing_ok, followlinks)
+ + +
[docs]def s3_scan_stat( + path: PathLike, missing_ok: bool = True, + followlinks: bool = False) -> Iterator[FileEntry]: + ''' + Iteratively traverse only files in given directory, in alphabetical order. + Every iteration on generator yields a tuple of path string and file stat + + :param path: Given path + :param missing_ok: If False and there's no file in the directory, raise FileNotFoundError + :raises: UnsupportedError + :returns: A file path generator + ''' + return S3Path(path).scan_stat(missing_ok, followlinks)
+ + +
[docs]def s3_scandir(path: PathLike, + followlinks: bool = False) -> Iterator[FileEntry]: + ''' + Get all contents of the given s3_url; the order of the result is not guaranteed. + + :param path: Given path + :returns: All contents with the prefix of s3_url + :raises: S3FileNotFoundError, S3NotADirectoryError + ''' + return S3Path(path).scandir(followlinks)
+ + +
[docs]def s3_stat(path: PathLike, follow_symlinks=True) -> StatResult: + ''' + Get the StatResult of the s3_url file, including file size and mtime; refer to s3_getsize and s3_getmtime + + If s3_url is not an existent path, which means s3_exists(s3_url) returns False, then raise S3FileNotFoundError + If attempting to get the StatResult of complete s3, such as s3_dir_url == 's3://', raise S3BucketNotFoundError + + :param path: Given path + :returns: StatResult + :raises: S3FileNotFoundError, S3BucketNotFoundError + ''' + return S3Path(path).stat(follow_symlinks)
+ + + + + +
[docs]def s3_walk(path: PathLike, followlinks: bool = False + ) -> Iterator[Tuple[str, List[str], List[str]]]: + ''' + Iteratively traverse the given s3 directory, in top-down order. In other words, firstly traverse the parent directory; if subdirectories exist, traverse them in alphabetical order. + Every iteration on generator yields a 3-tuple: (root, dirs, files) + + - root: Current s3 path; + - dirs: Name list of subdirectories in current directory. The list is sorted by name in ascending alphabetical order; + - files: Name list of files in current directory. The list is sorted by name in ascending alphabetical order; + + If s3_url is a file path, return an empty generator + If s3_url is a non-existent path, return an empty generator + If s3_url is a bucket path, the bucket will be the top directory, and will be returned at the first iteration of the generator + If s3_url is an empty bucket, only yield one 3-tuple (note: s3 doesn't have empty directories) + If s3_url doesn't contain any bucket, i.e. s3_url == 's3://', raise UnsupportedError. walk() on complete s3 is not supported in megfile + + :param path: Given path + :param followlinks: the result is the same whether followlinks is True or False, because s3 symlinks do not support directories + :raises: UnsupportedError + :returns: A 3-tuple generator + ''' + return S3Path(path).walk(followlinks)
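A sketch of consuming the documented (root, dirs, files) 3-tuples (the bucket is a placeholder):

# Hypothetical sketch of the walk contract described above.
from megfile.s3 import s3_walk

for root, dirs, files in s3_walk('s3://my-bucket/datasets'):
    # root: the current prefix, e.g. 's3://my-bucket/datasets/train'
    # dirs / files: names in ascending alphabetical order
    for name in files:
        print('%s/%s' % (root, name))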
+ + +
[docs]def s3_getmd5( + path: PathLike, recalculate: bool = False, + followlinks: bool = False) -> str: + ''' + Get md5 meta info of files uploaded/copied via megfile + + If the meta info is lost or non-existent, return None + + :param path: Given path + :param recalculate: calculate md5 in real-time or return the s3 etag + :param followlinks: if True, calculate md5 for the real file + :returns: md5 meta info + ''' + return S3Path(path).md5(recalculate, followlinks)
+ + +
[docs]def s3_copy( + src_url: PathLike, + dst_url: PathLike, + callback: Optional[Callable[[int], None]] = None, + followlinks: bool = False, + overwrite: bool = True) -> None: + ''' File copy on S3 + Copy the content of the file on `src_url` to `dst_url`. + It's the caller's responsibility to ensure that s3_isfile(src_url) == True + + :param src_url: Given path + :param dst_url: Target file path + :param callback: Called periodically during the copy; the input parameter is the data size (in bytes) copied since the last call + :param followlinks: False to regard symlinks as files, else True + :param overwrite: whether to overwrite the file if it exists, default is True + ''' + return S3Path(src_url).copy(dst_url, callback, followlinks, overwrite)
+ + +
[docs]def s3_sync( + src_url: PathLike, + dst_url: PathLike, + followlinks: bool = False, + force: bool = False, + overwrite: bool = True) -> None: + ''' + Copy file/directory from src_url to dst_url + + :param src_url: Given path + :param dst_url: Given destination path + :param followlinks: False to regard symlinks as files, else True + :param force: Sync files forcibly without skipping identical files; takes priority over 'overwrite', default is False + :param overwrite: whether to overwrite the file if it exists, default is True + ''' + return S3Path(src_url).sync(dst_url, followlinks, force, overwrite)
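A sketch combining the two functions above (bucket names are placeholders):

# Hypothetical sketch: copy one object, then sync a whole prefix.
from megfile.s3 import s3_copy, s3_sync

s3_copy(
    's3://my-bucket/model/latest.ckpt',
    's3://backup-bucket/model/latest.ckpt',
    callback=lambda nbytes: print('copied %d bytes' % nbytes))

# force=True re-copies even files that look identical
s3_sync('s3://my-bucket/model/', 's3://backup-bucket/model/', force=True)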
+ + + + + + + + +
[docs]def s3_save_as(file_object: BinaryIO, path: PathLike): + '''Write the opened binary stream to the specified path, but the stream won't be closed + + :param path: Given path + :param file_object: Stream to be read + ''' + return S3Path(path).save(file_object)
\ No newline at end of file
diff --git a/_modules/megfile/s3_path.html b/_modules/megfile/s3_path.html
new file mode 100644
index 00000000..2fec3381
--- /dev/null
+++ b/_modules/megfile/s3_path.html
@@ -0,0 +1,2518 @@
+megfile.s3_path — megfile documentation

Source code for megfile.s3_path

+import hashlib
+import io
+import os
+import re
+import time
+from concurrent.futures import ThreadPoolExecutor
+from functools import lru_cache, wraps
+from logging import getLogger as get_logger
+from typing import IO, Any, AnyStr, BinaryIO, Callable, Dict, Iterator, List, Optional, Tuple, Union
+
+import boto3
+import botocore
+from botocore.awsrequest import AWSResponse
+
+from megfile.config import DEFAULT_BLOCK_SIZE, GLOBAL_MAX_WORKERS, S3_CLIENT_CACHE_MODE, S3_MAX_RETRY_TIMES
+from megfile.errors import S3BucketNotFoundError, S3ConfigError, S3FileExistsError, S3FileNotFoundError, S3IsADirectoryError, S3NameTooLongError, S3NotADirectoryError, S3NotALinkError, S3PermissionError, S3UnknownError, SameFileError, UnsupportedError, _create_missing_ok_generator
+from megfile.errors import _logger as error_logger
+from megfile.errors import patch_method, raise_s3_error, s3_error_code_should_retry, s3_should_retry, translate_fs_error, translate_s3_error
+from megfile.interfaces import Access, ContextIterator, FileCacher, FileEntry, PathLike, StatResult, URIPath
+from megfile.lib.compare import is_same_file
+from megfile.lib.compat import fspath
+from megfile.lib.fnmatch import translate
+from megfile.lib.glob import has_magic, has_magic_ignore_brace, ungloblize
+from megfile.lib.joinpath import uri_join
+from megfile.lib.s3_buffered_writer import DEFAULT_MAX_BUFFER_SIZE, GLOBAL_MAX_WORKERS, S3BufferedWriter
+from megfile.lib.s3_cached_handler import S3CachedHandler
+from megfile.lib.s3_limited_seekable_writer import S3LimitedSeekableWriter
+from megfile.lib.s3_memory_handler import S3MemoryHandler
+from megfile.lib.s3_pipe_handler import S3PipeHandler
+from megfile.lib.s3_prefetch_reader import S3PrefetchReader
+from megfile.lib.s3_share_cache_reader import S3ShareCacheReader
+from megfile.lib.url import get_url_scheme
+from megfile.smart_path import SmartPath
+from megfile.utils import _is_pickle, cachedproperty, calculate_md5, generate_cache_path, get_binary_mode, get_content_offset, is_readable, necessary_params, process_local, thread_local
+
+__all__ = [
+    'S3Path',
+    'parse_s3_url',
+    'get_endpoint_url',
+    'get_s3_session',
+    'get_s3_client',
+    's3_path_join',
+    'is_s3',
+    's3_buffered_open',
+    's3_cached_open',
+    's3_memory_open',
+    's3_pipe_open',
+    's3_prefetch_open',
+    's3_share_cache_open',
+    's3_open',
+    'S3Cacher',
+    'S3BufferedWriter',
+    'S3LimitedSeekableWriter',
+    'S3PrefetchReader',
+    'S3ShareCacheReader',
+    's3_upload',
+    's3_download',
+    's3_load_content',
+    's3_readlink',
+    's3_glob',
+    's3_glob_stat',
+    's3_iglob',
+    's3_rename',
+    's3_makedirs',
+    's3_concat',
+    's3_lstat',
+]
+_logger = get_logger(__name__)
+content_md5_header = 'megfile-content-md5'
+endpoint_url = 'https://s3.amazonaws.com'
+max_pool_connections = GLOBAL_MAX_WORKERS  # for compatibility
+max_retries = S3_MAX_RETRY_TIMES
+max_keys = 1000
+
+
+def _patch_make_request(client: botocore.client.BaseClient):
+
+    def after_callback(result: Tuple[AWSResponse, dict], *args, **kwargs):
+        if not isinstance(result, tuple) or len(result) != 2 \
+            or not isinstance(result[0], AWSResponse) or not isinstance(result[1], dict):
+            return result
+        http, parsed_response = result
+        if http.status_code >= 500:
+            error_code = parsed_response.get("Error", {}).get("Code")
+            operation_model = kwargs.get('operation_model') or (
+                args[0] if args else None)
+            operation_name = operation_model.name if operation_model else 'ProxyMethod'
+            error_class = client.exceptions.from_code(error_code)
+            raise error_class(parsed_response, operation_name)
+        return result
+
+    def retry_callback(error, operation_model, request_dict, request_context):
+        if is_readable(request_dict['body']):
+            request_dict['body'].seek(0)
+
+    def before_callback(operation_model, request_dict, request_context):
+        _logger.debug(
+            'send s3 request: %r, with parameters: %s', operation_model.name,
+            request_dict)
+
+    client._make_request = patch_method(
+        client._make_request,
+        max_retries=max_retries,
+        should_retry=s3_should_retry,
+        after_callback=after_callback,
+        before_callback=before_callback,
+        retry_callback=retry_callback)
+    return client
+
+
+
[docs]def parse_s3_url(s3_url: PathLike) -> Tuple[str, str]: + s3_url = fspath(s3_url) + if not is_s3(s3_url): + raise ValueError('Not a s3 url: %r' % s3_url) + rightpart = s3_url.split('://', maxsplit=1)[1] + bucketmatch = re.match('(.*?)/', rightpart) + if bucketmatch is None: + bucket = rightpart + path = '' + else: + bucket = bucketmatch.group(1) + path = rightpart[len(bucket) + 1:] + return bucket, path
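A quick check of the split rule implemented above: everything up to the first '/' after the scheme is the bucket, and the remainder (possibly empty) is the key.

# Examples of the bucket/key split (values follow the code above).
from megfile.s3_path import parse_s3_url

assert parse_s3_url('s3://my-bucket/a/b.txt') == ('my-bucket', 'a/b.txt')
assert parse_s3_url('s3://my-bucket/') == ('my-bucket', '')
assert parse_s3_url('s3://my-bucket') == ('my-bucket', '')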
+ + +def get_scoped_config(profile_name: Optional[str] = None) -> Dict: + return get_s3_session( + profile_name=profile_name)._session.get_scoped_config() + + +@lru_cache() +def warning_endpoint_url(key: str, endpoint_url: str): + _logger.info("using %s: %s" % (key, endpoint_url)) + + +
[docs]def get_endpoint_url(profile_name: Optional[str] = None) -> str: + '''Get the endpoint url of S3 + + :returns: S3 endpoint url + ''' + if profile_name: + environ_keys = (f'{profile_name}__OSS_ENDPOINT'.upper(),) + else: + environ_keys = ( + 'OSS_ENDPOINT', 'AWS_ENDPOINT_URL_S3', 'AWS_ENDPOINT_URL') + for environ_key in environ_keys: + environ_endpoint_url = os.environ.get(environ_key) + if environ_endpoint_url: + warning_endpoint_url(environ_key, environ_endpoint_url) + return environ_endpoint_url + try: + config = get_scoped_config(profile_name=profile_name) + config_endpoint_url = config.get('s3', {}).get('endpoint_url') + config_endpoint_url = config_endpoint_url or config.get('endpoint_url') + if config_endpoint_url: + warning_endpoint_url('~/.aws/config', config_endpoint_url) + return config_endpoint_url + except botocore.exceptions.ProfileNotFound: + pass + return endpoint_url
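Per the resolution order above, an environment variable wins over ~/.aws/config and the built-in default. A sketch pointing megfile at a hypothetical local endpoint:

# Hypothetical sketch: the OSS_ENDPOINT variable takes effect first.
import os

os.environ['OSS_ENDPOINT'] = 'http://127.0.0.1:9000'  # e.g. a local minio

from megfile.s3_path import get_endpoint_url
assert get_endpoint_url() == 'http://127.0.0.1:9000'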
+ + +
[docs]def get_s3_session(profile_name=None) -> boto3.Session: + '''Get S3 session + + :returns: S3 session + ''' + return thread_local( + f's3_session:{profile_name}', boto3.Session, profile_name=profile_name)
+ + +def get_access_token(profile_name=None): + access_key_env_name = f"{profile_name}__AWS_ACCESS_KEY_ID".upper( + ) if profile_name else "AWS_ACCESS_KEY_ID" + secret_key_env_name = f"{profile_name}__AWS_SECRET_ACCESS_KEY".upper( + ) if profile_name else "AWS_SECRET_ACCESS_KEY" + access_key = os.getenv(access_key_env_name) + secret_key = os.getenv(secret_key_env_name) + if access_key and secret_key: + return access_key, secret_key + + try: + credentials = get_s3_session( + profile_name=profile_name).get_credentials() + except botocore.exceptions.ProfileNotFound: + credentials = None + if credentials: + if not access_key: + access_key = credentials.access_key + if not secret_key: + secret_key = credentials.secret_key + return access_key, secret_key + + +
[docs]def get_s3_client( + config: Optional[botocore.config.Config] = None, + cache_key: Optional[str] = None, + profile_name: Optional[str] = None): + '''Get S3 client + + :returns: S3 client + ''' + if cache_key is not None: + local_storage = thread_local + if S3_CLIENT_CACHE_MODE == 'process_local': + local_storage = process_local + return local_storage( + f"{cache_key}:{profile_name}", + get_s3_client, + config=config, + profile_name=profile_name) + + if config: + config = botocore.config.Config( + connect_timeout=5, + max_pool_connections=GLOBAL_MAX_WORKERS).merge(config) + else: + config = botocore.config.Config( + connect_timeout=5, max_pool_connections=GLOBAL_MAX_WORKERS) + + addressing_style_env_key = 'AWS_S3_ADDRESSING_STYLE' + if profile_name: + addressing_style_env_key = f'{profile_name}__AWS_S3_ADDRESSING_STYLE'.upper( + ) + addressing_style = os.environ.get(addressing_style_env_key) + if addressing_style: + config = config.merge( + botocore.config.Config(s3={'addressing_style': addressing_style})) + + access_key, secret_key = get_access_token(profile_name) + try: + session = get_s3_session(profile_name=profile_name) + except botocore.exceptions.ProfileNotFound: + session = get_s3_session() + client = session.client( + 's3', + endpoint_url=get_endpoint_url(profile_name=profile_name), + config=config, + aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + ) + client = _patch_make_request(client) + return client
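When cache_key is given, the client is memoized per thread (or per process, depending on S3_CLIENT_CACHE_MODE), so repeated calls are cheap. A sketch of that behavior (the cache_key value is arbitrary):

# Sketch of the caching behavior shown above.
from megfile.s3_path import get_s3_client

c1 = get_s3_client(cache_key='my-cache-key')
c2 = get_s3_client(cache_key='my-cache-key')
assert c1 is c2   # same thread + same cache_key -> same client instance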
+ + +def get_s3_client_with_cache( + config: Optional[botocore.config.Config] = None, + profile_name: Optional[str] = None): + return get_s3_client( + config=config, + cache_key='s3_filelike_client', + profile_name=profile_name) + + +
[docs]def s3_path_join(path: PathLike, *other_paths: PathLike) -> str: + ''' + Concatenate two or more paths into a complete path + + :param path: Given path + :param other_paths: Paths to be concatenated + :returns: Concatenated complete path + + .. note :: + + The difference between this function and ``os.path.join`` is that this function ignores the leading slash (which indicates an absolute path) in ``other_paths`` and concatenates directly. + e.g. os.path.join('/path', 'to', '/file') => '/file', but s3_path_join('/path', 'to', '/file') => '/path/to/file' + ''' + return uri_join(fspath(path), *map(fspath, other_paths))
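The note above is easy to verify; a comparison against os.path.join (paths are illustrative):

# s3_path_join never restarts at a leading slash, unlike os.path.join.
import os.path
from megfile.s3_path import s3_path_join

assert os.path.join('/path', 'to', '/file') == '/file'
assert s3_path_join('/path', 'to', '/file') == '/path/to/file'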
+ + +def _list_all_buckets(profile_name: Optional[str] = None) -> List[str]: + client = get_s3_client_with_cache(profile_name=profile_name) + response = client.list_buckets() + return [content['Name'] for content in response['Buckets']] + + +def _parse_s3_url_ignore_brace(s3_url: str) -> Tuple[str, str]: + s3_url = fspath(s3_url) + s3_scheme, rightpart = s3_url[:5], s3_url[5:] + if s3_scheme != 's3://': + raise ValueError('Not a s3 url: %r' % s3_url) + left_brace = False + for current_index, current_character in enumerate(rightpart): + if current_character == "/" and left_brace is False: + return rightpart[:current_index], rightpart[current_index + 1:] + elif current_character == "{": + left_brace = True + elif current_character == "}": + left_brace = False + return rightpart, "" + + +def _group_s3path_by_bucket( + s3_pathname: str, profile_name: Optional[str] = None) -> List[str]: + bucket, key = _parse_s3_url_ignore_brace(s3_pathname) + if not bucket: + if not key: + raise UnsupportedError('Glob whole s3', s3_pathname) + raise S3BucketNotFoundError('Empty bucket name: %r' % s3_pathname) + + grouped_path = [] + + def generate_s3_path(bucket: str, key: str) -> str: + if key: + return "s3://%s/%s" % (bucket, key) + return "s3://%s%s" % (bucket, "/" if s3_pathname.endswith("/") else "") + + all_bucket = lru_cache(maxsize=1)(_list_all_buckets) + for bucketname in ungloblize(bucket): + if has_magic(bucketname): + split_bucketname = bucketname.split("/", 1) + path_part = None + if len(split_bucketname) == 2: + bucketname, path_part = split_bucketname + pattern = re.compile(translate(re.sub(r'\*{2,}', '*', bucketname))) + + for bucket in all_bucket(profile_name): + if pattern.fullmatch(bucket) is not None: + if path_part is not None: + bucket = "%s/%s" % (bucket, path_part) + grouped_path.append(generate_s3_path(bucket, key)) + else: + grouped_path.append(generate_s3_path(bucketname, key)) + + return grouped_path + + +def _s3_split_magic_ignore_brace(s3_pathname: str) -> Tuple[str, str]: + if not s3_pathname: + raise ValueError("s3_pathname: %s", s3_pathname) + + has_protocol = False + if s3_pathname.startswith("s3://"): + has_protocol = True + s3_pathname = s3_pathname[5:] + + has_delimiter = False + if s3_pathname.endswith("/"): + has_delimiter = True + s3_pathname = s3_pathname[:-1] + + normal_parts = [] + magic_parts = [] + left_brace = False + left_index = 0 + for current_index, current_character in enumerate(s3_pathname): + if current_character == "/" and left_brace is False: + if has_magic_ignore_brace(s3_pathname[left_index:current_index]): + magic_parts.append(s3_pathname[left_index:current_index]) + if s3_pathname[current_index + 1:]: + magic_parts.append(s3_pathname[current_index + 1:]) + left_index = len(s3_pathname) + break + normal_parts.append(s3_pathname[left_index:current_index]) + left_index = current_index + 1 + elif current_character == "{": + left_brace = True + elif current_character == "}": + left_brace = False + if s3_pathname[left_index:]: + if has_magic_ignore_brace(s3_pathname[left_index:]): + magic_parts.append(s3_pathname[left_index:]) + else: + normal_parts.append(s3_pathname[left_index:]) + + if has_protocol and normal_parts: + normal_parts.insert(0, "s3:/") + elif has_protocol: + magic_parts.insert(0, "s3:/") + + if has_delimiter and magic_parts: + magic_parts.append("") + elif has_delimiter: + normal_parts.append("") + + return "/".join(normal_parts), "/".join(magic_parts) + + +def _group_s3path_by_prefix(s3_pathname: str) -> List[str]: + + _, key = 
parse_s3_url(s3_pathname) + if not key: + return ungloblize(s3_pathname) + + top_dir, magic_part = _s3_split_magic_ignore_brace(s3_pathname) + if not top_dir: + return [magic_part] + grouped_path = [] + for pathname in ungloblize(top_dir): + if magic_part: + pathname = "/".join([pathname, magic_part]) + grouped_path.append(pathname) + return grouped_path + + +def _become_prefix(prefix: str) -> str: + if prefix != '' and not prefix.endswith('/'): + prefix += '/' + return prefix + + +def _s3_split_magic(s3_pathname: str) -> Tuple[str, str]: + if not has_magic(s3_pathname): + return s3_pathname, '' + delimiter = '/' + normal_parts = [] + magic_parts = [] + all_parts = s3_pathname.split(delimiter) + for i, part in enumerate(all_parts): + if not has_magic(part): + normal_parts.append(part) + else: + magic_parts = all_parts[i:] + break + return delimiter.join(normal_parts), delimiter.join(magic_parts) + + +def _list_objects_recursive( + s3_client, bucket: str, prefix: str, delimiter: str = ''): + + resp = s3_client.list_objects_v2( + Bucket=bucket, Prefix=prefix, Delimiter=delimiter, MaxKeys=max_keys) + + while True: + yield resp + + if not resp['IsTruncated']: + break + + resp = s3_client.list_objects_v2( + Bucket=bucket, + Prefix=prefix, + Delimiter=delimiter, + ContinuationToken=resp['NextContinuationToken'], + MaxKeys=max_keys) + + +def _make_stat(content: Dict[str, Any]): + return StatResult( + islnk=content.get('islnk', False), + size=content['Size'], + mtime=content['LastModified'].timestamp(), + extra=content, + ) + + +def _s3_glob_stat_single_path( + s3_pathname: PathLike, + recursive: bool = True, + missing_ok: bool = True, + followlinks: bool = False, + profile_name: Optional[str] = None) -> Iterator[FileEntry]: + if not recursive: + # If not recursive, replace ** with * + s3_pathname = re.sub(r'\*{2,}', '*', s3_pathname) + top_dir, wildcard_part = _s3_split_magic(s3_pathname) + search_dir = wildcard_part.endswith('/') + + def should_recursive(wildcard_part: str) -> bool: + if '**' in wildcard_part: + return True + for expanded_path in ungloblize(wildcard_part): + parts_length = len(expanded_path.split('/')) + if parts_length + search_dir >= 2: + return True + return False + + def create_generator(_s3_pathname) -> Iterator[FileEntry]: + top_dir_with_profile = top_dir + if profile_name: + top_dir_with_profile = f's3+{profile_name}://{top_dir[5:]}' + if not S3Path(top_dir_with_profile).exists(): + return + if not has_magic(_s3_pathname): + _s3_pathname_obj = S3Path(_s3_pathname) + if _s3_pathname_obj.is_file(): + stat = S3Path(_s3_pathname).stat(follow_symlinks=followlinks) + yield FileEntry( + _s3_pathname_obj.name, _s3_pathname_obj.path, stat) + if _s3_pathname_obj.is_dir(): + yield FileEntry( + _s3_pathname_obj.name, _s3_pathname_obj.path, + StatResult(isdir=True)) + return + + delimiter = '' + if not should_recursive(wildcard_part): + delimiter = '/' + + dirnames = set() + pattern = re.compile(translate(_s3_pathname)) + bucket, key = parse_s3_url(top_dir) + prefix = _become_prefix(key) + client = get_s3_client_with_cache(profile_name=profile_name) + with raise_s3_error(_s3_pathname): + for resp in _list_objects_recursive(client, bucket, prefix, + delimiter): + for content in resp.get('Contents', []): + path = s3_path_join('s3://', bucket, content['Key']) + if not search_dir and pattern.match(path): + yield FileEntry( + S3Path(path).name, path, _make_stat(content)) + dirname = os.path.dirname(path) + while dirname not in dirnames and dirname != top_dir: + dirnames.add(dirname) + path 
= dirname + '/' if search_dir else dirname + if pattern.match(path): + yield FileEntry( + S3Path(path).name, path, StatResult(isdir=True)) + dirname = os.path.dirname(dirname) + for common_prefix in resp.get('CommonPrefixes', []): + path = s3_path_join( + 's3://', bucket, common_prefix['Prefix']) + dirname = os.path.dirname(path) + if dirname not in dirnames and dirname != top_dir: + dirnames.add(dirname) + path = dirname + '/' if search_dir else dirname + if pattern.match(path): + yield FileEntry( + S3Path(path).name, path, StatResult(isdir=True)) + + return create_generator(s3_pathname) + + +def _s3_scan_pairs(src_url: PathLike, + dst_url: PathLike) -> Iterator[Tuple[PathLike, PathLike]]: + for src_file_path in S3Path(src_url).scan(): + content_path = src_file_path[len(src_url):] + if len(content_path) > 0: + dst_file_path = s3_path_join(dst_url, content_path) + else: + dst_file_path = dst_url + yield src_file_path, dst_file_path + + +
[docs]def is_s3(path: PathLike) -> bool:
+ '''
+ 1. According to `aws-cli <https://docs.aws.amazon.com/cli/latest/reference/s3/index.html>`_ , test if a path is an s3 path.
+ 2. megfile also supports paths like `s3[+profile_name]://bucket/key`
+
+ :param path: Path to be tested
+ :returns: True if path is an s3 path, else False
+ '''
+ path = fspath(path)
+ if re.match(r'^s3(\+\w+)?:\/\/', path):
+ return True
+ return False
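+ + +# Usage sketch for is_s3 (paths are illustrative):
+#
+# >>> is_s3('s3://bucket/key'), is_s3('s3+my_profile://bucket/key'), is_s3('/tmp/file')
+# (True, True, False)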
+ + +def _s3_binary_mode(s3_open_func): + + @wraps(s3_open_func) + def wrapper( + s3_url, + mode: str = 'rb', + encoding: Optional[str] = None, + errors: Optional[str] = None, + **kwargs): + bucket, key = parse_s3_url(s3_url) + if not bucket: + raise S3BucketNotFoundError('Empty bucket name: %r' % s3_url) + + if not key or key.endswith('/'): + raise S3IsADirectoryError('Is a directory: %r' % s3_url) + + if 'x' in mode: + if S3Path(s3_url).is_file(): + raise S3FileExistsError('File exists: %r' % s3_url) + mode = mode.replace('x', 'w') + + fileobj = s3_open_func(s3_url, get_binary_mode(mode), **kwargs) + if 'b' not in mode: + fileobj = io.TextIOWrapper( + fileobj, encoding=encoding, errors=errors) # pytype: disable=wrong-arg-types + fileobj.mode = mode + return fileobj + + return wrapper + + +
[docs]@_s3_binary_mode
+def s3_prefetch_open(
+ s3_url: PathLike,
+ mode: str = 'rb',
+ followlinks: bool = False,
+ *,
+ max_concurrency: Optional[int] = None,
+ max_block_size: int = DEFAULT_BLOCK_SIZE) -> S3PrefetchReader:
+ '''Open an asynchronous prefetch reader, to support fast sequential and random reads
+
+ .. note ::
+
+ Users should make sure that the reader / writer is closed correctly
+
+ Supports context manager
+
+ Some parameter settings may perform well: max_concurrency=10 or 20, max_block_size=8 or 16 MB; the default value None means using the global thread pool
+
+ :param max_concurrency: Max download thread number, None by default
+ :param max_block_size: Max data size downloaded by each thread, in bytes, 8MB by default
+ :returns: An opened S3PrefetchReader object
+ :raises: S3FileNotFoundError
+ '''
+ if mode != 'rb':
+ raise ValueError('unacceptable mode: %r' % mode)
+ if not isinstance(s3_url, S3Path):
+ s3_url = S3Path(s3_url)
+ if followlinks:
+ try:
+ s3_url = s3_url.readlink()
+ except S3NotALinkError:
+ pass
+
+ bucket, key = parse_s3_url(s3_url.path_with_protocol)
+ config = botocore.config.Config(max_pool_connections=max_pool_connections)
+ client = get_s3_client_with_cache(
+ config=config, profile_name=s3_url._profile_name)
+ return S3PrefetchReader(
+ bucket,
+ key,
+ s3_client=client,
+ max_retries=max_retries,
+ max_workers=max_concurrency,
+ block_size=max_block_size,
+ profile_name=s3_url._profile_name)
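+ + +# A minimal read sketch with s3_prefetch_open (hypothetical object path);
+# the handle must be opened in 'rb' mode and supports the context-manager protocol:
+#
+# with s3_prefetch_open('s3://mybucket/data.bin', max_concurrency=10,
+#                       max_block_size=8 * 2**20) as reader:
+#     header = reader.read(1024)  # fast sequential read
+#     reader.seek(512)            # random reads are supported as well
+#     chunk = reader.read(256)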
+ + +
[docs]@_s3_binary_mode
+def s3_share_cache_open(
+ s3_url: PathLike,
+ mode: str = 'rb',
+ followlinks: bool = False,
+ *,
+ cache_key: str = 'lru',
+ max_concurrency: Optional[int] = None,
+ max_block_size: int = DEFAULT_BLOCK_SIZE) -> S3ShareCacheReader:
+ '''Open an asynchronous prefetch reader with a shared cache, to support fast sequential and random reads
+
+ .. note ::
+
+ Users should make sure that the reader / writer is closed correctly
+
+ Supports context manager
+
+ Some parameter settings may perform well: max_concurrency=10 or 20, max_block_size=8 or 16 MB; the default value None means using the global thread pool
+
+ :param max_concurrency: Max download thread number, None by default
+ :param max_block_size: Max data size downloaded by each thread, in bytes, 8MB by default
+ :returns: An opened S3ShareCacheReader object
+ :raises: S3FileNotFoundError
+ '''
+ if mode != 'rb':
+ raise ValueError('unacceptable mode: %r' % mode)
+
+ if not isinstance(s3_url, S3Path):
+ s3_url = S3Path(s3_url)
+ if followlinks:
+ try:
+ s3_url = s3_url.readlink()
+ except S3NotALinkError:
+ pass
+
+ bucket, key = parse_s3_url(s3_url.path_with_protocol)
+ config = botocore.config.Config(max_pool_connections=max_pool_connections)
+ client = get_s3_client_with_cache(
+ config=config, profile_name=s3_url._profile_name)
+ return S3ShareCacheReader(
+ bucket,
+ key,
+ cache_key=cache_key,
+ s3_client=client,
+ max_retries=max_retries,
+ max_workers=max_concurrency,
+ block_size=max_block_size,
+ profile_name=s3_url._profile_name)
+ + +
[docs]@_s3_binary_mode
+def s3_pipe_open(
+ s3_url: PathLike,
+ mode: str,
+ followlinks: bool = False,
+ *,
+ join_thread: bool = True) -> S3PipeHandler:
+ '''Open an asynchronous read-write reader / writer, to support fast sequential reads / writes
+
+ .. note ::
+
+ Users should make sure that the reader / writer is closed correctly
+
+ Supports context manager
+
+ When join_thread is False, this function will not wait for the asynchronous writing to finish while the file handle is closing;
+ False doesn't affect read-handles, but it can speed up write-handles because the file will be written asynchronously.
+ However, the asynchronous behaviour cannot guarantee that the file is successfully written, and frequent execution may cause thread and file handle exhaustion
+
+ :param mode: Mode to open file, either "rb" or "wb"
+ :param join_thread: If True, wait after function execution until s3 finishes writing
+ :returns: An opened BufferedReader / BufferedWriter object
+ '''
+ if mode not in ('rb', 'wb'):
+ raise ValueError('unacceptable mode: %r' % mode)
+
+ if mode[0] == 'r' and not S3Path(s3_url).is_file():
+ raise S3FileNotFoundError('No such file: %r' % s3_url)
+
+ if not isinstance(s3_url, S3Path):
+ s3_url = S3Path(s3_url)
+ if followlinks:
+ try:
+ s3_url = s3_url.readlink()
+ except S3NotALinkError:
+ pass
+
+ bucket, key = parse_s3_url(s3_url.path_with_protocol)
+ config = botocore.config.Config(max_pool_connections=max_pool_connections)
+ client = get_s3_client_with_cache(
+ config=config, profile_name=s3_url._profile_name)
+ return S3PipeHandler(
+ bucket,
+ key,
+ mode,
+ s3_client=client,
+ join_thread=join_thread,
+ profile_name=s3_url._profile_name)
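+ + +# Write sketch with s3_pipe_open (hypothetical path); only 'rb' / 'wb' are accepted,
+# and join_thread=False trades the write-completion guarantee for speed:
+#
+# with s3_pipe_open('s3://mybucket/stream.log', 'wb', join_thread=True) as writer:
+#     writer.write(b'line 1\n')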
+ + +
[docs]@_s3_binary_mode
+def s3_cached_open(
+ s3_url: PathLike,
+ mode: str,
+ followlinks: bool = False,
+ *,
+ cache_path: Optional[str] = None) -> S3CachedHandler:
+ '''Open a local-cache file reader / writer, for frequent random reads / writes
+
+ .. note ::
+
+ Users should make sure that the reader / writer is closed correctly
+
+ Supports context manager
+
+ cache_path specifies the path of the cache file. Performance could be better if the cache file path is on ssd or tmpfs
+
+ :param mode: Mode to open file, could be one of "rb", "wb", "ab", "rb+", "wb+" or "ab+"
+ :param cache_path: cache file path
+ :returns: An opened BufferedReader / BufferedWriter object
+ '''
+ if mode not in ('rb', 'wb', 'ab', 'rb+', 'wb+', 'ab+'):
+ raise ValueError('unacceptable mode: %r' % mode)
+ if not isinstance(s3_url, S3Path):
+ s3_url = S3Path(s3_url)
+ if followlinks:
+ try:
+ s3_url = s3_url.readlink()
+ except S3NotALinkError:
+ pass
+
+ bucket, key = parse_s3_url(s3_url.path_with_protocol)
+ config = botocore.config.Config(max_pool_connections=max_pool_connections)
+ client = get_s3_client_with_cache(
+ config=config, profile_name=s3_url._profile_name)
+ return S3CachedHandler(
+ bucket,
+ key,
+ mode,
+ s3_client=client,
+ cache_path=cache_path,
+ profile_name=s3_url._profile_name)
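+ + +# Sketch for s3_cached_open (hypothetical paths); a tmpfs cache file may speed up
+# frequent random access:
+#
+# with s3_cached_open('s3://mybucket/model.ckpt', 'rb',
+#                     cache_path='/dev/shm/model.ckpt') as f:
+#     f.seek(128)
+#     chunk = f.read(64)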
+ + +
[docs]@_s3_binary_mode
+def s3_buffered_open(
+ s3_url: PathLike,
+ mode: str,
+ followlinks: bool = False,
+ *,
+ max_concurrency: Optional[int] = None,
+ max_buffer_size: int = DEFAULT_MAX_BUFFER_SIZE,
+ forward_ratio: Optional[float] = None,
+ block_size: int = DEFAULT_BLOCK_SIZE,
+ limited_seekable: bool = False,
+ buffered: bool = False,
+ share_cache_key: Optional[str] = None,
+ cache_path: Optional[str] = None
+) -> Union[S3PrefetchReader, S3BufferedWriter, io.BufferedReader, io.
+ BufferedWriter, S3MemoryHandler]:
+ '''Open an asynchronous prefetch reader / buffered writer, to support fast sequential reads and writes
+
+ .. note ::
+
+ Users should make sure that the reader / writer is closed correctly
+
+ Supports context manager
+
+ Some parameter settings may perform well: max_concurrency=10 or 20, block_size=8 or 16 MB; the default value None means using the global thread pool
+
+ :param max_concurrency: Max download thread number, None by default
+ :param max_buffer_size: Max cached buffer size in memory, 128MB by default
+ :param block_size: Size of a single block, 8MB by default. Each block will be uploaded or downloaded by a single thread.
+ :param limited_seekable: If the write-handle supports limited seek (both the file head part and tail part can seek block_size). Notes: This parameter is valid only for write-handles. Read-handles support arbitrary seek
+ :returns: An opened reader / writer object, depending on mode
+ :raises: S3FileNotFoundError
+ '''
+ if mode not in ('rb', 'wb', 'ab', 'rb+', 'wb+', 'ab+'):
+ raise ValueError('unacceptable mode: %r' % mode)
+ if not isinstance(s3_url, S3Path):
+ s3_url = S3Path(s3_url)
+ if followlinks:
+ try:
+ s3_url = s3_url.readlink()
+ except S3NotALinkError:
+ pass
+
+ bucket, key = parse_s3_url(s3_url.path_with_protocol)
+ config = botocore.config.Config(max_pool_connections=max_pool_connections)
+ client = get_s3_client_with_cache(
+ config=config, profile_name=s3_url._profile_name)
+
+ if 'a' in mode or '+' in mode:
+ if cache_path is None:
+ return S3MemoryHandler(
+ bucket,
+ key,
+ mode,
+ s3_client=client,
+ profile_name=s3_url._profile_name)
+ return S3CachedHandler(
+ bucket,
+ key,
+ mode,
+ s3_client=client,
+ cache_path=cache_path,
+ profile_name=s3_url._profile_name)
+
+ if mode == 'rb':
+ # A rough conversion algorithm to align 2 types of Reader / Writer parameters
+ # TODO: Optimize the conversion algorithm
+ block_capacity = max_buffer_size // block_size
+ if forward_ratio is None:
+ block_forward = None
+ else:
+ block_forward = max(int(block_capacity * forward_ratio), 1)
+ if share_cache_key is not None:
+ reader = S3ShareCacheReader(
+ bucket,
+ key,
+ cache_key=share_cache_key,
+ s3_client=client,
+ max_retries=max_retries,
+ max_workers=max_concurrency,
+ block_size=block_size,
+ block_forward=block_forward,
+ profile_name=s3_url._profile_name)
+ else:
+ reader = S3PrefetchReader(
+ bucket,
+ key,
+ s3_client=client,
+ max_retries=max_retries,
+ max_workers=max_concurrency,
+ block_capacity=block_capacity,
+ block_forward=block_forward,
+ block_size=block_size,
+ profile_name=s3_url._profile_name)
+ if buffered or _is_pickle(reader): # pytype: disable=wrong-arg-types
+ reader = io.BufferedReader(reader) # pytype: disable=wrong-arg-types
+ return reader
+
+ if limited_seekable:
+ writer = S3LimitedSeekableWriter(
+ bucket,
+ key,
+ s3_client=client,
+ max_workers=max_concurrency,
+ max_buffer_size=max_buffer_size,
+ block_size=block_size,
+ profile_name=s3_url._profile_name)
+ else:
+ writer = S3BufferedWriter(
+ bucket,
+ key,
+ s3_client=client,
+ max_workers=max_concurrency,
+ max_buffer_size=max_buffer_size, + block_size=block_size, + profile_name=s3_url._profile_name) + if buffered or _is_pickle(writer): # pytype: disable=wrong-arg-types + writer = io.BufferedWriter(writer) # pytype: disable=wrong-arg-types + return writer
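+ + +# s3_open defaults to s3_buffered_open (see the alias defined below);
+# a write sketch with hypothetical paths and parameters:
+#
+# with s3_buffered_open('s3://mybucket/output.bin', 'wb',
+#                       block_size=8 * 2**20, limited_seekable=True) as writer:
+#     writer.write(b'payload')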
+ + +
[docs]@_s3_binary_mode
+def s3_memory_open(
+ s3_url: PathLike, mode: str,
+ followlinks: bool = False) -> S3MemoryHandler:
+ '''Open a memory-cache file reader / writer, for frequent random reads / writes
+
+ .. note ::
+
+ Users should make sure that the reader / writer is closed correctly
+
+ Supports context manager
+
+ :param mode: Mode to open file, could be one of "rb", "wb", "ab", "rb+", "wb+" or "ab+"
+ :returns: An opened S3MemoryHandler object
+ '''
+ if mode not in ('rb', 'wb', 'ab', 'rb+', 'wb+', 'ab+'):
+ raise ValueError('unacceptable mode: %r' % mode)
+ if not isinstance(s3_url, S3Path):
+ s3_url = S3Path(s3_url)
+ if followlinks:
+ try:
+ s3_url = s3_url.readlink()
+ except S3NotALinkError:
+ pass
+
+ bucket, key = parse_s3_url(s3_url.path_with_protocol)
+ config = botocore.config.Config(max_pool_connections=max_pool_connections)
+ client = get_s3_client_with_cache(
+ config=config, profile_name=s3_url._profile_name)
+ return S3MemoryHandler(
+ bucket, key, mode, s3_client=client, profile_name=s3_url._profile_name)
+ + +s3_open = s3_buffered_open + + +
[docs]def s3_download(
+ src_url: PathLike,
+ dst_url: PathLike,
+ followlinks: bool = False,
+ callback: Optional[Callable[[int], None]] = None,
+ overwrite: bool = True) -> None:
+ '''
+ Download a file from s3 to the local filesystem.
+
+ :param src_url: source s3 path
+ :param dst_url: target fs path
+ :param callback: Called periodically during copy, and the input parameter is the data size (in bytes) copied since the last call
+ :param followlinks: False if regarding symlinks as files, else True
+ :param overwrite: whether to overwrite the file if it exists, True by default
+ '''
+ from megfile.fs import is_fs
+ from megfile.fs_path import FSPath
+
+ dst_url = fspath(dst_url)
+ if not is_fs(dst_url):
+ raise OSError(f'dst_url is not fs path: {dst_url}')
+ if not dst_url or dst_url.endswith('/'):
+ raise S3IsADirectoryError('Is a directory: %r' % dst_url)
+
+ dst_path = FSPath(dst_url)
+ if not overwrite and dst_path.exists():
+ return
+
+ if not isinstance(src_url, S3Path):
+ src_url = S3Path(src_url)
+ if followlinks:
+ try:
+ src_url = src_url.readlink()
+ except S3NotALinkError:
+ pass
+ src_bucket, src_key = parse_s3_url(src_url.path_with_protocol)
+ if not src_bucket:
+ raise S3BucketNotFoundError(
+ 'Empty bucket name: %r' % src_url.path_with_protocol)
+
+ if not src_url.exists():
+ raise S3FileNotFoundError(
+ 'File not found: %r' % src_url.path_with_protocol)
+
+ if not src_url.is_file():
+ raise S3IsADirectoryError(
+ 'Is a directory: %r' % src_url.path_with_protocol)
+
+ dst_directory = os.path.dirname(dst_path.path_without_protocol)
+ if dst_directory != '':
+ os.makedirs(dst_directory, exist_ok=True)
+
+ client = get_s3_client_with_cache(profile_name=src_url._profile_name)
+ download_file = patch_method(
+ client.download_file,
+ max_retries=max_retries,
+ should_retry=s3_should_retry,
+ )
+ try:
+ download_file(
+ src_bucket,
+ src_key,
+ dst_path.path_without_protocol,
+ Callback=callback)
+ except Exception as error:
+ error = translate_fs_error(error, dst_url)
+ error = translate_s3_error(error, src_url.path_with_protocol)
+ raise error
+
+ src_stat = src_url.stat()
+ os.utime(
+ dst_path.path_without_protocol, (src_stat.st_mtime, src_stat.st_mtime))
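+ + +# Download sketch (hypothetical paths); the destination must be a local fs path:
+#
+# s3_download('s3://mybucket/data/train.csv', '/tmp/train.csv',
+#             callback=lambda nbytes: print(f'+{nbytes} bytes'))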
+ + +
[docs]def s3_upload(
+ src_url: PathLike,
+ dst_url: PathLike,
+ callback: Optional[Callable[[int], None]] = None,
+ overwrite: bool = True,
+ **kwargs) -> None:
+ '''
+ Upload a file from the local filesystem to s3.
+
+ :param src_url: source fs path
+ :param dst_url: target s3 path
+ :param callback: Called periodically during copy, and the input parameter is the data size (in bytes) copied since the last call
+ :param overwrite: whether to overwrite the file if it exists, True by default
+ '''
+ from megfile.fs import is_fs
+ from megfile.fs_path import FSPath
+
+ if not is_fs(src_url):
+ raise OSError(f'src_url is not fs path: {src_url}')
+ src_path = FSPath(src_url)
+
+ dst_bucket, dst_key = parse_s3_url(dst_url)
+ if not dst_bucket:
+ raise S3BucketNotFoundError('Empty bucket name: %r' % dst_url)
+ if not dst_key or dst_key.endswith('/'):
+ raise S3IsADirectoryError('Is a directory: %r' % dst_url)
+
+ if not overwrite and S3Path(dst_url).is_file():
+ return
+
+ client = get_s3_client_with_cache(
+ profile_name=S3Path(dst_url)._profile_name)
+ upload_fileobj = patch_method(
+ client.upload_fileobj,
+ max_retries=max_retries,
+ should_retry=s3_should_retry,
+ )
+
+ with open(src_path.path_without_protocol,
+ 'rb') as src, raise_s3_error(dst_url):
+ upload_fileobj(src, Bucket=dst_bucket, Key=dst_key, Callback=callback)
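+ + +# Upload sketch, the mirror of s3_download (hypothetical paths):
+#
+# s3_upload('/tmp/train.csv', 's3://mybucket/data/train.csv', overwrite=False)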
+ + +
[docs]def s3_load_content(
+ s3_url,
+ start: Optional[int] = None,
+ stop: Optional[int] = None,
+ followlinks: bool = False) -> bytes:
+ '''
+ Get the content of the specified file in the byte range [start, stop)
+
+ :param s3_url: Specified path
+ :param start: start index
+ :param stop: stop index
+ :returns: bytes content in range [start, stop)
+ '''
+
+ def _get_object(client, bucket, key, range_str):
+ return client.get_object(
+ Bucket=bucket, Key=key, Range=range_str)['Body'].read()
+
+ s3_url = S3Path(s3_url)
+ if followlinks:
+ try:
+ s3_url = s3_url.readlink()
+ except S3NotALinkError:
+ pass
+
+ bucket, key = parse_s3_url(s3_url.path_with_protocol)
+ if not bucket:
+ raise S3BucketNotFoundError('Empty bucket name: %r' % s3_url)
+ if not key or key.endswith('/'):
+ raise S3IsADirectoryError('Is a directory: %r' % s3_url)
+
+ start, stop = get_content_offset(
+ start, stop, s3_url.getsize(follow_symlinks=False))
+ if start == 0 and stop == 0:
+ return b''
+ range_str = 'bytes=%d-%d' % (start, stop - 1)
+
+ client = get_s3_client_with_cache(profile_name=s3_url._profile_name)
+ with raise_s3_error(s3_url.path):
+ return patch_method(
+ _get_object,
+ max_retries=max_retries,
+ should_retry=s3_should_retry,
+ )(client, bucket, key, range_str)
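+ + +# Range-read sketch with s3_load_content (hypothetical path): fetch the first
+# kilobyte (or less, for smaller objects) without opening a file handle:
+#
+# first_kb = s3_load_content('s3://mybucket/data.bin', start=0, stop=1024)
+# assert len(first_kb) <= 1024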
+ + + + + +
[docs]def s3_rename(
+ src_url: PathLike, dst_url: PathLike, overwrite: bool = True) -> None:
+ '''
+ Move an s3 file path from src_url to dst_url
+
+ :param src_url: Given source path
+ :param dst_url: Given destination path
+ :param overwrite: whether to overwrite the file if it exists
+ '''
+ S3Path(src_url).rename(dst_url, overwrite)
+ + +
[docs]class S3Cacher(FileCacher): + cache_path = None + + def __init__( + self, path: str, cache_path: Optional[str] = None, mode: str = 'r'): + if mode not in ('r', 'w', 'a'): + raise ValueError('unacceptable mode: %r' % mode) + if mode in ('r', 'a'): + if cache_path is None: + cache_path = generate_cache_path(path) + s3_download(path, cache_path) + self.name = path + self.mode = mode + self.cache_path = cache_path + + def _close(self): + if self.cache_path is not None and \ + os.path.exists(self.cache_path): + if self.mode in ('w', 'a'): + s3_upload(self.cache_path, self.name) + os.unlink(self.cache_path)
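+ + +# S3Cacher usage sketch (hypothetical path), assuming the FileCacher base class
+# yields the local cache_path from its context manager:
+#
+# with S3Cacher('s3://mybucket/data.csv', mode='r') as local_path:
+#     with open(local_path) as f:
+#         print(f.readline())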
+ + +
[docs]def s3_glob(
+ path: PathLike,
+ recursive: bool = True,
+ missing_ok: bool = True,
+ followlinks: bool = False,
+) -> List[str]:
+ '''Return a list of s3 paths that match the glob pattern, in ascending alphabetical order
+ Notes: Glob only works within a bucket. If trying to match the bucket with wildcard characters, raise UnsupportedError
+
+ :param recursive: If False, `**` will not search directories recursively
+ :param missing_ok: If False and target path doesn't match any file, raise FileNotFoundError
+ :raises: UnsupportedError, when the bucket part contains wildcard characters
+ :returns: A list containing paths that match `s3_pathname`
+ '''
+ return list(
+ s3_iglob(
+ path=path,
+ recursive=recursive,
+ missing_ok=missing_ok,
+ followlinks=followlinks))
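+ + +# Glob sketch (hypothetical paths); wildcards are only allowed after the bucket part:
+#
+# matched = s3_glob('s3://mybucket/logs/2023-*/**/*.json')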
+ + +
[docs]def s3_glob_stat(
+ path: PathLike,
+ recursive: bool = True,
+ missing_ok: bool = True,
+ followlinks: bool = False) -> Iterator[FileEntry]:
+ '''Return a generator of tuples of path and file stat, in ascending alphabetical order, in which paths match the glob pattern
+ Notes: Glob only works within a bucket. If trying to match the bucket with wildcard characters, raise UnsupportedError
+
+ :param recursive: If False, `**` will not search directories recursively
+ :param missing_ok: If False and target path doesn't match any file, raise FileNotFoundError
+ :raises: UnsupportedError, when the bucket part contains wildcard characters
+ :returns: A generator of tuples of path and file stat, in which paths match `s3_pathname`
+ '''
+ return S3Path(path).glob_stat(
+ pattern="",
+ recursive=recursive,
+ missing_ok=missing_ok,
+ followlinks=followlinks)
+ + +
[docs]def s3_iglob(
+ path: PathLike,
+ recursive: bool = True,
+ missing_ok: bool = True,
+ followlinks: bool = False,
+) -> Iterator[str]:
+ '''Return an s3 path iterator in ascending alphabetical order, in which paths match the glob pattern
+ Notes: Glob only works within a bucket. If trying to match the bucket with wildcard characters, raise UnsupportedError
+
+ :param recursive: If False, `**` will not search directories recursively
+ :param missing_ok: If False and target path doesn't match any file, raise FileNotFoundError
+ :raises: UnsupportedError, when the bucket part contains wildcard characters
+ :returns: An iterator containing paths that match `s3_pathname`
+ '''
+ for path_obj in S3Path(path).iglob(pattern="", recursive=recursive,
+ missing_ok=missing_ok,
+ followlinks=followlinks):
+ yield path_obj.path_with_protocol
+ + +
[docs]def s3_makedirs(path: PathLike, exist_ok: bool = False):
+ '''
+ Create an s3 directory.
+ Purely creating a directory is invalid because it's unavailable on OSS.
+ This function tests that the target bucket has WRITE access.
+
+ :param path: Given path
+ :param exist_ok: If False and the target directory exists, raise S3FileExistsError
+ :raises: S3BucketNotFoundError, S3FileExistsError
+ '''
+ return S3Path(path).mkdir(parents=True, exist_ok=exist_ok)
+ + +def _group_src_paths_by_block( + src_paths: List[PathLike], block_size: int = DEFAULT_BLOCK_SIZE +) -> List[List[Tuple[PathLike, Optional[str]]]]: + groups = [] + current_group, current_group_size = [], 0 + for src_path in src_paths: + current_file_size = S3Path(src_path).stat().size + if current_file_size == 0: + continue + + if current_file_size >= block_size: + if len(groups) == 0: + if current_group_size + current_file_size > 2 * block_size: + group_lack_size = block_size - current_group_size + current_group.append( + (src_path, f'bytes=0-{group_lack_size-1}')) + groups.extend( + [ + current_group, + [ + ( + src_path, + f'bytes={group_lack_size}-{current_file_size-1}' + ) + ] + ]) + else: + current_group.append((src_path, None)) + groups.append(current_group) + else: + groups[-1].extend(current_group) + groups.append([(src_path, None)]) + current_group, current_group_size = [], 0 + else: + current_group.append((src_path, None)) + current_group_size += current_file_size + if current_group_size >= block_size: + groups.append(current_group) + current_group, current_group_size = [], 0 + if current_group: + groups.append(current_group) + return groups + + +
[docs]def s3_concat(
+ src_paths: List[PathLike],
+ dst_path: PathLike,
+ block_size: int = DEFAULT_BLOCK_SIZE,
+ max_workers: int = GLOBAL_MAX_WORKERS) -> None:
+ '''Concatenate s3 files into one file.
+
+ :param src_paths: Given source paths
+ :param dst_path: Given destination path
+ :param block_size: Group the source files into parts of roughly this size before the multipart copy; if 0, each source file becomes one part
+ :param max_workers: Max number of concurrent upload threads
+ '''
+ client = S3Path(dst_path)._client
+ with raise_s3_error(dst_path):
+ if block_size == 0:
+ groups = [[(src_path, None)] for src_path in src_paths]
+ else:
+ groups = _group_src_paths_by_block(src_paths, block_size=block_size)
+
+ with MultiPartWriter(client, dst_path) as writer, ThreadPoolExecutor(
+ max_workers=max_workers) as executor:
+ for index, group in enumerate(groups, start=1):
+ if len(group) == 1:
+ executor.submit(
+ writer.upload_part_copy, index, group[0][0],
+ group[0][1])
+ else:
+ executor.submit(writer.upload_part_by_paths, index, group)
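+ + +# Concat sketch (hypothetical paths): a server-side merge of several objects into
+# one, backed by multipart upload:
+#
+# s3_concat(
+#     ['s3://mybucket/part-0', 's3://mybucket/part-1'],
+#     's3://mybucket/merged')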
+ + +
[docs]def s3_lstat(path: PathLike) -> StatResult: + '''Like Path.stat() but, if the path points to a symbolic link, return the symbolic link’s information rather than its target’s.''' + return S3Path(path).lstat()
+ + +
[docs]@SmartPath.register +class S3Path(URIPath): + + protocol = "s3" + + def __init__(self, path: "PathLike", *other_paths: "PathLike"): + super().__init__(path, *other_paths) + protocol = get_url_scheme(self.path) + self._protocol_with_profile = self.protocol + self._profile_name = None + if protocol.startswith('s3+'): + self._protocol_with_profile = protocol + self._profile_name = protocol[3:] + self._s3_path = f"s3://{self.path[len(protocol)+3:]}" + elif not protocol: + self._s3_path = f"s3://{self.path.lstrip('/')}" + else: + self._s3_path = self.path + +
[docs] @cachedproperty + def path_with_protocol(self) -> str: + '''Return path with protocol, like file:///root, s3://bucket/key''' + path = self.path + protocol_prefix = self._protocol_with_profile + "://" + if path.startswith(protocol_prefix): + return path + return protocol_prefix + path.lstrip('/')
+ +
[docs] @cachedproperty + def path_without_protocol(self) -> str: + '''Return path without protocol, example: if path is s3://bucket/key, return bucket/key''' + path = self.path + protocol_prefix = self._protocol_with_profile + "://" + if path.startswith(protocol_prefix): + path = path[len(protocol_prefix):] + return path
+ +
[docs] @cachedproperty
+ def parts(self) -> Tuple[str, ...]:
+ '''A tuple giving access to the path’s various components'''
+ parts = [f"{self._protocol_with_profile}://"]
+ path = self.path_without_protocol
+ path = path.lstrip('/')
+ if path != '':
+ parts.extend(path.split('/'))
+ return tuple(parts)
+
+ @cachedproperty
+ def _client(self):
+ return get_s3_client_with_cache(profile_name=self._profile_name)
+
+ def _s3_get_metadata(self) -> dict:
+ '''
+ Get object metadata
+
+ :returns: Object metadata
+ '''
+ bucket, key = parse_s3_url(self.path_with_protocol)
+ if not bucket:
+ return {}
+ if not key or key.endswith('/'):
+ return {}
+ try:
+ with raise_s3_error(self.path_with_protocol):
+ resp = self._client.head_object(Bucket=bucket, Key=key)
+ return dict(
+ (key.lower(), value) for key, value in resp['Metadata'].items())
+ except Exception as error:
+ if isinstance(error,
+ (S3UnknownError, S3ConfigError, S3PermissionError)):
+ raise error
+ return {}
+
[docs] def access( + self, mode: Access = Access.READ, + followlinks: bool = False) -> bool: + ''' + Test if path has access permission described by mode + + :param mode: access mode + :returns: bool, if the bucket of s3_url has read/write access. + ''' + s3_url = self.path_with_protocol + if followlinks: + try: + s3_url = self.readlink().path_with_protocol + except S3NotALinkError: + pass + bucket, key = parse_s3_url(s3_url) # only check bucket accessibility + if not bucket: + raise Exception("No available bucket") + if not isinstance(mode, Access): + raise TypeError( + 'Unsupported mode: {} -- Mode should use one of the enums belonging to: {}' + .format(mode, ', '.join([str(a) for a in Access]))) + if mode not in (Access.READ, Access.WRITE): + raise TypeError('Unsupported mode: {}'.format(mode)) + + try: + if not self.exists(): + return False + except Exception as error: + error = translate_s3_error(error, s3_url) + if isinstance(error, S3PermissionError): + return False + raise error + + if mode == Access.READ: + return True + try: + if not key: + key = 'test' + elif key.endswith('/'): + key = key[:-1] + upload_id = self._client.create_multipart_upload( + Bucket=bucket, + Key=key, + )['UploadId'] + self._client.abort_multipart_upload( + Bucket=bucket, + Key=key, + UploadId=upload_id, + ) + return True + except Exception as error: + error = translate_s3_error(error, s3_url) + if isinstance(error, S3PermissionError): + return False + raise error
+ +
[docs] def exists(self, followlinks: bool = False) -> bool:
+ '''
+ Test if s3_url exists
+
+ If the bucket of s3_url is not permitted to read, return False
+
+ :returns: True if s3_url exists, else False
+ '''
+ bucket, key = parse_s3_url(self.path_with_protocol)
+ if not bucket: # s3:// => True, s3:///key => False
+ return not key
+
+ return self.is_file(followlinks) or self.is_dir()
+ +
[docs] def getmtime(self, follow_symlinks: bool = False) -> float:
+ '''
+ Get the last-modified time of the file on the given s3_url path (in Unix timestamp format).
+ If the path is an existent directory, return the latest modified time of all files in it. The mtime of an empty directory is 1970-01-01 00:00:00
+
+ If s3_url is not an existent path, which means s3_exist(s3_url) returns False, then raise S3FileNotFoundError
+
+ :returns: Last-modified time
+ :raises: S3FileNotFoundError, UnsupportedError
+ '''
+ return self.stat(follow_symlinks=follow_symlinks).mtime
+ +
[docs] def getsize(self, follow_symlinks: bool = False) -> int:
+ '''
+ Get the file size on the given s3_url path (in bytes).
+ If the path is a directory, return the sum of all file sizes in it, including files in subdirectories (if any).
+ The result excludes the size of the directory itself. In other words, return 0 Bytes on an empty directory path.
+
+ If s3_url is not an existent path, which means s3_exist(s3_url) returns False, then raise S3FileNotFoundError
+
+ :returns: File size
+ :raises: S3FileNotFoundError, UnsupportedError
+ '''
+ return self.stat(follow_symlinks=follow_symlinks).size
+ +
[docs] def glob(
+ self,
+ pattern,
+ recursive: bool = True,
+ missing_ok: bool = True,
+ followlinks: bool = False,
+ ) -> List['S3Path']:
+ '''Return a list of s3 paths that match the glob pattern, in ascending alphabetical order
+ Notes: Glob only works within a bucket. If trying to match the bucket with wildcard characters, raise UnsupportedError
+
+ :param pattern: Glob the given relative pattern in the directory represented by this path
+ :param recursive: If False, `**` will not search directories recursively
+ :param missing_ok: If False and target path doesn't match any file, raise FileNotFoundError
+ :raises: UnsupportedError, when the bucket part contains wildcard characters
+ :returns: A list containing paths that match `s3_pathname`
+ '''
+ return list(
+ self.iglob(
+ pattern=pattern,
+ recursive=recursive,
+ missing_ok=missing_ok,
+ followlinks=followlinks))
+ +
[docs] def glob_stat(
+ self,
+ pattern,
+ recursive: bool = True,
+ missing_ok: bool = True,
+ followlinks: bool = False) -> Iterator[FileEntry]:
+ '''Return a generator of tuples of path and file stat, in ascending alphabetical order, in which paths match the glob pattern
+ Notes: Glob only works within a bucket. If trying to match the bucket with wildcard characters, raise UnsupportedError
+
+ :param pattern: Glob the given relative pattern in the directory represented by this path
+ :param recursive: If False, `**` will not search directories recursively
+ :param missing_ok: If False and target path doesn't match any file, raise FileNotFoundError
+ :raises: UnsupportedError, when the bucket part contains wildcard characters
+ :returns: A generator of tuples of path and file stat, in which paths match `s3_pathname`
+ '''
+ glob_path = self._s3_path
+ if pattern:
+ glob_path = self.joinpath(pattern)._s3_path # pytype: disable=attribute-error
+ s3_pathname = fspath(glob_path)
+
+ def create_generator():
+ for group_s3_pathname_1 in _group_s3path_by_bucket(
+ s3_pathname, self._profile_name):
+ for group_s3_pathname_2 in _group_s3path_by_prefix(
+ group_s3_pathname_1):
+ for file_entry in _s3_glob_stat_single_path(
+ group_s3_pathname_2, recursive, missing_ok,
+ followlinks=followlinks,
+ profile_name=self._profile_name):
+ if self._profile_name:
+ file_entry = file_entry._replace(
+ path=
+ f"{self._protocol_with_profile}://{file_entry.path[5:]}"
+ )
+ yield file_entry
+
+ return _create_missing_ok_generator(
+ create_generator(), missing_ok,
+ S3FileNotFoundError('No match any file: %r' % s3_pathname))
+ +
[docs] def iglob(
+ self,
+ pattern,
+ recursive: bool = True,
+ missing_ok: bool = True,
+ followlinks: bool = False,
+ ) -> Iterator['S3Path']:
+ '''Return an s3 path iterator in ascending alphabetical order, in which paths match the glob pattern
+ Notes: Glob only works within a bucket. If trying to match the bucket with wildcard characters, raise UnsupportedError
+
+ :param pattern: Glob the given relative pattern in the directory represented by this path
+ :param recursive: If False, `**` will not search directories recursively
+ :param missing_ok: If False and target path doesn't match any file, raise FileNotFoundError
+ :raises: UnsupportedError, when the bucket part contains wildcard characters
+ :returns: An iterator containing paths that match `s3_pathname`
+ '''
+ for file_entry in self.glob_stat(pattern=pattern, recursive=recursive,
+ missing_ok=missing_ok,
+ followlinks=followlinks):
+ yield self.from_path(file_entry.path)
+ +
[docs] def is_dir(self, followlinks: bool = False) -> bool:
+ '''
+ Test if an s3 url is a directory.
+ Specific procedures are as follows:
+ return True if there exists a suffix such that ``os.path.join(s3_url, suffix)`` is a file;
+ return True if the url is an empty bucket or s3://
+
+ :param followlinks: whether followlinks is True or False, the result is the same, because s3 symlinks do not support directories
+ :returns: True if path is an s3 directory, else False
+ '''
+ bucket, key = parse_s3_url(self.path_with_protocol)
+ if not bucket: # s3:// => True, s3:///key => False
+ return not key
+ prefix = _become_prefix(key)
+ try:
+ resp = self._client.list_objects_v2(
+ Bucket=bucket, Prefix=prefix, Delimiter='/', MaxKeys=1)
+ except Exception as error:
+ error = translate_s3_error(error, self.path_with_protocol)
+ if isinstance(error,
+ (S3UnknownError, S3ConfigError, S3PermissionError)):
+ raise error
+ return False
+
+ if not key: # bucket is accessible
+ return True
+
+ if 'KeyCount' in resp:
+ return resp['KeyCount'] > 0
+
+ return len(resp.get('Contents', [])) > 0 or \
+ len(resp.get('CommonPrefixes', [])) > 0
+ +
[docs] def is_file(self, followlinks: bool = False) -> bool:
+ '''
+ Test if an s3_url is a file
+
+ :returns: True if path is an s3 file, else False
+ '''
+ s3_url = self.path_with_protocol
+ if followlinks:
+ try:
+ s3_url = self.readlink().path_with_protocol
+ except S3NotALinkError:
+ pass
+ bucket, key = parse_s3_url(s3_url)
+ if not bucket or not key or key.endswith('/'):
+ # s3://, s3:///key, s3://bucket, s3://bucket/prefix/
+ return False
+ try:
+ self._client.head_object(Bucket=bucket, Key=key)
+ except Exception as error:
+ error = translate_s3_error(error, s3_url)
+ if isinstance(error,
+ (S3UnknownError, S3ConfigError, S3PermissionError)):
+ raise error
+ return False
+ return True
+ +
[docs] def listdir(self, followlinks: bool = False) -> List[str]:
+ '''
+ Get all contents of the given s3_url. The result is in ascending alphabetical order.
+
+ :returns: All contents that have the prefix of s3_url, in ascending alphabetical order
+ :raises: S3FileNotFoundError, S3NotADirectoryError
+ '''
+ entries = list(self.scandir(followlinks=followlinks))
+ return sorted([entry.name for entry in entries])
+ +
[docs] def iterdir(self, followlinks: bool = False) -> Iterator['S3Path']:
+ '''
+ Get all contents of the given s3_url. The result is in ascending alphabetical order.
+
+ :returns: All contents that have the prefix of s3_url, in ascending alphabetical order
+ :raises: S3FileNotFoundError, S3NotADirectoryError
+ '''
+ for path in self.listdir(followlinks=followlinks):
+ yield self.joinpath(path) # type: ignore
+ +
[docs] def load(self, followlinks: bool = False) -> BinaryIO:
+ '''Read all content on the specified path in binary mode and load it into memory
+
+ Users should close the returned BinaryIO manually
+
+ :returns: BinaryIO
+ '''
+ s3_url = self.path_with_protocol
+ if followlinks:
+ try:
+ s3_url = self.readlink().path_with_protocol
+ except S3NotALinkError:
+ pass
+ bucket, key = parse_s3_url(s3_url)
+ if not bucket:
+ raise S3BucketNotFoundError('Empty bucket name: %r' % s3_url)
+ if not key or key.endswith('/'):
+ raise S3IsADirectoryError('Is a directory: %r' % s3_url)
+
+ buffer = io.BytesIO()
+ with raise_s3_error(s3_url):
+ self._client.download_fileobj(bucket, key, buffer)
+ buffer.seek(0)
+ return buffer
+ +
[docs] def hasbucket(self) -> bool:
+ '''
+ Test if the bucket of s3_url exists
+
+ :returns: True if the bucket of s3_url exists, else False
+ '''
+ bucket, _ = parse_s3_url(self.path_with_protocol)
+ if not bucket:
+ return False
+
+ try:
+ self._client.head_bucket(Bucket=bucket)
+ except Exception as error:
+ error = translate_s3_error(error, self.path_with_protocol)
+ if isinstance(error, S3PermissionError):
+ # Aliyun OSS doesn't give bucket api permission when you only have read and write permission
+ try:
+ self._client.list_objects_v2(Bucket=bucket, MaxKeys=1)
+ return True
+ except Exception as error2:
+ error2 = translate_s3_error(error2, self.path_with_protocol)
+ if isinstance(
+ error2,
+ (S3UnknownError, S3ConfigError, S3PermissionError)):
+ raise error2
+ return False
+ elif isinstance(error, (S3UnknownError, S3ConfigError)):
+ raise error
+ elif isinstance(error, S3FileNotFoundError):
+ return False
+
+ return True
+ +
[docs] def mkdir(self, mode=0o777, parents: bool = False, exist_ok: bool = False):
+ '''
+ Create an s3 directory.
+ Purely creating a directory is invalid because it's unavailable on OSS.
+ This function tests that the target bucket has WRITE access.
+
+ :param mode: mode is ignored, only for compatibility with pathlib.Path
+ :param parents: parents is ignored, only for compatibility with pathlib.Path
+ :param exist_ok: If False and the target directory exists, raise S3FileExistsError
+ :raises: S3BucketNotFoundError, S3FileExistsError
+ '''
+ bucket, _ = parse_s3_url(self.path_with_protocol)
+ if not bucket:
+ raise S3BucketNotFoundError(
+ 'Empty bucket name: %r' % self.path_with_protocol)
+ if not self.hasbucket():
+ raise S3BucketNotFoundError(
+ 'No such bucket: %r' % self.path_with_protocol)
+ if exist_ok:
+ if self.is_file():
+ raise S3FileExistsError(
+ 'File exists: %r' % self.path_with_protocol)
+ return
+ if self.exists():
+ raise S3FileExistsError('File exists: %r' % self.path_with_protocol)
+ +
[docs] def move(self, dst_url: PathLike, overwrite: bool = True) -> None:
+ '''
+ Move the file/directory path from src_url to dst_url
+
+ :param dst_url: Given destination path
+ :param overwrite: whether to overwrite the file if it exists
+ '''
+ for src_file_path, dst_file_path in _s3_scan_pairs(
+ self.path_with_protocol, dst_url):
+ S3Path(src_file_path).rename(dst_file_path, overwrite)
+ +
[docs] def remove(self, missing_ok: bool = False) -> None:
+ '''
+ Remove the file or directory on s3; removing the whole s3 (`s3://`) or a whole bucket (`s3://bucket`) is not permitted
+
+ :param missing_ok: if False and the target file/directory doesn't exist, raise S3FileNotFoundError
+ :raises: S3PermissionError, S3FileNotFoundError, UnsupportedError
+ '''
+ bucket, key = parse_s3_url(self.path_with_protocol)
+ if not bucket:
+ if not key:
+ raise UnsupportedError(
+ 'Remove whole s3', self.path_with_protocol)
+ raise S3BucketNotFoundError(
+ 'Empty bucket name: %r' % self.path_with_protocol)
+ if not key:
+ raise UnsupportedError('Remove bucket', self.path_with_protocol)
+ if not self.exists():
+ if missing_ok:
+ return
+ raise S3FileNotFoundError(
+ 'No such file or directory: %r' % self.path_with_protocol)
+
+ client = self._client
+ with raise_s3_error(self.path_with_protocol):
+ if self.is_file():
+ client.delete_object(Bucket=bucket, Key=key)
+ return
+ prefix = _become_prefix(key)
+ total_count, error_count = 0, 0
+ for resp in _list_objects_recursive(client, bucket, prefix):
+ if 'Contents' in resp:
+ keys = [
+ {
+ 'Key': content['Key']
+ } for content in resp['Contents']
+ ]
+ total_count += len(keys)
+ errors = []
+ retries = 2
+ retry_interval = min(0.1 * 2**retries, 30)
+ for i in range(retries):
+ # doc: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.delete_objects
+ if not keys:
+ break
+ response = client.delete_objects(
+ Bucket=bucket, Delete={'Objects': keys})
+ keys = []
+ for error_info in response.get('Errors', []):
+ if s3_error_code_should_retry(
+ error_info.get('Code')):
+ error_logger.warning(
+ "retry %s times, removing file: %s, with error %s: %s"
+ % (
+ i + 1, error_info['Key'],
+ error_info['Code'],
+ error_info['Message']))
+ keys.append({'Key': error_info['Key']})
+ else:
+ errors.append(error_info)
+ time.sleep(retry_interval)
+ for error_info in errors:
+ error_logger.error(
+ "failed remove file: %s, with error %s: %s" % (
+ error_info['Key'], error_info['Code'],
+ error_info['Message']))
+ error_count += len(errors)
+ if error_count > 0:
+ error_msg = "failed remove path: %s, total file count: %s, failed count: %s" % (
+ self.path_with_protocol, total_count, error_count)
+ raise S3UnknownError(
+ Exception(error_msg), self.path_with_protocol)
+ +
[docs] def rename(self, dst_path: PathLike, overwrite: bool = True) -> 'S3Path':
+ '''
+ Move the s3 file path from this path to dst_path
+
+ :param dst_path: Given destination path
+ :param overwrite: whether to overwrite the file if it exists
+ '''
+ if self.is_file():
+ self.copy(dst_path, overwrite=overwrite)
+ else:
+ self.sync(dst_path, overwrite=overwrite)
+ self.remove(missing_ok=True)
+ return self.from_path(dst_path)
+ +
[docs] def scan(self, missing_ok: bool = True,
+ followlinks: bool = False) -> Iterator[str]:
+ '''
+ Iteratively traverse only the files in the given s3 directory, in alphabetical order.
+ Every iteration on the generator yields a path string.
+
+ If s3_url is a file path, yields the file only
+ If s3_url is a non-existent path, return an empty generator
+ If s3_url is a bucket path, return all file paths in the bucket
+ If s3_url is an empty bucket, return an empty generator
+ If s3_url doesn't contain any bucket, which is s3_url == 's3://', raise UnsupportedError. Scanning the complete s3 is not supported in megfile
+
+ :param missing_ok: If False and there's no file in the directory, raise FileNotFoundError
+ :raises: UnsupportedError
+ :returns: A file path generator
+ '''
+ scan_stat_iter = self.scan_stat(
+ missing_ok=missing_ok, followlinks=followlinks)
+
+ def create_generator() -> Iterator[str]:
+ for file_entry in scan_stat_iter:
+ yield file_entry.path
+
+ return create_generator()
+ +
[docs] def scan_stat(self, missing_ok: bool = True, + followlinks: bool = False) -> Iterator[FileEntry]: + ''' + Iteratively traverse only files in given directory, in alphabetical order. + Every iteration on generator yields a tuple of path string and file stat + + :param missing_ok: If False and there's no file in the directory, raise FileNotFoundError + :raises: UnsupportedError + :returns: A file path generator + ''' + bucket, key = parse_s3_url(self.path_with_protocol) + if not bucket: + raise UnsupportedError('Scan whole s3', self.path_with_protocol) + + def create_generator() -> Iterator[FileEntry]: + if not self.is_dir(): + if self.is_file(): + # On s3, file and directory may be of same name and level, so need to test the path is file or directory + yield FileEntry( + self.name, fspath(self.path_with_protocol), + self.stat(follow_symlinks=followlinks)) + return + + if not key.endswith('/') and self.is_file(): + yield FileEntry( + self.name, fspath(self.path_with_protocol), + self.stat(follow_symlinks=followlinks)) + + prefix = _become_prefix(key) + client = self._client + with raise_s3_error(self.path_with_protocol): + for resp in _list_objects_recursive(client, bucket, prefix): + for content in resp.get('Contents', []): + full_path = s3_path_join( + f'{self._protocol_with_profile}://', bucket, + content['Key']) + + if followlinks: + try: + origin_path = self.from_path( + full_path).readlink() + yield FileEntry( + origin_path.name, + origin_path.path_with_protocol, + origin_path.lstat()) + continue + except S3NotALinkError: + pass + + yield FileEntry( + S3Path(full_path).name, full_path, + _make_stat(content)) + + return _create_missing_ok_generator( + create_generator(), missing_ok, + S3FileNotFoundError( + 'No match any file in: %r' % self.path_with_protocol))
+ +
[docs] def scandir(self, followlinks: bool = False) -> Iterator[FileEntry]: + ''' + Get all contents of given s3_url, the order of result is not guaranteed. + + :returns: All contents have prefix of s3_url + :raises: S3FileNotFoundError, S3NotADirectoryError + ''' + bucket, key = parse_s3_url(self.path_with_protocol) + if not bucket and key: + raise S3BucketNotFoundError( + 'Empty bucket name: %r' % self.path_with_protocol) + + if self.is_file(): + raise S3NotADirectoryError( + 'Not a directory: %r' % self.path_with_protocol) + elif not self.is_dir(): + raise S3FileNotFoundError( + 'No such directory: %r' % self.path_with_protocol) + prefix = _become_prefix(key) + client = self._client + + # In order to do check on creation, + # we need to wrap the iterator in another function + def create_generator() -> Iterator[FileEntry]: + with raise_s3_error(self.path_with_protocol): + + def generate_s3_path( + protocol: str, bucket: str, key: str) -> str: + return "%s://%s/%s" % (protocol, bucket, key) + + if not bucket and not key: # list buckets + response = client.list_buckets() + for content in response['Buckets']: + yield FileEntry( + content['Name'], f"s3://{content['Name']}", + StatResult( + ctime=content['CreationDate'].timestamp(), + isdir=True, + extra=content, + )) + return + + for resp in _list_objects_recursive(client, bucket, prefix, + '/'): + for common_prefix in resp.get('CommonPrefixes', []): + yield FileEntry( + common_prefix['Prefix'][len(prefix):-1], + generate_s3_path( + self._protocol_with_profile, bucket, + common_prefix['Prefix']), + StatResult(isdir=True, extra=common_prefix)) + for content in resp.get('Contents', []): + src_url = generate_s3_path( + self._protocol_with_profile, bucket, content['Key']) + + if followlinks: + try: + origin_path = self.from_path(src_url).readlink() + yield FileEntry( + origin_path.name, + origin_path.path_with_protocol, + origin_path.lstat()) + continue + except S3NotALinkError: + pass + + yield FileEntry( + content['Key'][len(prefix):], src_url, + _make_stat(content)) + + return ContextIterator(create_generator())
+ + def _getdirstat(self) -> StatResult: + ''' + Return StatResult of given s3_url directory, including: + + 1. Directory size: the sum of all file size in it, including file in subdirectories (if exist). + The result excludes the size of directory itself. In other words, return 0 Byte on an empty directory path + 2. Last-modified time of directory: return the latest modified time of all file in it. The mtime of empty directory is 1970-01-01 00:00:00 + + :returns: An int indicates size in Bytes + ''' + if not self.is_dir(): + raise S3FileNotFoundError( + 'No such file or directory: %r' % self.path_with_protocol) + + bucket, key = parse_s3_url(self.path_with_protocol) + prefix = _become_prefix(key) + client = self._client + size = 0 + mtime = 0.0 + with raise_s3_error(self.path_with_protocol): + for resp in _list_objects_recursive(client, bucket, prefix): + for content in resp.get('Contents', []): + size += content['Size'] + last_modified = content['LastModified'].timestamp() + if mtime < last_modified: + mtime = last_modified + + return StatResult(size=size, mtime=mtime, isdir=True) + +
[docs] def stat(self, follow_symlinks=True) -> StatResult:
+ '''
+ Get the StatResult of the s3_url file, including file size and mtime, referring to s3_getsize and s3_getmtime
+
+ If s3_url is not an existent path, which means s3_exist(s3_url) returns False, then raise S3FileNotFoundError
+ If attempting to get the StatResult of the complete s3, such as s3_dir_url == 's3://', raise S3BucketNotFoundError
+
+ :returns: StatResult
+ :raises: S3FileNotFoundError, S3BucketNotFoundError
+ '''
+ islnk = False
+ bucket, key = parse_s3_url(self.path_with_protocol)
+ if not bucket:
+ raise S3BucketNotFoundError(
+ 'Empty bucket name: %r' % self.path_with_protocol)
+
+ if not self.is_file():
+ return self._getdirstat()
+
+ client = self._client
+ with raise_s3_error(self.path_with_protocol):
+ content = client.head_object(Bucket=bucket, Key=key)
+ if 'Metadata' in content:
+ metadata = dict(
+ (key.lower(), value)
+ for key, value in content['Metadata'].items())
+ if metadata and 'symlink_to' in metadata:
+ islnk = True
+ if islnk and follow_symlinks:
+ s3_url = metadata['symlink_to']
+ bucket, key = parse_s3_url(s3_url)
+ content = client.head_object(Bucket=bucket, Key=key)
+ stat_record = StatResult(
+ islnk=islnk,
+ size=content['ContentLength'],
+ mtime=content['LastModified'].timestamp(),
+ extra=content)
+ return stat_record
+ + + +
[docs] def walk(self, followlinks: bool = False + ) -> Iterator[Tuple[str, List[str], List[str]]]: + ''' + Iteratively traverse the given s3 directory, in top-bottom order. In other words, firstly traverse parent directory, if subdirectories exist, traverse the subdirectories in alphabetical order. + Every iteration on generator yields a 3-tuple: (root, dirs, files) + + - root: Current s3 path; + - dirs: Name list of subdirectories in current directory. The list is sorted by name in ascending alphabetical order; + - files: Name list of files in current directory. The list is sorted by name in ascending alphabetical order; + + If s3_url is a file path, return an empty generator + If s3_url is a non-existent path, return an empty generator + If s3_url is a bucket path, bucket will be the top directory, and will be returned at first iteration of generator + If s3_url is an empty bucket, only yield one 3-tuple (notes: s3 doesn't have empty directory) + If s3_url doesn't contain any bucket, which is s3_url == 's3://', raise UnsupportedError. walk() on complete s3 is not supported in megfile + + :param followlinks: whether followlinks is True or False, result is the same. Because s3 symlink not support dir. + :raises: UnsupportedError + :returns: A 3-tuple generator + ''' + bucket, key = parse_s3_url(self.path_with_protocol) + if not bucket: + raise UnsupportedError('Walk whole s3', self.path_with_protocol) + + if not self.is_dir(): + return + + stack = [key] + client = self._client + while len(stack) > 0: + current = _become_prefix(stack.pop()) + dirs, files = [], [] + for resp in _list_objects_recursive(client, bucket, current, '/'): + for common_prefix in resp.get('CommonPrefixes', []): + dirs.append(common_prefix['Prefix'][:-1]) + for content in resp.get('Contents', []): + files.append(content['Key']) + + dirs = sorted(dirs) + stack.extend(reversed(dirs)) + + root = s3_path_join( + f'{self._protocol_with_profile}://', bucket, current)[:-1] + dirs = [path[len(current):] for path in dirs] + files = sorted(path[len(current):] for path in files) + yield root, dirs, files
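+ + # A traversal sketch in the style of os.walk (hypothetical bucket and prefix):
+ #
+ # for root, dirs, files in S3Path('s3://mybucket/dataset').walk():
+ #     print(root, len(dirs), len(files))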
+ +
[docs] def md5(self, recalculate: bool = False, followlinks: bool = False) -> str:
+ '''
+ Get the md5 meta info of files that were uploaded/copied via megfile
+
+ If the meta info is lost or non-existent, return the s3 etag instead
+
+ :param recalculate: calculate md5 in real-time or return the s3 etag
+ :param followlinks: If True, calculate the md5 of the real file
+ :returns: md5 meta info
+ '''
+ bucket, _ = parse_s3_url(self.path_with_protocol)
+ if not bucket:
+ raise S3BucketNotFoundError(
+ 'Empty bucket name: %r' % self.path_with_protocol)
+ stat = self.stat(follow_symlinks=followlinks)
+ if stat.isdir:
+ hash_md5 = hashlib.md5() # nosec
+ for file_name in self.listdir():
+ chunk = S3Path(
+ s3_path_join(
+ self.path_with_protocol,
+ file_name)).md5(recalculate=recalculate).encode()
+ hash_md5.update(chunk)
+ return hash_md5.hexdigest()
+ if recalculate:
+ path_instance = self
+ if followlinks:
+ try:
+ path_instance = self.readlink()
+ except S3NotALinkError:
+ pass
+ with path_instance.open('rb') as f:
+ return calculate_md5(f)
+ return stat.extra.get('ETag', '')[1:-1]
+ +
[docs] def copy(
+ self,
+ dst_url: PathLike,
+ callback: Optional[Callable[[int], None]] = None,
+ followlinks: bool = False,
+ overwrite: bool = True) -> None:
+ ''' File copy on S3
+ Copy the content of the file on `src_path` to `dst_path`.
+ It's the caller's responsibility to ensure that s3_isfile(src_url) == True
+
+ :param dst_path: Target file path
+ :param callback: Called periodically during copy, and the input parameter is the data size (in bytes) copied since the last call
+ :param followlinks: False if regarding symlinks as files, else True
+ :param overwrite: whether to overwrite the file if it exists, True by default
+ '''
+ if not overwrite and self.from_path(dst_url).is_file():
+ return
+
+ src_url = self.path_with_protocol
+ src_bucket, src_key = parse_s3_url(src_url)
+ dst_bucket, dst_key = parse_s3_url(dst_url)
+ if dst_bucket == src_bucket and src_key.rstrip('/') == dst_key.rstrip(
+ '/'):
+ raise SameFileError(
+ f"'{src_url}' and '{dst_url}' are the same file")
+
+ if not src_bucket:
+ raise S3BucketNotFoundError('Empty bucket name: %r' % src_url)
+ if self.is_dir():
+ raise S3IsADirectoryError('Is a directory: %r' % src_url)
+
+ if not dst_bucket:
+ raise S3BucketNotFoundError('Empty bucket name: %r' % dst_url)
+ if not dst_key or dst_key.endswith('/'):
+ raise S3IsADirectoryError('Is a directory: %r' % dst_url)
+
+ if followlinks:
+ try:
+ s3_url = self.readlink().path
+ src_bucket, src_key = parse_s3_url(s3_url)
+ except S3NotALinkError:
+ pass
+
+ with raise_s3_error(f"'{src_url}' or '{dst_url}'"):
+ self._client.copy(
+ {
+ 'Bucket': src_bucket,
+ 'Key': src_key,
+ },
+ Bucket=dst_bucket,
+ Key=dst_key,
+ Callback=callback)
+ +
[docs] def sync(
+ self,
+ dst_url: PathLike,
+ followlinks: bool = False,
+ force: bool = False,
+ overwrite: bool = True) -> None:
+ '''
+ Copy the file/directory from src_url to dst_url
+
+ :param dst_url: Given destination path
+ :param followlinks: False if regarding symlinks as files, else True
+ :param force: Sync files forcibly, without skipping identical files; takes priority over 'overwrite', False by default
+ :param overwrite: whether to overwrite the file if it exists, True by default
+ '''
+ for src_file_path, dst_file_path in _s3_scan_pairs(
+ self.path_with_protocol, dst_url):
+ src_file_path = self.from_path(src_file_path)
+ dst_file_path = self.from_path(dst_file_path)
+
+ if force:
+ pass
+ elif not overwrite and dst_file_path.exists():
+ continue
+ elif dst_file_path.exists() and is_same_file(
+ src_file_path.stat(), dst_file_path.stat(), 'copy'):
+ continue
+
+ src_file_path.copy(dst_file_path, followlinks=followlinks)
+ + + + + + + +
[docs] def save(self, file_object: BinaryIO): + '''Write the opened binary stream to the specified path; the stream won't be closed + + :param file_object: Stream to be read + ''' + bucket, key = parse_s3_url(self.path_with_protocol) + if not bucket: + raise S3BucketNotFoundError( + 'Empty bucket name: %r' % self.path_with_protocol) + if not key or key.endswith('/'): + raise S3IsADirectoryError( + 'Is a directory: %r' % self.path_with_protocol) + + with raise_s3_error(self.path_with_protocol): + self._client.upload_fileobj(file_object, Bucket=bucket, Key=key)
+ +
[docs] def open( + self, + mode: str = 'r', + *, + encoding: Optional[str] = None, + errors: Optional[str] = None, + s3_open_func: Callable[[str, str], BinaryIO] = s3_open, + **kwargs) -> IO[AnyStr]: # pytype: disable=signature-mismatch + return s3_open_func( # pytype: disable=wrong-keyword-args + self, + mode, + encoding=encoding, + errors=errors, + **necessary_params(s3_open_func, **kwargs))
+ +
[docs] def absolute(self) -> 'S3Path': + ''' + Make the path absolute, without normalization or resolving symlinks. Returns a new path object + ''' + return self
+ +
[docs] def cwd(self) -> 'S3Path': + '''Return current working directory + + :returns: Current working directory + ''' + return self.from_path(self.path_with_protocol)
+ + +class MultiPartWriter: + + def __init__(self, client, path: PathLike) -> None: + self._client = client + self._multipart_upload_info = [] + + bucket, key = parse_s3_url(path) + self._bucket = bucket + self._key = key + self._upload_id = self._client.create_multipart_upload( + Bucket=self._bucket, Key=self._key)['UploadId'] + + def upload_part(self, part_num: int, file_obj: io.BytesIO) -> None: + response = self._client.upload_part( + Body=file_obj, + UploadId=self._upload_id, + PartNumber=part_num, + Bucket=self._bucket, + Key=self._key, + ) + self._multipart_upload_info.append( + { + 'PartNumber': part_num, + 'ETag': response['ETag'] + }) + + def upload_part_by_paths( + self, part_num: int, paths: List[Tuple[PathLike, str]]) -> None: + file_obj = io.BytesIO() + + def get_object( + client, bucket, key, range_str: Optional[str] = None) -> bytes: + if range_str: + return client.get_object( + Bucket=bucket, Key=key, Range=range_str)['Body'].read() + else: + return client.get_object(Bucket=bucket, Key=key)['Body'].read() + + get_object = patch_method( + get_object, + max_retries=max_retries, + should_retry=s3_should_retry, + ) + for path, bytes_range in paths: + bucket, key = parse_s3_url(path) + if bytes_range: + file_obj.write( + get_object(self._client, bucket, key, bytes_range)) + else: + file_obj.write(get_object(self._client, bucket, key)) + file_obj.seek(0, os.SEEK_SET) + self.upload_part(part_num, file_obj) + + def upload_part_copy( + self, + part_num: int, + path: PathLike, + copy_source_range: Optional[str] = None) -> None: + bucket, key = parse_s3_url(path) + params = dict( + UploadId=self._upload_id, + PartNumber=part_num, + CopySource={ + 'Bucket': bucket, + 'Key': key + }, + Bucket=self._bucket, + Key=self._key, + ) + if copy_source_range: + params['CopySourceRange'] = copy_source_range + response = self._client.upload_part_copy(**params) + self._multipart_upload_info.append( + { + 'PartNumber': part_num, + 'ETag': response['CopyPartResult']['ETag'] + }) + + def close(self): + self._multipart_upload_info.sort(key=lambda t: t['PartNumber']) + self._client.complete_multipart_upload( + UploadId=self._upload_id, + Bucket=self._bucket, + Key=self._key, + MultipartUpload={'Parts': self._multipart_upload_info}) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() +
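MultiPartWriter is an internal helper rather than a public API; a hypothetical direct use, assuming `client` is a boto3 S3 client and the path is illustrative, could look like:

import io

with MultiPartWriter(client, 's3://mybucket/big.bin') as writer:
    # on S3, every part except the last must be at least 5 MiB
    writer.upload_part(1, io.BytesIO(b'x' * (5 * 1024 * 1024)))
    writer.upload_part(2, io.BytesIO(b'tail'))
# __exit__ calls close(), which sorts the parts by PartNumber and completes the multipart upload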
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/megfile/sftp.html b/_modules/megfile/sftp.html new file mode 100644 index 00000000..f6a74591 --- /dev/null +++ b/_modules/megfile/sftp.html @@ -0,0 +1,518 @@ + + + + + + megfile.sftp — megfile documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for megfile.sftp

+from typing import IO, AnyStr, BinaryIO, Callable, Iterator, List, Optional, Tuple
+
+from megfile.interfaces import FileEntry, PathLike, StatResult
+from megfile.sftp_path import SftpPath, is_sftp, sftp_concat, sftp_download, sftp_glob, sftp_glob_stat, sftp_iglob, sftp_lstat, sftp_path_join, sftp_readlink, sftp_resolve, sftp_upload
+
+__all__ = [
+    'is_sftp',
+    'sftp_readlink',
+    'sftp_glob',
+    'sftp_iglob',
+    'sftp_glob_stat',
+    'sftp_resolve',
+    'sftp_download',
+    'sftp_upload',
+    'sftp_path_join',
+    'sftp_concat',
+    'sftp_lstat',
+    'sftp_exists',
+    'sftp_getmtime',
+    'sftp_getsize',
+    'sftp_isdir',
+    'sftp_isfile',
+    'sftp_listdir',
+    'sftp_load_from',
+    'sftp_makedirs',
+    'sftp_realpath',
+    'sftp_rename',
+    'sftp_move',
+    'sftp_remove',
+    'sftp_scan',
+    'sftp_scan_stat',
+    'sftp_scandir',
+    'sftp_stat',
+    'sftp_unlink',
+    'sftp_walk',
+    'sftp_getmd5',
+    'sftp_symlink',
+    'sftp_islink',
+    'sftp_save_as',
+    'sftp_open',
+    'sftp_chmod',
+    'sftp_absolute',
+    'sftp_rmdir',
+    'sftp_copy',
+    'sftp_sync',
+]
+
+
+
[docs]def sftp_exists(path: PathLike, followlinks: bool = False) -> bool: + ''' + Test if the path exists + + :param path: Given path + :param followlinks: False if regard symlink as file, else True + :returns: True if the path exists, else False + + ''' + return SftpPath(path).exists(followlinks)
+ + +
[docs]def sftp_getmtime(path: PathLike, follow_symlinks: bool = False) -> float: + ''' + Get last-modified time of the file on the given path (in Unix timestamp format). + If the path is an existent directory, return the latest modified time of all files in it. + + :param path: Given path + :returns: last-modified time + ''' + return SftpPath(path).getmtime(follow_symlinks)
+ + +
[docs]def sftp_getsize(path: PathLike, follow_symlinks: bool = False) -> int: + ''' + Get file size on the given file path (in bytes). + If the path is a directory, return the sum of all file sizes in it, including files in subdirectories (if they exist). + The result excludes the size of the directory itself. In other words, return 0 Byte on an empty directory path. + + :param path: Given path + :returns: File size + + ''' + return SftpPath(path).getsize(follow_symlinks)
+ + +
[docs]def sftp_isdir(path: PathLike, followlinks: bool = False) -> bool: + ''' + Test if a path is a directory + + .. note:: + + The difference between this function and ``os.path.isdir`` is that this function regards a symlink as a file + + :param path: Given path + :param followlinks: False if a symlink is regarded as a file, else True + :returns: True if the path is a directory, else False + + ''' + return SftpPath(path).is_dir(followlinks)
+ + +
[docs]def sftp_isfile(path: PathLike, followlinks: bool = False) -> bool: + ''' + Test if a path is a file + + .. note:: + + The difference between this function and ``os.path.isfile`` is that this function regards a symlink as a file + + :param path: Given path + :param followlinks: False if a symlink is regarded as a file, else True + :returns: True if the path is a file, else False + + ''' + return SftpPath(path).is_file(followlinks)
+ + +
[docs]def sftp_listdir(path: PathLike) -> List[str]: + ''' + Get all contents of the given sftp path. The result is in ascending alphabetical order. + + :param path: Given path + :returns: All contents of the path, in ascending alphabetical order + ''' + return SftpPath(path).listdir()
+ + +
[docs]def sftp_load_from(path: PathLike) -> BinaryIO: + '''Read all content from the specified path into memory + + User should close the BinaryIO manually + + :param path: Given path + :returns: Binary stream + ''' + return SftpPath(path).load()
+ + +
[docs]def sftp_makedirs( + path: PathLike, mode=0o777, parents: bool = False, + exist_ok: bool = False): + ''' + make a directory on sftp, including parent directory + + If there exists a file on the path, raise FileExistsError + + :param path: Given path + :param mode: If mode is given, it is combined with the process’ umask value to determine the file mode and access flags. + :param parents: If parents is true, any missing parents of this path are created as needed; + If parents is false (the default), a missing parent raises FileNotFoundError. + :param exist_ok: If False and target directory exists, raise FileExistsError + :raises: FileExistsError + ''' + return SftpPath(path).mkdir(mode, parents, exist_ok)
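For instance (host and path are illustrative), creating a nested remote directory idempotently:

from megfile.sftp import sftp_makedirs

sftp_makedirs('sftp://user@example.com//data/2024/logs', parents=True, exist_ok=True)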
+ + +
[docs]def sftp_realpath(path: PathLike) -> str: + '''Return the real path of given path + + :param path: Given path + :returns: Real path of given path + ''' + return SftpPath(path).realpath()
+ + +
[docs]def sftp_rename( + src_path: PathLike, dst_path: PathLike, + overwrite: bool = True) -> 'SftpPath': + ''' + Rename a file on sftp + + :param src_path: Given path + :param dst_path: Given destination path + :param overwrite: whether to overwrite the file if it exists + ''' + return SftpPath(src_path).rename(dst_path, overwrite)
+ + +
[docs]def sftp_move( + src_path: PathLike, dst_path: PathLike, + overwrite: bool = True) -> 'SftpPath': + ''' + Move a file on sftp + + :param src_path: Given path + :param dst_path: Given destination path + :param overwrite: whether to overwrite the file if it exists + ''' + return SftpPath(src_path).replace(dst_path, overwrite)
+ + +
[docs]def sftp_remove(path: PathLike, missing_ok: bool = False) -> None: + ''' + Remove the file or directory on sftp + + :param path: Given path + :param missing_ok: if False and target file/directory not exists, raise FileNotFoundError + ''' + return SftpPath(path).remove(missing_ok)
+ + +
[docs]def sftp_scan( + path: PathLike, missing_ok: bool = True, + followlinks: bool = False) -> Iterator[str]: + ''' + Iteratively traverse only files in the given directory, in alphabetical order. + Every iteration of the generator yields a path string. + + If the path is a file path, yield the file only + If the path is non-existent, return an empty generator + + :param path: Given path + :param missing_ok: If False and there's no file in the directory, raise FileNotFoundError + :returns: A file path generator + ''' + return SftpPath(path).scan(missing_ok, followlinks)
+ + +
[docs]def sftp_scan_stat( + path: PathLike, missing_ok: bool = True, + followlinks: bool = False) -> Iterator[FileEntry]: + ''' + Iteratively traverse only files in given directory, in alphabetical order. + Every iteration on generator yields a tuple of path string and file stat + + :param path: Given path + :param missing_ok: If False and there's no file in the directory, raise FileNotFoundError + :returns: A file path generator + ''' + return SftpPath(path).scan_stat(missing_ok, followlinks)
+ + +
[docs]def sftp_scandir(path: PathLike) -> Iterator[FileEntry]: + ''' + Get all content of given file path. + + :param path: Given path + :returns: An iterator contains all contents have prefix path + ''' + return SftpPath(path).scandir()
+ + +
[docs]def sftp_stat(path: PathLike, follow_symlinks=True) -> StatResult: + ''' + Get StatResult of file on sftp, including file size and mtime, referring to fs_getsize and fs_getmtime + + :param path: Given path + :returns: StatResult + ''' + return SftpPath(path).stat(follow_symlinks)
+ + + + + +
[docs]def sftp_walk(path: PathLike, followlinks: bool = False + ) -> Iterator[Tuple[str, List[str], List[str]]]: + ''' + Generate the file names in a directory tree by walking the tree top-down. + For each directory in the tree rooted at directory path (including path itself), + it yields a 3-tuple (root, dirs, files). + + root: a string of the current path + dirs: list of subdirectory names (excluding '.' and '..' if they exist) in 'root', sorted in ascending alphabetical order + files: list of non-directory file names (a link is regarded as a file) in 'root', sorted in ascending alphabetical order + + If the path does not exist, or is a file (a link is regarded as a file), return an empty generator + + .. note:: + + Be aware that setting ``followlinks`` to True can lead to infinite recursion if a link points to a parent directory of itself. sftp_walk() does not keep track of the directories it has already visited. + + :param path: Given path + :param followlinks: False if a symlink is regarded as a file, else True + :returns: A 3-tuple generator + ''' + return SftpPath(path).walk(followlinks)
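A short traversal sketch (host and path are illustrative):

from megfile.sftp import sftp_walk

for root, dirs, files in sftp_walk('sftp://user@example.com//data'):
    for name in files:
        print(f'{root}/{name}')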
+ + +
[docs]def sftp_getmd5( + path: PathLike, recalculate: bool = False, followlinks: bool = True): + ''' + Calculate the md5 value of the file + + :param path: Given path + :param recalculate: Ignore this parameter, just for compatibility + :param followlinks: Ignore this parameter, just for compatibility + :returns: md5 of the file + ''' + return SftpPath(path).md5(recalculate, followlinks)
+ + + + + + + + +
[docs]def sftp_save_as(file_object: BinaryIO, path: PathLike): + '''Write the opened binary stream to the path. + If the parent directory of the path doesn't exist, it will be created. + + :param path: Given path + :param file_object: stream to be read + ''' + return SftpPath(path).save(file_object)
+ + +
[docs]def sftp_open( + path: PathLike, + mode: str = 'r', + buffering=-1, + encoding: Optional[str] = None, + errors: Optional[str] = None, + **kwargs) -> IO[AnyStr]: # pytype: disable=signature-mismatch + '''Open a file on the path. + + :param path: Given path + :param mode: Mode to open file + :param buffering: buffering is an optional integer used to set the buffering policy. + :param encoding: encoding is the name of the encoding used to decode or encode the file. This should only be used in text mode. + :param errors: errors is an optional string that specifies how encoding and decoding errors are to be handled—this cannot be used in binary mode. + :returns: File-Like object + ''' + return SftpPath(path).open(mode, buffering, encoding, errors)
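For example (host and path are illustrative), reading a remote text file:

from megfile.sftp import sftp_open

with sftp_open('sftp://user@example.com//data/notes.txt', 'r', encoding='utf-8') as f:
    print(f.read())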
+ + +
[docs]def sftp_chmod(path: PathLike, mode: int, follow_symlinks: bool = True): + ''' + Change the file mode and permissions, like os.chmod(). + + :param path: Given path + :param mode: the file mode you want to change + :param follow_symlinks: Ignore this parameter, just for compatibility + ''' + return SftpPath(path).chmod(mode, follow_symlinks)
+ + +
[docs]def sftp_absolute(path: PathLike) -> 'SftpPath': + ''' + Make the path absolute, without normalization or resolving symlinks. Returns a new path object + ''' + return SftpPath(path).absolute()
+ + +
[docs]def sftp_rmdir(path: PathLike): + ''' + Remove this directory. The directory must be empty. + ''' + return SftpPath(path).rmdir()
+ + +
[docs]def sftp_copy( + src_path: PathLike, + dst_path: PathLike, + callback: Optional[Callable[[int], None]] = None, + followlinks: bool = False, + overwrite: bool = True): + """ + Copy the file to the given destination path. + + :param src_path: Given path + :param dst_path: The destination path to copy the file to. + :param callback: An optional callback function that takes an integer parameter and is called + periodically during the copy operation to report the number of bytes copied. + :param followlinks: Whether to follow symbolic links when copying directories. + :raises IsADirectoryError: If the source is a directory. + :raises OSError: If there is an error copying the file. + """ + return SftpPath(src_path).copy(dst_path, callback, followlinks, overwrite)
+ + +
[docs]def sftp_sync( + src_path: PathLike, + dst_path: PathLike, + followlinks: bool = False, + force: bool = False, + overwrite: bool = True): + '''Copy file/directory from src_path to dst_path + + :param src_path: Given path + :param dst_path: Given destination path + :param followlinks: False if a symlink is regarded as a file, else True + :param force: Sync files forcibly without skipping identical files; takes priority over 'overwrite', default is False + :param overwrite: whether to overwrite the file if it exists, default is True + ''' + return SftpPath(src_path).sync(dst_path, followlinks, force, overwrite)
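A minimal sketch (hosts and paths are illustrative) mirroring a remote tree:

from megfile.sftp import sftp_sync

sftp_sync('sftp://user@example.com//data', 'sftp://user@example.com//backup/data')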
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/megfile/sftp_path.html b/_modules/megfile/sftp_path.html new file mode 100644 index 00000000..e9a62ee1 --- /dev/null +++ b/_modules/megfile/sftp_path.html @@ -0,0 +1,1468 @@ + + + + + + megfile.sftp_path — megfile documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for megfile.sftp_path

+import atexit
+import fcntl
+import hashlib
+import io
+import os
+import random
+import shlex
+import socket
+import subprocess
+from logging import getLogger as get_logger
+from stat import S_ISDIR, S_ISLNK, S_ISREG
+from typing import IO, AnyStr, BinaryIO, Callable, Iterator, List, Optional, Tuple, Union
+from urllib.parse import urlsplit, urlunsplit
+
+import paramiko
+
+from megfile.config import SFTP_MAX_RETRY_TIMES
+from megfile.errors import SameFileError, _create_missing_ok_generator, patch_method
+from megfile.interfaces import ContextIterator, FileEntry, PathLike, StatResult
+from megfile.lib.compare import is_same_file
+from megfile.lib.compat import fspath
+from megfile.lib.glob import FSFunc, iglob
+from megfile.lib.joinpath import uri_join
+from megfile.pathlike import PathLike, URIPath
+from megfile.smart_path import SmartPath
+from megfile.utils import cachedproperty, calculate_md5, thread_local
+
+_logger = get_logger(__name__)
+
+__all__ = [
+    'SftpPath',
+    'is_sftp',
+    'sftp_readlink',
+    'sftp_glob',
+    'sftp_iglob',
+    'sftp_glob_stat',
+    'sftp_resolve',
+    'sftp_download',
+    'sftp_upload',
+    'sftp_path_join',
+    'sftp_concat',
+    'sftp_lstat',
+]
+
+SFTP_USERNAME = "SFTP_USERNAME"
+SFTP_PASSWORD = "SFTP_PASSWORD"
+SFTP_PRIVATE_KEY_PATH = "SFTP_PRIVATE_KEY_PATH"
+SFTP_PRIVATE_KEY_TYPE = "SFTP_PRIVATE_KEY_TYPE"
+SFTP_PRIVATE_KEY_PASSWORD = "SFTP_PRIVATE_KEY_PASSWORD"
+SFTP_MAX_UNAUTH_CONN = "SFTP_MAX_UNAUTH_CONN"
+MAX_RETRIES = SFTP_MAX_RETRY_TIMES
+DEFAULT_SSH_CONNECT_TIMEOUT = 5
+DEFAULT_SSH_KEEPALIVE_INTERVAL = 15
+
+
+def _make_stat(stat: paramiko.SFTPAttributes) -> StatResult:
+    return StatResult(
+        size=stat.st_size,
+        mtime=stat.st_mtime,
+        isdir=S_ISDIR(stat.st_mode),
+        islnk=S_ISLNK(stat.st_mode),
+        extra=stat,
+    )
+
+
+def get_private_key():
+    key_with_types = {
+        'DSA': paramiko.DSSKey,
+        'RSA': paramiko.RSAKey,
+        'ECDSA': paramiko.ECDSAKey,
+        'ED25519': paramiko.Ed25519Key,
+    }
+    key_type = os.getenv(SFTP_PRIVATE_KEY_TYPE, 'RSA').upper()
+    if os.getenv(SFTP_PRIVATE_KEY_PATH):
+        private_key_path = os.getenv(SFTP_PRIVATE_KEY_PATH)
+        if not os.path.exists(private_key_path):
+            raise FileNotFoundError(
+                f"Private key file does not exist: '{private_key_path}'")
+        return key_with_types[key_type].from_private_key_file(
+            private_key_path, password=os.getenv(SFTP_PRIVATE_KEY_PASSWORD))
+    return None
+
+
+def provide_connect_info(
+        hostname: str,
+        port: Optional[int] = None,
+        username: Optional[str] = None,
+        password: Optional[str] = None,
+):
+    if not port:
+        port = 22
+    if not username:
+        username = os.getenv(SFTP_USERNAME)
+    if not password:
+        password = os.getenv(SFTP_PASSWORD)
+    private_key = get_private_key()
+    return hostname, port, username, password, private_key
+
+
+def sftp_should_retry(error: Exception) -> bool:
+    if type(error) is EOFError:
+        return False
+    elif isinstance(error, (
+            paramiko.ssh_exception.SSHException,
+            ConnectionError,
+            socket.timeout,
+    )):
+        return True
+    elif isinstance(error, OSError):
+        for err_msg in [
+                'Socket is closed',
+                'Cannot assign requested address',
+        ]:
+            if err_msg in str(error):
+                return True
+    return False
+
+
+def _patch_sftp_client_request(
+        client: paramiko.SFTPClient,
+        hostname: str,
+        port: Optional[int] = None,
+        username: Optional[str] = None,
+        password: Optional[str] = None,
+):
+
+    def retry_callback(error, *args, **kwargs):
+        client.close()
+        ssh_client = get_ssh_client(hostname, port, username, password)
+        ssh_client.close()
+        atexit.unregister(ssh_client.close)
+        ssh_key = f'ssh_client:{hostname},{port},{username},{password}'
+        if thread_local.get(ssh_key):
+            del thread_local[ssh_key]
+        sftp_key = f'sftp_client:{hostname},{port},{username},{password}'
+        if thread_local.get(sftp_key):
+            del thread_local[sftp_key]
+
+        new_sftp_client = get_sftp_client(
+            hostname=hostname,
+            port=port,
+            username=username,
+            password=password,
+        )
+        client.sock = new_sftp_client.sock
+
+    client._request = patch_method(
+        client._request,  # pytype: disable=attribute-error
+        max_retries=MAX_RETRIES,
+        should_retry=sftp_should_retry,
+        retry_callback=retry_callback)
+    return client
+
+
+def _get_sftp_client(
+        hostname: str,
+        port: Optional[int] = None,
+        username: Optional[str] = None,
+        password: Optional[str] = None,
+) -> paramiko.SFTPClient:
+    '''Get sftp client
+
+    :returns: sftp client
+    '''
+    session = get_ssh_session(
+        hostname=hostname,
+        port=port,
+        username=username,
+        password=password,
+    )
+    session.invoke_subsystem("sftp")
+    sftp_client = paramiko.SFTPClient(session)
+    _patch_sftp_client_request(sftp_client, hostname, port, username, password)
+    return sftp_client
+
+
+def get_sftp_client(
+        hostname: str,
+        port: Optional[int] = None,
+        username: Optional[str] = None,
+        password: Optional[str] = None,
+) -> paramiko.SFTPClient:
+    '''Get sftp client
+
+    :returns: sftp client
+    '''
+    return thread_local(
+        f'sftp_client:{hostname},{port},{username},{password}',
+        _get_sftp_client, hostname, port, username, password)
+
+
+def _get_ssh_client(
+        hostname: str,
+        port: Optional[int] = None,
+        username: Optional[str] = None,
+        password: Optional[str] = None,
+) -> paramiko.SSHClient:
+    hostname, port, username, password, private_key = provide_connect_info(
+        hostname=hostname,
+        port=port,
+        username=username,
+        password=password,
+    )
+
+    ssh_client = paramiko.SSHClient()
+    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+    max_unauth_connections = int(os.getenv(SFTP_MAX_UNAUTH_CONN, 10))
+    try:
+        fd = os.open(
+            os.path.join(
+                '/tmp',
+                f'megfile-sftp-{hostname}-{random.randint(1, max_unauth_connections)}'
+            ), os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
+    except Exception:
+        _logger.warning(
+            "Can't create file lock in '/tmp', please control the SFTP concurrency count by yourself."
+        )
+        fd = None
+    if fd:
+        fcntl.flock(fd, fcntl.LOCK_EX)
+    ssh_client.connect(
+        hostname=hostname,
+        port=port,
+        username=username,
+        password=password,
+        pkey=private_key,
+        timeout=DEFAULT_SSH_CONNECT_TIMEOUT,
+        auth_timeout=DEFAULT_SSH_CONNECT_TIMEOUT,
+        banner_timeout=DEFAULT_SSH_CONNECT_TIMEOUT,
+    )
+    if fd:
+        fcntl.flock(fd, fcntl.LOCK_UN)
+        os.close(fd)
+    atexit.register(ssh_client.close)
+    return ssh_client
+
+
+def get_ssh_client(
+        hostname: str,
+        port: Optional[int] = None,
+        username: Optional[str] = None,
+        password: Optional[str] = None,
+) -> paramiko.SSHClient:
+    return thread_local(
+        f'ssh_client:{hostname},{port},{username},{password}', _get_ssh_client,
+        hostname, port, username, password)
+
+
+def get_ssh_session(
+        hostname: str,
+        port: Optional[int] = None,
+        username: Optional[str] = None,
+        password: Optional[str] = None,
+) -> paramiko.Channel:
+
+    def retry_callback(error, *args, **kwargs):
+        ssh_client = get_ssh_client(hostname, port, username, password)
+        ssh_client.close()
+        atexit.unregister(ssh_client.close)
+        ssh_key = f'ssh_client:{hostname},{port},{username},{password}'
+        if thread_local.get(ssh_key):
+            del thread_local[ssh_key]
+        sftp_key = f'sftp_client:{hostname},{port},{username},{password}'
+        if thread_local.get(sftp_key):
+            del thread_local[sftp_key]
+
+    return patch_method(
+        _open_session,  # pytype: disable=attribute-error
+        max_retries=MAX_RETRIES,
+        should_retry=sftp_should_retry,
+        retry_callback=retry_callback)(
+            hostname,
+            port,
+            username,
+            password,
+        )
+
+
+def _open_session(
+        hostname: str,
+        port: Optional[int] = None,
+        username: Optional[str] = None,
+        password: Optional[str] = None,
+) -> paramiko.Channel:
+    ssh_client = get_ssh_client(hostname, port, username, password)
+    transport = ssh_client.get_transport()
+    if not transport:
+        raise paramiko.SSHException('Get transport error')
+    transport.set_keepalive(DEFAULT_SSH_KEEPALIVE_INTERVAL)
+    session = transport.open_session(timeout=DEFAULT_SSH_CONNECT_TIMEOUT)
+    if not session:
+        raise paramiko.SSHException('Create session error')
+    session.settimeout(DEFAULT_SSH_CONNECT_TIMEOUT)
+    return session
+
+
+
[docs]def is_sftp(path: PathLike) -> bool: + '''Test if a path is an sftp path + + :param path: Path to be tested + :returns: True if the path is an sftp path, else False + ''' + path = fspath(path) + parts = urlsplit(path) + return parts.scheme == 'sftp'
+ + + + + +
[docs]def sftp_glob(path: PathLike, recursive: bool = True, + missing_ok: bool = True) -> List[str]: + '''Return a path list in ascending alphabetical order, in which each path matches the glob pattern + + 1. If the pattern doesn't match any path, return an empty list + Notice: ``glob.glob`` in the standard library returns ['a/'] instead of an empty list when pathname is like `a/**`, recursive is True and directory 'a' doesn't exist. sftp_glob behaves like ``glob.glob`` in the standard library under such circumstances. + 2. There is no guarantee that each path in the result is different, which means: + Assume there exists a path `/a/b/c/b/d.txt` + and a pattern like `/**/b/**/*.txt` is used to glob; the path above will be returned twice + 3. `**` will match any matched file, directory, symlink and '' by default, when recursive is `True` + 4. sftp_glob returns the same as glob.glob(pathname, recursive=True), in ascending alphabetical order. + 5. Hidden files (filename starts with '.') will not be found in the result + + :param path: Given path, which may contain a glob pattern + :param recursive: If False, `**` will not search directory recursively + :param missing_ok: If False and target path doesn't match any file, raise FileNotFoundError + :returns: A list of paths that match `pathname` + ''' + return list( + sftp_iglob(path=path, recursive=recursive, missing_ok=missing_ok))
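For example (host and pattern are illustrative), collecting all .txt files under a remote tree (hidden files are excluded per note 5):

from megfile.sftp import sftp_glob

paths = sftp_glob('sftp://user@example.com//data/**/*.txt')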
+ + +
[docs]def sftp_glob_stat( + path: PathLike, recursive: bool = True, + missing_ok: bool = True) -> Iterator[FileEntry]: + '''Return an iterator of tuples of path and file stat, in ascending alphabetical order, in which each path matches the glob pattern + + 1. If the pattern doesn't match any path, return an empty iterator + Notice: ``glob.glob`` in the standard library returns ['a/'] instead of an empty list when pathname is like `a/**`, recursive is True and directory 'a' doesn't exist. sftp_glob behaves like ``glob.glob`` in the standard library under such circumstances. + 2. There is no guarantee that each path in the result is different, which means: + Assume there exists a path `/a/b/c/b/d.txt` + and a pattern like `/**/b/**/*.txt` is used to glob; the path above will be returned twice + 3. `**` will match any matched file, directory, symlink and '' by default, when recursive is `True` + 4. The result is the same as glob.glob(pathname, recursive=True), in ascending alphabetical order. + 5. Hidden files (filename starts with '.') will not be found in the result + + :param path: Given path, which may contain a glob pattern + :param recursive: If False, `**` will not search directory recursively + :param missing_ok: If False and target path doesn't match any file, raise FileNotFoundError + :returns: An iterator of tuples of path and file stat, in which paths match `pathname` + ''' + for path in sftp_iglob(path=path, recursive=recursive, + missing_ok=missing_ok): + path_object = SftpPath(path) + yield FileEntry( + path_object.name, path_object.path_with_protocol, + path_object.lstat())
+ + +
[docs]def sftp_iglob(path: PathLike, recursive: bool = True, + missing_ok: bool = True) -> Iterator[str]: + '''Return a path iterator in ascending alphabetical order, in which each path matches the glob pattern + + 1. If the pattern doesn't match any path, return an empty iterator + Notice: ``glob.glob`` in the standard library returns ['a/'] instead of an empty list when pathname is like `a/**`, recursive is True and directory 'a' doesn't exist. sftp_iglob behaves like ``glob.glob`` in the standard library under such circumstances. + 2. There is no guarantee that each path in the result is different, which means: + Assume there exists a path `/a/b/c/b/d.txt` + and a pattern like `/**/b/**/*.txt` is used to glob; the path above will be returned twice + 3. `**` will match any matched file, directory, symlink and '' by default, when recursive is `True` + 4. The result is the same as glob.glob(pathname, recursive=True), in ascending alphabetical order. + 5. Hidden files (filename starts with '.') will not be found in the result + + :param path: Given path, which may contain a glob pattern + :param recursive: If False, `**` will not search directory recursively + :param missing_ok: If False and target path doesn't match any file, raise FileNotFoundError + :returns: An iterator of paths that match `pathname` + ''' + + for path in SftpPath(path).iglob(pattern="", recursive=recursive, + missing_ok=missing_ok): + yield path.path_with_protocol
+ + +
[docs]def sftp_resolve(path: PathLike, strict=False) -> 'str': + '''Equal to fs_realpath + + :param path: Given path + :param strict: Ignore this parameter, just for compatibility + :return: Return the canonical path of the specified filename, eliminating any symbolic links encountered in the path. + :rtype: SftpPath + ''' + return SftpPath(path).resolve(strict).path_with_protocol
+ + +def _sftp_scan_pairs(src_url: PathLike, + dst_url: PathLike) -> Iterator[Tuple[PathLike, PathLike]]: + for src_file_path in SftpPath(src_url).scan(): + content_path = src_file_path[len(src_url):] + if len(content_path) > 0: + dst_file_path = SftpPath(dst_url).joinpath( + content_path).path_with_protocol + else: + dst_file_path = dst_url + yield src_file_path, dst_file_path + + +
[docs]def sftp_download( + src_url: PathLike, + dst_url: PathLike, + callback: Optional[Callable[[int], None]] = None, + followlinks: bool = False, + overwrite: bool = True): + ''' + Download a file from sftp to the local filesystem. + + :param src_url: source sftp path + :param dst_url: target fs path + :param callback: Called periodically during copy; its argument is the data size (in bytes) copied since the last call + :param followlinks: False if a symlink is regarded as a file, else True + :param overwrite: whether to overwrite the file if it exists, default is True + ''' + from megfile.fs import is_fs + from megfile.fs_path import FSPath + + if not is_fs(dst_url): + raise OSError(f'dst_url is not fs path: {dst_url}') + if not is_sftp(src_url) and not isinstance(src_url, SftpPath): + raise OSError(f'src_url is not sftp path: {src_url}') + + dst_path = FSPath(dst_url) + if not overwrite and dst_path.exists(): + return + + if isinstance(src_url, SftpPath): + src_path = src_url + else: + src_path = SftpPath(src_url) + + if followlinks and src_path.is_symlink(): + src_path = src_path.readlink() + if src_path.is_dir(): + raise IsADirectoryError('Is a directory: %r' % src_url) + if str(dst_url).endswith('/'): + raise IsADirectoryError('Is a directory: %r' % dst_url) + + dst_path.parent.makedirs(exist_ok=True) + + sftp_callback = None + if callback: + bytes_transferred_before = 0 + + def sftp_callback(bytes_transferred: int, _total_bytes: int): + nonlocal bytes_transferred_before + callback(bytes_transferred - bytes_transferred_before) + bytes_transferred_before = bytes_transferred + + src_path._client.get( + src_path._real_path, + dst_path.path_without_protocol, + callback=sftp_callback) + + src_stat = src_path.stat() + dst_path.utime(src_stat.st_atime, src_stat.st_mtime) + dst_path.chmod(src_stat.st_mode)
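A minimal sketch (host and paths are illustrative):

from megfile.sftp import sftp_download

sftp_download('sftp://user@example.com//data/a.bin', '/tmp/a.bin')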
+ + +
[docs]def sftp_upload( + src_url: PathLike, + dst_url: PathLike, + callback: Optional[Callable[[int], None]] = None, + followlinks: bool = False, + overwrite: bool = True): + ''' + Upload a file from the local filesystem to the sftp server. + + :param src_url: source fs path + :param dst_url: target sftp path + :param callback: Called periodically during copy; its argument is the data size (in bytes) copied since the last call + :param followlinks: False if a symlink is regarded as a file, else True + :param overwrite: whether to overwrite the file if it exists, default is True + ''' + from megfile.fs import is_fs + from megfile.fs_path import FSPath + + if not is_fs(src_url): + raise OSError(f'src_url is not fs path: {src_url}') + if not is_sftp(dst_url) and not isinstance(dst_url, SftpPath): + raise OSError(f'dst_url is not sftp path: {dst_url}') + + if followlinks and os.path.islink(src_url): + src_url = os.readlink(src_url) + if os.path.isdir(src_url): + raise IsADirectoryError('Is a directory: %r' % src_url) + if str(dst_url).endswith('/'): + raise IsADirectoryError('Is a directory: %r' % dst_url) + + src_path = FSPath(src_url) + if isinstance(dst_url, SftpPath): + dst_path = dst_url + else: + dst_path = SftpPath(dst_url) + if not overwrite and dst_path.exists(): + return + + dst_path.parent.makedirs(exist_ok=True) + + sftp_callback = None + if callback: + bytes_transferred_before = 0 + + def sftp_callback(bytes_transferred: int, _total_bytes: int): + nonlocal bytes_transferred_before + callback(bytes_transferred - bytes_transferred_before) + bytes_transferred_before = bytes_transferred + + dst_path._client.put( + src_path.path_without_protocol, + dst_path._real_path, + callback=sftp_callback) + + src_stat = src_path.stat() + dst_path.utime(src_stat.st_atime, src_stat.st_mtime) + dst_path.chmod(src_stat.st_mode)
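And the reverse direction (host and paths are illustrative):

from megfile.sftp import sftp_upload

sftp_upload('/tmp/a.bin', 'sftp://user@example.com//data/a.bin')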
+ + +
[docs]def sftp_path_join(path: PathLike, *other_paths: PathLike) -> str: + ''' + Concatenate 2 or more paths into a complete path + + :param path: Given path + :param other_paths: Paths to be concatenated + :returns: Concatenated complete path + + .. note :: + + The difference between this function and ``os.path.join`` is that this function ignores the leading slash (which indicates an absolute path) in ``other_paths`` and concatenates directly. + e.g. os.path.join('/path', 'to', '/file') => '/file', but sftp_path_join('/path', 'to', '/file') => '/path/to/file' + ''' + return uri_join(fspath(path), *map(fspath, other_paths))
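Making the note above concrete, mirroring the docstring's own example:

import os.path
from megfile.sftp import sftp_path_join

assert sftp_path_join('/path', 'to', '/file') == '/path/to/file'
assert os.path.join('/path', 'to', '/file') == '/file'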
+ + +
[docs]def sftp_concat(src_paths: List[PathLike], dst_path: PathLike) -> None: + '''Concatenate sftp files to one file. + + :param src_paths: Given source paths + :param dst_path: Given destination path + ''' + dst_path_obj = SftpPath(dst_path) + + def get_real_path(path: PathLike) -> str: + return SftpPath(path)._real_path + + command = [ + 'cat', *map(get_real_path, src_paths), '>', + get_real_path(dst_path) + ] + exec_result = dst_path_obj._exec_command(command) + if exec_result.returncode != 0: + _logger.error(exec_result.stderr) + raise OSError(f'Failed to concat {src_paths} to {dst_path}')
+ + +
[docs]def sftp_lstat(path: PathLike) -> StatResult: + ''' + Get StatResult of file on sftp, including file size and mtime, referring to fs_getsize and fs_getmtime + + :param path: Given path + :returns: StatResult + ''' + return SftpPath(path).lstat()
+ + +
[docs]@SmartPath.register +class SftpPath(URIPath): + """sftp protocol + + uri format: + - absolute path + - sftp://[username[:password]@]hostname[:port]//file_path + - relative path + - sftp://[username[:password]@]hostname[:port]/file_path + """ + + protocol = "sftp" + + def __init__(self, path: "PathLike", *other_paths: "PathLike"): + super().__init__(path, *other_paths) + parts = urlsplit(self.path) + self._urlsplit_parts = parts + self._real_path = parts.path + if parts.path.startswith('//'): + self._root_dir = '/' + else: + self._root_dir = self._client.normalize('.') + self._real_path = os.path.join(self._root_dir, parts.path.lstrip('/')) + +
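Following the uri format above (credentials and host are illustrative):

from megfile.sftp_path import SftpPath

absolute = SftpPath('sftp://user@example.com//var/log/app.log')    # double slash: absolute remote path
relative = SftpPath('sftp://user@example.com/projects/readme.md')  # single slash: relative to the remote default directory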
[docs] @cachedproperty + def parts(self) -> Tuple[str]: + '''A tuple giving access to the path’s various components''' + if self._urlsplit_parts.path.startswith('//'): + new_parts = self._urlsplit_parts._replace(path='//') + else: + new_parts = self._urlsplit_parts._replace(path='/') + parts = [urlunsplit(new_parts)] + path = self._urlsplit_parts.path.lstrip('/') + if path != '': + parts.extend(path.split('/')) + return tuple(parts)
+ + @property + def _client(self): + return get_sftp_client( + hostname=self._urlsplit_parts.hostname, + port=self._urlsplit_parts.port, + username=self._urlsplit_parts.username, + password=self._urlsplit_parts.password) + + def _generate_path_object( + self, sftp_local_path: str, resolve: bool = False): + if resolve or self._root_dir == '/': + sftp_local_path = f"//{sftp_local_path.lstrip('/')}" + else: + sftp_local_path = os.path.relpath( + sftp_local_path, start=self._root_dir) + if sftp_local_path == ".": + sftp_local_path = "/" + new_parts = self._urlsplit_parts._replace(path=sftp_local_path) + return self.from_path(urlunsplit(new_parts)) + +
[docs] def exists(self, followlinks: bool = False) -> bool: + ''' + Test if the path exists + + :param followlinks: False if regard symlink as file, else True + :returns: True if the path exists, else False + + ''' + try: + if followlinks: + self._client.stat(self._real_path) + else: + self._client.lstat(self._real_path) + return True + except FileNotFoundError: + return False
+ +
[docs] def getmtime(self, follow_symlinks: bool = False) -> float: + ''' + Get last-modified time of the file on the given path (in Unix timestamp format). + If the path is an existent directory, return the latest modified time of all file in it. + + :returns: last-modified time + ''' + return self.stat(follow_symlinks=follow_symlinks).mtime
+ +
[docs] def getsize(self, follow_symlinks: bool = False) -> int: + ''' + Get file size on the given file path (in bytes). + If the path in a directory, return the sum of all file size in it, including file in subdirectories (if exist). + The result excludes the size of directory itself. In other words, return 0 Byte on an empty directory path. + + :returns: File size + + ''' + return self.stat(follow_symlinks=follow_symlinks).size
+ +
[docs] def glob(self, pattern, recursive: bool = True, + missing_ok: bool = True) -> List['SftpPath']: + '''Return a path list in ascending alphabetical order, in which each path matches the glob pattern + + 1. If the pattern doesn't match any path, return an empty list + Notice: ``glob.glob`` in the standard library returns ['a/'] instead of an empty list when pathname is like `a/**`, recursive is True and directory 'a' doesn't exist. This method behaves like ``glob.glob`` in the standard library under such circumstances. + 2. There is no guarantee that each path in the result is different, which means: + Assume there exists a path `/a/b/c/b/d.txt` + and a pattern like `/**/b/**/*.txt` is used to glob; the path above will be returned twice + 3. `**` will match any matched file, directory, symlink and '' by default, when recursive is `True` + 4. The result is the same as glob.glob(pathname, recursive=True), in ascending alphabetical order. + 5. Hidden files (filename starts with '.') will not be found in the result + + :param pattern: Glob the given relative pattern in the directory represented by this path + :param recursive: If False, `**` will not search directory recursively + :param missing_ok: If False and target path doesn't match any file, raise FileNotFoundError + :returns: A list of paths that match `pathname` + ''' + return list( + self.iglob( + pattern=pattern, recursive=recursive, missing_ok=missing_ok))
+ +
[docs] def glob_stat( + self, pattern, recursive: bool = True, + missing_ok: bool = True) -> Iterator[FileEntry]: + '''Return an iterator of tuples of path and file stat, in ascending alphabetical order, in which each path matches the glob pattern + + 1. If the pattern doesn't match any path, return an empty iterator + Notice: ``glob.glob`` in the standard library returns ['a/'] instead of an empty list when pathname is like `a/**`, recursive is True and directory 'a' doesn't exist. This method behaves like ``glob.glob`` in the standard library under such circumstances. + 2. There is no guarantee that each path in the result is different, which means: + Assume there exists a path `/a/b/c/b/d.txt` + and a pattern like `/**/b/**/*.txt` is used to glob; the path above will be returned twice + 3. `**` will match any matched file, directory, symlink and '' by default, when recursive is `True` + 4. The result is the same as glob.glob(pathname, recursive=True), in ascending alphabetical order. + 5. Hidden files (filename starts with '.') will not be found in the result + + :param pattern: Glob the given relative pattern in the directory represented by this path + :param recursive: If False, `**` will not search directory recursively + :param missing_ok: If False and target path doesn't match any file, raise FileNotFoundError + :returns: An iterator of tuples of path and file stat, in which paths match `pathname` + ''' + for path_obj in self.iglob(pattern=pattern, recursive=recursive, + missing_ok=missing_ok): + yield FileEntry(path_obj.name, path_obj.path, path_obj.lstat())
+ +
[docs] def iglob(self, pattern, recursive: bool = True, + missing_ok: bool = True) -> Iterator['SftpPath']: + '''Return a path iterator in ascending alphabetical order, in which each path matches the glob pattern + + 1. If the pattern doesn't match any path, return an empty iterator + Notice: ``glob.glob`` in the standard library returns ['a/'] instead of an empty list when pathname is like `a/**`, recursive is True and directory 'a' doesn't exist. This method behaves like ``glob.glob`` in the standard library under such circumstances. + 2. There is no guarantee that each path in the result is different, which means: + Assume there exists a path `/a/b/c/b/d.txt` + and a pattern like `/**/b/**/*.txt` is used to glob; the path above will be returned twice + 3. `**` will match any matched file, directory, symlink and '' by default, when recursive is `True` + 4. The result is the same as glob.glob(pathname, recursive=True), in ascending alphabetical order. + 5. Hidden files (filename starts with '.') will not be found in the result + + :param pattern: Glob the given relative pattern in the directory represented by this path + :param recursive: If False, `**` will not search directory recursively + :param missing_ok: If False and target path doesn't match any file, raise FileNotFoundError + :returns: An iterator of paths that match `pathname` + ''' + glob_path = self.path_with_protocol + if pattern: + glob_path = self.joinpath(pattern).path_with_protocol + + def _scandir(dirname: str) -> Iterator[Tuple[str, bool]]: + for entry in self.from_path(dirname).scandir(): + yield entry.name, entry.is_dir() + + def _exist(path: PathLike, followlinks: bool = False): + return self.from_path(path).exists(followlinks=followlinks) + + def _is_dir(path: PathLike, followlinks: bool = False): + return self.from_path(path).is_dir(followlinks=followlinks) + + fs = FSFunc(_exist, _is_dir, _scandir) + for real_path in _create_missing_ok_generator( + iglob(fspath(glob_path), recursive=recursive, + fs=fs), missing_ok, + FileNotFoundError('No match any file: %r' % glob_path)): + yield self.from_path(real_path)
+ +
[docs] def is_dir(self, followlinks: bool = False) -> bool: + ''' + Test if a path is a directory + + .. note:: + + The difference between this function and ``os.path.isdir`` is that this function regards a symlink as a file + + :param followlinks: False if a symlink is regarded as a file, else True + :returns: True if the path is a directory, else False + + ''' + try: + stat = self.stat(follow_symlinks=followlinks) + if S_ISDIR(stat.st_mode): + return True + except FileNotFoundError: + pass + return False
+ +
[docs] def is_file(self, followlinks: bool = False) -> bool: + ''' + Test if a path is a file + + .. note:: + + The difference between this function and ``os.path.isfile`` is that this function regards a symlink as a file + + :param followlinks: False if a symlink is regarded as a file, else True + :returns: True if the path is a file, else False + + ''' + try: + stat = self.stat(follow_symlinks=followlinks) + if S_ISREG(stat.st_mode): + return True + except FileNotFoundError: + pass + return False
+ +
[docs] def listdir(self) -> List[str]: + ''' + Get all contents of the given sftp path. The result is in ascending alphabetical order. + + :returns: All contents of the path, in ascending alphabetical order + ''' + if not self.is_dir(): + raise NotADirectoryError( + f"Not a directory: '{self.path_with_protocol}'") + return sorted(self._client.listdir(self._real_path))
+ +
[docs] def iterdir(self) -> Iterator['SftpPath']: + ''' + Get all contents of the given sftp path. The result is in ascending alphabetical order. + + :returns: All contents of the path, in ascending alphabetical order + ''' + if not self.is_dir(): + raise NotADirectoryError( + f"Not a directory: '{self.path_with_protocol}'") + for path in self.listdir(): + yield self.joinpath(path) # type: ignore
+ +
[docs] def load(self) -> BinaryIO: + '''Read all content from the specified path into memory + + User should close the BinaryIO manually + + :returns: Binary stream + ''' + with self.open(mode='rb') as f: + data = f.read() + return io.BytesIO(data)
+ +
[docs] def mkdir(self, mode=0o777, parents: bool = False, exist_ok: bool = False): + ''' + make a directory on sftp, including parent directory + + If there exists a file on the path, raise FileExistsError + + :param mode: If mode is given, it is combined with the process’ umask value to determine the file mode and access flags. + :param parents: If parents is true, any missing parents of this path are created as needed; + If parents is false (the default), a missing parent raises FileNotFoundError. + :param exist_ok: If False and target directory exists, raise FileExistsError + :raises: FileExistsError + ''' + if self.exists(): + if not exist_ok: + raise FileExistsError( + f"File exists: '{self.path_with_protocol}'") + return + + if parents: + parent_path_objects = [] + for parent_path_object in self.parents: + if parent_path_object.exists(): + break + else: + parent_path_objects.append(parent_path_object) + for parent_path_object in parent_path_objects[::-1]: + parent_path_object.mkdir( + mode=mode, parents=False, exist_ok=True) + try: + self._client.mkdir(path=self._real_path, mode=mode) + except OSError: + # catch OSError when mkdir concurrently + if not self.exists(): + raise
+ +
[docs] def realpath(self) -> str: + '''Return the real path of given path + + :returns: Real path of given path + ''' + return self.resolve().path_with_protocol
+ + def _is_same_backend(self, other: 'SftpPath') -> bool: + return self._urlsplit_parts.hostname == other._urlsplit_parts.hostname and self._urlsplit_parts.username == other._urlsplit_parts.username and self._urlsplit_parts.password == other._urlsplit_parts.password and self._urlsplit_parts.port == other._urlsplit_parts.port + + def _is_same_protocol(self, path): + return is_sftp(path) + +
[docs] def rename(self, dst_path: PathLike, overwrite: bool = True) -> 'SftpPath': + ''' + rename file on sftp + + :param dst_path: Given destination path + :param overwrite: whether or not overwrite file when exists + ''' + if not self._is_same_protocol(dst_path): + raise OSError('Not a %s path: %r' % (self.protocol, dst_path)) + + dst_path = self.from_path(str(dst_path).rstrip('/')) + + src_stat = self.stat() + + if self._is_same_backend(dst_path): + if overwrite: + dst_path.remove(missing_ok=True) + self._client.rename(self._real_path, dst_path._real_path) + else: + self.sync(dst_path, overwrite=overwrite) + self.remove(missing_ok=True) + else: + if self.is_dir(): + for file_entry in self.scandir(): + self.from_path(file_entry.path).rename( + dst_path.joinpath(file_entry.name)) + self._client.rmdir(self._real_path) + else: + if overwrite or not dst_path.exists(): + with self.open('rb') as fsrc: + with dst_path.open('wb') as fdst: + length = 16 * 1024 + while True: + buf = fsrc.read(length) + if not buf: + break + fdst.write(buf) + self.unlink() + + dst_path.utime(src_stat.st_atime, src_stat.st_mtime) + dst_path.chmod(src_stat.st_mode) + return dst_path
+ +
[docs] def replace(self, dst_path: PathLike, overwrite: bool = True) -> 'SftpPath': + ''' + move file on sftp + + :param dst_path: Given destination path + :param overwrite: whether or not overwrite file when exists + ''' + return self.rename(dst_path=dst_path, overwrite=overwrite)
+ +
[docs] def remove(self, missing_ok: bool = False) -> None: + ''' + Remove the file or directory on sftp + + :param missing_ok: if False and target file/directory not exists, raise FileNotFoundError + ''' + if missing_ok and not self.exists(): + return + if self.is_dir(): + for file_entry in self.scandir(): + self.from_path(file_entry.path).remove(missing_ok=missing_ok) + self._client.rmdir(self._real_path) + else: + self._client.unlink(self._real_path)
+ +
[docs] def scan(self, missing_ok: bool = True, + followlinks: bool = False) -> Iterator[str]: + ''' + Iteratively traverse only files in the given directory, in alphabetical order. + Every iteration of the generator yields a path string. + + If the path is a file path, yield the file only + If the path is non-existent, return an empty generator + + :param missing_ok: If False and there's no file in the directory, raise FileNotFoundError + :returns: A file path generator + ''' + scan_stat_iter = self.scan_stat( + missing_ok=missing_ok, followlinks=followlinks) + + for file_entry in scan_stat_iter: + yield file_entry.path
+ +
[docs] def scan_stat(self, missing_ok: bool = True, + followlinks: bool = False) -> Iterator[FileEntry]: + ''' + Iteratively traverse only files in given directory, in alphabetical order. + Every iteration on generator yields a tuple of path string and file stat + + :param missing_ok: If False and there's no file in the directory, raise FileNotFoundError + :returns: A file path generator + ''' + + def create_generator() -> Iterator[FileEntry]: + + try: + stat = self.stat(follow_symlinks=followlinks) + except FileNotFoundError: + return + if S_ISREG(stat.st_mode): + yield FileEntry( + self.name, self.path_with_protocol, + self.stat(follow_symlinks=followlinks)) + return + + for name in self.listdir(): + current_path = self.joinpath(name) + if current_path.is_dir(): + yield from current_path.scan_stat( + missing_ok=missing_ok, + followlinks=followlinks, + ) + else: + yield FileEntry( + current_path.name, # type: ignore + current_path.path_with_protocol, + current_path.stat(follow_symlinks=followlinks)) + + return _create_missing_ok_generator( + create_generator(), missing_ok, + FileNotFoundError( + 'No match any file in: %r' % self.path_with_protocol))
+ +
[docs] def scandir(self) -> Iterator[FileEntry]: + ''' + Get all content of given file path. + + :returns: An iterator contains all contents have prefix path + ''' + if not self.exists(): + raise FileNotFoundError( + 'No such directory: %r' % self.path_with_protocol) + + if not self.is_dir(): + raise NotADirectoryError( + 'Not a directory: %r' % self.path_with_protocol) + + def create_generator(): + for name in self.listdir(): + current_path = self.joinpath(name) + yield FileEntry( + current_path.name, # type: ignore + current_path.path_with_protocol, + current_path.lstat()) # type: ignore + + return ContextIterator(create_generator())
+ +
[docs] def stat(self, follow_symlinks=True) -> StatResult: + ''' + Get StatResult of file on sftp, including file size and mtime, referring to fs_getsize and fs_getmtime + + :returns: StatResult + ''' + if follow_symlinks: + result = _make_stat(self._client.stat(self._real_path)) + else: + result = _make_stat(self._client.lstat(self._real_path)) + return result
+ + + +
[docs] def walk(self, followlinks: bool = False + ) -> Iterator[Tuple[str, List[str], List[str]]]: + ''' + Generate the file names in a directory tree by walking the tree top-down. + For each directory in the tree rooted at directory path (including path itself), + it yields a 3-tuple (root, dirs, files). + + root: a string of the current path + dirs: list of subdirectory names (excluding '.' and '..' if they exist) in 'root', sorted in ascending alphabetical order + files: list of non-directory file names (a link is regarded as a file) in 'root', sorted in ascending alphabetical order + + If the path does not exist, or is a file (a link is regarded as a file), return an empty generator + + .. note:: + + Be aware that setting ``followlinks`` to True can lead to infinite recursion if a link points to a parent directory of itself. walk() does not keep track of the directories it has already visited. + + :param followlinks: False if a symlink is regarded as a file, else True + :returns: A 3-tuple generator + ''' + if not self.exists(followlinks=followlinks): + return + + if self.is_file(followlinks=followlinks): + return + + stack = [self._real_path] + while stack: + root = stack.pop() + dirs, files = [], [] + filenames = self._client.listdir(root) + for name in filenames: + current_path = self._generate_path_object(root).joinpath(name) + if current_path.is_file(followlinks=followlinks): + files.append(name) + elif current_path.is_dir(followlinks=followlinks): + dirs.append(name) + + dirs = sorted(dirs) + files = sorted(files) + + yield self._generate_path_object( + root).path_with_protocol, dirs, files + + stack.extend( + (os.path.join(root, directory) for directory in reversed(dirs)))
+ +
[docs] def resolve(self, strict=False) -> 'SftpPath': + '''Equal to sftp_realpath + + :param strict: Ignore this parameter, just for compatibility + :return: Return the canonical path of the specified filename, eliminating any symbolic links encountered in the path. + :rtype: SftpPath + ''' + path = self._client.normalize(self._real_path) + return self._generate_path_object(path, resolve=True)
+ +
[docs] def md5(self, recalculate: bool = False, followlinks: bool = True): + ''' + Calculate the md5 value of the file + + :param recalculate: Ignore this parameter, just for compatibility + :param followlinks: Ignore this parameter, just for compatibility + :returns: md5 of the file + ''' + if self.is_dir(): + hash_md5 = hashlib.md5() # nosec + for file_name in self.listdir(): + chunk = self.joinpath(file_name).md5( # type: ignore + recalculate=recalculate, followlinks=followlinks).encode() + hash_md5.update(chunk) + return hash_md5.hexdigest() + with self.open('rb') as src: # type: ignore + md5 = calculate_md5(src) + return md5
+ + + + + + + +
[docs] def cwd(self) -> 'SftpPath': + '''Return current working directory + + :returns: Current working directory + ''' + return self._generate_path_object(self._client.normalize('.'))
+ +
[docs] def save(self, file_object: BinaryIO): + '''Write the opened binary stream to the path. + If the parent directory of the path doesn't exist, it will be created. + + :param file_object: stream to be read + ''' + with self.open(mode='wb') as output: + output.write(file_object.read())
+ +
[docs] def open( + self, + mode: str = 'r', + buffering=-1, + encoding: Optional[str] = None, + errors: Optional[str] = None, + **kwargs) -> IO[AnyStr]: # pytype: disable=signature-mismatch + '''Open a file on the path. + + :param mode: Mode to open file + :param buffering: buffering is an optional integer used to set the buffering policy. + :param encoding: encoding is the name of the encoding used to decode or encode the file. This should only be used in text mode. + :param errors: errors is an optional string that specifies how encoding and decoding errors are to be handled—this cannot be used in binary mode. + :returns: File-Like object + ''' + if 'w' in mode or 'x' in mode or 'a' in mode: + if self.is_dir(): + raise IsADirectoryError( + 'Is a directory: %r' % self.path_with_protocol) + self.parent.mkdir(parents=True, exist_ok=True) + elif not self.exists(): + raise FileNotFoundError( + 'No such file: %r' % self.path_with_protocol) + fileobj = self._client.open(self._real_path, mode, bufsize=buffering) + fileobj.name = self.path + if 'r' in mode and 'b' not in mode: + return io.TextIOWrapper( + fileobj, encoding=encoding, errors=errors) # type: ignore + return fileobj # type: ignore
+ +
[docs] def chmod(self, mode: int, follow_symlinks: bool = True): + ''' + Change the file mode and permissions, like os.chmod(). + + :param mode: the file mode you want to change to + :param follow_symlinks: Ignore this parameter, just for compatibility + ''' + return self._client.chmod(path=self._real_path, mode=mode)
+ +
[docs] def absolute(self) -> 'SftpPath': + ''' + Make the path absolute and return a new path object. For SftpPath this is implemented via ``resolve()``, so symbolic links are resolved along the way. + ''' + return self.resolve()
+ +
[docs] def rmdir(self): + ''' + Remove this directory. The directory must be empty. + ''' + if len(self.listdir()) > 0: + raise OSError(f"Directory not empty: '{self.path_with_protocol}'") + return self._client.rmdir(self._real_path)
+ + def _exec_command( + self, + command: List[str], + bufsize: int = -1, + timeout: Optional[int] = None, + environment: Optional[dict] = None, + ) -> subprocess.CompletedProcess: + with get_ssh_session( + hostname=self._urlsplit_parts.hostname, + port=self._urlsplit_parts.port, + username=self._urlsplit_parts.username, + password=self._urlsplit_parts.password, + ) as chan: + chan.settimeout(timeout) + if environment: + chan.update_environment(environment) + chan.exec_command(' '.join([shlex.quote(arg) for arg in command])) + stdout = chan.makefile( + "r", bufsize).read().decode(errors="backslashreplace") + stderr = chan.makefile_stderr( + "r", bufsize).read().decode(errors="backslashreplace") + returncode = chan.recv_exit_status() + return subprocess.CompletedProcess( + args=command, returncode=returncode, stdout=stdout, stderr=stderr) + +
[docs] def copy( + self, + dst_path: PathLike, + callback: Optional[Callable[[int], None]] = None, + followlinks: bool = False, + overwrite: bool = True): + """ + Copy the file to the given destination path. + + :param dst_path: The destination path to copy the file to. + :param callback: An optional callback function that takes an integer parameter and is called + periodically during the copy operation to report the number of bytes copied. + :param followlinks: Whether to follow symbolic links when copying directories. + :param overwrite: Whether to overwrite the destination file if it already exists, default is True. + :raises IsADirectoryError: If the source is a directory. + :raises OSError: If there is an error copying the file. + """ + if followlinks and self.is_symlink(): + return self.readlink().copy(dst_path=dst_path, callback=callback) + + if not self._is_same_protocol(dst_path): + raise OSError('Not a %s path: %r' % (self.protocol, dst_path)) + if str(dst_path).endswith('/'): + raise IsADirectoryError('Is a directory: %r' % dst_path) + + if self.is_dir(): + raise IsADirectoryError( + 'Is a directory: %r' % self.path_with_protocol) + + if not overwrite and self.from_path(dst_path).exists(): + return + + self.from_path(os.path.dirname(dst_path)).makedirs(exist_ok=True) + dst_path = self.from_path(dst_path) + if self._is_same_backend(dst_path): + if self._real_path == dst_path._real_path: + raise SameFileError( + f"'{self.path}' and '{dst_path.path}' are the same file") + exec_result = self._exec_command( + ["cp", self._real_path, dst_path._real_path]) + if exec_result.returncode != 0: + _logger.error(exec_result.stderr) + raise OSError( + f'Copy file error, returncode: {exec_result.returncode}') + if callback: + callback(self.stat(follow_symlinks=followlinks).size) + else: + with self.open('rb') as fsrc: + with dst_path.open('wb') as fdst: + length = 16 * 1024 + while True: + buf = fsrc.read(length) + if not buf: + break + fdst.write(buf) + if callback: + callback(len(buf)) + + src_stat = self.stat() + dst_path.utime(src_stat.st_atime, src_stat.st_mtime) + dst_path._client.chmod(dst_path._real_path, src_stat.st_mode)
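A usage sketch (paths hypothetical): when source and destination are on the same SFTP backend, the copy is executed server-side with `cp` and the callback fires once with the full size; otherwise the bytes are streamed through the client in 16 KB chunks:

```python
from megfile.sftp_path import SftpPath

src = SftpPath('sftp://user@example.com/data/a.bin')
src.copy(
    'sftp://user@example.com/backup/a.bin',
    callback=lambda nbytes: print(f'{nbytes} bytes copied'))
```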
+ +
[docs] def sync( + self, + dst_path: PathLike, + followlinks: bool = False, + force: bool = False, + overwrite: bool = True): + '''Copy the file/directory from this path to dst_path + + :param dst_path: Given destination path + :param followlinks: False to regard symlinks as files, else True + :param force: Force sync and do not skip identical files; takes priority over 'overwrite', default is False + :param overwrite: whether to overwrite existing files, default is True + ''' + if not self._is_same_protocol(dst_path): + raise OSError('Not a %s path: %r' % (self.protocol, dst_path)) + + for src_file_path, dst_file_path in _sftp_scan_pairs( + self.path_with_protocol, dst_path): + dst_path = self.from_path(dst_file_path) + src_path = self.from_path(src_file_path) + + if force: + pass + elif not overwrite and dst_path.exists(): + continue + elif dst_path.exists() and is_same_file(src_path.stat(), + dst_path.stat(), 'copy'): + continue + + self.from_path(src_file_path).copy( + dst_file_path, followlinks=followlinks)
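For example (directories hypothetical), mirroring a remote directory on the same host while skipping files that are already identical:

```python
from megfile.sftp_path import SftpPath

SftpPath('sftp://user@example.com/data').sync(
    'sftp://user@example.com/backup/data', force=False, overwrite=True)
```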
+ +
[docs] def utime(self, atime: Union[float, int], mtime: Union[float, int]) -> None: + """ + Set the access and modified times of the file specified by path. + + :param atime: The access time to be set. + :type atime: Union[float, int] + :param mtime: The modification time to be set. + :type mtime: Union[float, int] + :return: None + """ + return self._client.utime(self._real_path, (atime, mtime))
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/megfile/smart.html b/_modules/megfile/smart.html new file mode 100644 index 00000000..d7372187 --- /dev/null +++ b/_modules/megfile/smart.html @@ -0,0 +1,1151 @@ + + + + + + megfile.smart — megfile documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for megfile.smart

+import os
+from collections import defaultdict
+from functools import partial
+from stat import S_ISDIR as stat_isdir
+from stat import S_ISLNK as stat_islnk
+from typing import IO, Any, AnyStr, BinaryIO, Callable, Iterable, Iterator, List, Optional, Tuple
+
+from tqdm import tqdm
+
+from megfile.errors import S3UnknownError
+from megfile.fs import fs_copy, is_fs
+from megfile.interfaces import Access, ContextIterator, FileCacher, FileEntry, NullCacher, PathLike, StatResult
+from megfile.lib.combine_reader import CombineReader
+from megfile.lib.compare import get_sync_type, is_same_file
+from megfile.lib.compat import fspath
+from megfile.lib.glob import globlize, ungloblize
+from megfile.s3 import is_s3, s3_concat, s3_copy, s3_download, s3_load_content, s3_open, s3_upload
+from megfile.sftp import sftp_concat, sftp_copy, sftp_download, sftp_upload
+from megfile.smart_path import SmartPath, get_traditional_path
+from megfile.utils import combine, generate_cache_path
+
+__all__ = [
+    'smart_access',
+    'smart_cache',
+    'smart_combine_open',
+    'smart_copy',
+    'smart_exists',
+    'smart_getmtime',
+    'smart_getsize',
+    'smart_glob_stat',
+    'smart_glob',
+    'smart_iglob',
+    'smart_isdir',
+    'smart_isfile',
+    'smart_islink',
+    'smart_listdir',
+    'smart_load_content',
+    'smart_save_content',
+    'smart_load_from',
+    'smart_load_text',
+    'smart_save_text',
+    'smart_makedirs',
+    'smart_open',
+    'smart_path_join',
+    'smart_remove',
+    'smart_move',
+    'smart_rename',
+    'smart_save_as',
+    'smart_scan_stat',
+    'smart_scan',
+    'smart_scandir',
+    'smart_stat',
+    'smart_sync',
+    'smart_sync_with_progress',
+    'smart_touch',
+    'smart_unlink',
+    'smart_walk',
+    'smart_getmd5',
+    'smart_realpath',
+    'smart_ismount',
+    'smart_relpath',
+    'smart_abspath',
+    'smart_isabs',
+    'smart_symlink',
+    'smart_readlink',
+    'register_copy_func',
+    'smart_concat',
+    'SmartCacher',
+]
+
+
+
+
+
+
+
+
+
[docs]def smart_isdir(path: PathLike, followlinks: bool = False) -> bool: + ''' + Test if a file path or an s3 url is a directory + + :param path: Path to be tested + :returns: True if the path is a directory, else False + ''' + return SmartPath(path).is_dir(followlinks=followlinks)
+ + +
[docs]def smart_isfile(path: PathLike, followlinks: bool = False) -> bool: + ''' + Test if a file path or an s3 url is a file + + :param path: Path to be tested + :returns: True if the path is a file, else False + ''' + return SmartPath(path).is_file(followlinks=followlinks)
+ + + + + +
[docs]def smart_access(path: PathLike, mode: Access) -> bool: + ''' + Test if the path has the access permission described by mode + + :param path: Path to be tested + :param mode: Access mode (Access.READ, Access.WRITE, Access.BUCKETREAD, Access.BUCKETWRITE) + :returns: True if the path has the read/write access, else False + ''' + return SmartPath(path).access(mode)
+ + +
[docs]def smart_exists(path: PathLike, followlinks: bool = False) -> bool: + ''' + Test if the path or s3_url exists + + :param path: Path to be tested + :returns: True if the path exists, else False + ''' + return SmartPath(path).exists(followlinks=followlinks)
+ + +
[docs]def smart_listdir(path: Optional[PathLike] = None) -> List[str]: + ''' + Get all contents of the given s3_url or file path. The result is in ascending alphabetical order. + + :param path: Given path + :returns: All contents of the given s3_url or file path, in ascending alphabetical order + :raises: FileNotFoundError, NotADirectoryError + ''' + if path is None: + return sorted(os.listdir(path)) + return SmartPath(path).listdir()
+ + +
[docs]def smart_scandir(path: Optional[PathLike] = None) -> Iterator[FileEntry]: + ''' + Get all contents of the given s3_url or file path. + + :param path: Given path + :returns: An iterator over all entries under the given path + :raises: FileNotFoundError, NotADirectoryError + ''' + if path is None: + + def create_generator(): + with os.scandir(None) as entries: + for entry in entries: + stat = entry.stat() + yield FileEntry( + entry.name, entry.path, + StatResult( + size=stat.st_size, + ctime=stat.st_ctime, + mtime=stat.st_mtime, + isdir=stat_isdir(stat.st_mode), + islnk=stat_islnk(stat.st_mode), + extra=stat, + )) + + return ContextIterator(create_generator()) + return SmartPath(path).scandir()
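For example (bucket hypothetical; this assumes ``FileEntry`` exposes ``name``, ``path`` and ``stat`` fields, as constructed above):

```python
from megfile import smart_scandir

for entry in smart_scandir('s3://my-bucket/prefix'):
    print(entry.name, entry.stat.size, entry.stat.isdir)
```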
+ + +
[docs]def smart_getsize(path: PathLike) -> int: + ''' + Get file size on the given s3_url or file path (in bytes). + If the path is a directory, return the sum of all file sizes in it, including files in subdirectories (if they exist). + The result excludes the size of the directory itself. In other words, return 0 Bytes on an empty directory path. + + :param path: Given path + :returns: File size + :raises: FileNotFoundError + ''' + return SmartPath(path).getsize()
+ + +
[docs]def smart_getmtime(path: PathLike) -> float: + ''' + Get the last-modified time of the file on the given s3_url or file path (in Unix timestamp format). + If the path is an existent directory, return the latest modified time of all files in it. The mtime of an empty directory is 1970-01-01 00:00:00 + + :param path: Given path + :returns: Last-modified time + :raises: FileNotFoundError + ''' + return SmartPath(path).getmtime()
+ + +
[docs]def smart_stat(path: PathLike, follow_symlinks=True) -> StatResult: + ''' + Get StatResult of s3_url or file path + + :param path: Given path + :returns: StatResult + :raises: FileNotFoundError + ''' + return SmartPath(path).stat(follow_symlinks=follow_symlinks)
+ + +def smart_lstat(path: PathLike) -> StatResult: + ''' + Get StatResult of path but do not follow symbolic links + + :param path: Given path + :returns: StatResult + :raises: FileNotFoundError + ''' + return SmartPath(path).lstat() + + +_copy_funcs = { + 's3': { + 's3': s3_copy, + 'file': s3_download, + }, + 'file': { + 's3': s3_upload, + 'file': fs_copy, + 'sftp': sftp_upload, + }, + 'sftp': { + 'file': sftp_download, + 'sftp': sftp_copy, + }, +} + + +
[docs]def register_copy_func( + src_protocol: str, + dst_protocol: str, + copy_func: Optional[ + Callable[[str, str, Optional[Callable[[int], None]]], None]] = None, +) -> None: + ''' + Register a copy function between two protocols; duplicate registration is not allowed + + :param src_protocol: protocol name of the source file, e.g. 's3' + :param dst_protocol: protocol name of the destination file, e.g. 's3' + :param copy_func: copy function, its type is: Callable[[str, str, Optional[Callable[[int], None]]], None] + ''' + try: + _copy_funcs[src_protocol][dst_protocol] + except KeyError: + dst_dict = _copy_funcs.get(src_protocol, {}) + dst_dict[dst_protocol] = copy_func + _copy_funcs[src_protocol] = dst_dict + except Exception as error: + raise error + else: + raise ValueError( + 'Copy function already exists: {}->{}'.format( + src_protocol, dst_protocol))
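A registration sketch: the `mem` protocol and its copy routine below are hypothetical; the registered function should accept the keyword arguments that ``smart_copy`` forwards:

```python
from typing import Callable, Optional

from megfile.smart import register_copy_func

def mem_to_file_copy(
        src_path: str, dst_path: str,
        callback: Optional[Callable[[int], None]] = None,
        followlinks: bool = False, overwrite: bool = True) -> None:
    # Hypothetical fast path for copying from a custom 'mem' protocol to fs
    ...

register_copy_func('mem', 'file', mem_to_file_copy)
```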
+ + +def _default_copy_func( + src_path: PathLike, + dst_path: PathLike, + callback: Optional[Callable[[int], None]] = None, + followlinks: bool = False, + overwrite: bool = True) -> None: + if not overwrite and smart_exists(dst_path): + return + + with smart_open(src_path, 'rb', followlinks=followlinks) as fsrc: + with smart_open(dst_path, 'wb') as fdst: + # This magic number is copied from copyfileobj + length = 16 * 1024 + while True: + buf = fsrc.read(length) + if not buf: + break + fdst.write(buf) + if callback is None: + continue + callback(len(buf)) + try: + src_stat = smart_stat(src_path) + dst_path = SmartPath(dst_path) + dst_path.utime(src_stat.st_atime, src_stat.st_mtime) + except (NotImplementedError, TypeError): + pass + + +
[docs]def smart_copy( + src_path: PathLike, + dst_path: PathLike, + callback: Optional[Callable[[int], None]] = None, + followlinks: bool = False, + overwrite: bool = True) -> None: + ''' + Copy file from source path to destination path + + Here are a few examples: :: + + >>> from tqdm import tqdm + >>> from megfile import smart_copy, smart_stat + >>> class Bar: + ... def __init__(self, total=10): + ... self._bar = tqdm(total=10) + ... + ... def __call__(self, bytes_num): + ... self._bar.update(bytes_num) + ... + >>> src_path = 'test.png' + >>> dst_path = 'test1.png' + >>> smart_copy(src_path, dst_path, callback=Bar(total=smart_stat(src_path).size), followlinks=False) + 856960it [00:00, 260592384.24it/s] + + :param src_path: Given source path + :param dst_path: Given destination path + :param callback: Called periodically during copy, and the input parameter is the data size (in bytes) of copy since the last call + :param followlinks: False if regard symlink as file, else True + :param overwrite: whether or not overwrite file when exists, default is True + ''' + # this function contains plenty of manual polymorphism + if smart_islink(src_path) and is_s3(dst_path) and not followlinks: + return + + src_protocol, _ = SmartPath._extract_protocol(src_path) + dst_protocol, _ = SmartPath._extract_protocol(dst_path) + + try: + copy_func = _copy_funcs[src_protocol][dst_protocol] + except KeyError: + copy_func = _default_copy_func + try: + copy_func( + src_path, + dst_path, + callback=callback, + followlinks=followlinks, + overwrite=overwrite) # type: ignore + except S3UnknownError as e: + if 'cannot schedule new futures after interpreter shutdown' in str(e): + _default_copy_func( + src_path, + dst_path, + callback=callback, + followlinks=followlinks, + overwrite=overwrite) # type: ignore + else: + raise
+ + +def _smart_sync_single_file(items: dict): + src_root_path = items['src_root_path'] + dst_root_path = items['dst_root_path'] + src_file_path = items['src_file_path'] + callback = items['callback'] + followlinks = items['followlinks'] + callback_after_copy_file = items['callback_after_copy_file'] + force = items['force'] + overwrite = items['overwrite'] + + content_path = os.path.relpath(src_file_path, start=src_root_path) + if len(content_path) and content_path != '.': + content_path = content_path.lstrip('/') + dst_abs_file_path = smart_path_join(dst_root_path, content_path) + else: + # if content_path is empty, which means smart_isfile(src_path) is True, this function is equal to smart_copy + dst_abs_file_path = dst_root_path + + src_protocol, _ = SmartPath._extract_protocol(src_file_path) + dst_protocol, _ = SmartPath._extract_protocol(dst_abs_file_path) + should_sync = True + try: + if force: + pass + elif not overwrite and smart_exists(dst_abs_file_path): + should_sync = False + elif smart_exists(dst_abs_file_path) and is_same_file( + smart_stat(src_file_path, follow_symlinks=followlinks), + smart_stat(dst_abs_file_path, follow_symlinks=followlinks), + get_sync_type(src_protocol, dst_protocol)): + should_sync = False + except NotImplementedError: + pass + + if should_sync: + copy_callback = partial(callback, src_file_path) if callback else None + smart_copy( + src_file_path, + dst_abs_file_path, + callback=copy_callback, + followlinks=followlinks) + if callback_after_copy_file: + callback_after_copy_file(src_file_path, dst_abs_file_path) + return should_sync + + +
[docs]def smart_sync( + src_path: PathLike, + dst_path: PathLike, + callback: Optional[Callable[[str, int], None]] = None, + followlinks: bool = False, + callback_after_copy_file: Optional[Callable[[str, str], None]] = None, + src_file_stats: Optional[Iterable[FileEntry]] = None, + map_func: Callable[[Callable, Iterable], Any] = map, + force: bool = False, + overwrite: bool = True) -> None: + ''' + Sync file or directory + + .. note :: + + When the parameter is a file, this function behaves like ``smart_copy``. + + If a file and a directory share the same name at the same level, sync treats the entry as a file first. + + Here are a few examples: :: + + >>> from tqdm import tqdm + >>> from threading import Lock + >>> from megfile import smart_sync, smart_stat, smart_glob + >>> class Bar: + ... def __init__(self, total_file): + ... self._total_file = total_file + ... self._bar = None + ... self._now = None + ... self._file_index = 0 + ... self._lock = Lock() + ... def __call__(self, path, num_bytes): + ... with self._lock: + ... if path != self._now: + ... self._file_index += 1 + ... print("copy file {}/{}:".format(self._file_index, self._total_file)) + ... if self._bar: + ... self._bar.close() + ... self._bar = tqdm(total=smart_stat(path).size) + ... self._now = path + ... self._bar.update(num_bytes) + >>> total_file = len(list(smart_glob('src_path'))) + >>> smart_sync('src_path', 'dst_path', callback=Bar(total_file=total_file)) + + :param src_path: Given source path + :param dst_path: Given destination path + :param callback: Called periodically during copy, and the input parameter is the data size (in bytes) copied since the last call + :param followlinks: False to regard symlinks as files, else True + :param callback_after_copy_file: Called after a successful copy, and the input parameters are the src file path and the dst file path + :param src_file_stats: If this parameter is not None, only this parameter's files will be synced, + and src_path is the root_path of these files used to calculate the path of the target file. + This parameter exists in order to reduce file traversal times. + :param map_func: A Callable func like `map`. You can use ThreadPoolExecutor.map, Pool.map and so on if you need concurrent capability. + default is the standard library `map`. + :param force: Force sync and do not skip identical files; takes priority over 'overwrite', default is False + :param overwrite: whether to overwrite existing files, default is True + ''' + if not smart_exists(src_path): + raise FileNotFoundError(f'No match file: {src_path}') + + src_path, dst_path = get_traditional_path(src_path), get_traditional_path( + dst_path) + if not src_file_stats: + src_file_stats = smart_scan_stat(src_path, followlinks=followlinks) + + def create_generator(): + for src_file_entry in src_file_stats: + if src_file_entry.name: + src_file_path = src_file_entry.path + yield dict( + src_root_path=src_path, + dst_root_path=dst_path, + src_file_path=src_file_path, + callback=callback, + followlinks=followlinks, + callback_after_copy_file=callback_after_copy_file, + force=force, + overwrite=overwrite, + ) + + for _ in map_func(_smart_sync_single_file, create_generator()): + pass
+ + +
[docs]def smart_sync_with_progress( + src_path, + dst_path, + callback: Optional[Callable[[str, int], None]] = None, + followlinks: bool = False, + map_func: Callable[[Callable, Iterable], Iterator] = map, + force: bool = False, + overwrite: bool = True): + ''' + Sync file or directory with a progress bar + + :param src_path: Given source path + :param dst_path: Given destination path + :param callback: Called periodically during copy, and the input parameter is the data size (in bytes) copied since the last call + :param followlinks: False to regard symlinks as files, else True + :param map_func: A Callable func like `map`. You can use ThreadPoolExecutor.map, Pool.map and so on if you need concurrent capability. + default is the standard library `map`. + :param force: Force sync and do not skip identical files; takes priority over 'overwrite', default is False + :param overwrite: whether to overwrite existing files, default is True + ''' + if not smart_exists(src_path): + raise FileNotFoundError(f'No match file: {src_path}') + + src_path, dst_path = get_traditional_path(src_path), get_traditional_path( + dst_path) + file_stats = list(smart_scan_stat(src_path, followlinks=followlinks)) + tbar = tqdm(total=len(file_stats), ascii=True) + sbar = tqdm(unit='B', ascii=True, unit_scale=True, unit_divisor=1024) + + def tqdm_callback(current_src_path, length: int): + sbar.update(length) + if callback: + callback(current_src_path, length) + + def callback_after_copy_file(src_file_path, dst_file_path): + tbar.update(1) + + smart_sync( + src_path, + dst_path, + callback=tqdm_callback, + followlinks=followlinks, + callback_after_copy_file=callback_after_copy_file, + src_file_stats=file_stats, + map_func=map_func, + force=force, + overwrite=overwrite, + ) + tbar.close() + sbar.close()
+ + +
[docs]def smart_remove(path: PathLike, missing_ok: bool = False) -> None: + ''' + Remove the file or directory on s3 or fs; `s3://` and `s3://bucket` are not permitted to be removed + + :param path: Given path + :param missing_ok: if False and the target file/directory does not exist, raise FileNotFoundError + :raises: PermissionError, FileNotFoundError + ''' + SmartPath(path).remove(missing_ok=missing_ok)
+ + +
[docs]def smart_rename( + src_path: PathLike, dst_path: PathLike, overwrite: bool = True) -> None: + ''' + Rename a file on s3 or fs. `s3://` or `s3://bucket` itself is not allowed to be moved + + :param src_path: Given source path + :param dst_path: Given destination path + :param overwrite: whether to overwrite the destination file if it exists + ''' + if smart_isdir(src_path): + raise IsADirectoryError('%r is a directory' % src_path) + src_protocol, _ = SmartPath._extract_protocol(src_path) + dst_protocol, _ = SmartPath._extract_protocol(dst_path) + if src_protocol == dst_protocol: + SmartPath(src_path).rename(dst_path, overwrite=overwrite) + return + smart_copy(src_path, dst_path, overwrite=overwrite) + smart_unlink(src_path)
+ + +
[docs]def smart_move( + src_path: PathLike, dst_path: PathLike, overwrite: bool = True) -> None: + ''' + Move a file/directory on s3 or fs. `s3://` or `s3://bucket` itself is not allowed to be moved + + :param src_path: Given source path + :param dst_path: Given destination path + :param overwrite: whether to overwrite the destination file if it exists + ''' + src_protocol, _ = SmartPath._extract_protocol(src_path) + dst_protocol, _ = SmartPath._extract_protocol(dst_path) + if src_protocol == dst_protocol: + SmartPath(src_path).rename(dst_path, overwrite=overwrite) + return + smart_sync(src_path, dst_path, followlinks=True, overwrite=overwrite) + smart_remove(src_path)
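For example (destination bucket hypothetical): a same-protocol move is a rename, while a cross-protocol move falls back to ``smart_sync`` followed by ``smart_remove``:

```python
from megfile import smart_move

smart_move('/tmp/dataset', '/data/dataset')            # same protocol: rename
smart_move('/data/dataset', 's3://my-bucket/dataset')  # cross protocol: sync + remove
```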
+ + + + + +
[docs]def smart_makedirs(path: PathLike, exist_ok: bool = False) -> None: + ''' + Create a directory if the path is on fs. + If on s3, it actually checks whether the target exists and whether the bucket has WRITE access + + :param path: Given path + :param exist_ok: if False and the target directory exists, raise FileExistsError + :raises: PermissionError, FileExistsError + ''' + SmartPath(path).makedirs(exist_ok)
+ + +
[docs]def smart_open( + path: PathLike, + mode: str = 'r', + s3_open_func: Callable[[str, str], BinaryIO] = s3_open, + encoding: Optional[str] = None, + errors: Optional[str] = None, + **options) -> IO[AnyStr]: # pytype: disable=signature-mismatch + ''' + Open a file on the path + + .. note :: + + On fs, the difference between this function and ``io.open`` is that this function creates directories automatically, instead of raising FileNotFoundError + + Currently, supported protocols are: + + 1. s3: "s3://<bucket>/<key>" + + 2. http(s): http(s) url + + 3. stdio: "stdio://-" + + 4. FS file: Besides the above protocols, other paths are considered fs paths + + Here are a few examples: :: + + >>> import cv2 + >>> import numpy as np + >>> raw = smart_open('https://ss2.bdstatic.com/70cFvnSh_Q1YnxGkpoWK1HF6hhy/it/u=2275743969,3715493841&fm=26&gp=0.jpg').read() + >>> img = cv2.imdecode(np.frombuffer(raw, np.uint8), cv2.IMREAD_ANYDEPTH | cv2.IMREAD_COLOR) + + :param path: Given path + :param mode: Mode to open the file, supports r'[rwa][tb]?\+?' + :param s3_open_func: Function used to open s3_url. The function must accept 2 necessary parameters, file path and mode + :param encoding: encoding is the name of the encoding used to decode or encode the file. This should only be used in text mode. + :param errors: errors is an optional string that specifies how encoding and decoding errors are to be handled—this cannot be used in binary mode. + :returns: File-Like object + :raises: FileNotFoundError, IsADirectoryError, ValueError + ''' + options = { + 's3_open_func': s3_open_func, + 'encoding': encoding, + 'errors': errors, + **options, + } + return SmartPath(path).open(mode, **options)
+ + +
[docs]def smart_path_join(path: PathLike, *other_paths: PathLike) -> str: + ''' + Concatenate two or more paths into a complete path + + :param path: Given path + :param other_paths: Paths to be concatenated + :returns: Concatenated complete path + + .. note :: + + For URIs, the difference between this function and ``os.path.join`` is that this function ignores the leading slash (which indicates an absolute path) in ``other_paths`` and concatenates directly. + e.g. os.path.join('s3://path', 'to', '/file') => '/file', while smart_path_join('s3://path', 'to', '/file') => 's3://path/to/file' + But for fs paths, this function behaves exactly like ``os.path.join`` + e.g. smart_path_join('/path', 'to', '/file') => '/file' + ''' + return fspath(SmartPath(path).joinpath(*other_paths))
+ + +
[docs]def smart_walk(path: PathLike, followlinks: bool = False + ) -> Iterator[Tuple[str, List[str], List[str]]]: + ''' + Generate the file names in a directory tree by walking the tree top-down. + For each directory in the tree rooted at directory path (including path itself), + it yields a 3-tuple (root, dirs, files). + + root: a string of the current path + dirs: name list of subdirectories (excluding '.' and '..' if they exist) in 'root', sorted in ascending alphabetical order + files: name list of non-directory files (links are regarded as files) in 'root', sorted in ascending alphabetical order + + If the path does not exist, return an empty generator + If the path is a file, return an empty generator + If walk() is applied to an unsupported path, raise UnsupportedError + + :param path: Given path + :raises: UnsupportedError + :returns: A 3-tuple generator + ''' + return SmartPath(path).walk(followlinks=followlinks)
+ + +
[docs]def smart_scan( + path: PathLike, missing_ok: bool = True, + followlinks: bool = False) -> Iterator[str]: + ''' + Iteratively traverse only files in given directory, in alphabetical order. + Every iteration on generator yields a path string. + + If path is a file path, yields the file only + If path is a non-existent path, return an empty generator + If path is a bucket path, return all file paths in the bucket + + :param path: Given path + :param missing_ok: If False and there's no file in the directory, raise FileNotFoundError + :raises: UnsupportedError + :returns: A file path generator + ''' + return SmartPath(path).scan(missing_ok=missing_ok, followlinks=followlinks)
+ + +
[docs]def smart_scan_stat( + path: PathLike, missing_ok: bool = True, + followlinks: bool = False) -> Iterator[FileEntry]: + ''' + Iteratively traverse only files in given directory, in alphabetical order. + Every iteration on generator yields a tuple of path string and file stat + + :param path: Given path + :param missing_ok: If False and there's no file in the directory, raise FileNotFoundError + :raises: UnsupportedError + :returns: A file path generator + ''' + return SmartPath(path).scan_stat( + missing_ok=missing_ok, followlinks=followlinks)
+ + +def _group_glob(globstr: str) -> List[str]: + ''' + Split the glob string, group the expanded patterns by protocol, and return one combined glob per protocol group. + + :param globstr: A glob string + :returns: A list of globs, grouped by protocol + ''' + glob_dict = defaultdict(list) + expanded_glob = ungloblize(globstr) + + for single_glob in expanded_glob: + protocol, _ = SmartPath._extract_protocol(single_glob) + glob_dict[protocol].append(single_glob) + + group_glob_list = [] + + for protocol, glob_list in glob_dict.items(): + group_glob_list.append(globlize(glob_list)) + return group_glob_list + + +
[docs]def smart_glob( + pathname: PathLike, recursive: bool = True, + missing_ok: bool = True) -> List[str]: + ''' + Return a list of paths matching the glob pattern, in ascending alphabetical order; the given pathname may contain shell wildcard characters + + :param pathname: A path pattern that may contain shell wildcard characters + :param recursive: If False, this function will not glob recursively + :param missing_ok: If False and the target path doesn't match any file, raise FileNotFoundError + ''' + # Split pathname, group by protocol, call glob respectively + # SmartPath(pathname).glob(recursive, missing_ok) + result = [] + group_glob_list = _group_glob(pathname) + for glob_path in group_glob_list: + for path_obj in SmartPath(glob_path).glob(pattern='', + recursive=recursive, + missing_ok=missing_ok): + result.append(path_obj.path) + return result
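For instance (paths hypothetical), a single pattern can expand to several protocols, assuming brace expansion as implemented by ``ungloblize``; each protocol group is then globbed separately:

```python
from megfile import smart_glob

for path in smart_glob('{/tmp,s3://my-bucket}/logs/*.json'):
    print(path)
```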
+ + +
[docs]def smart_iglob( + pathname: PathLike, recursive: bool = True, + missing_ok: bool = True) -> Iterator[str]: + ''' + Return an iterator of paths matching the glob pattern, in ascending alphabetical order; the given pathname may contain shell wildcard characters + + :param pathname: A path pattern that may contain shell wildcard characters + :param recursive: If False, this function will not glob recursively + :param missing_ok: If False and the target path doesn't match any file, raise FileNotFoundError + ''' + # Split pathname, group by protocol, call glob respectively + # SmartPath(pathname).glob(recursive, missing_ok) + group_glob_list = _group_glob(pathname) + for glob_path in group_glob_list: + for path_obj in SmartPath(glob_path).iglob(pattern='', + recursive=recursive, + missing_ok=missing_ok): + yield path_obj.path
+ + +
[docs]def smart_glob_stat( + pathname: PathLike, recursive: bool = True, + missing_ok: bool = True) -> Iterator[FileEntry]: + ''' + Return an iterator of FileEntry objects (path and file stat) matching the glob pattern, in ascending alphabetical order; the given pathname may contain shell wildcard characters + + :param pathname: A path pattern that may contain shell wildcard characters + :param recursive: If False, this function will not glob recursively + :param missing_ok: If False and the target path doesn't match any file, raise FileNotFoundError + ''' + # Split pathname, group by protocol, call glob respectively + # SmartPath(pathname).glob(recursive, missing_ok) + group_glob_list = _group_glob(pathname) + for glob_path in group_glob_list: + yield from SmartPath(glob_path).glob_stat( + pattern='', recursive=recursive, missing_ok=missing_ok)
+ + +
[docs]def smart_save_as(file_object: BinaryIO, path: PathLike) -> None: + '''Write the opened binary stream to the specified path; the stream won't be closed + + :param file_object: Stream to be read + :param path: Specified target path + ''' + SmartPath(path).save(file_object)
+ + +
[docs]def smart_load_from(path: PathLike) -> BinaryIO: + '''Read all binary content from the specified path into memory + + The user should close the returned BinaryIO manually + + :param path: Specified path + :returns: BinaryIO + ''' + return SmartPath(path).load()
+ + +
[docs]def smart_combine_open( + path_glob: str, mode: str = 'rb', + open_func=smart_open) -> CombineReader: + '''Open a unified reader that supports multi-file reading. + + :param path_glob: A path that may contain shell wildcard characters + :param mode: Mode to open the files, supports 'rb' + :returns: A ``CombineReader`` + ''' + file_objects = list( + open_func(path, mode) for path in sorted(smart_glob(path_glob))) + return combine(file_objects, path_glob)
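A sketch (pattern hypothetical; this assumes the returned ``CombineReader`` behaves like an ordinary file object): the matched files are sorted and read back-to-back as a single stream:

```python
from megfile.smart import smart_combine_open

reader = smart_combine_open('/tmp/parts/part-*.bin')
blob = reader.read()
reader.close()
```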
+ + +
[docs]def smart_abspath(path: PathLike): + '''Return the absolute path of given path + + :param path: Given path + :returns: Absolute path of given path + ''' + return SmartPath(path).abspath()
+ + +
[docs]def smart_realpath(path: PathLike): + '''Return the real path of given path + + :param path: Given path + :returns: Real path of given path + ''' + return SmartPath(path).realpath()
+ + +
[docs]def smart_relpath(path: PathLike, start=None): + '''Return the relative path of given path + + :param path: Given path + :param start: Given start directory + :returns: Relative path from start + ''' + return SmartPath(path).relpath(start)
+ + +
[docs]def smart_isabs(path: PathLike) -> bool: + '''Test whether a path is absolute + + :param path: Given path + :returns: True if a path is absolute, else False + ''' + return SmartPath(path).is_absolute()
+ + +
[docs]def smart_ismount(path: PathLike) -> bool: + '''Test whether a path is a mount point + + :param path: Given path + :returns: True if a path is a mount point, else False + ''' + return SmartPath(path).is_mount()
+ + +
[docs]def smart_load_content( + path: PathLike, start: Optional[int] = None, + stop: Optional[int] = None) -> bytes: + ''' + Get the content of the specified file in the byte range [start, stop) + + :param path: Specified path + :param start: start index + :param stop: stop index + :returns: bytes content in range [start, stop) + ''' + if is_s3(path): + return s3_load_content(path, start, stop) + + with smart_open(path, 'rb') as fd: + if start: + fd.seek(start) + offset = -1 + if stop is not None: + # read exactly stop - start bytes, even when start is unset + offset = stop - (start or 0) + return fd.read(offset)
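For example (path hypothetical), reading only the first 16 bytes of a file:

```python
from megfile import smart_load_content

header = smart_load_content('/tmp/archive.bin', start=0, stop=16)
assert len(header) <= 16
```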
+ + +
[docs]def smart_save_content(path: PathLike, content: bytes) -> None: + '''Save bytes content to the specified path + + :param path: Path to save content + :param content: bytes content to be saved + ''' + with smart_open(path, 'wb') as fd: + fd.write(content)
+ + +
[docs]def smart_load_text(path: PathLike) -> str: + ''' + Read the text content from the path + + :param path: Path to be read + ''' + with smart_open(path) as fd: + return fd.read()
+ + +
[docs]def smart_save_text(path: PathLike, text: str) -> None: + '''Save text to the specified path + + :param path: Path to save text + :param text: text to be saved + ''' + with smart_open(path, 'w') as fd: + fd.write(text)
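A round-trip sketch (path hypothetical):

```python
from megfile import smart_load_text, smart_save_text

smart_save_text('/tmp/notes.txt', 'hello megfile')
assert smart_load_text('/tmp/notes.txt') == 'hello megfile'
```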
+ + +
[docs]class SmartCacher(FileCacher): + cache_path = None + + def __init__( + self, path: str, cache_path: Optional[str] = None, mode: str = 'r'): + if mode not in ('r', 'w', 'a'): + raise ValueError('unacceptable mode: %r' % mode) + if mode in ('r', 'a'): + if cache_path is None: + cache_path = generate_cache_path(path) + smart_copy(path, cache_path) + self.name = path + self.mode = mode + self.cache_path = cache_path + + def _close(self): + if self.cache_path is not None and \ + os.path.exists(self.cache_path): + if self.mode in ('w', 'a'): + smart_copy(self.cache_path, self.name) + os.unlink(self.cache_path)
+ + +
[docs]def smart_cache(path, cacher=SmartCacher, **options): + '''Return a cacher that exposes the file through a local fs path + + :param path: Path to cache + :param cacher: Cacher class used for non-fs paths, default is SmartCacher + :param options: Optional arguments for the cacher + ''' + if not is_fs(path): + return cacher(path, **options) + return NullCacher(path)
+ + +
[docs]def smart_touch(path: PathLike): + '''Create a new file on the path + + :param path: Path of the file to create + ''' + with smart_open(path, 'w'): + pass
+ + +
[docs]def smart_getmd5(path: PathLike, recalculate: bool = False): + '''Get the md5 value of the file + + :param path: File path + :param recalculate: If True, calculate the md5 in real time; if False, the s3 ETag may be returned when the path is on s3 + ''' + return SmartPath(path).md5(recalculate=recalculate)
+ + +_concat_funcs = { + 's3': s3_concat, + 'sftp': sftp_concat, +} + + +def _default_concat_func(src_paths: List[PathLike], dst_path: PathLike) -> None: + length = 16 * 1024 + with smart_open(dst_path, 'wb') as dst_fd: + for src_path in src_paths: + with smart_open(src_path, 'rb') as src_fd: + while True: + buf = src_fd.read(length) + if not buf: + break + dst_fd.write(buf) + + +
[docs]def smart_concat(src_paths: List[PathLike], dst_path: PathLike) -> None: + ''' + Concatenate src_paths to dst_path + + :param src_paths: List of source paths + :param dst_path: Destination path + ''' + if not src_paths: + return + + dst_protocol, _ = SmartPath._extract_protocol(dst_path) + for src_path in src_paths: + src_protocol, _ = SmartPath._extract_protocol(src_path) + if src_protocol != dst_protocol: + concat_func = _default_concat_func + break + else: + concat_func = _concat_funcs.get(dst_protocol, _default_concat_func) + concat_func(src_paths, dst_path)
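For example (paths hypothetical): when all sources share the destination's protocol, a protocol-specific concat is used if one is registered, otherwise the byte-streaming fallback runs:

```python
from megfile import smart_concat

smart_concat(['/tmp/a.part', '/tmp/b.part'], '/tmp/merged.bin')
```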
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/megfile/smart_path.html b/_modules/megfile/smart_path.html new file mode 100644 index 00000000..f50c0b28 --- /dev/null +++ b/_modules/megfile/smart_path.html @@ -0,0 +1,299 @@ + + + + + + megfile.smart_path — megfile documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for megfile.smart_path

+from pathlib import PurePath
+from typing import Tuple, Union
+
+from megfile.lib.compat import fspath
+from megfile.lib.url import get_url_scheme
+
+from .errors import ProtocolExistsError, ProtocolNotFoundError
+from .interfaces import BasePath, BaseURIPath, PathLike
+
+
+def _bind_function(name):
+
+    def smart_method(self, *args, **kwargs):
+        return getattr(self.pathlike, name)(*args, **kwargs)
+
+    smart_method.__name__ = name
+
+    return smart_method
+
+
+def _bind_property(name):
+
+    @property
+    def smart_property(self):
+        return getattr(self.pathlike, name)
+
+    return smart_property
+
+
+
[docs]class SmartPath(BasePath): + _registered_protocols = dict() + + def __init__(self, path: Union[PathLike, int], *other_paths: PathLike): + self.path = str(path) if not isinstance(path, int) else path + pathlike = path + if not isinstance(pathlike, BaseURIPath): + pathlike = self._create_pathlike(path) + if len(other_paths) > 0: + pathlike = pathlike.joinpath(*other_paths) + self.path = str(pathlike) + self.pathlike = pathlike + + @staticmethod + def _extract_protocol(path: Union[PathLike, int] + ) -> Tuple[str, Union[str, int]]: + if isinstance(path, int): + protocol = "file" + path_without_protocol = path + elif isinstance(path, str): + protocol = get_url_scheme(path) + if protocol == "": + protocol = "file" + path_without_protocol = path + else: + path_without_protocol = path[len(protocol) + 3:] + elif isinstance(path, (BaseURIPath, SmartPath)): + protocol = path.protocol + path_without_protocol = str(path) + elif isinstance(path, (PurePath, BasePath)): + protocol, path_without_protocol = SmartPath._extract_protocol( + fspath(path)) + else: + raise ProtocolNotFoundError('protocol not found: %r' % path) + return protocol, path_without_protocol + + @classmethod + def _create_pathlike(cls, path: Union[PathLike, int]) -> BasePath: + protocol, _ = cls._extract_protocol(path) + if protocol.startswith('s3+'): + protocol = 's3' + if protocol not in cls._registered_protocols: + raise ProtocolNotFoundError( + 'protocol %r not found: %r' % (protocol, path)) + path_class = cls._registered_protocols[protocol] + return path_class(path) + +
[docs] @classmethod + def register(cls, path_class, override_ok: bool = False): + protocol = path_class.protocol + if protocol in cls._registered_protocols and not override_ok: + raise ProtocolExistsError('protocol already exists: %r' % protocol) + cls._registered_protocols[protocol] = path_class + return path_class
+ + symlink = _bind_function('symlink') + symlink_to = _bind_function('symlink_to') + hardlink_to = _bind_function('hardlink_to') + readlink = _bind_function('readlink') + is_dir = _bind_function('is_dir') + is_file = _bind_function('is_file') + is_symlink = _bind_function('is_symlink') + access = _bind_function('access') + exists = _bind_function('exists') + listdir = _bind_function('listdir') + scandir = _bind_function('scandir') + getsize = _bind_function('getsize') + getmtime = _bind_function('getmtime') + stat = _bind_function('stat') + lstat = _bind_function('lstat') + remove = _bind_function('remove') + rename = _bind_function('rename') + replace = _bind_function('replace') + unlink = _bind_function('unlink') + mkdir = _bind_function('mkdir') + open = _bind_function('open') + touch = _bind_function('touch') + walk = _bind_function('walk') + scan = _bind_function('scan') + scan_stat = _bind_function('scan_stat') + glob = _bind_function('glob') + iglob = _bind_function('iglob') + glob_stat = _bind_function('glob_stat') + load = _bind_function('load') + save = _bind_function('save') + joinpath = _bind_function('joinpath') + abspath = _bind_function('abspath') + realpath = _bind_function('realpath') + relpath = _bind_function('relpath') + is_absolute = _bind_function('is_absolute') + is_mount = _bind_function('is_mount') + md5 = _bind_function('md5') + + @property + def protocol(self) -> str: + return self.pathlike.protocol # pytype: disable=attribute-error + +
[docs] @classmethod + def from_uri(cls, path: str): + return cls(path)
+ + as_uri = _bind_function('as_uri') + as_posix = _bind_function('as_posix') + __lt__ = _bind_function('__lt__') + __le__ = _bind_function('__le__') + __gt__ = _bind_function('__gt__') + __ge__ = _bind_function('__ge__') + __fspath__ = _bind_function('__fspath__') + __truediv__ = _bind_function('__truediv__') + + joinpath = _bind_function('joinpath') + is_reserved = _bind_function('is_reserved') + match = _bind_function('match') + relative_to = _bind_function('relative_to') + with_name = _bind_function('with_name') + with_suffix = _bind_function('with_suffix') + with_stem = _bind_function('with_stem') + is_absolute = _bind_function('is_absolute') + is_mount = _bind_function('is_mount') + abspath = _bind_function('abspath') + realpath = _bind_function('realpath') + relpath = _bind_function('relpath') + iterdir = _bind_function('iterdir') + cwd = _bind_function('cwd') + home = _bind_function('home') + expanduser = _bind_function('expanduser') + resolve = _bind_function('resolve') + chmod = _bind_function('chmod') + lchmod = _bind_function('lchmod') + group = _bind_function('group') + is_socket = _bind_function('is_socket') + is_fifo = _bind_function('is_fifo') + is_block_device = _bind_function('is_block_device') + is_char_device = _bind_function('is_char_device') + owner = _bind_function('owner') + absolute = _bind_function('absolute') + rmdir = _bind_function('rmdir') + is_relative_to = _bind_function('is_relative_to') + read_bytes = _bind_function('read_bytes') + read_text = _bind_function('read_text') + rglob = _bind_function('rglob') + samefile = _bind_function('samefile') + write_bytes = _bind_function('write_bytes') + write_text = _bind_function('write_text') + utime = _bind_function('utime') + + drive = _bind_property('drive') + root = _bind_property('root') + anchor = _bind_property('anchor') + parts = _bind_property('parts') + parents = _bind_property('parents') + parent = _bind_property('parent') + name = _bind_property('name') + suffix = _bind_property('suffix') + suffixes = _bind_property('suffixes') + stem = _bind_property('stem')
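A minimal registration sketch (the `mem` protocol and `MemPath` class are hypothetical; a real implementation must provide the methods bound above):

```python
from megfile.interfaces import BaseURIPath
from megfile.smart_path import SmartPath

@SmartPath.register
class MemPath(BaseURIPath):
    protocol = 'mem'
    # ... implement open/exists/stat/... as required by the bound methods
```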
+ + +
[docs]def get_traditional_path(path: str): + return fspath(SmartPath(path).path)
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/megfile/stdio.html b/_modules/megfile/stdio.html new file mode 100644 index 00000000..757f0312 --- /dev/null +++ b/_modules/megfile/stdio.html @@ -0,0 +1,138 @@ + + + + + + megfile.stdio — megfile documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for megfile.stdio

+from typing import IO, AnyStr, Optional
+
+from megfile.interfaces import PathLike
+from megfile.stdio_path import StdioPath, is_stdio
+
+__all__ = [
+    'is_stdio',
+    'stdio_open',
+]
+
+
+
[docs]def stdio_open( + path: PathLike, + mode: str = 'rb', + encoding: Optional[str] = None, + errors: Optional[str] = None, + **kwargs) -> IO[AnyStr]: # pytype: disable=signature-mismatch + '''Used to read or write stdio + + .. note :: + + Essentially invoke sys.stdin.buffer | sys.stdout.buffer to read or write + + :param path: Given path + :param mode: Only supports 'rb' and 'wb' now + :return: STDReader, STDWriter + ''' + return StdioPath(path).open(mode, encoding, errors)
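A filter-style sketch that copies stdin to stdout in binary mode (this assumes the returned reader/writer objects are used like ordinary file objects):

```python
from megfile.stdio import stdio_open

stdin = stdio_open('stdio://0', 'rb')
stdout = stdio_open('stdio://1', 'wb')
stdout.write(stdin.read())
```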
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/megfile/stdio_path.html b/_modules/megfile/stdio_path.html new file mode 100644 index 00000000..cfcfb262 --- /dev/null +++ b/_modules/megfile/stdio_path.html @@ -0,0 +1,207 @@ + + + + + + megfile.stdio_path — megfile documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for megfile.stdio_path

+import io
+from typing import IO, AnyStr, Optional, Union
+
+from megfile.interfaces import BaseURIPath, PathLike
+from megfile.lib.compat import fspath
+from megfile.lib.stdio_handler import STDReader, STDWriter
+from megfile.lib.url import get_url_scheme
+from megfile.smart_path import SmartPath
+from megfile.utils import get_binary_mode
+
+__all__ = [
+    "StdioPath",
+    "is_stdio",
+]
+
+
+
[docs]def is_stdio(path: PathLike) -> bool: + '''stdio scheme definition: stdio://- + + .. note :: + + Only tests the protocol + + :param path: Path to be tested + :returns: True if the path is a stdio url, else False + ''' + + path = fspath(path) + if not isinstance(path, str) or not path.startswith('stdio://'): + return False + + scheme = get_url_scheme(path) + return scheme == 'stdio'
+ + +
[docs]@SmartPath.register +class StdioPath(BaseURIPath): + + protocol = "stdio" + + def _open(self, mode: str = 'rb') -> Union[STDReader, STDWriter]: + '''Used to read or write stdio + + .. note :: + + Essentially invoke sys.stdin.buffer | sys.stdout.buffer to read or write + + :param path: stdio path, stdio://- or stdio://0 stdio://1 stdio://2 + :param mode: Only supports 'rb' and 'wb' now + :return: STDReader, STDWriter + ''' + + if mode not in ('rb', 'wb', 'rt', 'wt', 'r', 'w'): + raise ValueError('unacceptable mode: %r' % mode) + + mode = get_binary_mode(mode) + + if self.path_with_protocol not in ('stdio://-', 'stdio://0', + 'stdio://1', 'stdio://2'): + raise ValueError('unacceptable path: %r' % self.path_with_protocol) + + if self.path_with_protocol in ('stdio://1', + 'stdio://2') and 'r' in mode: + raise ValueError( + 'cannot open for reading: %r' % self.path_with_protocol) + + if self.path_with_protocol == 'stdio://0' and 'w' in mode: + raise ValueError( + 'cannot open for writing: %r' % self.path_with_protocol) + + if 'r' in mode: + return STDReader(mode) + return STDWriter(self.path_with_protocol, mode) + +
[docs] def open( + self, + mode: str = 'rb', + encoding: Optional[str] = None, + errors: Optional[str] = None, + **kwargs) -> IO[AnyStr]: # pytype: disable=signature-mismatch + '''Used to read or write stdio + + .. note :: + + Essentially invoke sys.stdin.buffer | sys.stdout.buffer to read or write + + :param mode: Only supports 'rb' and 'wb' now + :return: STDReader, STDWriter + ''' + fileobj = self._open(mode) + + if 'b' not in mode: + fileobj = io.TextIOWrapper( + fileobj, encoding=encoding, errors=errors) # type: ignore + fileobj.mode = mode + + return fileobj # type: ignore
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_sources/advanced.rst.txt b/_sources/advanced.rst.txt index c444b414..0a72150d 100644 --- a/_sources/advanced.rst.txt +++ b/_sources/advanced.rst.txt @@ -1,8 +1,11 @@ Advanced User Guide =================== -.. toctree:: - :maxdepth: 2 +Define Custom Protocol +---------------------- +.. mdinclude:: ./advanced/custom_protocol.md + +Glob Pattern +---------------------- +.. mdinclude:: ./advanced/glob.md - advanced/custom_protocol - advanced/glob diff --git a/_sources/advanced/custom_protocol.md.txt b/_sources/advanced/custom_protocol.md.txt index 0dca9b04..1c9ef2d8 100644 --- a/_sources/advanced/custom_protocol.md.txt +++ b/_sources/advanced/custom_protocol.md.txt @@ -1,6 +1,3 @@ -Define Custom Protocol -====================== - Megfile support custom protocols. You can define your own protocol class like this: ``` diff --git a/_sources/advanced/glob.md.txt b/_sources/advanced/glob.md.txt index 7b181151..b8a90eb0 100644 --- a/_sources/advanced/glob.md.txt +++ b/_sources/advanced/glob.md.txt @@ -1,6 +1,3 @@ -Glob Pattern -============= - The glob module finds all the pathnames matching a specified pattern according to the rules. ### Patterns are Unix shell style: diff --git a/_sources/configuration.rst.txt b/_sources/configuration.rst.txt index 050b4ab9..1cdc0874 100644 --- a/_sources/configuration.rst.txt +++ b/_sources/configuration.rst.txt @@ -1,11 +1,22 @@ Configuration ============= -.. toctree:: - :maxdepth: 2 - - configuration/common - configuration/s3 - configuration/hdfs - configuration/sftp - configuration/http +Common Configuration +---------------- +.. mdinclude:: ./configuration/common.md + +S3 Configuration +---------------- +.. mdinclude:: ./configuration/s3.md + +Hdfs Configuration +------------------ +.. mdinclude:: ./configuration/hdfs.md + +Sftp Configuration +------------------ +.. mdinclude:: ./configuration/sftp.md + +HTTP Configuration +------------------ +.. mdinclude:: ./configuration/http.md diff --git a/_sources/configuration/common.md.txt b/_sources/configuration/common.md.txt index 054e02c0..b78dd3b3 100644 --- a/_sources/configuration/common.md.txt +++ b/_sources/configuration/common.md.txt @@ -1,16 +1,8 @@ -Common Configuration -==================== - ### Environment configurations -- `MEGFILE_BLOCK_SIZE`: default block size of read and write operate, unit is bytes, default is `8MB` -- `MEGFILE_MIN_BLOCK_SIZE`: - - min write block size, unit is bytes, default is equal to `MEGFILE_BLOCK_SIZE` - - If you need write big size file, you should set `MEGFILE_MIN_BLOCK_SIZE` to a big value. 
-- `MEGFILE_MAX_BLOCK_SIZE`: max write block size, unit is bytes, default is `128MB` -- `MEGFILE_MAX_BUFFER_SIZE`: max read buffer size, unit is bytes, default is `128MB` +- `MEGFILE_BLOCK_SIZE`: block size in some `open` func, like `http_open`, `s3_open`, default is `8MB` +- `MEGFILE_MAX_BLOCK_SIZE`: max block size in some `open` func, like `http_open`, `s3_open`, default is `block size * 16` +- `MEGFILE_MAX_BUFFER_SIZE`: max buffer size in some `open` func, like `http_open`, `s3_open`, default is `block size * 16` - `MEGFILE_MAX_WORKERS`: max threads will be used, default is `32` -- `MEGFILE_BLOCK_CAPACITY`: - - default cache capacity of block, default is `16` - - if `MEGFILE_MAX_BUFFER_SIZE` and `MEGFILE_BLOCK_CAPACITY` are both set, `MEGFILE_BLOCK_CAPACITY` will be ignored +- `MEGFILE_BLOCK_CAPACITY`: default cache capacity of block and concurrency, default is `16` - `MEGFILE_S3_CLIENT_CACHE_MODE`: s3 client cache mode, `thread_local` or `process_local`, default is `thread_local`, **it's a experimental feature.** - `MEGFILE_MAX_RETRY_TIMES`: default max retry times when catch error which may fix by retry, default is `10` diff --git a/_sources/configuration/hdfs.md.txt b/_sources/configuration/hdfs.md.txt index 6baeda55..ddf59cf3 100644 --- a/_sources/configuration/hdfs.md.txt +++ b/_sources/configuration/hdfs.md.txt @@ -1,6 +1,3 @@ -Hdfs Configuration -================== - Please use command `pip install 'megfile[hdfs]'` to install hdfs requirements. You can use environments and configuration file for configuration, and priority is that environment variables take precedence over configuration file. diff --git a/_sources/configuration/http.md.txt b/_sources/configuration/http.md.txt index add7e7d7..3e33231f 100644 --- a/_sources/configuration/http.md.txt +++ b/_sources/configuration/http.md.txt @@ -1,6 +1,3 @@ -HTTP Configuration -================== - ### HTTP Environment configurations - `MEGFILE_HTTP_MAX_RETRY_TIMES`: http request max retry times when catch error which may fix by retry, default is `10` diff --git a/_sources/configuration/s3.md.txt b/_sources/configuration/s3.md.txt index c004b628..243d810a 100644 --- a/_sources/configuration/s3.md.txt +++ b/_sources/configuration/s3.md.txt @@ -1,6 +1,3 @@ -S3 Configuration -================ - Before using `megfile` to access files on s3, you need to set up authentication credentials for your s3 account. In addition to [boto3](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html), `megfile` also supports some additional configuration items, and the following describes some common configurations. You can use environments and configuration file for configuration, and priority is that environment variables take precedence over configuration file. diff --git a/_sources/configuration/sftp.md.txt b/_sources/configuration/sftp.md.txt index a9165bde..f26e785e 100644 --- a/_sources/configuration/sftp.md.txt +++ b/_sources/configuration/sftp.md.txt @@ -1,6 +1,3 @@ -Sftp Configuration -================== - Sftp is a little different from other protocols, because you can set some configurations in path(`sftp://[username[:password]@]hostname[:port]/file_path`). **But we suggest you not to use password in path.** You can use environments setting configuration, and priority is that path settings take precedence over environments. 
### Use environments diff --git a/_sources/megfile.hdfs.rst.txt b/_sources/megfile.hdfs.rst.txt index a8cb7f65..17de4c68 100644 --- a/_sources/megfile.hdfs.rst.txt +++ b/_sources/megfile.hdfs.rst.txt @@ -1,5 +1,5 @@ megfile.hdfs module -=================== +================= .. automodule:: megfile.hdfs :members: diff --git a/_sources/megfile.hdfs_path.rst.txt b/_sources/megfile.hdfs_path.rst.txt index d0fc4c20..56846ac6 100644 --- a/_sources/megfile.hdfs_path.rst.txt +++ b/_sources/megfile.hdfs_path.rst.txt @@ -1,5 +1,5 @@ megfile.hdfs_path module -======================== +====================== .. automodule:: megfile.hdfs_path :members: HdfsPath diff --git a/_sources/path_format.md.txt b/_sources/path_format.md.txt deleted file mode 100644 index 9c06142a..00000000 --- a/_sources/path_format.md.txt +++ /dev/null @@ -1,49 +0,0 @@ -Path Format -=========== - -**In path, brackets** `[]` **means this part is optional.** - -### fs -- An integer file descriptor of the file, e.g. `0` -- Absolute or relative file path, e.g. `/root`, `root` -- File path with protocol, e.g. `file://root` - -### s3 -- `s3[+profile_name]://bucket/key` - -### http -- Http uri, e.g. `https://megvii-research.github.io/megfile/` - -##### set cookies, headers and other parameters -You can set `cookies`, `headers` and other parameters of `requests.request` in `HttpPath`'s property `request_kwargs`, like: - -``` -from megfile import HttpPath, smart_copy - -url = HttpPath('https://megvii-research.github.io/megfile/') -url.request_kwargs = { - 'cookies': {'key': 'value'}, - 'headers': {'key': 'value'}, -} -smart_copy(url, 'index.html') -``` - -### stdio -- `stdio://-` -- `stdio://0` -- `stdio://1` -- `stdio://2` - -### sftp - -Relative path will be assumed relative to the directory that is setted by sftp server. - -- Absolute path: `sftp://[username[:password]@]hostname[:port]//file_path` -- Relative path: `sftp://[username[:password]@]hostname[:port]/file_path` - -### hdfs - -If root is relative or unset, the relative path will be assumed relative to the user’s home directory. - -- Absolute path: `hdfs[+profile_name]:///path/to/file` -- Relative path: `hdfs[+profile_name]://path/to/file` diff --git a/_sources/path_format.rst.txt b/_sources/path_format.rst.txt new file mode 100644 index 00000000..5b37aac0 --- /dev/null +++ b/_sources/path_format.rst.txt @@ -0,0 +1,4 @@ +Path Format +=========== + +.. mdinclude:: ./path_format.md diff --git a/_static/_sphinx_javascript_frameworks_compat.js b/_static/_sphinx_javascript_frameworks_compat.js index 81415803..8549469d 100644 --- a/_static/_sphinx_javascript_frameworks_compat.js +++ b/_static/_sphinx_javascript_frameworks_compat.js @@ -1,9 +1,20 @@ -/* Compatability shim for jQuery and underscores.js. +/* + * _sphinx_javascript_frameworks_compat.js + * ~~~~~~~~~~ + * + * Compatability shim for jQuery and underscores.js. + * + * WILL BE REMOVED IN Sphinx 6.0 + * xref RemovedInSphinx60Warning * - * Copyright Sphinx contributors - * Released under the two clause BSD licence */ +/** + * select a different prefix for underscore + */ +$u = _.noConflict(); + + /** * small helper function to urldecode strings * diff --git a/_static/basic.css b/_static/basic.css index f316efcb..08896771 100644 --- a/_static/basic.css +++ b/_static/basic.css @@ -4,7 +4,7 @@ * * Sphinx stylesheet -- basic theme. * - * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS. + * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. 
* */ @@ -237,10 +237,6 @@ a.headerlink { visibility: hidden; } -a:visited { - color: #551A8B; -} - h1:hover > a.headerlink, h2:hover > a.headerlink, h3:hover > a.headerlink, @@ -328,17 +324,17 @@ aside.sidebar { p.sidebar-title { font-weight: bold; } - nav.contents, aside.topic, + div.admonition, div.topic, blockquote { clear: left; } /* -- topics ---------------------------------------------------------------- */ - nav.contents, aside.topic, + div.topic { border: 1px solid #ccc; padding: 7px; @@ -379,6 +375,7 @@ div.sidebar > :last-child, aside.sidebar > :last-child, nav.contents > :last-child, aside.topic > :last-child, + div.topic > :last-child, div.admonition > :last-child { margin-bottom: 0; @@ -388,6 +385,7 @@ div.sidebar::after, aside.sidebar::after, nav.contents::after, aside.topic::after, + div.topic::after, div.admonition::after, blockquote::after { @@ -613,6 +611,25 @@ ul.simple p { margin-bottom: 0; } +/* Docutils 0.17 and older (footnotes & citations) */ +dl.footnote > dt, +dl.citation > dt { + float: left; + margin-right: 0.5em; +} + +dl.footnote > dd, +dl.citation > dd { + margin-bottom: 0em; +} + +dl.footnote > dd:after, +dl.citation > dd:after { + content: ""; + clear: both; +} + +/* Docutils 0.18+ (footnotes & citations) */ aside.footnote > span, div.citation > span { float: left; @@ -637,6 +654,8 @@ div.citation > p:last-of-type:after { clear: both; } +/* Footnotes & citations ends */ + dl.field-list { display: grid; grid-template-columns: fit-content(30%) auto; @@ -649,6 +668,10 @@ dl.field-list > dt { padding-right: 5px; } +dl.field-list > dt:after { + content: ":"; +} + dl.field-list > dd { padding-left: 0.5em; margin-top: 0em; @@ -674,16 +697,6 @@ dd { margin-left: 30px; } -.sig dd { - margin-top: 0px; - margin-bottom: 0px; -} - -.sig dl { - margin-top: 0px; - margin-bottom: 0px; -} - dl > dd:last-child, dl > dd:last-child > :last-child { margin-bottom: 0; @@ -752,14 +765,6 @@ abbr, acronym { cursor: help; } -.translated { - background-color: rgba(207, 255, 207, 0.2) -} - -.untranslated { - background-color: rgba(255, 207, 207, 0.2) -} - /* -- code displays --------------------------------------------------------- */ pre { diff --git a/_static/doctools.js b/_static/doctools.js index 4d67807d..c3db08d1 100644 --- a/_static/doctools.js +++ b/_static/doctools.js @@ -4,19 +4,12 @@ * * Base JavaScript utilities for all Sphinx HTML documentation. * - * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS. + * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. * */ "use strict"; -const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ - "TEXTAREA", - "INPUT", - "SELECT", - "BUTTON", -]); - const _ready = (callback) => { if (document.readyState !== "loading") { callback(); @@ -25,11 +18,73 @@ const _ready = (callback) => { } }; +/** + * highlight a given string on a node by wrapping it in + * span elements with the given class name. 
+ */ +const _highlight = (node, addItems, text, className) => { + if (node.nodeType === Node.TEXT_NODE) { + const val = node.nodeValue; + const parent = node.parentNode; + const pos = val.toLowerCase().indexOf(text); + if ( + pos >= 0 && + !parent.classList.contains(className) && + !parent.classList.contains("nohighlight") + ) { + let span; + + const closestNode = parent.closest("body, svg, foreignObject"); + const isInSVG = closestNode && closestNode.matches("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.classList.add(className); + } + + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + parent.insertBefore( + span, + parent.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling + ) + ); + node.nodeValue = val.substr(0, pos); + + if (isInSVG) { + const rect = document.createElementNS( + "http://www.w3.org/2000/svg", + "rect" + ); + const bbox = parent.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute("class", className); + addItems.push({ parent: parent, target: rect }); + } + } + } else if (node.matches && !node.matches("button, select, textarea")) { + node.childNodes.forEach((el) => _highlight(el, addItems, text, className)); + } +}; +const _highlightText = (thisNode, text, className) => { + let addItems = []; + _highlight(thisNode, addItems, text, className); + addItems.forEach((obj) => + obj.parent.insertAdjacentElement("beforebegin", obj.target) + ); +}; + /** * Small JavaScript module for the documentation. */ const Documentation = { init: () => { + Documentation.highlightSearchWords(); Documentation.initDomainIndexTable(); Documentation.initOnKeyListeners(); }, @@ -71,6 +126,51 @@ const Documentation = { Documentation.LOCALE = catalog.locale; }, + /** + * highlight the search words provided in the url in the text + */ + highlightSearchWords: () => { + const highlight = + new URLSearchParams(window.location.search).get("highlight") || ""; + const terms = highlight.toLowerCase().split(/\s+/).filter(x => x); + if (terms.length === 0) return; // nothing to do + + // There should never be more than one element matching "div.body" + const divBody = document.querySelectorAll("div.body"); + const body = divBody.length ? 
divBody[0] : document.querySelector("body"); + window.setTimeout(() => { + terms.forEach((term) => _highlightText(body, term, "highlighted")); + }, 10); + + const searchBox = document.getElementById("searchbox"); + if (searchBox === null) return; + searchBox.appendChild( + document + .createRange() + .createContextualFragment( + '<p class="highlight-link">' + + '<a href="javascript:Documentation.hideSearchWords()">' + + Documentation.gettext("Hide Search Matches") + + "</a></p>" + ) + ); + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords: () => { + document + .querySelectorAll("#searchbox .highlight-link") + .forEach((el) => el.remove()); + document + .querySelectorAll("span.highlighted") + .forEach((el) => el.classList.remove("highlighted")); + const url = new URL(window.location); + url.searchParams.delete("highlight"); + window.history.replaceState({}, "", url); + }, + /** * helper function to focus on search bar */ @@ -110,11 +210,15 @@ const Documentation = { ) return; + const blacklistedElements = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", + ]); document.addEventListener("keydown", (event) => { - // bail for input elements - if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; - // bail with special keys - if (event.altKey || event.ctrlKey || event.metaKey) return; + if (blacklistedElements.has(document.activeElement.tagName)) return; // bail for input elements + if (event.altKey || event.ctrlKey || event.metaKey) return; // bail with special keys if (!event.shiftKey) { switch (event.key) { @@ -136,6 +240,10 @@ const Documentation = { event.preventDefault(); } break; + case "Escape": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.hideSearchWords(); + event.preventDefault(); } } diff --git a/_static/documentation_options.js b/_static/documentation_options.js index 7e4c114f..a750e4d5 100644 --- a/_static/documentation_options.js +++ b/_static/documentation_options.js @@ -1,4 +1,5 @@ -const DOCUMENTATION_OPTIONS = { +var DOCUMENTATION_OPTIONS = { + URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), VERSION: '', LANGUAGE: 'en', COLLAPSE_INDEX: false, @@ -9,5 +10,5 @@ const DOCUMENTATION_OPTIONS = { SOURCELINK_SUFFIX: '.txt', NAVIGATION_WITH_KEYS: false, SHOW_SEARCH_SUMMARY: true, - ENABLE_SEARCH_SHORTCUTS: true, + ENABLE_SEARCH_SHORTCUTS: false, }; \ No newline at end of file diff --git a/_static/jquery-3.6.0.js b/_static/jquery-3.6.0.js new file mode 100644 index 00000000..fc6c299b --- /dev/null +++ b/_static/jquery-3.6.0.js @@ -0,0 +1,10881 @@ +/*! + * jQuery JavaScript Library v3.6.0 + * https://jquery.com/ + * + * Includes Sizzle.js + * https://sizzlejs.com/ + * + * Copyright OpenJS Foundation and other contributors + * Released under the MIT license + * https://jquery.org/license + * + * Date: 2021-03-02T17:08Z + */ +( function( global, factory ) { + + "use strict"; + + if ( typeof module === "object" && typeof module.exports === "object" ) { + + // For CommonJS and CommonJS-like environments where a proper `window` + // is present, execute the factory and get jQuery. + // For environments that do not have a `window` with a `document` + // (such as Node.js), expose a factory as module.exports. + // This accentuates the need for the creation of a real `window`. + // e.g. var jQuery = require("jquery")(window); + // See ticket #14549 for more info. + module.exports = global.document ?
+ factory( global, true ) : + function( w ) { + if ( !w.document ) { + throw new Error( "jQuery requires a window with a document" ); + } + return factory( w ); + }; + } else { + factory( global ); + } + +// Pass this if window is not defined yet +} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { + +// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 +// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode +// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common +// enough that all such attempts are guarded in a try block. +"use strict"; + +var arr = []; + +var getProto = Object.getPrototypeOf; + +var slice = arr.slice; + +var flat = arr.flat ? function( array ) { + return arr.flat.call( array ); +} : function( array ) { + return arr.concat.apply( [], array ); +}; + + +var push = arr.push; + +var indexOf = arr.indexOf; + +var class2type = {}; + +var toString = class2type.toString; + +var hasOwn = class2type.hasOwnProperty; + +var fnToString = hasOwn.toString; + +var ObjectFunctionString = fnToString.call( Object ); + +var support = {}; + +var isFunction = function isFunction( obj ) { + + // Support: Chrome <=57, Firefox <=52 + // In some browsers, typeof returns "function" for HTML elements + // (i.e., `typeof document.createElement( "object" ) === "function"`). + // We don't want to classify *any* DOM node as a function. + // Support: QtWeb <=3.8.5, WebKit <=534.34, wkhtmltopdf tool <=0.12.5 + // Plus for old WebKit, typeof returns "function" for HTML collections + // (e.g., `typeof document.getElementsByTagName("div") === "function"`). (gh-4756) + return typeof obj === "function" && typeof obj.nodeType !== "number" && + typeof obj.item !== "function"; + }; + + +var isWindow = function isWindow( obj ) { + return obj != null && obj === obj.window; + }; + + +var document = window.document; + + + + var preservedScriptAttributes = { + type: true, + src: true, + nonce: true, + noModule: true + }; + + function DOMEval( code, node, doc ) { + doc = doc || document; + + var i, val, + script = doc.createElement( "script" ); + + script.text = code; + if ( node ) { + for ( i in preservedScriptAttributes ) { + + // Support: Firefox 64+, Edge 18+ + // Some browsers don't support the "nonce" property on scripts. + // On the other hand, just using `getAttribute` is not enough as + // the `nonce` attribute is reset to an empty string whenever it + // becomes browsing-context connected. + // See https://github.com/whatwg/html/issues/2369 + // See https://html.spec.whatwg.org/#nonce-attributes + // The `node.getAttribute` check was added for the sake of + // `jQuery.globalEval` so that it can fake a nonce-containing node + // via an object. + val = node[ i ] || node.getAttribute && node.getAttribute( i ); + if ( val ) { + script.setAttribute( i, val ); + } + } + } + doc.head.appendChild( script ).parentNode.removeChild( script ); + } + + +function toType( obj ) { + if ( obj == null ) { + return obj + ""; + } + + // Support: Android <=2.3 only (functionish RegExp) + return typeof obj === "object" || typeof obj === "function" ? 
+ class2type[ toString.call( obj ) ] || "object" : + typeof obj; +} +/* global Symbol */ +// Defining this global in .eslintrc.json would create a danger of using the global +// unguarded in another place, it seems safer to define global only for this module + + + +var + version = "3.6.0", + + // Define a local copy of jQuery + jQuery = function( selector, context ) { + + // The jQuery object is actually just the init constructor 'enhanced' + // Need init if jQuery is called (just allow error to be thrown if not included) + return new jQuery.fn.init( selector, context ); + }; + +jQuery.fn = jQuery.prototype = { + + // The current version of jQuery being used + jquery: version, + + constructor: jQuery, + + // The default length of a jQuery object is 0 + length: 0, + + toArray: function() { + return slice.call( this ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + + // Return all the elements in a clean array + if ( num == null ) { + return slice.call( this ); + } + + // Return just the one element from the set + return num < 0 ? this[ num + this.length ] : this[ num ]; + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems ) { + + // Build a new jQuery matched element set + var ret = jQuery.merge( this.constructor(), elems ); + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. + each: function( callback ) { + return jQuery.each( this, callback ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map( this, function( elem, i ) { + return callback.call( elem, i, elem ); + } ) ); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ) ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + even: function() { + return this.pushStack( jQuery.grep( this, function( _elem, i ) { + return ( i + 1 ) % 2; + } ) ); + }, + + odd: function() { + return this.pushStack( jQuery.grep( this, function( _elem, i ) { + return i % 2; + } ) ); + }, + + eq: function( i ) { + var len = this.length, + j = +i + ( i < 0 ? len : 0 ); + return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. 
+ push: push, + sort: arr.sort, + splice: arr.splice +}; + +jQuery.extend = jQuery.fn.extend = function() { + var options, name, src, copy, copyIsArray, clone, + target = arguments[ 0 ] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // Skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !isFunction( target ) ) { + target = {}; + } + + // Extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + + // Only deal with non-null/undefined values + if ( ( options = arguments[ i ] ) != null ) { + + // Extend the base object + for ( name in options ) { + copy = options[ name ]; + + // Prevent Object.prototype pollution + // Prevent never-ending loop + if ( name === "__proto__" || target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject( copy ) || + ( copyIsArray = Array.isArray( copy ) ) ) ) { + src = target[ name ]; + + // Ensure proper type for the source value + if ( copyIsArray && !Array.isArray( src ) ) { + clone = []; + } else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) { + clone = {}; + } else { + clone = src; + } + copyIsArray = false; + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend( { + + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new Error( msg ); + }, + + noop: function() {}, + + isPlainObject: function( obj ) { + var proto, Ctor; + + // Detect obvious negatives + // Use toString instead of jQuery.type to catch host objects + if ( !obj || toString.call( obj ) !== "[object Object]" ) { + return false; + } + + proto = getProto( obj ); + + // Objects with no prototype (e.g., `Object.create( null )`) are plain + if ( !proto ) { + return true; + } + + // Objects with prototype are plain iff they were constructed by a global Object function + Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; + return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; + }, + + isEmptyObject: function( obj ) { + var name; + + for ( name in obj ) { + return false; + } + return true; + }, + + // Evaluates a script in a provided context; falls back to the global one + // if not specified. 
+ globalEval: function( code, options, doc ) { + DOMEval( code, { nonce: options && options.nonce }, doc ); + }, + + each: function( obj, callback ) { + var length, i = 0; + + if ( isArrayLike( obj ) ) { + length = obj.length; + for ( ; i < length; i++ ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } else { + for ( i in obj ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } + + return obj; + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArrayLike( Object( arr ) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? + [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + return arr == null ? -1 : indexOf.call( arr, elem, i ); + }, + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + for ( ; j < len; j++ ) { + first[ i++ ] = second[ j ]; + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var length, value, + i = 0, + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArrayLike( elems ) ) { + length = elems.length; + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + + // Go through every key on the object, + } else { + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return flat( ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. + support: support +} ); + +if ( typeof Symbol === "function" ) { + jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; +} + +// Populate the class2type map +jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), + function( _i, name ) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); + } ); + +function isArrayLike( obj ) { + + // Support: real iOS 8.2 only (not reproducible in simulator) + // `in` check used to prevent JIT error (gh-2145) + // hasOwn isn't used here due to false negatives + // regarding Nodelist length in IE + var length = !!obj && "length" in obj && obj.length, + type = toType( obj ); + + if ( isFunction( obj ) || isWindow( obj ) ) { + return false; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; +} +var Sizzle = +/*! 
+ * Sizzle CSS Selector Engine v2.3.6 + * https://sizzlejs.com/ + * + * Copyright JS Foundation and other contributors + * Released under the MIT license + * https://js.foundation/ + * + * Date: 2021-02-16 + */ +( function( window ) { +var i, + support, + Expr, + getText, + isXML, + tokenize, + compile, + select, + outermostContext, + sortInput, + hasDuplicate, + + // Local document vars + setDocument, + document, + docElem, + documentIsHTML, + rbuggyQSA, + rbuggyMatches, + matches, + contains, + + // Instance-specific data + expando = "sizzle" + 1 * new Date(), + preferredDoc = window.document, + dirruns = 0, + done = 0, + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + nonnativeSelectorCache = createCache(), + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + } + return 0; + }, + + // Instance methods + hasOwn = ( {} ).hasOwnProperty, + arr = [], + pop = arr.pop, + pushNative = arr.push, + push = arr.push, + slice = arr.slice, + + // Use a stripped-down indexOf as it's faster than native + // https://jsperf.com/thor-indexof-vs-for/5 + indexOf = function( list, elem ) { + var i = 0, + len = list.length; + for ( ; i < len; i++ ) { + if ( list[ i ] === elem ) { + return i; + } + } + return -1; + }, + + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|" + + "ismap|loop|multiple|open|readonly|required|scoped", + + // Regular expressions + + // http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + + // https://www.w3.org/TR/css-syntax-3/#ident-token-diagram + identifier = "(?:\\\\[\\da-fA-F]{1,6}" + whitespace + + "?|\\\\[^\\r\\n\\f]|[\\w-]|[^\0-\\x7f])+", + + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + + // "Attribute values must be CSS identifiers [capture 5] + // or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + + whitespace + "*\\]", + + pseudos = ":(" + identifier + ")(?:\\((" + + + // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: + // 1. quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + + // 3. 
anything else (capture 2) + ".*" + + ")\\)|)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rwhitespace = new RegExp( whitespace + "+", "g" ), + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + + "*" ), + rdescend = new RegExp( whitespace + "|>" ), + + rpseudo = new RegExp( pseudos ), + ridentifier = new RegExp( "^" + identifier + "$" ), + + matchExpr = { + "ID": new RegExp( "^#(" + identifier + ")" ), + "CLASS": new RegExp( "^\\.(" + identifier + ")" ), + "TAG": new RegExp( "^(" + identifier + "|[*])" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + + whitespace + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + + whitespace + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + + // For use in libraries implementing .is() + // We use this for POS matching in `select` + "needsContext": new RegExp( "^" + whitespace + + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace + + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + }, + + rhtml = /HTML$/i, + rinputs = /^(?:input|select|textarea|button)$/i, + rheader = /^h\d$/i, + + rnative = /^[^{]+\{\s*\[native \w/, + + // Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, + + rsibling = /[+~]/, + + // CSS escapes + // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + runescape = new RegExp( "\\\\[\\da-fA-F]{1,6}" + whitespace + "?|\\\\([^\\r\\n\\f])", "g" ), + funescape = function( escape, nonHex ) { + var high = "0x" + escape.slice( 1 ) - 0x10000; + + return nonHex ? + + // Strip the backslash prefix from a non-hex escape sequence + nonHex : + + // Replace a hexadecimal escape sequence with the encoded Unicode code point + // Support: IE <=11+ + // For values outside the Basic Multilingual Plane (BMP), manually construct a + // surrogate pair + high < 0 ? 
+ String.fromCharCode( high + 0x10000 ) : + String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); + }, + + // CSS string/identifier serialization + // https://drafts.csswg.org/cssom/#common-serializing-idioms + rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, + fcssescape = function( ch, asCodePoint ) { + if ( asCodePoint ) { + + // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER + if ( ch === "\0" ) { + return "\uFFFD"; + } + + // Control characters and (dependent upon position) numbers get escaped as code points + return ch.slice( 0, -1 ) + "\\" + + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; + } + + // Other potentially-special ASCII characters get backslash-escaped + return "\\" + ch; + }, + + // Used for iframes + // See setDocument() + // Removing the function wrapper causes a "Permission Denied" + // error in IE + unloadHandler = function() { + setDocument(); + }, + + inDisabledFieldset = addCombinator( + function( elem ) { + return elem.disabled === true && elem.nodeName.toLowerCase() === "fieldset"; + }, + { dir: "parentNode", next: "legend" } + ); + +// Optimize for push.apply( _, NodeList ) +try { + push.apply( + ( arr = slice.call( preferredDoc.childNodes ) ), + preferredDoc.childNodes + ); + + // Support: Android<4.0 + // Detect silently failing push.apply + // eslint-disable-next-line no-unused-expressions + arr[ preferredDoc.childNodes.length ].nodeType; +} catch ( e ) { + push = { apply: arr.length ? + + // Leverage slice if possible + function( target, els ) { + pushNative.apply( target, slice.call( els ) ); + } : + + // Support: IE<9 + // Otherwise append directly + function( target, els ) { + var j = target.length, + i = 0; + + // Can't trust NodeList.length + while ( ( target[ j++ ] = els[ i++ ] ) ) {} + target.length = j - 1; + } + }; +} + +function Sizzle( selector, context, results, seed ) { + var m, i, elem, nid, match, groups, newSelector, + newContext = context && context.ownerDocument, + + // nodeType defaults to 9, since context defaults to document + nodeType = context ? 
context.nodeType : 9; + + results = results || []; + + // Return early from calls with invalid selector or context + if ( typeof selector !== "string" || !selector || + nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { + + return results; + } + + // Try to shortcut find operations (as opposed to filters) in HTML documents + if ( !seed ) { + setDocument( context ); + context = context || document; + + if ( documentIsHTML ) { + + // If the selector is sufficiently simple, try using a "get*By*" DOM method + // (excepting DocumentFragment context, where the methods don't exist) + if ( nodeType !== 11 && ( match = rquickExpr.exec( selector ) ) ) { + + // ID selector + if ( ( m = match[ 1 ] ) ) { + + // Document context + if ( nodeType === 9 ) { + if ( ( elem = context.getElementById( m ) ) ) { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + + // Element context + } else { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( newContext && ( elem = newContext.getElementById( m ) ) && + contains( context, elem ) && + elem.id === m ) { + + results.push( elem ); + return results; + } + } + + // Type selector + } else if ( match[ 2 ] ) { + push.apply( results, context.getElementsByTagName( selector ) ); + return results; + + // Class selector + } else if ( ( m = match[ 3 ] ) && support.getElementsByClassName && + context.getElementsByClassName ) { + + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + } + + // Take advantage of querySelectorAll + if ( support.qsa && + !nonnativeSelectorCache[ selector + " " ] && + ( !rbuggyQSA || !rbuggyQSA.test( selector ) ) && + + // Support: IE 8 only + // Exclude object elements + ( nodeType !== 1 || context.nodeName.toLowerCase() !== "object" ) ) { + + newSelector = selector; + newContext = context; + + // qSA considers elements outside a scoping root when evaluating child or + // descendant combinators, which is not what we want. + // In such cases, we work around the behavior by prefixing every selector in the + // list with an ID selector referencing the scope context. + // The technique has to be used as well when a leading combinator is used + // as such selectors are not recognized by querySelectorAll. + // Thanks to Andrew Dupont for this technique. + if ( nodeType === 1 && + ( rdescend.test( selector ) || rcombinators.test( selector ) ) ) { + + // Expand context for sibling selectors + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || + context; + + // We can use :scope instead of the ID hack if the browser + // supports it & if we're not changing the context. + if ( newContext !== context || !support.scope ) { + + // Capture the context ID, setting it first if necessary + if ( ( nid = context.getAttribute( "id" ) ) ) { + nid = nid.replace( rcssescape, fcssescape ); + } else { + context.setAttribute( "id", ( nid = expando ) ); + } + } + + // Prefix every selector in the list + groups = tokenize( selector ); + i = groups.length; + while ( i-- ) { + groups[ i ] = ( nid ? 
"#" + nid : ":scope" ) + " " + + toSelector( groups[ i ] ); + } + newSelector = groups.join( "," ); + } + + try { + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch ( qsaError ) { + nonnativeSelectorCache( selector, true ); + } finally { + if ( nid === expando ) { + context.removeAttribute( "id" ); + } + } + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed ); +} + +/** + * Create key-value caches of limited size + * @returns {function(string, object)} Returns the Object data after storing it on itself with + * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) + * deleting the oldest entry + */ +function createCache() { + var keys = []; + + function cache( key, value ) { + + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) + if ( keys.push( key + " " ) > Expr.cacheLength ) { + + // Only keep the most recent entries + delete cache[ keys.shift() ]; + } + return ( cache[ key + " " ] = value ); + } + return cache; +} + +/** + * Mark a function for special use by Sizzle + * @param {Function} fn The function to mark + */ +function markFunction( fn ) { + fn[ expando ] = true; + return fn; +} + +/** + * Support testing using an element + * @param {Function} fn Passed the created element and returns a boolean result + */ +function assert( fn ) { + var el = document.createElement( "fieldset" ); + + try { + return !!fn( el ); + } catch ( e ) { + return false; + } finally { + + // Remove from its parent by default + if ( el.parentNode ) { + el.parentNode.removeChild( el ); + } + + // release memory in IE + el = null; + } +} + +/** + * Adds the same handler for all of the specified attrs + * @param {String} attrs Pipe-separated list of attributes + * @param {Function} handler The method that will be applied + */ +function addHandle( attrs, handler ) { + var arr = attrs.split( "|" ), + i = arr.length; + + while ( i-- ) { + Expr.attrHandle[ arr[ i ] ] = handler; + } +} + +/** + * Checks document order of two siblings + * @param {Element} a + * @param {Element} b + * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b + */ +function siblingCheck( a, b ) { + var cur = b && a, + diff = cur && a.nodeType === 1 && b.nodeType === 1 && + a.sourceIndex - b.sourceIndex; + + // Use IE sourceIndex if available on both nodes + if ( diff ) { + return diff; + } + + // Check if b follows a + if ( cur ) { + while ( ( cur = cur.nextSibling ) ) { + if ( cur === b ) { + return -1; + } + } + } + + return a ? 
1 : -1; +} + +/** + * Returns a function to use in pseudos for input types + * @param {String} type + */ +function createInputPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for buttons + * @param {String} type + */ +function createButtonPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return ( name === "input" || name === "button" ) && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for :enabled/:disabled + * @param {Boolean} disabled true for :disabled; false for :enabled + */ +function createDisabledPseudo( disabled ) { + + // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable + return function( elem ) { + + // Only certain elements can match :enabled or :disabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled + if ( "form" in elem ) { + + // Check for inherited disabledness on relevant non-disabled elements: + // * listed form-associated elements in a disabled fieldset + // https://html.spec.whatwg.org/multipage/forms.html#category-listed + // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled + // * option elements in a disabled optgroup + // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled + // All such elements have a "form" property. + if ( elem.parentNode && elem.disabled === false ) { + + // Option elements defer to a parent optgroup if present + if ( "label" in elem ) { + if ( "label" in elem.parentNode ) { + return elem.parentNode.disabled === disabled; + } else { + return elem.disabled === disabled; + } + } + + // Support: IE 6 - 11 + // Use the isDisabled shortcut property to check for disabled fieldset ancestors + return elem.isDisabled === disabled || + + // Where there is no isDisabled, check manually + /* jshint -W018 */ + elem.isDisabled !== !disabled && + inDisabledFieldset( elem ) === disabled; + } + + return elem.disabled === disabled; + + // Try to winnow out elements that can't be disabled before trusting the disabled property. + // Some victims get caught in our net (label, legend, menu, track), but it shouldn't + // even exist on them, let alone have a boolean value. 
+ } else if ( "label" in elem ) { + return elem.disabled === disabled; + } + + // Remaining elements are neither :enabled nor :disabled + return false; + }; +} + +/** + * Returns a function to use in pseudos for positionals + * @param {Function} fn + */ +function createPositionalPseudo( fn ) { + return markFunction( function( argument ) { + argument = +argument; + return markFunction( function( seed, matches ) { + var j, + matchIndexes = fn( [], seed.length, argument ), + i = matchIndexes.length; + + // Match elements found at the specified indexes + while ( i-- ) { + if ( seed[ ( j = matchIndexes[ i ] ) ] ) { + seed[ j ] = !( matches[ j ] = seed[ j ] ); + } + } + } ); + } ); +} + +/** + * Checks a node for validity as a Sizzle context + * @param {Element|Object=} context + * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value + */ +function testContext( context ) { + return context && typeof context.getElementsByTagName !== "undefined" && context; +} + +// Expose support vars for convenience +support = Sizzle.support = {}; + +/** + * Detects XML nodes + * @param {Element|Object} elem An element or a document + * @returns {Boolean} True iff elem is a non-HTML XML node + */ +isXML = Sizzle.isXML = function( elem ) { + var namespace = elem && elem.namespaceURI, + docElem = elem && ( elem.ownerDocument || elem ).documentElement; + + // Support: IE <=8 + // Assume HTML when documentElement doesn't yet exist, such as inside loading iframes + // https://bugs.jquery.com/ticket/4833 + return !rhtml.test( namespace || docElem && docElem.nodeName || "HTML" ); +}; + +/** + * Sets document-related variables once based on the current document + * @param {Element|Object} [doc] An element or document object to use to set the document + * @returns {Object} Returns the current document + */ +setDocument = Sizzle.setDocument = function( node ) { + var hasCompare, subWindow, + doc = node ? node.ownerDocument || node : preferredDoc; + + // Return early if doc is invalid or already selected + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( doc == document || doc.nodeType !== 9 || !doc.documentElement ) { + return document; + } + + // Update global variables + document = doc; + docElem = document.documentElement; + documentIsHTML = !isXML( document ); + + // Support: IE 9 - 11+, Edge 12 - 18+ + // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( preferredDoc != document && + ( subWindow = document.defaultView ) && subWindow.top !== subWindow ) { + + // Support: IE 11, Edge + if ( subWindow.addEventListener ) { + subWindow.addEventListener( "unload", unloadHandler, false ); + + // Support: IE 9 - 10 only + } else if ( subWindow.attachEvent ) { + subWindow.attachEvent( "onunload", unloadHandler ); + } + } + + // Support: IE 8 - 11+, Edge 12 - 18+, Chrome <=16 - 25 only, Firefox <=3.6 - 31 only, + // Safari 4 - 5 only, Opera <=11.6 - 12.x only + // IE/Edge & older browsers don't support the :scope pseudo-class. + // Support: Safari 6.0 only + // Safari 6.0 supports :scope but it's an alias of :root there. 
+ support.scope = assert( function( el ) { + docElem.appendChild( el ).appendChild( document.createElement( "div" ) ); + return typeof el.querySelectorAll !== "undefined" && + !el.querySelectorAll( ":scope fieldset div" ).length; + } ); + + /* Attributes + ---------------------------------------------------------------------- */ + + // Support: IE<8 + // Verify that getAttribute really returns attributes and not properties + // (excepting IE8 booleans) + support.attributes = assert( function( el ) { + el.className = "i"; + return !el.getAttribute( "className" ); + } ); + + /* getElement(s)By* + ---------------------------------------------------------------------- */ + + // Check if getElementsByTagName("*") returns only elements + support.getElementsByTagName = assert( function( el ) { + el.appendChild( document.createComment( "" ) ); + return !el.getElementsByTagName( "*" ).length; + } ); + + // Support: IE<9 + support.getElementsByClassName = rnative.test( document.getElementsByClassName ); + + // Support: IE<10 + // Check if getElementById returns elements by name + // The broken getElementById methods don't pick up programmatically-set names, + // so use a roundabout getElementsByName test + support.getById = assert( function( el ) { + docElem.appendChild( el ).id = expando; + return !document.getElementsByName || !document.getElementsByName( expando ).length; + } ); + + // ID filter and find + if ( support.getById ) { + Expr.filter[ "ID" ] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + return elem.getAttribute( "id" ) === attrId; + }; + }; + Expr.find[ "ID" ] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var elem = context.getElementById( id ); + return elem ? [ elem ] : []; + } + }; + } else { + Expr.filter[ "ID" ] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + var node = typeof elem.getAttributeNode !== "undefined" && + elem.getAttributeNode( "id" ); + return node && node.value === attrId; + }; + }; + + // Support: IE 6 - 7 only + // getElementById is not reliable as a find shortcut + Expr.find[ "ID" ] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var node, i, elems, + elem = context.getElementById( id ); + + if ( elem ) { + + // Verify the id attribute + node = elem.getAttributeNode( "id" ); + if ( node && node.value === id ) { + return [ elem ]; + } + + // Fall back on getElementsByName + elems = context.getElementsByName( id ); + i = 0; + while ( ( elem = elems[ i++ ] ) ) { + node = elem.getAttributeNode( "id" ); + if ( node && node.value === id ) { + return [ elem ]; + } + } + } + + return []; + } + }; + } + + // Tag + Expr.find[ "TAG" ] = support.getElementsByTagName ? 
+ function( tag, context ) { + if ( typeof context.getElementsByTagName !== "undefined" ) { + return context.getElementsByTagName( tag ); + + // DocumentFragment nodes don't have gEBTN + } else if ( support.qsa ) { + return context.querySelectorAll( tag ); + } + } : + + function( tag, context ) { + var elem, + tmp = [], + i = 0, + + // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too + results = context.getElementsByTagName( tag ); + + // Filter out possible comments + if ( tag === "*" ) { + while ( ( elem = results[ i++ ] ) ) { + if ( elem.nodeType === 1 ) { + tmp.push( elem ); + } + } + + return tmp; + } + return results; + }; + + // Class + Expr.find[ "CLASS" ] = support.getElementsByClassName && function( className, context ) { + if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { + return context.getElementsByClassName( className ); + } + }; + + /* QSA/matchesSelector + ---------------------------------------------------------------------- */ + + // QSA and matchesSelector support + + // matchesSelector(:active) reports false when true (IE9/Opera 11.5) + rbuggyMatches = []; + + // qSa(:focus) reports false when true (Chrome 21) + // We allow this because of a bug in IE8/9 that throws an error + // whenever `document.activeElement` is accessed on an iframe + // So, we allow :focus to pass through QSA all the time to avoid the IE error + // See https://bugs.jquery.com/ticket/13378 + rbuggyQSA = []; + + if ( ( support.qsa = rnative.test( document.querySelectorAll ) ) ) { + + // Build QSA regex + // Regex strategy adopted from Diego Perini + assert( function( el ) { + + var input; + + // Select is set to empty string on purpose + // This is to test IE's treatment of not explicitly + // setting a boolean content attribute, + // since its presence should be enough + // https://bugs.jquery.com/ticket/12359 + docElem.appendChild( el ).innerHTML = "<a id='" + expando + "'></a>" + + "<select id='" + expando + "-\r\\' msallowcapture=''>" + + "<option selected=''></option></select>"; + + // Support: IE8, Opera 11-12.16 + // Nothing should be selected when empty strings follow ^= or $= or *= + // The test attribute must be unknown in Opera but "safe" for WinRT + // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section + if ( el.querySelectorAll( "[msallowcapture^='']" ).length ) { + rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); + } + + // Support: IE8 + // Boolean attributes and "value" are not treated correctly + if ( !el.querySelectorAll( "[selected]" ).length ) { + rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); + } + + // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ + if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { + rbuggyQSA.push( "~=" ); + } + + // Support: IE 11+, Edge 15 - 18+ + // IE 11/Edge don't find elements on a `[name='']` query in some cases. + // Adding a temporary attribute to the document before the selection works + // around the issue. + // Interestingly, IE 10 & older don't seem to have the issue.
+ input = document.createElement( "input" ); + input.setAttribute( "name", "" ); + el.appendChild( input ); + if ( !el.querySelectorAll( "[name='']" ).length ) { + rbuggyQSA.push( "\\[" + whitespace + "*name" + whitespace + "*=" + + whitespace + "*(?:''|\"\")" ); + } + + // Webkit/Opera - :checked should return selected option elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + // IE8 throws error here and will not see later tests + if ( !el.querySelectorAll( ":checked" ).length ) { + rbuggyQSA.push( ":checked" ); + } + + // Support: Safari 8+, iOS 8+ + // https://bugs.webkit.org/show_bug.cgi?id=136851 + // In-page `selector#id sibling-combinator selector` fails + if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { + rbuggyQSA.push( ".#.+[+~]" ); + } + + // Support: Firefox <=3.6 - 5 only + // Old Firefox doesn't throw on a badly-escaped identifier. + el.querySelectorAll( "\\\f" ); + rbuggyQSA.push( "[\\r\\n\\f]" ); + } ); + + assert( function( el ) { + el.innerHTML = "<a href='' disabled='disabled'></a>" + + "<select disabled='disabled'><option/></select>"; + + // Support: Windows 8 Native Apps + // The type and name attributes are restricted during .innerHTML assignment + var input = document.createElement( "input" ); + input.setAttribute( "type", "hidden" ); + el.appendChild( input ).setAttribute( "name", "D" ); + + // Support: IE8 + // Enforce case-sensitivity of name attribute + if ( el.querySelectorAll( "[name=d]" ).length ) { + rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); + } + + // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) + // IE8 throws error here and will not see later tests + if ( el.querySelectorAll( ":enabled" ).length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Support: IE9-11+ + // IE's :disabled selector does not pick up the children of disabled fieldsets + docElem.appendChild( el ).disabled = true; + if ( el.querySelectorAll( ":disabled" ).length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Support: Opera 10 - 11 only + // Opera 10-11 does not throw on post-comma invalid pseudos + el.querySelectorAll( "*,:x" ); + rbuggyQSA.push( ",.*:" ); + } ); + } + + if ( ( support.matchesSelector = rnative.test( ( matches = docElem.matches || + docElem.webkitMatchesSelector || + docElem.mozMatchesSelector || + docElem.oMatchesSelector || + docElem.msMatchesSelector ) ) ) ) { + + assert( function( el ) { + + // Check to see if it's possible to do matchesSelector + // on a disconnected node (IE 9) + support.disconnectedMatch = matches.call( el, "*" ); + + // This should fail with an exception + // Gecko does not error, returns false instead + matches.call( el, "[s!='']:x" ); + rbuggyMatches.push( "!=", pseudos ); + } ); + } + + rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join( "|" ) ); + rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join( "|" ) ); + + /* Contains + ---------------------------------------------------------------------- */ + hasCompare = rnative.test( docElem.compareDocumentPosition ); + + // Element contains another + // Purposefully self-exclusive + // As in, an element does not contain itself + contains = hasCompare || rnative.test( docElem.contains ) ? + function( a, b ) { + var adown = a.nodeType === 9 ? a.documentElement : a, + bup = b && b.parentNode; + return a === bup || !!( bup && bup.nodeType === 1 && ( + adown.contains ?
+ adown.contains( bup ) : + a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 + ) ); + } : + function( a, b ) { + if ( b ) { + while ( ( b = b.parentNode ) ) { + if ( b === a ) { + return true; + } + } + } + return false; + }; + + /* Sorting + ---------------------------------------------------------------------- */ + + // Document order sorting + sortOrder = hasCompare ? + function( a, b ) { + + // Flag for duplicate removal + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + // Sort on method existence if only one input has compareDocumentPosition + var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; + if ( compare ) { + return compare; + } + + // Calculate position if both inputs belong to the same document + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + compare = ( a.ownerDocument || a ) == ( b.ownerDocument || b ) ? + a.compareDocumentPosition( b ) : + + // Otherwise we know they are disconnected + 1; + + // Disconnected nodes + if ( compare & 1 || + ( !support.sortDetached && b.compareDocumentPosition( a ) === compare ) ) { + + // Choose the first element that is related to our preferred document + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( a == document || a.ownerDocument == preferredDoc && + contains( preferredDoc, a ) ) { + return -1; + } + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( b == document || b.ownerDocument == preferredDoc && + contains( preferredDoc, b ) ) { + return 1; + } + + // Maintain original order + return sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + } + + return compare & 4 ? -1 : 1; + } : + function( a, b ) { + + // Exit early if the nodes are identical + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + var cur, + i = 0, + aup = a.parentNode, + bup = b.parentNode, + ap = [ a ], + bp = [ b ]; + + // Parentless nodes are either documents or disconnected + if ( !aup || !bup ) { + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + /* eslint-disable eqeqeq */ + return a == document ? -1 : + b == document ? 1 : + /* eslint-enable eqeqeq */ + aup ? -1 : + bup ? 1 : + sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + + // If the nodes are siblings, we can do a quick check + } else if ( aup === bup ) { + return siblingCheck( a, b ); + } + + // Otherwise we need full lists of their ancestors for comparison + cur = a; + while ( ( cur = cur.parentNode ) ) { + ap.unshift( cur ); + } + cur = b; + while ( ( cur = cur.parentNode ) ) { + bp.unshift( cur ); + } + + // Walk down the tree looking for a discrepancy + while ( ap[ i ] === bp[ i ] ) { + i++; + } + + return i ? + + // Do a sibling check if the nodes have a common ancestor + siblingCheck( ap[ i ], bp[ i ] ) : + + // Otherwise nodes in our document sort first + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. 
+ /* eslint-disable eqeqeq */ + ap[ i ] == preferredDoc ? -1 : + bp[ i ] == preferredDoc ? 1 : + /* eslint-enable eqeqeq */ + 0; + }; + + return document; +}; + +Sizzle.matches = function( expr, elements ) { + return Sizzle( expr, null, null, elements ); +}; + +Sizzle.matchesSelector = function( elem, expr ) { + setDocument( elem ); + + if ( support.matchesSelector && documentIsHTML && + !nonnativeSelectorCache[ expr + " " ] && + ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && + ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { + + try { + var ret = matches.call( elem, expr ); + + // IE 9's matchesSelector returns false on disconnected nodes + if ( ret || support.disconnectedMatch || + + // As well, disconnected nodes are said to be in a document + // fragment in IE 9 + elem.document && elem.document.nodeType !== 11 ) { + return ret; + } + } catch ( e ) { + nonnativeSelectorCache( expr, true ); + } + } + + return Sizzle( expr, document, null, [ elem ] ).length > 0; +}; + +Sizzle.contains = function( context, elem ) { + + // Set document vars if needed + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( ( context.ownerDocument || context ) != document ) { + setDocument( context ); + } + return contains( context, elem ); +}; + +Sizzle.attr = function( elem, name ) { + + // Set document vars if needed + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( ( elem.ownerDocument || elem ) != document ) { + setDocument( elem ); + } + + var fn = Expr.attrHandle[ name.toLowerCase() ], + + // Don't get fooled by Object.prototype properties (jQuery #13807) + val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? + fn( elem, name, !documentIsHTML ) : + undefined; + + return val !== undefined ? + val : + support.attributes || !documentIsHTML ? + elem.getAttribute( name ) : + ( val = elem.getAttributeNode( name ) ) && val.specified ? 
+ val.value : + null; +}; + +Sizzle.escape = function( sel ) { + return ( sel + "" ).replace( rcssescape, fcssescape ); +}; + +Sizzle.error = function( msg ) { + throw new Error( "Syntax error, unrecognized expression: " + msg ); +}; + +/** + * Document sorting and removing duplicates + * @param {ArrayLike} results + */ +Sizzle.uniqueSort = function( results ) { + var elem, + duplicates = [], + j = 0, + i = 0; + + // Unless we *know* we can detect duplicates, assume their presence + hasDuplicate = !support.detectDuplicates; + sortInput = !support.sortStable && results.slice( 0 ); + results.sort( sortOrder ); + + if ( hasDuplicate ) { + while ( ( elem = results[ i++ ] ) ) { + if ( elem === results[ i ] ) { + j = duplicates.push( i ); + } + } + while ( j-- ) { + results.splice( duplicates[ j ], 1 ); + } + } + + // Clear input after sorting to release objects + // See https://github.com/jquery/sizzle/pull/225 + sortInput = null; + + return results; +}; + +/** + * Utility function for retrieving the text value of an array of DOM nodes + * @param {Array|Element} elem + */ +getText = Sizzle.getText = function( elem ) { + var node, + ret = "", + i = 0, + nodeType = elem.nodeType; + + if ( !nodeType ) { + + // If no nodeType, this is expected to be an array + while ( ( node = elem[ i++ ] ) ) { + + // Do not traverse comment nodes + ret += getText( node ); + } + } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { + + // Use textContent for elements + // innerText usage removed for consistency of new lines (jQuery #11153) + if ( typeof elem.textContent === "string" ) { + return elem.textContent; + } else { + + // Traverse its children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + ret += getText( elem ); + } + } + } else if ( nodeType === 3 || nodeType === 4 ) { + return elem.nodeValue; + } + + // Do not include comment or processing instruction nodes + + return ret; +}; + +Expr = Sizzle.selectors = { + + // Can be adjusted by the user + cacheLength: 50, + + createPseudo: markFunction, + + match: matchExpr, + + attrHandle: {}, + + find: {}, + + relative: { + ">": { dir: "parentNode", first: true }, + " ": { dir: "parentNode" }, + "+": { dir: "previousSibling", first: true }, + "~": { dir: "previousSibling" } + }, + + preFilter: { + "ATTR": function( match ) { + match[ 1 ] = match[ 1 ].replace( runescape, funescape ); + + // Move the given value to match[3] whether quoted or unquoted + match[ 3 ] = ( match[ 3 ] || match[ 4 ] || + match[ 5 ] || "" ).replace( runescape, funescape ); + + if ( match[ 2 ] === "~=" ) { + match[ 3 ] = " " + match[ 3 ] + " "; + } + + return match.slice( 0, 4 ); + }, + + "CHILD": function( match ) { + + /* matches from matchExpr["CHILD"] + 1 type (only|nth|...) + 2 what (child|of-type) + 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) + 4 xn-component of xn+y argument ([+-]?\d*n|) + 5 sign of xn-component + 6 x of xn-component + 7 sign of y-component + 8 y of y-component + */ + match[ 1 ] = match[ 1 ].toLowerCase(); + + if ( match[ 1 ].slice( 0, 3 ) === "nth" ) { + + // nth-* requires argument + if ( !match[ 3 ] ) { + Sizzle.error( match[ 0 ] ); + } + + // numeric x and y parameters for Expr.filter.CHILD + // remember that false/true cast respectively to 0/1 + match[ 4 ] = +( match[ 4 ] ? 
+ match[ 5 ] + ( match[ 6 ] || 1 ) : + 2 * ( match[ 3 ] === "even" || match[ 3 ] === "odd" ) ); + match[ 5 ] = +( ( match[ 7 ] + match[ 8 ] ) || match[ 3 ] === "odd" ); + + // other types prohibit arguments + } else if ( match[ 3 ] ) { + Sizzle.error( match[ 0 ] ); + } + + return match; + }, + + "PSEUDO": function( match ) { + var excess, + unquoted = !match[ 6 ] && match[ 2 ]; + + if ( matchExpr[ "CHILD" ].test( match[ 0 ] ) ) { + return null; + } + + // Accept quoted arguments as-is + if ( match[ 3 ] ) { + match[ 2 ] = match[ 4 ] || match[ 5 ] || ""; + + // Strip excess characters from unquoted arguments + } else if ( unquoted && rpseudo.test( unquoted ) && + + // Get excess from tokenize (recursively) + ( excess = tokenize( unquoted, true ) ) && + + // advance to the next closing parenthesis + ( excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length ) ) { + + // excess is a negative index + match[ 0 ] = match[ 0 ].slice( 0, excess ); + match[ 2 ] = unquoted.slice( 0, excess ); + } + + // Return only captures needed by the pseudo filter method (type and argument) + return match.slice( 0, 3 ); + } + }, + + filter: { + + "TAG": function( nodeNameSelector ) { + var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); + return nodeNameSelector === "*" ? + function() { + return true; + } : + function( elem ) { + return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; + }; + }, + + "CLASS": function( className ) { + var pattern = classCache[ className + " " ]; + + return pattern || + ( pattern = new RegExp( "(^|" + whitespace + + ")" + className + "(" + whitespace + "|$)" ) ) && classCache( + className, function( elem ) { + return pattern.test( + typeof elem.className === "string" && elem.className || + typeof elem.getAttribute !== "undefined" && + elem.getAttribute( "class" ) || + "" + ); + } ); + }, + + "ATTR": function( name, operator, check ) { + return function( elem ) { + var result = Sizzle.attr( elem, name ); + + if ( result == null ) { + return operator === "!="; + } + if ( !operator ) { + return true; + } + + result += ""; + + /* eslint-disable max-len */ + + return operator === "=" ? result === check : + operator === "!=" ? result !== check : + operator === "^=" ? check && result.indexOf( check ) === 0 : + operator === "*=" ? check && result.indexOf( check ) > -1 : + operator === "$=" ? check && result.slice( -check.length ) === check : + operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : + operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : + false; + /* eslint-enable max-len */ + + }; + }, + + "CHILD": function( type, what, _argument, first, last ) { + var simple = type.slice( 0, 3 ) !== "nth", + forward = type.slice( -4 ) !== "last", + ofType = what === "of-type"; + + return first === 1 && last === 0 ? + + // Shortcut for :nth-*(n) + function( elem ) { + return !!elem.parentNode; + } : + + function( elem, _context, xml ) { + var cache, uniqueCache, outerCache, node, nodeIndex, start, + dir = simple !== forward ? "nextSibling" : "previousSibling", + parent = elem.parentNode, + name = ofType && elem.nodeName.toLowerCase(), + useCache = !xml && !ofType, + diff = false; + + if ( parent ) { + + // :(first|last|only)-(child|of-type) + if ( simple ) { + while ( dir ) { + node = elem; + while ( ( node = node[ dir ] ) ) { + if ( ofType ? 
+ node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) { + + return false; + } + } + + // Reverse direction for :only-* (if we haven't yet done so) + start = dir = type === "only" && !start && "nextSibling"; + } + return true; + } + + start = [ forward ? parent.firstChild : parent.lastChild ]; + + // non-xml :nth-child(...) stores cache data on `parent` + if ( forward && useCache ) { + + // Seek `elem` from a previously-cached index + + // ...in a gzip-friendly way + node = parent; + outerCache = node[ expando ] || ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex && cache[ 2 ]; + node = nodeIndex && parent.childNodes[ nodeIndex ]; + + while ( ( node = ++nodeIndex && node && node[ dir ] || + + // Fallback to seeking `elem` from the start + ( diff = nodeIndex = 0 ) || start.pop() ) ) { + + // When found, cache indexes on `parent` and break + if ( node.nodeType === 1 && ++diff && node === elem ) { + uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; + break; + } + } + + } else { + + // Use previously-cached element index if available + if ( useCache ) { + + // ...in a gzip-friendly way + node = elem; + outerCache = node[ expando ] || ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex; + } + + // xml :nth-child(...) + // or :nth-last-child(...) or :nth(-last)?-of-type(...) + if ( diff === false ) { + + // Use the same loop as above to seek `elem` from the start + while ( ( node = ++nodeIndex && node && node[ dir ] || + ( diff = nodeIndex = 0 ) || start.pop() ) ) { + + if ( ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) && + ++diff ) { + + // Cache the index of each encountered element + if ( useCache ) { + outerCache = node[ expando ] || + ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + uniqueCache[ type ] = [ dirruns, diff ]; + } + + if ( node === elem ) { + break; + } + } + } + } + } + + // Incorporate the offset, then check against cycle size + diff -= last; + return diff === first || ( diff % first === 0 && diff / first >= 0 ); + } + }; + }, + + "PSEUDO": function( pseudo, argument ) { + + // pseudo-class names are case-insensitive + // http://www.w3.org/TR/selectors/#pseudo-classes + // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters + // Remember that setFilters inherits from pseudos + var args, + fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || + Sizzle.error( "unsupported pseudo: " + pseudo ); + + // The user may use createPseudo to indicate that + // arguments are needed to create the filter function + // just as Sizzle does + if ( fn[ expando ] ) { + return fn( argument ); + } + + // But maintain support for old signatures + if ( fn.length > 1 ) { + args = [ pseudo, pseudo, "", argument ]; + return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
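// ==== Illustrative sketch (editorial, not part of the vendored file) ====
// The "CHILD" filter above reduces every nth-* argument to the ( first, last )
// pair taken from an+b, then checks "diff === first || diff % first === 0". E.g.:
jQuery( "li:nth-child(2n+1)" );    // 1st, 3rd, 5th... <li> among its siblings
jQuery( "li:nth-last-child(1)" );  // each <li> that is the last child
jQuery( "p:only-of-type" );        // <p> with no sibling of the same tag
// ========================================================================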
+ markFunction( function( seed, matches ) { + var idx, + matched = fn( seed, argument ), + i = matched.length; + while ( i-- ) { + idx = indexOf( seed, matched[ i ] ); + seed[ idx ] = !( matches[ idx ] = matched[ i ] ); + } + } ) : + function( elem ) { + return fn( elem, 0, args ); + }; + } + + return fn; + } + }, + + pseudos: { + + // Potentially complex pseudos + "not": markFunction( function( selector ) { + + // Trim the selector passed to compile + // to avoid treating leading and trailing + // spaces as combinators + var input = [], + results = [], + matcher = compile( selector.replace( rtrim, "$1" ) ); + + return matcher[ expando ] ? + markFunction( function( seed, matches, _context, xml ) { + var elem, + unmatched = matcher( seed, null, xml, [] ), + i = seed.length; + + // Match elements unmatched by `matcher` + while ( i-- ) { + if ( ( elem = unmatched[ i ] ) ) { + seed[ i ] = !( matches[ i ] = elem ); + } + } + } ) : + function( elem, _context, xml ) { + input[ 0 ] = elem; + matcher( input, null, xml, results ); + + // Don't keep the element (issue #299) + input[ 0 ] = null; + return !results.pop(); + }; + } ), + + "has": markFunction( function( selector ) { + return function( elem ) { + return Sizzle( selector, elem ).length > 0; + }; + } ), + + "contains": markFunction( function( text ) { + text = text.replace( runescape, funescape ); + return function( elem ) { + return ( elem.textContent || getText( elem ) ).indexOf( text ) > -1; + }; + } ), + + // "Whether an element is represented by a :lang() selector + // is based solely on the element's language value + // being equal to the identifier C, + // or beginning with the identifier C immediately followed by "-". + // The matching of C against the element's language value is performed case-insensitively. + // The identifier C does not have to be a valid language name." + // http://www.w3.org/TR/selectors/#lang-pseudo + "lang": markFunction( function( lang ) { + + // lang value must be a valid identifier + if ( !ridentifier.test( lang || "" ) ) { + Sizzle.error( "unsupported lang: " + lang ); + } + lang = lang.replace( runescape, funescape ).toLowerCase(); + return function( elem ) { + var elemLang; + do { + if ( ( elemLang = documentIsHTML ? 
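// ==== Illustrative sketch (editorial, not part of the vendored file) ====
// createPseudo (markFunction) is the supported way to define argument-taking
// custom pseudos, the same mechanism ":not"/":has"/":contains" use above.
// A hypothetical case-insensitive variant of ":contains", registered through
// jQuery.expr (aliased to Sizzle.selectors further down in this file):
jQuery.expr.pseudos.icontains = jQuery.expr.createPseudo( function( text ) {
	text = text.toLowerCase();
	return function( elem ) {
		return ( elem.textContent || "" ).toLowerCase().indexOf( text ) > -1;
	};
} );
// Usage: jQuery( "li:icontains(readme)" )
// ========================================================================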
+ elem.lang : + elem.getAttribute( "xml:lang" ) || elem.getAttribute( "lang" ) ) ) { + + elemLang = elemLang.toLowerCase(); + return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; + } + } while ( ( elem = elem.parentNode ) && elem.nodeType === 1 ); + return false; + }; + } ), + + // Miscellaneous + "target": function( elem ) { + var hash = window.location && window.location.hash; + return hash && hash.slice( 1 ) === elem.id; + }, + + "root": function( elem ) { + return elem === docElem; + }, + + "focus": function( elem ) { + return elem === document.activeElement && + ( !document.hasFocus || document.hasFocus() ) && + !!( elem.type || elem.href || ~elem.tabIndex ); + }, + + // Boolean properties + "enabled": createDisabledPseudo( false ), + "disabled": createDisabledPseudo( true ), + + "checked": function( elem ) { + + // In CSS3, :checked should return both checked and selected elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + var nodeName = elem.nodeName.toLowerCase(); + return ( nodeName === "input" && !!elem.checked ) || + ( nodeName === "option" && !!elem.selected ); + }, + + "selected": function( elem ) { + + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + // eslint-disable-next-line no-unused-expressions + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + // Contents + "empty": function( elem ) { + + // http://www.w3.org/TR/selectors/#empty-pseudo + // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), + // but not by others (comment: 8; processing instruction: 7; etc.) + // nodeType < 6 works because attributes (2) do not appear as children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + if ( elem.nodeType < 6 ) { + return false; + } + } + return true; + }, + + "parent": function( elem ) { + return !Expr.pseudos[ "empty" ]( elem ); + }, + + // Element/input types + "header": function( elem ) { + return rheader.test( elem.nodeName ); + }, + + "input": function( elem ) { + return rinputs.test( elem.nodeName ); + }, + + "button": function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === "button" || name === "button"; + }, + + "text": function( elem ) { + var attr; + return elem.nodeName.toLowerCase() === "input" && + elem.type === "text" && + + // Support: IE<8 + // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" + ( ( attr = elem.getAttribute( "type" ) ) == null || + attr.toLowerCase() === "text" ); + }, + + // Position-in-collection + "first": createPositionalPseudo( function() { + return [ 0 ]; + } ), + + "last": createPositionalPseudo( function( _matchIndexes, length ) { + return [ length - 1 ]; + } ), + + "eq": createPositionalPseudo( function( _matchIndexes, length, argument ) { + return [ argument < 0 ? argument + length : argument ]; + } ), + + "even": createPositionalPseudo( function( matchIndexes, length ) { + var i = 0; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "odd": createPositionalPseudo( function( matchIndexes, length ) { + var i = 1; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "lt": createPositionalPseudo( function( matchIndexes, length, argument ) { + var i = argument < 0 ? + argument + length : + argument > length ? 
+ length : + argument; + for ( ; --i >= 0; ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "gt": createPositionalPseudo( function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; ++i < length; ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ) + } +}; + +Expr.pseudos[ "nth" ] = Expr.pseudos[ "eq" ]; + +// Add button/input type pseudos +for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { + Expr.pseudos[ i ] = createInputPseudo( i ); +} +for ( i in { submit: true, reset: true } ) { + Expr.pseudos[ i ] = createButtonPseudo( i ); +} + +// Easy API for creating new setFilters +function setFilters() {} +setFilters.prototype = Expr.filters = Expr.pseudos; +Expr.setFilters = new setFilters(); + +tokenize = Sizzle.tokenize = function( selector, parseOnly ) { + var matched, match, tokens, type, + soFar, groups, preFilters, + cached = tokenCache[ selector + " " ]; + + if ( cached ) { + return parseOnly ? 0 : cached.slice( 0 ); + } + + soFar = selector; + groups = []; + preFilters = Expr.preFilter; + + while ( soFar ) { + + // Comma and first run + if ( !matched || ( match = rcomma.exec( soFar ) ) ) { + if ( match ) { + + // Don't consume trailing commas as valid + soFar = soFar.slice( match[ 0 ].length ) || soFar; + } + groups.push( ( tokens = [] ) ); + } + + matched = false; + + // Combinators + if ( ( match = rcombinators.exec( soFar ) ) ) { + matched = match.shift(); + tokens.push( { + value: matched, + + // Cast descendant combinators to space + type: match[ 0 ].replace( rtrim, " " ) + } ); + soFar = soFar.slice( matched.length ); + } + + // Filters + for ( type in Expr.filter ) { + if ( ( match = matchExpr[ type ].exec( soFar ) ) && ( !preFilters[ type ] || + ( match = preFilters[ type ]( match ) ) ) ) { + matched = match.shift(); + tokens.push( { + value: matched, + type: type, + matches: match + } ); + soFar = soFar.slice( matched.length ); + } + } + + if ( !matched ) { + break; + } + } + + // Return the length of the invalid excess + // if we're just parsing + // Otherwise, throw an error or return tokens + return parseOnly ? + soFar.length : + soFar ? + Sizzle.error( selector ) : + + // Cache the tokens + tokenCache( selector, groups ).slice( 0 ); +}; + +function toSelector( tokens ) { + var i = 0, + len = tokens.length, + selector = ""; + for ( ; i < len; i++ ) { + selector += tokens[ i ].value; + } + return selector; +} + +function addCombinator( matcher, combinator, base ) { + var dir = combinator.dir, + skip = combinator.next, + key = skip || dir, + checkNonElements = base && key === "parentNode", + doneName = done++; + + return combinator.first ? 
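// ==== Illustrative sketch (editorial, not part of the vendored file) ====
// The positional pseudos above (:eq/:lt/:gt/:even/:odd/:first/:last) index
// into the matched set, not the DOM tree; negative indexes count from the end:
jQuery( "li:lt(2)" );    // the first two matched <li> elements
jQuery( "li:eq(-1)" );   // the last matched <li>
// These are Sizzle extensions rather than CSS; jQuery 3.4 deprecated them in
// favor of the equivalent .slice( 0, 2 ) / .eq( -1 ) set methods.
// ========================================================================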
+ + // Check against closest ancestor/preceding element + function( elem, context, xml ) { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + return matcher( elem, context, xml ); + } + } + return false; + } : + + // Check against all ancestor/preceding elements + function( elem, context, xml ) { + var oldCache, uniqueCache, outerCache, + newCache = [ dirruns, doneName ]; + + // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching + if ( xml ) { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + if ( matcher( elem, context, xml ) ) { + return true; + } + } + } + } else { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + outerCache = elem[ expando ] || ( elem[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ elem.uniqueID ] || + ( outerCache[ elem.uniqueID ] = {} ); + + if ( skip && skip === elem.nodeName.toLowerCase() ) { + elem = elem[ dir ] || elem; + } else if ( ( oldCache = uniqueCache[ key ] ) && + oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { + + // Assign to newCache so results back-propagate to previous elements + return ( newCache[ 2 ] = oldCache[ 2 ] ); + } else { + + // Reuse newcache so results back-propagate to previous elements + uniqueCache[ key ] = newCache; + + // A match means we're done; a fail means we have to keep checking + if ( ( newCache[ 2 ] = matcher( elem, context, xml ) ) ) { + return true; + } + } + } + } + } + return false; + }; +} + +function elementMatcher( matchers ) { + return matchers.length > 1 ? + function( elem, context, xml ) { + var i = matchers.length; + while ( i-- ) { + if ( !matchers[ i ]( elem, context, xml ) ) { + return false; + } + } + return true; + } : + matchers[ 0 ]; +} + +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[ i ], results ); + } + return results; +} + +function condense( unmatched, map, filter, context, xml ) { + var elem, + newUnmatched = [], + i = 0, + len = unmatched.length, + mapped = map != null; + + for ( ; i < len; i++ ) { + if ( ( elem = unmatched[ i ] ) ) { + if ( !filter || filter( elem, context, xml ) ) { + newUnmatched.push( elem ); + if ( mapped ) { + map.push( i ); + } + } + } + } + + return newUnmatched; +} + +function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { + if ( postFilter && !postFilter[ expando ] ) { + postFilter = setMatcher( postFilter ); + } + if ( postFinder && !postFinder[ expando ] ) { + postFinder = setMatcher( postFinder, postSelector ); + } + return markFunction( function( seed, results, context, xml ) { + var temp, i, elem, + preMap = [], + postMap = [], + preexisting = results.length, + + // Get initial elements from seed or context + elems = seed || multipleContexts( + selector || "*", + context.nodeType ? [ context ] : context, + [] + ), + + // Prefilter to get matcher input, preserving a map for seed-results synchronization + matcherIn = preFilter && ( seed || !selector ) ? + condense( elems, preMap, preFilter, context, xml ) : + elems, + + matcherOut = matcher ? + + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, + postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
+ + // ...intermediate processing is necessary + [] : + + // ...otherwise use results directly + results : + matcherIn; + + // Find primary matches + if ( matcher ) { + matcher( matcherIn, matcherOut, context, xml ); + } + + // Apply postFilter + if ( postFilter ) { + temp = condense( matcherOut, postMap ); + postFilter( temp, [], context, xml ); + + // Un-match failing elements by moving them back to matcherIn + i = temp.length; + while ( i-- ) { + if ( ( elem = temp[ i ] ) ) { + matcherOut[ postMap[ i ] ] = !( matcherIn[ postMap[ i ] ] = elem ); + } + } + } + + if ( seed ) { + if ( postFinder || preFilter ) { + if ( postFinder ) { + + // Get the final matcherOut by condensing this intermediate into postFinder contexts + temp = []; + i = matcherOut.length; + while ( i-- ) { + if ( ( elem = matcherOut[ i ] ) ) { + + // Restore matcherIn since elem is not yet a final match + temp.push( ( matcherIn[ i ] = elem ) ); + } + } + postFinder( null, ( matcherOut = [] ), temp, xml ); + } + + // Move matched elements from seed to results to keep them synchronized + i = matcherOut.length; + while ( i-- ) { + if ( ( elem = matcherOut[ i ] ) && + ( temp = postFinder ? indexOf( seed, elem ) : preMap[ i ] ) > -1 ) { + + seed[ temp ] = !( results[ temp ] = elem ); + } + } + } + + // Add elements to results, through postFinder if defined + } else { + matcherOut = condense( + matcherOut === results ? + matcherOut.splice( preexisting, matcherOut.length ) : + matcherOut + ); + if ( postFinder ) { + postFinder( null, results, matcherOut, xml ); + } else { + push.apply( results, matcherOut ); + } + } + } ); +} + +function matcherFromTokens( tokens ) { + var checkContext, matcher, j, + len = tokens.length, + leadingRelative = Expr.relative[ tokens[ 0 ].type ], + implicitRelative = leadingRelative || Expr.relative[ " " ], + i = leadingRelative ? 1 : 0, + + // The foundational matcher ensures that elements are reachable from top-level context(s) + matchContext = addCombinator( function( elem ) { + return elem === checkContext; + }, implicitRelative, true ), + matchAnyContext = addCombinator( function( elem ) { + return indexOf( checkContext, elem ) > -1; + }, implicitRelative, true ), + matchers = [ function( elem, context, xml ) { + var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + ( checkContext = context ).nodeType ? + matchContext( elem, context, xml ) : + matchAnyContext( elem, context, xml ) ); + + // Avoid hanging onto element (issue #299) + checkContext = null; + return ret; + } ]; + + for ( ; i < len; i++ ) { + if ( ( matcher = Expr.relative[ tokens[ i ].type ] ) ) { + matchers = [ addCombinator( elementMatcher( matchers ), matcher ) ]; + } else { + matcher = Expr.filter[ tokens[ i ].type ].apply( null, tokens[ i ].matches ); + + // Return special upon seeing a positional matcher + if ( matcher[ expando ] ) { + + // Find the next relative operator (if any) for proper handling + j = ++i; + for ( ; j < len; j++ ) { + if ( Expr.relative[ tokens[ j ].type ] ) { + break; + } + } + return setMatcher( + i > 1 && elementMatcher( matchers ), + i > 1 && toSelector( + + // If the preceding token was a descendant combinator, insert an implicit any-element `*` + tokens + .slice( 0, i - 1 ) + .concat( { value: tokens[ i - 2 ].type === " " ? 
"*" : "" } ) + ).replace( rtrim, "$1" ), + matcher, + i < j && matcherFromTokens( tokens.slice( i, j ) ), + j < len && matcherFromTokens( ( tokens = tokens.slice( j ) ) ), + j < len && toSelector( tokens ) + ); + } + matchers.push( matcher ); + } + } + + return elementMatcher( matchers ); +} + +function matcherFromGroupMatchers( elementMatchers, setMatchers ) { + var bySet = setMatchers.length > 0, + byElement = elementMatchers.length > 0, + superMatcher = function( seed, context, xml, results, outermost ) { + var elem, j, matcher, + matchedCount = 0, + i = "0", + unmatched = seed && [], + setMatched = [], + contextBackup = outermostContext, + + // We must always have either seed elements or outermost context + elems = seed || byElement && Expr.find[ "TAG" ]( "*", outermost ), + + // Use integer dirruns iff this is the outermost matcher + dirrunsUnique = ( dirruns += contextBackup == null ? 1 : Math.random() || 0.1 ), + len = elems.length; + + if ( outermost ) { + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + outermostContext = context == document || context || outermost; + } + + // Add elements passing elementMatchers directly to results + // Support: IE<9, Safari + // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id + for ( ; i !== len && ( elem = elems[ i ] ) != null; i++ ) { + if ( byElement && elem ) { + j = 0; + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( !context && elem.ownerDocument != document ) { + setDocument( elem ); + xml = !documentIsHTML; + } + while ( ( matcher = elementMatchers[ j++ ] ) ) { + if ( matcher( elem, context || document, xml ) ) { + results.push( elem ); + break; + } + } + if ( outermost ) { + dirruns = dirrunsUnique; + } + } + + // Track unmatched elements for set filters + if ( bySet ) { + + // They will have gone through all possible matchers + if ( ( elem = !matcher && elem ) ) { + matchedCount--; + } + + // Lengthen the array for every element, matched or not + if ( seed ) { + unmatched.push( elem ); + } + } + } + + // `i` is now the count of elements visited above, and adding it to `matchedCount` + // makes the latter nonnegative. + matchedCount += i; + + // Apply set filters to unmatched elements + // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` + // equals `i`), unless we didn't visit _any_ elements in the above loop because we have + // no element matchers and no seed. + // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that + // case, which will result in a "00" `matchedCount` that differs from `i` but is also + // numerically zero. 
+ if ( bySet && i !== matchedCount ) { + j = 0; + while ( ( matcher = setMatchers[ j++ ] ) ) { + matcher( unmatched, setMatched, context, xml ); + } + + if ( seed ) { + + // Reintegrate element matches to eliminate the need for sorting + if ( matchedCount > 0 ) { + while ( i-- ) { + if ( !( unmatched[ i ] || setMatched[ i ] ) ) { + setMatched[ i ] = pop.call( results ); + } + } + } + + // Discard index placeholder values to get only actual matches + setMatched = condense( setMatched ); + } + + // Add matches to results + push.apply( results, setMatched ); + + // Seedless set matches succeeding multiple successful matchers stipulate sorting + if ( outermost && !seed && setMatched.length > 0 && + ( matchedCount + setMatchers.length ) > 1 ) { + + Sizzle.uniqueSort( results ); + } + } + + // Override manipulation of globals by nested matchers + if ( outermost ) { + dirruns = dirrunsUnique; + outermostContext = contextBackup; + } + + return unmatched; + }; + + return bySet ? + markFunction( superMatcher ) : + superMatcher; +} + +compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { + var i, + setMatchers = [], + elementMatchers = [], + cached = compilerCache[ selector + " " ]; + + if ( !cached ) { + + // Generate a function of recursive functions that can be used to check each element + if ( !match ) { + match = tokenize( selector ); + } + i = match.length; + while ( i-- ) { + cached = matcherFromTokens( match[ i ] ); + if ( cached[ expando ] ) { + setMatchers.push( cached ); + } else { + elementMatchers.push( cached ); + } + } + + // Cache the compiled function + cached = compilerCache( + selector, + matcherFromGroupMatchers( elementMatchers, setMatchers ) + ); + + // Save selector and tokenization + cached.selector = selector; + } + return cached; +}; + +/** + * A low-level selection function that works with Sizzle's compiled + * selector functions + * @param {String|Function} selector A selector or a pre-compiled + * selector function built with Sizzle.compile + * @param {Element} context + * @param {Array} [results] + * @param {Array} [seed] A set of elements to match against + */ +select = Sizzle.select = function( selector, context, results, seed ) { + var i, tokens, token, type, find, + compiled = typeof selector === "function" && selector, + match = !seed && tokenize( ( selector = compiled.selector || selector ) ); + + results = results || []; + + // Try to minimize operations if there is only one selector in the list and no seed + // (the latter of which guarantees us context) + if ( match.length === 1 ) { + + // Reduce context if the leading compound selector is an ID + tokens = match[ 0 ] = match[ 0 ].slice( 0 ); + if ( tokens.length > 2 && ( token = tokens[ 0 ] ).type === "ID" && + context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[ 1 ].type ] ) { + + context = ( Expr.find[ "ID" ]( token.matches[ 0 ] + .replace( runescape, funescape ), context ) || [] )[ 0 ]; + if ( !context ) { + return results; + + // Precompiled matchers will still verify ancestry, so step up a level + } else if ( compiled ) { + context = context.parentNode; + } + + selector = selector.slice( tokens.shift().value.length ); + } + + // Fetch a seed set for right-to-left matching + i = matchExpr[ "needsContext" ].test( selector ) ? 
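// ==== Illustrative sketch (editorial, not part of the vendored file) ====
// compile() caches one matcher function per selector string; select() first
// tries to cheapen the context (ID reduction, seeding a right-to-left match).
// Both are normally reached through jQuery.find, which this file aliases to
// Sizzle below:
var out = [];
jQuery.find( "div > p", document, out );               // appends matches to `out`
if ( out.length ) {
	jQuery.find.matchesSelector( out[ 0 ], ".intro" );  // boolean per-element test
}
// ========================================================================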
0 : tokens.length;
+ while ( i-- ) {
+ token = tokens[ i ];
+
+ // Abort if we hit a combinator
+ if ( Expr.relative[ ( type = token.type ) ] ) {
+ break;
+ }
+ if ( ( find = Expr.find[ type ] ) ) {
+
+ // Search, expanding context for leading sibling combinators
+ if ( ( seed = find(
+ token.matches[ 0 ].replace( runescape, funescape ),
+ rsibling.test( tokens[ 0 ].type ) && testContext( context.parentNode ) ||
+ context
+ ) ) ) {
+
+ // If seed is empty or no tokens remain, we can return early
+ tokens.splice( i, 1 );
+ selector = seed.length && toSelector( tokens );
+ if ( !selector ) {
+ push.apply( results, seed );
+ return results;
+ }
+
+ break;
+ }
+ }
+ }
+ }
+
+ // Compile and execute a filtering function if one is not provided
+ // Provide `match` to avoid retokenization if we modified the selector above
+ ( compiled || compile( selector, match ) )(
+ seed,
+ context,
+ !documentIsHTML,
+ results,
+ !context || rsibling.test( selector ) && testContext( context.parentNode ) || context
+ );
+ return results;
+};
+
+// One-time assignments
+
+// Sort stability
+support.sortStable = expando.split( "" ).sort( sortOrder ).join( "" ) === expando;
+
+// Support: Chrome 14-35+
+// Always assume duplicates if they aren't passed to the comparison function
+support.detectDuplicates = !!hasDuplicate;
+
+// Initialize against the default document
+setDocument();
+
+// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27)
+// Detached nodes confoundingly follow *each other*
+support.sortDetached = assert( function( el ) {
+
+ // Should return 1, but returns 4 (following)
+ return el.compareDocumentPosition( document.createElement( "fieldset" ) ) & 1;
+} );
+
+// Support: IE<8
+// Prevent attribute/property "interpolation"
+// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx
+if ( !assert( function( el ) {
+ el.innerHTML = "<a href='#'></a>";
+ return el.firstChild.getAttribute( "href" ) === "#";
+} ) ) {
+ addHandle( "type|href|height|width", function( elem, name, isXML ) {
+ if ( !isXML ) {
+ return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 );
+ }
+ } );
+}
+
+// Support: IE<9
+// Use defaultValue in place of getAttribute("value")
+if ( !support.attributes || !assert( function( el ) {
+ el.innerHTML = "<input/>";
+ el.firstChild.setAttribute( "value", "" );
+ return el.firstChild.getAttribute( "value" ) === "";
+} ) ) {
+ addHandle( "value", function( elem, _name, isXML ) {
+ if ( !isXML && elem.nodeName.toLowerCase() === "input" ) {
+ return elem.defaultValue;
+ }
+ } );
+}
+
+// Support: IE<9
+// Use getAttributeNode to fetch booleans when getAttribute lies
+if ( !assert( function( el ) {
+ return el.getAttribute( "disabled" ) == null;
+} ) ) {
+ addHandle( booleans, function( elem, name, isXML ) {
+ var val;
+ if ( !isXML ) {
+ return elem[ name ] === true ? name.toLowerCase() :
+ ( val = elem.getAttributeNode( name ) ) && val.specified ?
+ val.value : + null; + } + } ); +} + +return Sizzle; + +} )( window ); + + + +jQuery.find = Sizzle; +jQuery.expr = Sizzle.selectors; + +// Deprecated +jQuery.expr[ ":" ] = jQuery.expr.pseudos; +jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; +jQuery.text = Sizzle.getText; +jQuery.isXMLDoc = Sizzle.isXML; +jQuery.contains = Sizzle.contains; +jQuery.escapeSelector = Sizzle.escape; + + + + +var dir = function( elem, dir, until ) { + var matched = [], + truncate = until !== undefined; + + while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { + if ( elem.nodeType === 1 ) { + if ( truncate && jQuery( elem ).is( until ) ) { + break; + } + matched.push( elem ); + } + } + return matched; +}; + + +var siblings = function( n, elem ) { + var matched = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType === 1 && n !== elem ) { + matched.push( n ); + } + } + + return matched; +}; + + +var rneedsContext = jQuery.expr.match.needsContext; + + + +function nodeName( elem, name ) { + + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + +} +var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); + + + +// Implement the identical functionality for filter and not +function winnow( elements, qualifier, not ) { + if ( isFunction( qualifier ) ) { + return jQuery.grep( elements, function( elem, i ) { + return !!qualifier.call( elem, i, elem ) !== not; + } ); + } + + // Single element + if ( qualifier.nodeType ) { + return jQuery.grep( elements, function( elem ) { + return ( elem === qualifier ) !== not; + } ); + } + + // Arraylike of elements (jQuery, arguments, Array) + if ( typeof qualifier !== "string" ) { + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not; + } ); + } + + // Filtered directly for both simple and complex selectors + return jQuery.filter( qualifier, elements, not ); +} + +jQuery.filter = function( expr, elems, not ) { + var elem = elems[ 0 ]; + + if ( not ) { + expr = ":not(" + expr + ")"; + } + + if ( elems.length === 1 && elem.nodeType === 1 ) { + return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; + } + + return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { + return elem.nodeType === 1; + } ) ); +}; + +jQuery.fn.extend( { + find: function( selector ) { + var i, ret, + len = this.length, + self = this; + + if ( typeof selector !== "string" ) { + return this.pushStack( jQuery( selector ).filter( function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( self[ i ], this ) ) { + return true; + } + } + } ) ); + } + + ret = this.pushStack( [] ); + + for ( i = 0; i < len; i++ ) { + jQuery.find( selector, self[ i ], ret ); + } + + return len > 1 ? jQuery.uniqueSort( ret ) : ret; + }, + filter: function( selector ) { + return this.pushStack( winnow( this, selector || [], false ) ); + }, + not: function( selector ) { + return this.pushStack( winnow( this, selector || [], true ) ); + }, + is: function( selector ) { + return !!winnow( + this, + + // If this is a positional/relative selector, check membership in the returned set + // so $("p:first").is("p:last") won't return true for a doc with two "p". + typeof selector === "string" && rneedsContext.test( selector ) ? 
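// ==== Illustrative sketch (editorial, not part of the vendored file) ====
// winnow() above is the single engine behind .filter()/.not()/.is(); the
// qualifier may be a selector, an element, an array-like, or a predicate:
var items = jQuery( "li" );
items.filter( ".active" );                        // selector string
items.not( items.get( 0 ) );                      // a single element
items.filter( function( i ) { return i % 2; } );  // predicate: keeps odd indexes
items.is( ":visible" );                           // true if any element matches
// ========================================================================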
+ jQuery( selector ) :
+ selector || [],
+ false
+ ).length;
+ }
+} );
+
+
+// Initialize a jQuery object
+
+
+// A central reference to the root jQuery(document)
+var rootjQuery,
+
+ // A simple way to check for HTML strings
+ // Prioritize #id over <tag> to avoid XSS via location.hash (#9521)
+ // Strict HTML recognition (#11290: must start with <)
+ // Shortcut simple #id case for speed
+ rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/,
+
+ init = jQuery.fn.init = function( selector, context, root ) {
+ var match, elem;
+
+ // HANDLE: $(""), $(null), $(undefined), $(false)
+ if ( !selector ) {
+ return this;
+ }
+
+ // Method init() accepts an alternate rootjQuery
+ // so migrate can support jQuery.sub (gh-2101)
+ root = root || rootjQuery;
+
+ // Handle HTML strings
+ if ( typeof selector === "string" ) {
+ if ( selector[ 0 ] === "<" &&
+ selector[ selector.length - 1 ] === ">" &&
+ selector.length >= 3 ) {
+
+ // Assume that strings that start and end with <> are HTML and skip the regex check
+ match = [ null, selector, null ];
+
+ } else {
+ match = rquickExpr.exec( selector );
+ }
+
+ // Match html or make sure no context is specified for #id
+ if ( match && ( match[ 1 ] || !context ) ) {
+
+ // HANDLE: $(html) -> $(array)
+ if ( match[ 1 ] ) {
+ context = context instanceof jQuery ? context[ 0 ] : context;
+
+ // Option to run scripts is true for back-compat
+ // Intentionally let the error be thrown if parseHTML is not present
+ jQuery.merge( this, jQuery.parseHTML(
+ match[ 1 ],
+ context && context.nodeType ? context.ownerDocument || context : document,
+ true
+ ) );
+
+ // HANDLE: $(html, props)
+ if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) {
+ for ( match in context ) {
+
+ // Properties of context are called as methods if possible
+ if ( isFunction( this[ match ] ) ) {
+ this[ match ]( context[ match ] );
+
+ // ...and otherwise set as attributes
+ } else {
+ this.attr( match, context[ match ] );
+ }
+ }
+ }
+
+ return this;
+
+ // HANDLE: $(#id)
+ } else {
+ elem = document.getElementById( match[ 2 ] );
+
+ if ( elem ) {
+
+ // Inject the element directly into the jQuery object
+ this[ 0 ] = elem;
+ this.length = 1;
+ }
+ return this;
+ }
+
+ // HANDLE: $(expr, $(...))
+ } else if ( !context || context.jquery ) {
+ return ( context || root ).find( selector );
+
+ // HANDLE: $(expr, context)
+ // (which is just equivalent to: $(context).find(expr)
+ } else {
+ return this.constructor( context ).find( selector );
+ }
+
+ // HANDLE: $(DOMElement)
+ } else if ( selector.nodeType ) {
+ this[ 0 ] = selector;
+ this.length = 1;
+ return this;
+
+ // HANDLE: $(function)
+ // Shortcut for document ready
+ } else if ( isFunction( selector ) ) {
+ return root.ready !== undefined ?
+ root.ready( selector ) : + + // Execute immediately if ready is not present + selector( jQuery ); + } + + return jQuery.makeArray( selector, this ); + }; + +// Give the init function the jQuery prototype for later instantiation +init.prototype = jQuery.fn; + +// Initialize central reference +rootjQuery = jQuery( document ); + + +var rparentsprev = /^(?:parents|prev(?:Until|All))/, + + // Methods guaranteed to produce a unique set when starting from a unique set + guaranteedUnique = { + children: true, + contents: true, + next: true, + prev: true + }; + +jQuery.fn.extend( { + has: function( target ) { + var targets = jQuery( target, this ), + l = targets.length; + + return this.filter( function() { + var i = 0; + for ( ; i < l; i++ ) { + if ( jQuery.contains( this, targets[ i ] ) ) { + return true; + } + } + } ); + }, + + closest: function( selectors, context ) { + var cur, + i = 0, + l = this.length, + matched = [], + targets = typeof selectors !== "string" && jQuery( selectors ); + + // Positional selectors never match, since there's no _selection_ context + if ( !rneedsContext.test( selectors ) ) { + for ( ; i < l; i++ ) { + for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { + + // Always skip document fragments + if ( cur.nodeType < 11 && ( targets ? + targets.index( cur ) > -1 : + + // Don't pass non-elements to Sizzle + cur.nodeType === 1 && + jQuery.find.matchesSelector( cur, selectors ) ) ) { + + matched.push( cur ); + break; + } + } + } + } + + return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); + }, + + // Determine the position of an element within the set + index: function( elem ) { + + // No argument, return index in parent + if ( !elem ) { + return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; + } + + // Index in selector + if ( typeof elem === "string" ) { + return indexOf.call( jQuery( elem ), this[ 0 ] ); + } + + // Locate the position of the desired element + return indexOf.call( this, + + // If it receives a jQuery object, the first element is used + elem.jquery ? elem[ 0 ] : elem + ); + }, + + add: function( selector, context ) { + return this.pushStack( + jQuery.uniqueSort( + jQuery.merge( this.get(), jQuery( selector, context ) ) + ) + ); + }, + + addBack: function( selector ) { + return this.add( selector == null ? + this.prevObject : this.prevObject.filter( selector ) + ); + } +} ); + +function sibling( cur, dir ) { + while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} + return cur; +} + +jQuery.each( { + parent: function( elem ) { + var parent = elem.parentNode; + return parent && parent.nodeType !== 11 ? 
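// ==== Illustrative sketch (editorial, not part of the vendored file) ====
// The set-level traversal methods defined above, in typical use:
jQuery( ".item" ).closest( "ul" );       // nearest <ul> ancestor per element
jQuery( "li.active" ).index();           // position among its element siblings
jQuery( "h1" ).add( "h2" );              // sorted, duplicate-free union
jQuery( "div" ).find( "p" ).addBack();   // the <p> matches plus the original <div>s
// ========================================================================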
parent : null;
+ },
+ parents: function( elem ) {
+ return dir( elem, "parentNode" );
+ },
+ parentsUntil: function( elem, _i, until ) {
+ return dir( elem, "parentNode", until );
+ },
+ next: function( elem ) {
+ return sibling( elem, "nextSibling" );
+ },
+ prev: function( elem ) {
+ return sibling( elem, "previousSibling" );
+ },
+ nextAll: function( elem ) {
+ return dir( elem, "nextSibling" );
+ },
+ prevAll: function( elem ) {
+ return dir( elem, "previousSibling" );
+ },
+ nextUntil: function( elem, _i, until ) {
+ return dir( elem, "nextSibling", until );
+ },
+ prevUntil: function( elem, _i, until ) {
+ return dir( elem, "previousSibling", until );
+ },
+ siblings: function( elem ) {
+ return siblings( ( elem.parentNode || {} ).firstChild, elem );
+ },
+ children: function( elem ) {
+ return siblings( elem.firstChild );
+ },
+ contents: function( elem ) {
+ if ( elem.contentDocument != null &&
+
+ // Support: IE 11+
+ // <object> elements with no `data` attribute has an object
+ // `contentDocument` with a `null` prototype.
+ getProto( elem.contentDocument ) ) {
+
+ return elem.contentDocument;
+ }
+
+ // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only
+ // Treat the template element as a regular one in browsers that
+ // don't support it.
+ if ( nodeName( elem, "template" ) ) {
+ elem = elem.content || elem;
+ }
+
+ return jQuery.merge( [], elem.childNodes );
+ }
+}, function( name, fn ) {
+ jQuery.fn[ name ] = function( until, selector ) {
+ var matched = jQuery.map( this, fn, until );
+
+ if ( name.slice( -5 ) !== "Until" ) {
+ selector = until;
+ }
+
+ if ( selector && typeof selector === "string" ) {
+ matched = jQuery.filter( selector, matched );
+ }
+
+ if ( this.length > 1 ) {
+
+ // Remove duplicates
+ if ( !guaranteedUnique[ name ] ) {
+ jQuery.uniqueSort( matched );
+ }
+
+ // Reverse order for parents* and prev-derivatives
+ if ( rparentsprev.test( name ) ) {
+ matched.reverse();
+ }
+ }
+
+ return this.pushStack( matched );
+ };
+} );
+var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g );
+
+
+
+// Convert String-formatted options into Object-formatted ones
+function createOptions( options ) {
+ var object = {};
+ jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) {
+ object[ flag ] = true;
+ } );
+ return object;
+}
+
+/*
+ * Create a callback list using the following parameters:
+ *
+ * options: an optional list of space-separated options that will change how
+ * the callback list behaves or a more traditional option object
+ *
+ * By default a callback list will act like an event callback list and can be
+ * "fired" multiple times.
+ *
+ * Possible options:
+ *
+ * once: will ensure the callback list can only be fired once (like a Deferred)
+ *
+ * memory: will keep track of previous values and will call any callback added
+ * after the list has been fired right away with the latest "memorized"
+ * values (like a Deferred)
+ *
+ * unique: will ensure a callback can only be added once (no duplicate in the list)
+ *
+ * stopOnFalse: interrupt callings when a callback returns false
+ *
+ */
+jQuery.Callbacks = function( options ) {
+
+ // Convert options from String-formatted to Object-formatted if needed
+ // (we check in cache first)
+ options = typeof options === "string" ?
+ createOptions( options ) : + jQuery.extend( {}, options ); + + var // Flag to know if list is currently firing + firing, + + // Last fire value for non-forgettable lists + memory, + + // Flag to know if list was already fired + fired, + + // Flag to prevent firing + locked, + + // Actual callback list + list = [], + + // Queue of execution data for repeatable lists + queue = [], + + // Index of currently firing callback (modified by add/remove as needed) + firingIndex = -1, + + // Fire callbacks + fire = function() { + + // Enforce single-firing + locked = locked || options.once; + + // Execute callbacks for all pending executions, + // respecting firingIndex overrides and runtime changes + fired = firing = true; + for ( ; queue.length; firingIndex = -1 ) { + memory = queue.shift(); + while ( ++firingIndex < list.length ) { + + // Run callback and check for early termination + if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && + options.stopOnFalse ) { + + // Jump to end and forget the data so .add doesn't re-fire + firingIndex = list.length; + memory = false; + } + } + } + + // Forget the data if we're done with it + if ( !options.memory ) { + memory = false; + } + + firing = false; + + // Clean up if we're done firing for good + if ( locked ) { + + // Keep an empty list if we have data for future add calls + if ( memory ) { + list = []; + + // Otherwise, this object is spent + } else { + list = ""; + } + } + }, + + // Actual Callbacks object + self = { + + // Add a callback or a collection of callbacks to the list + add: function() { + if ( list ) { + + // If we have memory from a past run, we should fire after adding + if ( memory && !firing ) { + firingIndex = list.length - 1; + queue.push( memory ); + } + + ( function add( args ) { + jQuery.each( args, function( _, arg ) { + if ( isFunction( arg ) ) { + if ( !options.unique || !self.has( arg ) ) { + list.push( arg ); + } + } else if ( arg && arg.length && toType( arg ) !== "string" ) { + + // Inspect recursively + add( arg ); + } + } ); + } )( arguments ); + + if ( memory && !firing ) { + fire(); + } + } + return this; + }, + + // Remove a callback from the list + remove: function() { + jQuery.each( arguments, function( _, arg ) { + var index; + while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { + list.splice( index, 1 ); + + // Handle firing indexes + if ( index <= firingIndex ) { + firingIndex--; + } + } + } ); + return this; + }, + + // Check if a given callback is in the list. + // If no argument is given, return whether or not list has callbacks attached. + has: function( fn ) { + return fn ? + jQuery.inArray( fn, list ) > -1 : + list.length > 0; + }, + + // Remove all callbacks from the list + empty: function() { + if ( list ) { + list = []; + } + return this; + }, + + // Disable .fire and .add + // Abort any current/pending executions + // Clear all callbacks and values + disable: function() { + locked = queue = []; + list = memory = ""; + return this; + }, + disabled: function() { + return !list; + }, + + // Disable .fire + // Also disable .add unless we have memory (since it would have no effect) + // Abort any pending executions + lock: function() { + locked = queue = []; + if ( !memory && !firing ) { + list = memory = ""; + } + return this; + }, + locked: function() { + return !!locked; + }, + + // Call all callbacks with the given context and arguments + fireWith: function( context, args ) { + if ( !locked ) { + args = args || []; + args = [ context, args.slice ? 
args.slice() : args ]; + queue.push( args ); + if ( !firing ) { + fire(); + } + } + return this; + }, + + // Call all the callbacks with the given arguments + fire: function() { + self.fireWith( this, arguments ); + return this; + }, + + // To know if the callbacks have already been called at least once + fired: function() { + return !!fired; + } + }; + + return self; +}; + + +function Identity( v ) { + return v; +} +function Thrower( ex ) { + throw ex; +} + +function adoptValue( value, resolve, reject, noValue ) { + var method; + + try { + + // Check for promise aspect first to privilege synchronous behavior + if ( value && isFunction( ( method = value.promise ) ) ) { + method.call( value ).done( resolve ).fail( reject ); + + // Other thenables + } else if ( value && isFunction( ( method = value.then ) ) ) { + method.call( value, resolve, reject ); + + // Other non-thenables + } else { + + // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: + // * false: [ value ].slice( 0 ) => resolve( value ) + // * true: [ value ].slice( 1 ) => resolve() + resolve.apply( undefined, [ value ].slice( noValue ) ); + } + + // For Promises/A+, convert exceptions into rejections + // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in + // Deferred#then to conditionally suppress rejection. + } catch ( value ) { + + // Support: Android 4.0 only + // Strict mode functions invoked without .call/.apply get global-object context + reject.apply( undefined, [ value ] ); + } +} + +jQuery.extend( { + + Deferred: function( func ) { + var tuples = [ + + // action, add listener, callbacks, + // ... .then handlers, argument index, [final state] + [ "notify", "progress", jQuery.Callbacks( "memory" ), + jQuery.Callbacks( "memory" ), 2 ], + [ "resolve", "done", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 0, "resolved" ], + [ "reject", "fail", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 1, "rejected" ] + ], + state = "pending", + promise = { + state: function() { + return state; + }, + always: function() { + deferred.done( arguments ).fail( arguments ); + return this; + }, + "catch": function( fn ) { + return promise.then( null, fn ); + }, + + // Keep pipe for back-compat + pipe: function( /* fnDone, fnFail, fnProgress */ ) { + var fns = arguments; + + return jQuery.Deferred( function( newDefer ) { + jQuery.each( tuples, function( _i, tuple ) { + + // Map tuples (progress, done, fail) to arguments (done, fail, progress) + var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; + + // deferred.progress(function() { bind to newDefer or newDefer.notify }) + // deferred.done(function() { bind to newDefer or newDefer.resolve }) + // deferred.fail(function() { bind to newDefer or newDefer.reject }) + deferred[ tuple[ 1 ] ]( function() { + var returned = fn && fn.apply( this, arguments ); + if ( returned && isFunction( returned.promise ) ) { + returned.promise() + .progress( newDefer.notify ) + .done( newDefer.resolve ) + .fail( newDefer.reject ); + } else { + newDefer[ tuple[ 0 ] + "With" ]( + this, + fn ? 
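// ==== Illustrative sketch (editorial, not part of the vendored file) ====
// jQuery.Callbacks with the flags documented above; "once memory" is the
// combination the Deferred implementation below uses for its resolve and
// reject lists:
var callbacks = jQuery.Callbacks( "once memory" );
callbacks.add( function( msg ) { console.log( "first:", msg ); } );
callbacks.fire( "ready" );            // logs "first: ready", then locks firing
callbacks.add( function( msg ) {      // a late addition still runs: "memory"
	console.log( "late:", msg );       // replays the memorized arguments,
} );                                  // logging "late: ready"
callbacks.fire( "again" );            // no-op: "once" permits a single fire
// ========================================================================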
[ returned ] : arguments + ); + } + } ); + } ); + fns = null; + } ).promise(); + }, + then: function( onFulfilled, onRejected, onProgress ) { + var maxDepth = 0; + function resolve( depth, deferred, handler, special ) { + return function() { + var that = this, + args = arguments, + mightThrow = function() { + var returned, then; + + // Support: Promises/A+ section 2.3.3.3.3 + // https://promisesaplus.com/#point-59 + // Ignore double-resolution attempts + if ( depth < maxDepth ) { + return; + } + + returned = handler.apply( that, args ); + + // Support: Promises/A+ section 2.3.1 + // https://promisesaplus.com/#point-48 + if ( returned === deferred.promise() ) { + throw new TypeError( "Thenable self-resolution" ); + } + + // Support: Promises/A+ sections 2.3.3.1, 3.5 + // https://promisesaplus.com/#point-54 + // https://promisesaplus.com/#point-75 + // Retrieve `then` only once + then = returned && + + // Support: Promises/A+ section 2.3.4 + // https://promisesaplus.com/#point-64 + // Only check objects and functions for thenability + ( typeof returned === "object" || + typeof returned === "function" ) && + returned.then; + + // Handle a returned thenable + if ( isFunction( then ) ) { + + // Special processors (notify) just wait for resolution + if ( special ) { + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ) + ); + + // Normal processors (resolve) also hook into progress + } else { + + // ...and disregard older resolution values + maxDepth++; + + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ), + resolve( maxDepth, deferred, Identity, + deferred.notifyWith ) + ); + } + + // Handle all other returned values + } else { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Identity ) { + that = undefined; + args = [ returned ]; + } + + // Process the value(s) + // Default process is resolve + ( special || deferred.resolveWith )( that, args ); + } + }, + + // Only normal processors (resolve) catch and reject exceptions + process = special ? + mightThrow : + function() { + try { + mightThrow(); + } catch ( e ) { + + if ( jQuery.Deferred.exceptionHook ) { + jQuery.Deferred.exceptionHook( e, + process.stackTrace ); + } + + // Support: Promises/A+ section 2.3.3.3.4.1 + // https://promisesaplus.com/#point-61 + // Ignore post-resolution exceptions + if ( depth + 1 >= maxDepth ) { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Thrower ) { + that = undefined; + args = [ e ]; + } + + deferred.rejectWith( that, args ); + } + } + }; + + // Support: Promises/A+ section 2.3.3.3.1 + // https://promisesaplus.com/#point-57 + // Re-resolve promises immediately to dodge false rejection from + // subsequent errors + if ( depth ) { + process(); + } else { + + // Call an optional hook to record the stack, in case of exception + // since it's otherwise lost when execution goes async + if ( jQuery.Deferred.getStackHook ) { + process.stackTrace = jQuery.Deferred.getStackHook(); + } + window.setTimeout( process ); + } + }; + } + + return jQuery.Deferred( function( newDefer ) { + + // progress_handlers.add( ... ) + tuples[ 0 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onProgress ) ? + onProgress : + Identity, + newDefer.notifyWith + ) + ); + + // fulfilled_handlers.add( ... 
) + tuples[ 1 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onFulfilled ) ? + onFulfilled : + Identity + ) + ); + + // rejected_handlers.add( ... ) + tuples[ 2 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onRejected ) ? + onRejected : + Thrower + ) + ); + } ).promise(); + }, + + // Get a promise for this deferred + // If obj is provided, the promise aspect is added to the object + promise: function( obj ) { + return obj != null ? jQuery.extend( obj, promise ) : promise; + } + }, + deferred = {}; + + // Add list-specific methods + jQuery.each( tuples, function( i, tuple ) { + var list = tuple[ 2 ], + stateString = tuple[ 5 ]; + + // promise.progress = list.add + // promise.done = list.add + // promise.fail = list.add + promise[ tuple[ 1 ] ] = list.add; + + // Handle state + if ( stateString ) { + list.add( + function() { + + // state = "resolved" (i.e., fulfilled) + // state = "rejected" + state = stateString; + }, + + // rejected_callbacks.disable + // fulfilled_callbacks.disable + tuples[ 3 - i ][ 2 ].disable, + + // rejected_handlers.disable + // fulfilled_handlers.disable + tuples[ 3 - i ][ 3 ].disable, + + // progress_callbacks.lock + tuples[ 0 ][ 2 ].lock, + + // progress_handlers.lock + tuples[ 0 ][ 3 ].lock + ); + } + + // progress_handlers.fire + // fulfilled_handlers.fire + // rejected_handlers.fire + list.add( tuple[ 3 ].fire ); + + // deferred.notify = function() { deferred.notifyWith(...) } + // deferred.resolve = function() { deferred.resolveWith(...) } + // deferred.reject = function() { deferred.rejectWith(...) } + deferred[ tuple[ 0 ] ] = function() { + deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); + return this; + }; + + // deferred.notifyWith = list.fireWith + // deferred.resolveWith = list.fireWith + // deferred.rejectWith = list.fireWith + deferred[ tuple[ 0 ] + "With" ] = list.fireWith; + } ); + + // Make the deferred a promise + promise.promise( deferred ); + + // Call given func if any + if ( func ) { + func.call( deferred, deferred ); + } + + // All done! + return deferred; + }, + + // Deferred helper + when: function( singleValue ) { + var + + // count of uncompleted subordinates + remaining = arguments.length, + + // count of unprocessed arguments + i = remaining, + + // subordinate fulfillment data + resolveContexts = Array( i ), + resolveValues = slice.call( arguments ), + + // the primary Deferred + primary = jQuery.Deferred(), + + // subordinate callback factory + updateFunc = function( i ) { + return function( value ) { + resolveContexts[ i ] = this; + resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; + if ( !( --remaining ) ) { + primary.resolveWith( resolveContexts, resolveValues ); + } + }; + }; + + // Single- and empty arguments are adopted like Promise.resolve + if ( remaining <= 1 ) { + adoptValue( singleValue, primary.done( updateFunc( i ) ).resolve, primary.reject, + !remaining ); + + // Use .then() to unwrap secondary thenables (cf. gh-3000) + if ( primary.state() === "pending" || + isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { + + return primary.then(); + } + } + + // Multiple arguments are aggregated like Promise.all array elements + while ( i-- ) { + adoptValue( resolveValues[ i ], updateFunc( i ), primary.reject ); + } + + return primary.promise(); + } +} ); + + +// These usually indicate a programmer mistake during development, +// warn about them ASAP rather than swallowing them by default. 
+var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; + +jQuery.Deferred.exceptionHook = function( error, stack ) { + + // Support: IE 8 - 9 only + // Console exists when dev tools are open, which can happen at any time + if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { + window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); + } +}; + + + + +jQuery.readyException = function( error ) { + window.setTimeout( function() { + throw error; + } ); +}; + + + + +// The deferred used on DOM ready +var readyList = jQuery.Deferred(); + +jQuery.fn.ready = function( fn ) { + + readyList + .then( fn ) + + // Wrap jQuery.readyException in a function so that the lookup + // happens at the time of error handling instead of callback + // registration. + .catch( function( error ) { + jQuery.readyException( error ); + } ); + + return this; +}; + +jQuery.extend( { + + // Is the DOM ready to be used? Set to true once it occurs. + isReady: false, + + // A counter to track how many items to wait for before + // the ready event fires. See #6781 + readyWait: 1, + + // Handle when the DOM is ready + ready: function( wait ) { + + // Abort if there are pending holds or we're already ready + if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { + return; + } + + // Remember that the DOM is ready + jQuery.isReady = true; + + // If a normal DOM Ready event fired, decrement, and wait if need be + if ( wait !== true && --jQuery.readyWait > 0 ) { + return; + } + + // If there are functions bound, to execute + readyList.resolveWith( document, [ jQuery ] ); + } +} ); + +jQuery.ready.then = readyList.then; + +// The ready event handler and self cleanup method +function completed() { + document.removeEventListener( "DOMContentLoaded", completed ); + window.removeEventListener( "load", completed ); + jQuery.ready(); +} + +// Catch cases where $(document).ready() is called +// after the browser event has already occurred. +// Support: IE <=9 - 10 only +// Older IE sometimes signals "interactive" too soon +if ( document.readyState === "complete" || + ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { + + // Handle it asynchronously to allow scripts the opportunity to delay ready + window.setTimeout( jQuery.ready ); + +} else { + + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", completed ); + + // A fallback to window.onload, that will always work + window.addEventListener( "load", completed ); +} + + + + +// Multifunctional method to get and set values of a collection +// The value/s can optionally be executed if it's a function +var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { + var i = 0, + len = elems.length, + bulk = key == null; + + // Sets many values + if ( toType( key ) === "object" ) { + chainable = true; + for ( i in key ) { + access( elems, fn, i, key[ i ], true, emptyGet, raw ); + } + + // Sets one value + } else if ( value !== undefined ) { + chainable = true; + + if ( !isFunction( value ) ) { + raw = true; + } + + if ( bulk ) { + + // Bulk operations run against the entire set + if ( raw ) { + fn.call( elems, value ); + fn = null; + + // ...except when executing function values + } else { + bulk = fn; + fn = function( elem, _key, value ) { + return bulk.call( jQuery( elem ), value ); + }; + } + } + + if ( fn ) { + for ( ; i < len; i++ ) { + fn( + elems[ i ], key, raw ? 
+ value : + value.call( elems[ i ], i, fn( elems[ i ], key ) ) + ); + } + } + } + + if ( chainable ) { + return elems; + } + + // Gets + if ( bulk ) { + return fn.call( elems ); + } + + return len ? fn( elems[ 0 ], key ) : emptyGet; +}; + + +// Matches dashed string for camelizing +var rmsPrefix = /^-ms-/, + rdashAlpha = /-([a-z])/g; + +// Used by camelCase as callback to replace() +function fcamelCase( _all, letter ) { + return letter.toUpperCase(); +} + +// Convert dashed to camelCase; used by the css and data modules +// Support: IE <=9 - 11, Edge 12 - 15 +// Microsoft forgot to hump their vendor prefix (#9572) +function camelCase( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); +} +var acceptData = function( owner ) { + + // Accepts only: + // - Node + // - Node.ELEMENT_NODE + // - Node.DOCUMENT_NODE + // - Object + // - Any + return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); +}; + + + + +function Data() { + this.expando = jQuery.expando + Data.uid++; +} + +Data.uid = 1; + +Data.prototype = { + + cache: function( owner ) { + + // Check if the owner object already has a cache + var value = owner[ this.expando ]; + + // If not, create one + if ( !value ) { + value = {}; + + // We can accept data for non-element nodes in modern browsers, + // but we should not, see #8335. + // Always return an empty object. + if ( acceptData( owner ) ) { + + // If it is a node unlikely to be stringify-ed or looped over + // use plain assignment + if ( owner.nodeType ) { + owner[ this.expando ] = value; + + // Otherwise secure it in a non-enumerable property + // configurable must be true to allow the property to be + // deleted when data is removed + } else { + Object.defineProperty( owner, this.expando, { + value: value, + configurable: true + } ); + } + } + } + + return value; + }, + set: function( owner, data, value ) { + var prop, + cache = this.cache( owner ); + + // Handle: [ owner, key, value ] args + // Always use camelCase key (gh-2257) + if ( typeof data === "string" ) { + cache[ camelCase( data ) ] = value; + + // Handle: [ owner, { properties } ] args + } else { + + // Copy the properties one-by-one to the cache object + for ( prop in data ) { + cache[ camelCase( prop ) ] = data[ prop ]; + } + } + return cache; + }, + get: function( owner, key ) { + return key === undefined ? + this.cache( owner ) : + + // Always use camelCase key (gh-2257) + owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ]; + }, + access: function( owner, key, value ) { + + // In cases where either: + // + // 1. No key was specified + // 2. A string key was specified, but no value provided + // + // Take the "read" path and allow the get method to determine + // which value to return, respectively either: + // + // 1. The entire cache object + // 2. The data stored at the key + // + if ( key === undefined || + ( ( key && typeof key === "string" ) && value === undefined ) ) { + + return this.get( owner, key ); + } + + // When the key is not a string, or both a key and value + // are specified, set or extend (existing objects) with either: + // + // 1. An object of properties + // 2. A key and value + // + this.set( owner, key, value ); + + // Since the "set" path can have two possible entry points + // return the expected data based on which path was taken[*] + return value !== undefined ? 
value : key; + }, + remove: function( owner, key ) { + var i, + cache = owner[ this.expando ]; + + if ( cache === undefined ) { + return; + } + + if ( key !== undefined ) { + + // Support array or space separated string of keys + if ( Array.isArray( key ) ) { + + // If key is an array of keys... + // We always set camelCase keys, so remove that. + key = key.map( camelCase ); + } else { + key = camelCase( key ); + + // If a key with the spaces exists, use it. + // Otherwise, create an array by matching non-whitespace + key = key in cache ? + [ key ] : + ( key.match( rnothtmlwhite ) || [] ); + } + + i = key.length; + + while ( i-- ) { + delete cache[ key[ i ] ]; + } + } + + // Remove the expando if there's no more data + if ( key === undefined || jQuery.isEmptyObject( cache ) ) { + + // Support: Chrome <=35 - 45 + // Webkit & Blink performance suffers when deleting properties + // from DOM nodes, so set to undefined instead + // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) + if ( owner.nodeType ) { + owner[ this.expando ] = undefined; + } else { + delete owner[ this.expando ]; + } + } + }, + hasData: function( owner ) { + var cache = owner[ this.expando ]; + return cache !== undefined && !jQuery.isEmptyObject( cache ); + } +}; +var dataPriv = new Data(); + +var dataUser = new Data(); + + + +// Implementation Summary +// +// 1. Enforce API surface and semantic compatibility with 1.9.x branch +// 2. Improve the module's maintainability by reducing the storage +// paths to a single mechanism. +// 3. Use the same single mechanism to support "private" and "user" data. +// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) +// 5. Avoid exposing implementation details on user objects (eg. expando properties) +// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 + +var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, + rmultiDash = /[A-Z]/g; + +function getData( data ) { + if ( data === "true" ) { + return true; + } + + if ( data === "false" ) { + return false; + } + + if ( data === "null" ) { + return null; + } + + // Only convert to a number if it doesn't change the string + if ( data === +data + "" ) { + return +data; + } + + if ( rbrace.test( data ) ) { + return JSON.parse( data ); + } + + return data; +} + +function dataAttr( elem, key, data ) { + var name; + + // If nothing was found internally, try to fetch any + // data from the HTML5 data-* attribute + if ( data === undefined && elem.nodeType === 1 ) { + name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); + data = elem.getAttribute( name ); + + if ( typeof data === "string" ) { + try { + data = getData( data ); + } catch ( e ) {} + + // Make sure we set the data so it isn't changed later + dataUser.set( elem, key, data ); + } else { + data = undefined; + } + } + return data; +} + +jQuery.extend( { + hasData: function( elem ) { + return dataUser.hasData( elem ) || dataPriv.hasData( elem ); + }, + + data: function( elem, name, data ) { + return dataUser.access( elem, name, data ); + }, + + removeData: function( elem, name ) { + dataUser.remove( elem, name ); + }, + + // TODO: Now that all calls to _data and _removeData have been replaced + // with direct calls to dataPriv methods, these can be deprecated. 
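+	//
+	// Illustration (editor's sketch, not part of the jQuery sources):
+	// dataPriv backs internal state such as event handlers and queues, while
+	// dataUser backs the public .data() API, so the two never collide:
+	//
+	//     jQuery.data( elem, "k", 1 );    // dataUser: visible via .data()
+	//     jQuery._data( elem, "k", 2 );   // dataPriv: internal bookkeeping only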
+ _data: function( elem, name, data ) { + return dataPriv.access( elem, name, data ); + }, + + _removeData: function( elem, name ) { + dataPriv.remove( elem, name ); + } +} ); + +jQuery.fn.extend( { + data: function( key, value ) { + var i, name, data, + elem = this[ 0 ], + attrs = elem && elem.attributes; + + // Gets all values + if ( key === undefined ) { + if ( this.length ) { + data = dataUser.get( elem ); + + if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { + i = attrs.length; + while ( i-- ) { + + // Support: IE 11 only + // The attrs elements can be null (#14894) + if ( attrs[ i ] ) { + name = attrs[ i ].name; + if ( name.indexOf( "data-" ) === 0 ) { + name = camelCase( name.slice( 5 ) ); + dataAttr( elem, name, data[ name ] ); + } + } + } + dataPriv.set( elem, "hasDataAttrs", true ); + } + } + + return data; + } + + // Sets multiple values + if ( typeof key === "object" ) { + return this.each( function() { + dataUser.set( this, key ); + } ); + } + + return access( this, function( value ) { + var data; + + // The calling jQuery object (element matches) is not empty + // (and therefore has an element appears at this[ 0 ]) and the + // `value` parameter was not undefined. An empty jQuery object + // will result in `undefined` for elem = this[ 0 ] which will + // throw an exception if an attempt to read a data cache is made. + if ( elem && value === undefined ) { + + // Attempt to get data from the cache + // The key will always be camelCased in Data + data = dataUser.get( elem, key ); + if ( data !== undefined ) { + return data; + } + + // Attempt to "discover" the data in + // HTML5 custom data-* attrs + data = dataAttr( elem, key ); + if ( data !== undefined ) { + return data; + } + + // We tried really hard, but the data doesn't exist. + return; + } + + // Set the data... 
+ this.each( function() { + + // We always store the camelCased key + dataUser.set( this, key, value ); + } ); + }, null, value, arguments.length > 1, null, true ); + }, + + removeData: function( key ) { + return this.each( function() { + dataUser.remove( this, key ); + } ); + } +} ); + + +jQuery.extend( { + queue: function( elem, type, data ) { + var queue; + + if ( elem ) { + type = ( type || "fx" ) + "queue"; + queue = dataPriv.get( elem, type ); + + // Speed up dequeue by getting out quickly if this is just a lookup + if ( data ) { + if ( !queue || Array.isArray( data ) ) { + queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); + } else { + queue.push( data ); + } + } + return queue || []; + } + }, + + dequeue: function( elem, type ) { + type = type || "fx"; + + var queue = jQuery.queue( elem, type ), + startLength = queue.length, + fn = queue.shift(), + hooks = jQuery._queueHooks( elem, type ), + next = function() { + jQuery.dequeue( elem, type ); + }; + + // If the fx queue is dequeued, always remove the progress sentinel + if ( fn === "inprogress" ) { + fn = queue.shift(); + startLength--; + } + + if ( fn ) { + + // Add a progress sentinel to prevent the fx queue from being + // automatically dequeued + if ( type === "fx" ) { + queue.unshift( "inprogress" ); + } + + // Clear up the last queue stop function + delete hooks.stop; + fn.call( elem, next, hooks ); + } + + if ( !startLength && hooks ) { + hooks.empty.fire(); + } + }, + + // Not public - generate a queueHooks object, or return the current one + _queueHooks: function( elem, type ) { + var key = type + "queueHooks"; + return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { + empty: jQuery.Callbacks( "once memory" ).add( function() { + dataPriv.remove( elem, [ type + "queue", key ] ); + } ) + } ); + } +} ); + +jQuery.fn.extend( { + queue: function( type, data ) { + var setter = 2; + + if ( typeof type !== "string" ) { + data = type; + type = "fx"; + setter--; + } + + if ( arguments.length < setter ) { + return jQuery.queue( this[ 0 ], type ); + } + + return data === undefined ? 
+ this : + this.each( function() { + var queue = jQuery.queue( this, type, data ); + + // Ensure a hooks for this queue + jQuery._queueHooks( this, type ); + + if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { + jQuery.dequeue( this, type ); + } + } ); + }, + dequeue: function( type ) { + return this.each( function() { + jQuery.dequeue( this, type ); + } ); + }, + clearQueue: function( type ) { + return this.queue( type || "fx", [] ); + }, + + // Get a promise resolved when queues of a certain type + // are emptied (fx is the type by default) + promise: function( type, obj ) { + var tmp, + count = 1, + defer = jQuery.Deferred(), + elements = this, + i = this.length, + resolve = function() { + if ( !( --count ) ) { + defer.resolveWith( elements, [ elements ] ); + } + }; + + if ( typeof type !== "string" ) { + obj = type; + type = undefined; + } + type = type || "fx"; + + while ( i-- ) { + tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); + if ( tmp && tmp.empty ) { + count++; + tmp.empty.add( resolve ); + } + } + resolve(); + return defer.promise( obj ); + } +} ); +var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; + +var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); + + +var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; + +var documentElement = document.documentElement; + + + + var isAttached = function( elem ) { + return jQuery.contains( elem.ownerDocument, elem ); + }, + composed = { composed: true }; + + // Support: IE 9 - 11+, Edge 12 - 18+, iOS 10.0 - 10.2 only + // Check attachment across shadow DOM boundaries when possible (gh-3504) + // Support: iOS 10.0-10.2 only + // Early iOS 10 versions support `attachShadow` but not `getRootNode`, + // leading to errors. We need to check for `getRootNode`. + if ( documentElement.getRootNode ) { + isAttached = function( elem ) { + return jQuery.contains( elem.ownerDocument, elem ) || + elem.getRootNode( composed ) === elem.ownerDocument; + }; + } +var isHiddenWithinTree = function( elem, el ) { + + // isHiddenWithinTree might be called from jQuery#filter function; + // in that case, element will be second argument + elem = el || elem; + + // Inline style trumps all + return elem.style.display === "none" || + elem.style.display === "" && + + // Otherwise, check computed style + // Support: Firefox <=43 - 45 + // Disconnected elements can have computed display: none, so first confirm that elem is + // in the document. + isAttached( elem ) && + + jQuery.css( elem, "display" ) === "none"; + }; + + + +function adjustCSS( elem, prop, valueParts, tween ) { + var adjusted, scale, + maxIterations = 20, + currentValue = tween ? + function() { + return tween.cur(); + } : + function() { + return jQuery.css( elem, prop, "" ); + }, + initial = currentValue(), + unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ), + + // Starting value computation is required for potential unit mismatches + initialInUnit = elem.nodeType && + ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && + rcssNum.exec( jQuery.css( elem, prop ) ); + + if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { + + // Support: Firefox <=54 + // Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144) + initial = initial / 2; + + // Trust units reported by jQuery.css + unit = unit || initialInUnit[ 3 ]; + + // Iteratively approximate from a nonzero starting point + initialInUnit = +initial || 1; + + while ( maxIterations-- ) { + + // Evaluate and update our best guess (doubling guesses that zero out). + // Finish if the scale equals or crosses 1 (making the old*new product non-positive). + jQuery.style( elem, prop, initialInUnit + unit ); + if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) { + maxIterations = 0; + } + initialInUnit = initialInUnit / scale; + + } + + initialInUnit = initialInUnit * 2; + jQuery.style( elem, prop, initialInUnit + unit ); + + // Make sure we update the tween properties later on + valueParts = valueParts || []; + } + + if ( valueParts ) { + initialInUnit = +initialInUnit || +initial || 0; + + // Apply relative offset (+=/-=) if specified + adjusted = valueParts[ 1 ] ? + initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : + +valueParts[ 2 ]; + if ( tween ) { + tween.unit = unit; + tween.start = initialInUnit; + tween.end = adjusted; + } + } + return adjusted; +} + + +var defaultDisplayMap = {}; + +function getDefaultDisplay( elem ) { + var temp, + doc = elem.ownerDocument, + nodeName = elem.nodeName, + display = defaultDisplayMap[ nodeName ]; + + if ( display ) { + return display; + } + + temp = doc.body.appendChild( doc.createElement( nodeName ) ); + display = jQuery.css( temp, "display" ); + + temp.parentNode.removeChild( temp ); + + if ( display === "none" ) { + display = "block"; + } + defaultDisplayMap[ nodeName ] = display; + + return display; +} + +function showHide( elements, show ) { + var display, elem, + values = [], + index = 0, + length = elements.length; + + // Determine new display value for elements that need to change + for ( ; index < length; index++ ) { + elem = elements[ index ]; + if ( !elem.style ) { + continue; + } + + display = elem.style.display; + if ( show ) { + + // Since we force visibility upon cascade-hidden elements, an immediate (and slow) + // check is required in this first loop unless we have a nonempty display value (either + // inline or about-to-be-restored) + if ( display === "none" ) { + values[ index ] = dataPriv.get( elem, "display" ) || null; + if ( !values[ index ] ) { + elem.style.display = ""; + } + } + if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { + values[ index ] = getDefaultDisplay( elem ); + } + } else { + if ( display !== "none" ) { + values[ index ] = "none"; + + // Remember what we're overwriting + dataPriv.set( elem, "display", display ); + } + } + } + + // Set the display of the elements in a second loop to avoid constant reflow + for ( index = 0; index < length; index++ ) { + if ( values[ index ] != null ) { + elements[ index ].style.display = values[ index ]; + } + } + + return elements; +} + +jQuery.fn.extend( { + show: function() { + return showHide( this, true ); + }, + hide: function() { + return showHide( this ); + }, + toggle: function( state ) { + if ( typeof state === "boolean" ) { + return state ? 
+			this.show() : this.hide();
+		}
+
+		return this.each( function() {
+			if ( isHiddenWithinTree( this ) ) {
+				jQuery( this ).show();
+			} else {
+				jQuery( this ).hide();
+			}
+		} );
+	}
+} );
+var rcheckableType = ( /^(?:checkbox|radio)$/i );
+
+var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]*)/i );
+
+var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i );
+
+
+
+( function() {
+	var fragment = document.createDocumentFragment(),
+		div = fragment.appendChild( document.createElement( "div" ) ),
+		input = document.createElement( "input" );
+
+	// Support: Android 4.0 - 4.3 only
+	// Check state lost if the name is set (#11217)
+	// Support: Windows Web Apps (WWA)
+	// `name` and `type` must use .setAttribute for WWA (#14901)
+	input.setAttribute( "type", "radio" );
+	input.setAttribute( "checked", "checked" );
+	input.setAttribute( "name", "t" );
+
+	div.appendChild( input );
+
+	// Support: Android <=4.1 only
+	// Older WebKit doesn't clone checked state correctly in fragments
+	support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked;
+
+	// Support: IE <=11 only
+	// Make sure textarea (and checkbox) defaultValue is properly cloned
+	div.innerHTML = "<textarea>x</textarea>";
+	support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue;
+
+	// Support: IE <=9 only
+	// IE <=9 replaces <option> tags with their contents when inserted outside of
+	// the select element.
+	div.innerHTML = "<option></option>";
+	support.option = !!div.lastChild;
+} )();
+
+
+// We have to close these tags to support XHTML (#13200)
+var wrapMap = {
+
+	// XHTML parsers do not magically insert elements in the
+	// same way that tag soup parsers do. So we cannot shorten
+	// this by omitting <tbody> or other required elements.
+	thead: [ 1, "<table>", "</table>" ],
+	col: [ 2, "<table><colgroup>", "</colgroup></table>" ],
+	tr: [ 2, "<table><tbody>", "</tbody></table>" ],
+	td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ],
+
+	_default: [ 0, "", "" ]
+};
+
+wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead;
+wrapMap.th = wrapMap.td;
+
+// Support: IE <=9 only
+if ( !support.option ) {
+	wrapMap.optgroup = wrapMap.option = [ 1, "<select multiple='multiple'>", "</select>" ];
+}
+
+
+function getAll( context, tag ) {
+
+	// Support: IE <=9 - 11 only
+	// Use typeof to avoid zero-argument method invocation on host objects (#15151)
+	var ret;
+
+	if ( typeof context.getElementsByTagName !== "undefined" ) {
+		ret = context.getElementsByTagName( tag || "*" );
+
+	} else if ( typeof context.querySelectorAll !== "undefined" ) {
+		ret = context.querySelectorAll( tag || "*" );
+
+	} else {
+		ret = [];
+	}
+
+	if ( tag === undefined || tag && nodeName( context, tag ) ) {
+		return jQuery.merge( [ context ], ret );
+	}
+
+	return ret;
+}
+
+
+// Mark scripts as having already been evaluated
+function setGlobalEval( elems, refElements ) {
+	var i = 0,
+		l = elems.length;
+
+	for ( ; i < l; i++ ) {
+		dataPriv.set(
+			elems[ i ],
+			"globalEval",
+			!refElements || dataPriv.get( refElements[ i ], "globalEval" )
+		);
+	}
+}
+
+
+var rhtml = /<|&#?\w+;/;
+
+function buildFragment( elems, context, scripts, selection, ignored ) {
+	var elem, tmp, tag, wrap, attached, j,
+		fragment = context.createDocumentFragment(),
+		nodes = [],
+		i = 0,
+		l = elems.length;
+
+	for ( ; i < l; i++ ) {
+		elem = elems[ i ];
+
+		if ( elem || elem === 0 ) {
+
+			// Add nodes directly
+			if ( toType( elem ) === "object" ) {
+
+				// Support: Android <=4.0 only, PhantomJS 1 only
+				// push.apply(_, arraylike) throws on ancient WebKit
+				jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem );
+
+			// Convert non-html into a text node
+			} else if ( !rhtml.test( elem ) ) {
+				nodes.push( context.createTextNode( elem ) );
+
+			// Convert html into DOM nodes
+			} else {
+				tmp = tmp || fragment.appendChild( context.createElement( "div" ) );
+
+				// Deserialize a standard representation
+				tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase();
+				wrap = wrapMap[ tag ] || wrapMap._default;
+				tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ];
+
+				// Descend through wrappers to the right content
+				j = wrap[ 0 ];
+				while ( j-- ) {
+					tmp = tmp.lastChild;
+				}
+
+				// Support: Android <=4.0 only, PhantomJS 1 only
+				// push.apply(_, arraylike) throws on ancient WebKit
+				jQuery.merge( nodes, tmp.childNodes );
+
+				// Remember the top-level container
+				tmp = fragment.firstChild;
+
+				// Ensure the created nodes are orphaned (#12392)
+				tmp.textContent = "";
+			}
+		}
+	}
+
+	// Remove wrapper from fragment
+	fragment.textContent = "";
+
+	i = 0;
+	while ( ( elem = nodes[ i++ ] ) ) {
+
+		// Skip elements already in the context collection (trac-4087)
+		if ( selection && jQuery.inArray( elem, selection ) > -1 ) {
+			if ( ignored ) {
+				ignored.push( elem );
+			}
+			continue;
+		}
+
+		attached = isAttached( elem );
+
+		// Append to fragment
+		tmp = getAll( fragment.appendChild( elem ), "script" );
+
+		// Preserve script evaluation history
+		if ( attached ) {
+			setGlobalEval( tmp );
+		}
+
+		// Capture executables
+		if ( scripts ) {
+			j = 0;
+			while ( ( elem = tmp[ j++ ] ) ) {
+				if ( rscriptType.test( elem.type || "" ) ) {
+					scripts.push( elem );
+				}
+			}
+		}
+	}
+
+	return fragment;
+}
+
+
+var rtypenamespace = /^([^.]*)(?:\.(.+)|)/;
+
+function returnTrue() {
+	return true;
+}
+
+function returnFalse() {
+	return false;
+}
+
+// Support: IE <=9 - 11+
+// focus() and blur() are asynchronous, except when they are no-op.
+// So expect focus to be synchronous when the element is already active, +// and blur to be synchronous when the element is not already active. +// (focus and blur are always synchronous in other supported browsers, +// this just defines when we can count on it). +function expectSync( elem, type ) { + return ( elem === safeActiveElement() ) === ( type === "focus" ); +} + +// Support: IE <=9 only +// Accessing document.activeElement can throw unexpectedly +// https://bugs.jquery.com/ticket/13393 +function safeActiveElement() { + try { + return document.activeElement; + } catch ( err ) { } +} + +function on( elem, types, selector, data, fn, one ) { + var origFn, type; + + // Types can be a map of types/handlers + if ( typeof types === "object" ) { + + // ( types-Object, selector, data ) + if ( typeof selector !== "string" ) { + + // ( types-Object, data ) + data = data || selector; + selector = undefined; + } + for ( type in types ) { + on( elem, type, selector, data, types[ type ], one ); + } + return elem; + } + + if ( data == null && fn == null ) { + + // ( types, fn ) + fn = selector; + data = selector = undefined; + } else if ( fn == null ) { + if ( typeof selector === "string" ) { + + // ( types, selector, fn ) + fn = data; + data = undefined; + } else { + + // ( types, data, fn ) + fn = data; + data = selector; + selector = undefined; + } + } + if ( fn === false ) { + fn = returnFalse; + } else if ( !fn ) { + return elem; + } + + if ( one === 1 ) { + origFn = fn; + fn = function( event ) { + + // Can use an empty set, since event contains the info + jQuery().off( event ); + return origFn.apply( this, arguments ); + }; + + // Use same guid so caller can remove using origFn + fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); + } + return elem.each( function() { + jQuery.event.add( this, types, fn, data, selector ); + } ); +} + +/* + * Helper functions for managing events -- not part of the public interface. + * Props to Dean Edwards' addEvent library for many of the ideas. + */ +jQuery.event = { + + global: {}, + + add: function( elem, types, handler, data, selector ) { + + var handleObjIn, eventHandle, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.get( elem ); + + // Only attach events to objects that accept data + if ( !acceptData( elem ) ) { + return; + } + + // Caller can pass in an object of custom data in lieu of the handler + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + selector = handleObjIn.selector; + } + + // Ensure that invalid selectors throw exceptions at attach time + // Evaluate against documentElement in case elem is a non-element node (e.g., document) + if ( selector ) { + jQuery.find.matchesSelector( documentElement, selector ); + } + + // Make sure that the handler has a unique ID, used to find/remove it later + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure and main handler, if this is the first + if ( !( events = elemData.events ) ) { + events = elemData.events = Object.create( null ); + } + if ( !( eventHandle = elemData.handle ) ) { + eventHandle = elemData.handle = function( e ) { + + // Discard the second event of a jQuery.event.trigger() and + // when an event is called after a page has unloaded + return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? 
+ jQuery.event.dispatch.apply( elem, arguments ) : undefined; + }; + } + + // Handle multiple events separated by a space + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // There *must* be a type, no attaching namespace-only handlers + if ( !type ) { + continue; + } + + // If event changes its type, use the special event handlers for the changed type + special = jQuery.event.special[ type ] || {}; + + // If selector defined, determine special event api type, otherwise given type + type = ( selector ? special.delegateType : special.bindType ) || type; + + // Update special based on newly reset type + special = jQuery.event.special[ type ] || {}; + + // handleObj is passed to all event handlers + handleObj = jQuery.extend( { + type: type, + origType: origType, + data: data, + handler: handler, + guid: handler.guid, + selector: selector, + needsContext: selector && jQuery.expr.match.needsContext.test( selector ), + namespace: namespaces.join( "." ) + }, handleObjIn ); + + // Init the event handler queue if we're the first + if ( !( handlers = events[ type ] ) ) { + handlers = events[ type ] = []; + handlers.delegateCount = 0; + + // Only use addEventListener if the special events handler returns false + if ( !special.setup || + special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add to the element's handler list, delegates in front + if ( selector ) { + handlers.splice( handlers.delegateCount++, 0, handleObj ); + } else { + handlers.push( handleObj ); + } + + // Keep track of which events have ever been used, for event optimization + jQuery.event.global[ type ] = true; + } + + }, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, selector, mappedTypes ) { + + var j, origCount, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); + + if ( !elemData || !( events = elemData.events ) ) { + return; + } + + // Once for each type.namespace in types; type may be omitted + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // Unbind all events (on this namespace, if provided) for the element + if ( !type ) { + for ( type in events ) { + jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); + } + continue; + } + + special = jQuery.event.special[ type ] || {}; + type = ( selector ? 
special.delegateType : special.bindType ) || type; + handlers = events[ type ] || []; + tmp = tmp[ 2 ] && + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); + + // Remove matching events + origCount = j = handlers.length; + while ( j-- ) { + handleObj = handlers[ j ]; + + if ( ( mappedTypes || origType === handleObj.origType ) && + ( !handler || handler.guid === handleObj.guid ) && + ( !tmp || tmp.test( handleObj.namespace ) ) && + ( !selector || selector === handleObj.selector || + selector === "**" && handleObj.selector ) ) { + handlers.splice( j, 1 ); + + if ( handleObj.selector ) { + handlers.delegateCount--; + } + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + } + + // Remove generic event handler if we removed something and no more handlers exist + // (avoids potential for endless recursion during removal of special event handlers) + if ( origCount && !handlers.length ) { + if ( !special.teardown || + special.teardown.call( elem, namespaces, elemData.handle ) === false ) { + + jQuery.removeEvent( elem, type, elemData.handle ); + } + + delete events[ type ]; + } + } + + // Remove data and the expando if it's no longer used + if ( jQuery.isEmptyObject( events ) ) { + dataPriv.remove( elem, "handle events" ); + } + }, + + dispatch: function( nativeEvent ) { + + var i, j, ret, matched, handleObj, handlerQueue, + args = new Array( arguments.length ), + + // Make a writable jQuery.Event from the native event object + event = jQuery.event.fix( nativeEvent ), + + handlers = ( + dataPriv.get( this, "events" ) || Object.create( null ) + )[ event.type ] || [], + special = jQuery.event.special[ event.type ] || {}; + + // Use the fix-ed jQuery.Event rather than the (read-only) native event + args[ 0 ] = event; + + for ( i = 1; i < arguments.length; i++ ) { + args[ i ] = arguments[ i ]; + } + + event.delegateTarget = this; + + // Call the preDispatch hook for the mapped type, and let it bail if desired + if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { + return; + } + + // Determine handlers + handlerQueue = jQuery.event.handlers.call( this, event, handlers ); + + // Run delegates first; they may want to stop propagation beneath us + i = 0; + while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { + event.currentTarget = matched.elem; + + j = 0; + while ( ( handleObj = matched.handlers[ j++ ] ) && + !event.isImmediatePropagationStopped() ) { + + // If the event is namespaced, then each handler is only invoked if it is + // specially universal or its namespaces are a superset of the event's. 
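+				// Illustration (editor's sketch, not part of the jQuery sources):
+				//
+				//     jQuery( el ).on( "click.menu", handler );
+				//     jQuery( el ).trigger( "click.menu" );   // invokes handler
+				//     jQuery( el ).trigger( "click.other" );  // does not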
+ if ( !event.rnamespace || handleObj.namespace === false || + event.rnamespace.test( handleObj.namespace ) ) { + + event.handleObj = handleObj; + event.data = handleObj.data; + + ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || + handleObj.handler ).apply( matched.elem, args ); + + if ( ret !== undefined ) { + if ( ( event.result = ret ) === false ) { + event.preventDefault(); + event.stopPropagation(); + } + } + } + } + } + + // Call the postDispatch hook for the mapped type + if ( special.postDispatch ) { + special.postDispatch.call( this, event ); + } + + return event.result; + }, + + handlers: function( event, handlers ) { + var i, handleObj, sel, matchedHandlers, matchedSelectors, + handlerQueue = [], + delegateCount = handlers.delegateCount, + cur = event.target; + + // Find delegate handlers + if ( delegateCount && + + // Support: IE <=9 + // Black-hole SVG instance trees (trac-13180) + cur.nodeType && + + // Support: Firefox <=42 + // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) + // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click + // Support: IE 11 only + // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) + !( event.type === "click" && event.button >= 1 ) ) { + + for ( ; cur !== this; cur = cur.parentNode || this ) { + + // Don't check non-elements (#13208) + // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) + if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { + matchedHandlers = []; + matchedSelectors = {}; + for ( i = 0; i < delegateCount; i++ ) { + handleObj = handlers[ i ]; + + // Don't conflict with Object.prototype properties (#13203) + sel = handleObj.selector + " "; + + if ( matchedSelectors[ sel ] === undefined ) { + matchedSelectors[ sel ] = handleObj.needsContext ? + jQuery( sel, this ).index( cur ) > -1 : + jQuery.find( sel, this, null, [ cur ] ).length; + } + if ( matchedSelectors[ sel ] ) { + matchedHandlers.push( handleObj ); + } + } + if ( matchedHandlers.length ) { + handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); + } + } + } + } + + // Add the remaining (directly-bound) handlers + cur = this; + if ( delegateCount < handlers.length ) { + handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); + } + + return handlerQueue; + }, + + addProp: function( name, hook ) { + Object.defineProperty( jQuery.Event.prototype, name, { + enumerable: true, + configurable: true, + + get: isFunction( hook ) ? + function() { + if ( this.originalEvent ) { + return hook( this.originalEvent ); + } + } : + function() { + if ( this.originalEvent ) { + return this.originalEvent[ name ]; + } + }, + + set: function( value ) { + Object.defineProperty( this, name, { + enumerable: true, + configurable: true, + writable: true, + value: value + } ); + } + } ); + }, + + fix: function( originalEvent ) { + return originalEvent[ jQuery.expando ] ? + originalEvent : + new jQuery.Event( originalEvent ); + }, + + special: { + load: { + + // Prevent triggered image.load events from bubbling to window.load + noBubble: true + }, + click: { + + // Utilize native event to ensure correct state for checkable inputs + setup: function( data ) { + + // For mutual compressibility with _default, replace `this` access with a local var. + // `|| data` is dead code meant only to preserve the variable through minification. 
+ var el = this || data; + + // Claim the first handler + if ( rcheckableType.test( el.type ) && + el.click && nodeName( el, "input" ) ) { + + // dataPriv.set( el, "click", ... ) + leverageNative( el, "click", returnTrue ); + } + + // Return false to allow normal processing in the caller + return false; + }, + trigger: function( data ) { + + // For mutual compressibility with _default, replace `this` access with a local var. + // `|| data` is dead code meant only to preserve the variable through minification. + var el = this || data; + + // Force setup before triggering a click + if ( rcheckableType.test( el.type ) && + el.click && nodeName( el, "input" ) ) { + + leverageNative( el, "click" ); + } + + // Return non-false to allow normal event-path propagation + return true; + }, + + // For cross-browser consistency, suppress native .click() on links + // Also prevent it if we're currently inside a leveraged native-event stack + _default: function( event ) { + var target = event.target; + return rcheckableType.test( target.type ) && + target.click && nodeName( target, "input" ) && + dataPriv.get( target, "click" ) || + nodeName( target, "a" ); + } + }, + + beforeunload: { + postDispatch: function( event ) { + + // Support: Firefox 20+ + // Firefox doesn't alert if the returnValue field is not set. + if ( event.result !== undefined && event.originalEvent ) { + event.originalEvent.returnValue = event.result; + } + } + } + } +}; + +// Ensure the presence of an event listener that handles manually-triggered +// synthetic events by interrupting progress until reinvoked in response to +// *native* events that it fires directly, ensuring that state changes have +// already occurred before other listeners are invoked. +function leverageNative( el, type, expectSync ) { + + // Missing expectSync indicates a trigger call, which must force setup through jQuery.event.add + if ( !expectSync ) { + if ( dataPriv.get( el, type ) === undefined ) { + jQuery.event.add( el, type, returnTrue ); + } + return; + } + + // Register the controller as a special universal handler for all event namespaces + dataPriv.set( el, type, false ); + jQuery.event.add( el, type, { + namespace: false, + handler: function( event ) { + var notAsync, result, + saved = dataPriv.get( this, type ); + + if ( ( event.isTrigger & 1 ) && this[ type ] ) { + + // Interrupt processing of the outer synthetic .trigger()ed event + // Saved data should be false in such cases, but might be a leftover capture object + // from an async native handler (gh-4350) + if ( !saved.length ) { + + // Store arguments for use when handling the inner native event + // There will always be at least one argument (an event object), so this array + // will not be confused with a leftover capture object. + saved = slice.call( arguments ); + dataPriv.set( this, type, saved ); + + // Trigger the native event and capture its result + // Support: IE <=9 - 11+ + // focus() and blur() are asynchronous + notAsync = expectSync( this, type ); + this[ type ](); + result = dataPriv.get( this, type ); + if ( saved !== result || notAsync ) { + dataPriv.set( this, type, false ); + } else { + result = {}; + } + if ( saved !== result ) { + + // Cancel the outer synthetic event + event.stopImmediatePropagation(); + event.preventDefault(); + + // Support: Chrome 86+ + // In Chrome, if an element having a focusout handler is blurred by + // clicking outside of it, it invokes the handler synchronously. 
If + // that handler calls `.remove()` on the element, the data is cleared, + // leaving `result` undefined. We need to guard against this. + return result && result.value; + } + + // If this is an inner synthetic event for an event with a bubbling surrogate + // (focus or blur), assume that the surrogate already propagated from triggering the + // native event and prevent that from happening again here. + // This technically gets the ordering wrong w.r.t. to `.trigger()` (in which the + // bubbling surrogate propagates *after* the non-bubbling base), but that seems + // less bad than duplication. + } else if ( ( jQuery.event.special[ type ] || {} ).delegateType ) { + event.stopPropagation(); + } + + // If this is a native event triggered above, everything is now in order + // Fire an inner synthetic event with the original arguments + } else if ( saved.length ) { + + // ...and capture the result + dataPriv.set( this, type, { + value: jQuery.event.trigger( + + // Support: IE <=9 - 11+ + // Extend with the prototype to reset the above stopImmediatePropagation() + jQuery.extend( saved[ 0 ], jQuery.Event.prototype ), + saved.slice( 1 ), + this + ) + } ); + + // Abort handling of the native event + event.stopImmediatePropagation(); + } + } + } ); +} + +jQuery.removeEvent = function( elem, type, handle ) { + + // This "if" is needed for plain objects + if ( elem.removeEventListener ) { + elem.removeEventListener( type, handle ); + } +}; + +jQuery.Event = function( src, props ) { + + // Allow instantiation without the 'new' keyword + if ( !( this instanceof jQuery.Event ) ) { + return new jQuery.Event( src, props ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + + // Events bubbling up the document may have been marked as prevented + // by a handler lower down the tree; reflect the correct value. + this.isDefaultPrevented = src.defaultPrevented || + src.defaultPrevented === undefined && + + // Support: Android <=2.3 only + src.returnValue === false ? + returnTrue : + returnFalse; + + // Create target properties + // Support: Safari <=6 - 7 only + // Target should not be a text node (#504, #13143) + this.target = ( src.target && src.target.nodeType === 3 ) ? 
+ src.target.parentNode : + src.target; + + this.currentTarget = src.currentTarget; + this.relatedTarget = src.relatedTarget; + + // Event type + } else { + this.type = src; + } + + // Put explicitly provided properties onto the event object + if ( props ) { + jQuery.extend( this, props ); + } + + // Create a timestamp if incoming event doesn't have one + this.timeStamp = src && src.timeStamp || Date.now(); + + // Mark it as fixed + this[ jQuery.expando ] = true; +}; + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + constructor: jQuery.Event, + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse, + isSimulated: false, + + preventDefault: function() { + var e = this.originalEvent; + + this.isDefaultPrevented = returnTrue; + + if ( e && !this.isSimulated ) { + e.preventDefault(); + } + }, + stopPropagation: function() { + var e = this.originalEvent; + + this.isPropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopPropagation(); + } + }, + stopImmediatePropagation: function() { + var e = this.originalEvent; + + this.isImmediatePropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopImmediatePropagation(); + } + + this.stopPropagation(); + } +}; + +// Includes all common event props including KeyEvent and MouseEvent specific props +jQuery.each( { + altKey: true, + bubbles: true, + cancelable: true, + changedTouches: true, + ctrlKey: true, + detail: true, + eventPhase: true, + metaKey: true, + pageX: true, + pageY: true, + shiftKey: true, + view: true, + "char": true, + code: true, + charCode: true, + key: true, + keyCode: true, + button: true, + buttons: true, + clientX: true, + clientY: true, + offsetX: true, + offsetY: true, + pointerId: true, + pointerType: true, + screenX: true, + screenY: true, + targetTouches: true, + toElement: true, + touches: true, + which: true +}, jQuery.event.addProp ); + +jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateType ) { + jQuery.event.special[ type ] = { + + // Utilize native event if possible so blur/focus sequence is correct + setup: function() { + + // Claim the first handler + // dataPriv.set( this, "focus", ... ) + // dataPriv.set( this, "blur", ... ) + leverageNative( this, type, expectSync ); + + // Return false to allow normal processing in the caller + return false; + }, + trigger: function() { + + // Force setup before trigger + leverageNative( this, type ); + + // Return non-false to allow normal event-path propagation + return true; + }, + + // Suppress native focus or blur as it's already being fired + // in leverageNative. + _default: function() { + return true; + }, + + delegateType: delegateType + }; +} ); + +// Create mouseenter/leave events using mouseover/out and event-time checks +// so that event delegation works in jQuery. +// Do the same for pointerenter/pointerleave and pointerover/pointerout +// +// Support: Safari 7 only +// Safari sends mouseenter too often; see: +// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 +// for the description of the bug (it existed in older Chrome versions as well). 
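+//
+// Illustration (editor's sketch, not part of the jQuery sources): remapping
+// mouseenter to mouseover is what makes the delegated variant work at all,
+// since mouseenter itself does not bubble:
+//
+//     jQuery( document ).on( "mouseenter", ".row", function() {
+//         // actually bound as a "mouseover" listener; the handle() below
+//         // filters out mouseover events originating inside the row
+//     } );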
+jQuery.each( {
+	mouseenter: "mouseover",
+	mouseleave: "mouseout",
+	pointerenter: "pointerover",
+	pointerleave: "pointerout"
+}, function( orig, fix ) {
+	jQuery.event.special[ orig ] = {
+		delegateType: fix,
+		bindType: fix,
+
+		handle: function( event ) {
+			var ret,
+				target = this,
+				related = event.relatedTarget,
+				handleObj = event.handleObj;
+
+			// For mouseenter/leave call the handler if related is outside the target.
+			// NB: No relatedTarget if the mouse left/entered the browser window
+			if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) {
+				event.type = handleObj.origType;
+				ret = handleObj.handler.apply( this, arguments );
+				event.type = fix;
+			}
+			return ret;
+		}
+	};
+} );
+
+jQuery.fn.extend( {
+
+	on: function( types, selector, data, fn ) {
+		return on( this, types, selector, data, fn );
+	},
+	one: function( types, selector, data, fn ) {
+		return on( this, types, selector, data, fn, 1 );
+	},
+	off: function( types, selector, fn ) {
+		var handleObj, type;
+		if ( types && types.preventDefault && types.handleObj ) {
+
+			// ( event )  dispatched jQuery.Event
+			handleObj = types.handleObj;
+			jQuery( types.delegateTarget ).off(
+				handleObj.namespace ?
+					handleObj.origType + "." + handleObj.namespace :
+					handleObj.origType,
+				handleObj.selector,
+				handleObj.handler
+			);
+			return this;
+		}
+		if ( typeof types === "object" ) {
+
+			// ( types-object [, selector] )
+			for ( type in types ) {
+				this.off( type, selector, types[ type ] );
+			}
+			return this;
+		}
+		if ( selector === false || typeof selector === "function" ) {
+
+			// ( types [, fn] )
+			fn = selector;
+			selector = undefined;
+		}
+		if ( fn === false ) {
+			fn = returnFalse;
+		}
+		return this.each( function() {
+			jQuery.event.remove( this, types, fn, selector );
+		} );
+	}
+} );
+
+
+var
+
+	// Support: IE <=10 - 11, Edge 12 - 13 only
+	// In IE/Edge using regex groups here causes severe slowdowns.
+	// See https://connect.microsoft.com/IE/feedback/details/1736512/
+	rnoInnerhtml = /<script|<style|<link/i,
+
+	// checked="checked" or checked
+	rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i,
+
+	rcleanScript = /^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;
+
+// Prefer a tbody over its parent table for containing new rows
+function manipulationTarget( elem, content ) {
+	if ( nodeName( elem, "table" ) &&
+		nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) {
+
+		return jQuery( elem ).children( "tbody" )[ 0 ] || elem;
+	}
+
+	return elem;
+}
+
+// Replace/restore the type attribute of script elements for safe DOM manipulation
+function disableScript( elem ) {
+	elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type;
+	return elem;
+}
+function restoreScript( elem ) {
+	if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) {
+		elem.type = elem.type.slice( 5 );
+	} else {
+		elem.removeAttribute( "type" );
+	}
+
+	return elem;
+}
+
+function cloneCopyEvent( src, dest ) {
+	var i, l, type, pdataOld, udataOld, udataCur, events;
+
+	if ( dest.nodeType !== 1 ) {
+		return;
+	}
+
+	// 1. Copy private data: events, handlers, etc.
+	if ( dataPriv.hasData( src ) ) {
+		pdataOld = dataPriv.get( src );
+		events = pdataOld.events;
+
+		if ( events ) {
+			dataPriv.remove( dest, "handle events" );
+
+			for ( type in events ) {
+				for ( i = 0, l = events[ type ].length; i < l; i++ ) {
+					jQuery.event.add( dest, type, events[ type ][ i ] );
+				}
+			}
+		}
+	}
+
+	// 2.
Copy user data + if ( dataUser.hasData( src ) ) { + udataOld = dataUser.access( src ); + udataCur = jQuery.extend( {}, udataOld ); + + dataUser.set( dest, udataCur ); + } +} + +// Fix IE bugs, see support tests +function fixInput( src, dest ) { + var nodeName = dest.nodeName.toLowerCase(); + + // Fails to persist the checked state of a cloned checkbox or radio button. + if ( nodeName === "input" && rcheckableType.test( src.type ) ) { + dest.checked = src.checked; + + // Fails to return the selected option to the default selected state when cloning options + } else if ( nodeName === "input" || nodeName === "textarea" ) { + dest.defaultValue = src.defaultValue; + } +} + +function domManip( collection, args, callback, ignored ) { + + // Flatten any nested arrays + args = flat( args ); + + var fragment, first, scripts, hasScripts, node, doc, + i = 0, + l = collection.length, + iNoClone = l - 1, + value = args[ 0 ], + valueIsFunction = isFunction( value ); + + // We can't cloneNode fragments that contain checked, in WebKit + if ( valueIsFunction || + ( l > 1 && typeof value === "string" && + !support.checkClone && rchecked.test( value ) ) ) { + return collection.each( function( index ) { + var self = collection.eq( index ); + if ( valueIsFunction ) { + args[ 0 ] = value.call( this, index, self.html() ); + } + domManip( self, args, callback, ignored ); + } ); + } + + if ( l ) { + fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); + first = fragment.firstChild; + + if ( fragment.childNodes.length === 1 ) { + fragment = first; + } + + // Require either new content or an interest in ignored elements to invoke the callback + if ( first || ignored ) { + scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); + hasScripts = scripts.length; + + // Use the original fragment for the last item + // instead of the first because it can end up + // being emptied incorrectly in certain situations (#8070). + for ( ; i < l; i++ ) { + node = fragment; + + if ( i !== iNoClone ) { + node = jQuery.clone( node, true, true ); + + // Keep references to cloned scripts for later restoration + if ( hasScripts ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( scripts, getAll( node, "script" ) ); + } + } + + callback.call( collection[ i ], node, i ); + } + + if ( hasScripts ) { + doc = scripts[ scripts.length - 1 ].ownerDocument; + + // Reenable scripts + jQuery.map( scripts, restoreScript ); + + // Evaluate executable scripts on first document insertion + for ( i = 0; i < hasScripts; i++ ) { + node = scripts[ i ]; + if ( rscriptType.test( node.type || "" ) && + !dataPriv.access( node, "globalEval" ) && + jQuery.contains( doc, node ) ) { + + if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) { + + // Optional AJAX dependency, but won't run scripts if not present + if ( jQuery._evalUrl && !node.noModule ) { + jQuery._evalUrl( node.src, { + nonce: node.nonce || node.getAttribute( "nonce" ) + }, doc ); + } + } else { + DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc ); + } + } + } + } + } + } + + return collection; +} + +function remove( elem, selector, keepData ) { + var node, + nodes = selector ? 
jQuery.filter( selector, elem ) : elem, + i = 0; + + for ( ; ( node = nodes[ i ] ) != null; i++ ) { + if ( !keepData && node.nodeType === 1 ) { + jQuery.cleanData( getAll( node ) ); + } + + if ( node.parentNode ) { + if ( keepData && isAttached( node ) ) { + setGlobalEval( getAll( node, "script" ) ); + } + node.parentNode.removeChild( node ); + } + } + + return elem; +} + +jQuery.extend( { + htmlPrefilter: function( html ) { + return html; + }, + + clone: function( elem, dataAndEvents, deepDataAndEvents ) { + var i, l, srcElements, destElements, + clone = elem.cloneNode( true ), + inPage = isAttached( elem ); + + // Fix IE cloning issues + if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && + !jQuery.isXMLDoc( elem ) ) { + + // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 + destElements = getAll( clone ); + srcElements = getAll( elem ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + fixInput( srcElements[ i ], destElements[ i ] ); + } + } + + // Copy the events from the original to the clone + if ( dataAndEvents ) { + if ( deepDataAndEvents ) { + srcElements = srcElements || getAll( elem ); + destElements = destElements || getAll( clone ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + cloneCopyEvent( srcElements[ i ], destElements[ i ] ); + } + } else { + cloneCopyEvent( elem, clone ); + } + } + + // Preserve script evaluation history + destElements = getAll( clone, "script" ); + if ( destElements.length > 0 ) { + setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); + } + + // Return the cloned set + return clone; + }, + + cleanData: function( elems ) { + var data, elem, type, + special = jQuery.event.special, + i = 0; + + for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { + if ( acceptData( elem ) ) { + if ( ( data = elem[ dataPriv.expando ] ) ) { + if ( data.events ) { + for ( type in data.events ) { + if ( special[ type ] ) { + jQuery.event.remove( elem, type ); + + // This is a shortcut to avoid jQuery.event.remove's overhead + } else { + jQuery.removeEvent( elem, type, data.handle ); + } + } + } + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataPriv.expando ] = undefined; + } + if ( elem[ dataUser.expando ] ) { + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataUser.expando ] = undefined; + } + } + } + } +} ); + +jQuery.fn.extend( { + detach: function( selector ) { + return remove( this, selector, true ); + }, + + remove: function( selector ) { + return remove( this, selector ); + }, + + text: function( value ) { + return access( this, function( value ) { + return value === undefined ? 
+ jQuery.text( this ) : + this.empty().each( function() { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + this.textContent = value; + } + } ); + }, null, value, arguments.length ); + }, + + append: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.appendChild( elem ); + } + } ); + }, + + prepend: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.insertBefore( elem, target.firstChild ); + } + } ); + }, + + before: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this ); + } + } ); + }, + + after: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this.nextSibling ); + } + } ); + }, + + empty: function() { + var elem, + i = 0; + + for ( ; ( elem = this[ i ] ) != null; i++ ) { + if ( elem.nodeType === 1 ) { + + // Prevent memory leaks + jQuery.cleanData( getAll( elem, false ) ); + + // Remove any remaining nodes + elem.textContent = ""; + } + } + + return this; + }, + + clone: function( dataAndEvents, deepDataAndEvents ) { + dataAndEvents = dataAndEvents == null ? false : dataAndEvents; + deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; + + return this.map( function() { + return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); + } ); + }, + + html: function( value ) { + return access( this, function( value ) { + var elem = this[ 0 ] || {}, + i = 0, + l = this.length; + + if ( value === undefined && elem.nodeType === 1 ) { + return elem.innerHTML; + } + + // See if we can take a shortcut and just use innerHTML + if ( typeof value === "string" && !rnoInnerhtml.test( value ) && + !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { + + value = jQuery.htmlPrefilter( value ); + + try { + for ( ; i < l; i++ ) { + elem = this[ i ] || {}; + + // Remove element nodes and prevent memory leaks + if ( elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem, false ) ); + elem.innerHTML = value; + } + } + + elem = 0; + + // If using innerHTML throws an exception, use the fallback method + } catch ( e ) {} + } + + if ( elem ) { + this.empty().append( value ); + } + }, null, value, arguments.length ); + }, + + replaceWith: function() { + var ignored = []; + + // Make the changes, replacing each non-ignored context element with the new content + return domManip( this, arguments, function( elem ) { + var parent = this.parentNode; + + if ( jQuery.inArray( this, ignored ) < 0 ) { + jQuery.cleanData( getAll( this ) ); + if ( parent ) { + parent.replaceChild( elem, this ); + } + } + + // Force callback invocation + }, ignored ); + } +} ); + +jQuery.each( { + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after", + replaceAll: "replaceWith" +}, function( name, original ) { + jQuery.fn[ name ] = function( selector ) { + var elems, + ret = [], + insert = jQuery( selector ), + last = insert.length - 1, + i = 0; + + for ( ; i <= last; i++ ) { + elems = i === last ? 
this : this.clone( true ); + jQuery( insert[ i ] )[ original ]( elems ); + + // Support: Android <=4.0 only, PhantomJS 1 only + // .get() because push.apply(_, arraylike) throws on ancient WebKit + push.apply( ret, elems.get() ); + } + + return this.pushStack( ret ); + }; +} ); +var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); + +var getStyles = function( elem ) { + + // Support: IE <=11 only, Firefox <=30 (#15098, #14150) + // IE throws on elements created in popups + // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" + var view = elem.ownerDocument.defaultView; + + if ( !view || !view.opener ) { + view = window; + } + + return view.getComputedStyle( elem ); + }; + +var swap = function( elem, options, callback ) { + var ret, name, + old = {}; + + // Remember the old values, and insert the new ones + for ( name in options ) { + old[ name ] = elem.style[ name ]; + elem.style[ name ] = options[ name ]; + } + + ret = callback.call( elem ); + + // Revert the old values + for ( name in options ) { + elem.style[ name ] = old[ name ]; + } + + return ret; +}; + + +var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); + + + +( function() { + + // Executing both pixelPosition & boxSizingReliable tests require only one layout + // so they're executed at the same time to save the second computation. + function computeStyleTests() { + + // This is a singleton, we need to execute it only once + if ( !div ) { + return; + } + + container.style.cssText = "position:absolute;left:-11111px;width:60px;" + + "margin-top:1px;padding:0;border:0"; + div.style.cssText = + "position:relative;display:block;box-sizing:border-box;overflow:scroll;" + + "margin:auto;border:1px;padding:1px;" + + "width:60%;top:1%"; + documentElement.appendChild( container ).appendChild( div ); + + var divStyle = window.getComputedStyle( div ); + pixelPositionVal = divStyle.top !== "1%"; + + // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 + reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12; + + // Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3 + // Some styles come back with percentage values, even though they shouldn't + div.style.right = "60%"; + pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36; + + // Support: IE 9 - 11 only + // Detect misreporting of content dimensions for box-sizing:border-box elements + boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36; + + // Support: IE 9 only + // Detect overflow:scroll screwiness (gh-3699) + // Support: Chrome <=64 + // Don't get tricked when zoom affects offsetWidth (gh-4029) + div.style.position = "absolute"; + scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12; + + documentElement.removeChild( container ); + + // Nullify the div so it wouldn't be stored in the memory and + // it will also be a sign that checks already performed + div = null; + } + + function roundPixelMeasures( measure ) { + return Math.round( parseFloat( measure ) ); + } + + var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal, + reliableTrDimensionsVal, reliableMarginLeftVal, + container = document.createElement( "div" ), + div = document.createElement( "div" ); + + // Finish early in limited (non-browser) environments + if ( !div.style ) { + return; + } + + // Support: IE <=9 - 11 only + // Style of cloned element affects source element cloned (#8908) + div.style.backgroundClip = "content-box"; + div.cloneNode( true ).style.backgroundClip = ""; + 
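+ // NOTE (editor, not upstream jQuery): had clearing the clone's style
+ // leaked back into the source (the IE bug above), the source would no
+ // longer read "content-box" and the flag below would come out false.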
support.clearCloneStyle = div.style.backgroundClip === "content-box"; + + jQuery.extend( support, { + boxSizingReliable: function() { + computeStyleTests(); + return boxSizingReliableVal; + }, + pixelBoxStyles: function() { + computeStyleTests(); + return pixelBoxStylesVal; + }, + pixelPosition: function() { + computeStyleTests(); + return pixelPositionVal; + }, + reliableMarginLeft: function() { + computeStyleTests(); + return reliableMarginLeftVal; + }, + scrollboxSize: function() { + computeStyleTests(); + return scrollboxSizeVal; + }, + + // Support: IE 9 - 11+, Edge 15 - 18+ + // IE/Edge misreport `getComputedStyle` of table rows with width/height + // set in CSS while `offset*` properties report correct values. + // Behavior in IE 9 is more subtle than in newer versions & it passes + // some versions of this test; make sure not to make it pass there! + // + // Support: Firefox 70+ + // Only Firefox includes border widths + // in computed dimensions. (gh-4529) + reliableTrDimensions: function() { + var table, tr, trChild, trStyle; + if ( reliableTrDimensionsVal == null ) { + table = document.createElement( "table" ); + tr = document.createElement( "tr" ); + trChild = document.createElement( "div" ); + + table.style.cssText = "position:absolute;left:-11111px;border-collapse:separate"; + tr.style.cssText = "border:1px solid"; + + // Support: Chrome 86+ + // Height set through cssText does not get applied. + // Computed height then comes back as 0. + tr.style.height = "1px"; + trChild.style.height = "9px"; + + // Support: Android 8 Chrome 86+ + // In our bodyBackground.html iframe, + // display for all div elements is set to "inline", + // which causes a problem only in Android 8 Chrome 86. + // Ensuring the div is display: block + // gets around this issue. + trChild.style.display = "block"; + + documentElement + .appendChild( table ) + .appendChild( tr ) + .appendChild( trChild ); + + trStyle = window.getComputedStyle( tr ); + reliableTrDimensionsVal = ( parseInt( trStyle.height, 10 ) + + parseInt( trStyle.borderTopWidth, 10 ) + + parseInt( trStyle.borderBottomWidth, 10 ) ) === tr.offsetHeight; + + documentElement.removeChild( table ); + } + return reliableTrDimensionsVal; + } + } ); +} )(); + + +function curCSS( elem, name, computed ) { + var width, minWidth, maxWidth, ret, + + // Support: Firefox 51+ + // Retrieving style before computed somehow + // fixes an issue with getting wrong values + // on detached elements + style = elem.style; + + computed = computed || getStyles( elem ); + + // getPropertyValue is needed for: + // .css('filter') (IE 9 only, #12537) + // .css('--customProperty) (#3144) + if ( computed ) { + ret = computed.getPropertyValue( name ) || computed[ name ]; + + if ( ret === "" && !isAttached( elem ) ) { + ret = jQuery.style( elem, name ); + } + + // A tribute to the "awesome hack by Dean Edwards" + // Android Browser returns percentage for some values, + // but width seems to be reliably pixels. 
+ // This is against the CSSOM draft spec: + // https://drafts.csswg.org/cssom/#resolved-values + if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) { + + // Remember the original values + width = style.width; + minWidth = style.minWidth; + maxWidth = style.maxWidth; + + // Put in the new values to get a computed value out + style.minWidth = style.maxWidth = style.width = ret; + ret = computed.width; + + // Revert the changed values + style.width = width; + style.minWidth = minWidth; + style.maxWidth = maxWidth; + } + } + + return ret !== undefined ? + + // Support: IE <=9 - 11 only + // IE returns zIndex value as an integer. + ret + "" : + ret; +} + + +function addGetHookIf( conditionFn, hookFn ) { + + // Define the hook, we'll check on the first run if it's really needed. + return { + get: function() { + if ( conditionFn() ) { + + // Hook not needed (or it's not possible to use it due + // to missing dependency), remove it. + delete this.get; + return; + } + + // Hook needed; redefine it so that the support test is not executed again. + return ( this.get = hookFn ).apply( this, arguments ); + } + }; +} + + +var cssPrefixes = [ "Webkit", "Moz", "ms" ], + emptyStyle = document.createElement( "div" ).style, + vendorProps = {}; + +// Return a vendor-prefixed property or undefined +function vendorPropName( name ) { + + // Check for vendor prefixed names + var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), + i = cssPrefixes.length; + + while ( i-- ) { + name = cssPrefixes[ i ] + capName; + if ( name in emptyStyle ) { + return name; + } + } +} + +// Return a potentially-mapped jQuery.cssProps or vendor prefixed property +function finalPropName( name ) { + var final = jQuery.cssProps[ name ] || vendorProps[ name ]; + + if ( final ) { + return final; + } + if ( name in emptyStyle ) { + return name; + } + return vendorProps[ name ] = vendorPropName( name ) || name; +} + + +var + + // Swappable if display is none or starts with table + // except "table", "table-cell", or "table-caption" + // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display + rdisplayswap = /^(none|table(?!-c[ea]).+)/, + rcustomProp = /^--/, + cssShow = { position: "absolute", visibility: "hidden", display: "block" }, + cssNormalTransform = { + letterSpacing: "0", + fontWeight: "400" + }; + +function setPositiveNumber( _elem, value, subtract ) { + + // Any relative (+/-) values have already been + // normalized at this point + var matches = rcssNum.exec( value ); + return matches ? + + // Guard against undefined "subtract", e.g., when used as in cssHooks + Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : + value; +} + +function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) { + var i = dimension === "width" ? 1 : 0, + extra = 0, + delta = 0; + + // Adjustment may not be necessary + if ( box === ( isBorderBox ? 
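+
+ // NOTE (editor, not upstream jQuery): a border-box measurement already
+ // is the "border" box and a content-box measurement the "content" box,
+ // so nothing needs adjusting when the requested box matches.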
"border" : "content" ) ) { + return 0; + } + + for ( ; i < 4; i += 2 ) { + + // Both box models exclude margin + if ( box === "margin" ) { + delta += jQuery.css( elem, box + cssExpand[ i ], true, styles ); + } + + // If we get here with a content-box, we're seeking "padding" or "border" or "margin" + if ( !isBorderBox ) { + + // Add padding + delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + + // For "border" or "margin", add border + if ( box !== "padding" ) { + delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + + // But still keep track of it otherwise + } else { + extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + + // If we get here with a border-box (content + padding + border), we're seeking "content" or + // "padding" or "margin" + } else { + + // For "content", subtract padding + if ( box === "content" ) { + delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + } + + // For "content" or "padding", subtract border + if ( box !== "margin" ) { + delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + } + } + + // Account for positive content-box scroll gutter when requested by providing computedVal + if ( !isBorderBox && computedVal >= 0 ) { + + // offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border + // Assuming integer scroll gutter, subtract the rest and round down + delta += Math.max( 0, Math.ceil( + elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - + computedVal - + delta - + extra - + 0.5 + + // If offsetWidth/offsetHeight is unknown, then we can't determine content-box scroll gutter + // Use an explicit zero to avoid NaN (gh-3964) + ) ) || 0; + } + + return delta; +} + +function getWidthOrHeight( elem, dimension, extra ) { + + // Start with computed style + var styles = getStyles( elem ), + + // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-4322). + // Fake content-box until we know it's needed to know the true value. + boxSizingNeeded = !support.boxSizingReliable() || extra, + isBorderBox = boxSizingNeeded && + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + valueIsBorderBox = isBorderBox, + + val = curCSS( elem, dimension, styles ), + offsetProp = "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ); + + // Support: Firefox <=54 + // Return a confounding non-pixel value or feign ignorance, as appropriate. + if ( rnumnonpx.test( val ) ) { + if ( !extra ) { + return val; + } + val = "auto"; + } + + + // Support: IE 9 - 11 only + // Use offsetWidth/offsetHeight for when box sizing is unreliable. + // In those cases, the computed value can be trusted to be border-box. + if ( ( !support.boxSizingReliable() && isBorderBox || + + // Support: IE 10 - 11+, Edge 15 - 18+ + // IE/Edge misreport `getComputedStyle` of table rows with width/height + // set in CSS while `offset*` properties report correct values. + // Interestingly, in some cases IE 9 doesn't suffer from this issue. 
+ !support.reliableTrDimensions() && nodeName( elem, "tr" ) || + + // Fall back to offsetWidth/offsetHeight when value is "auto" + // This happens for inline elements with no explicit setting (gh-3571) + val === "auto" || + + // Support: Android <=4.1 - 4.3 only + // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602) + !parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) && + + // Make sure the element is visible & connected + elem.getClientRects().length ) { + + isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; + + // Where available, offsetWidth/offsetHeight approximate border box dimensions. + // Where not available (e.g., SVG), assume unreliable box-sizing and interpret the + // retrieved value as a content box dimension. + valueIsBorderBox = offsetProp in elem; + if ( valueIsBorderBox ) { + val = elem[ offsetProp ]; + } + } + + // Normalize "" and auto + val = parseFloat( val ) || 0; + + // Adjust for the element's box model + return ( val + + boxModelAdjustment( + elem, + dimension, + extra || ( isBorderBox ? "border" : "content" ), + valueIsBorderBox, + styles, + + // Provide the current computed size to request scroll gutter calculation (gh-3589) + val + ) + ) + "px"; +} + +jQuery.extend( { + + // Add in style property hooks for overriding the default + // behavior of getting and setting a style property + cssHooks: { + opacity: { + get: function( elem, computed ) { + if ( computed ) { + + // We should always get a number back from opacity + var ret = curCSS( elem, "opacity" ); + return ret === "" ? "1" : ret; + } + } + } + }, + + // Don't automatically add "px" to these possibly-unitless properties + cssNumber: { + "animationIterationCount": true, + "columnCount": true, + "fillOpacity": true, + "flexGrow": true, + "flexShrink": true, + "fontWeight": true, + "gridArea": true, + "gridColumn": true, + "gridColumnEnd": true, + "gridColumnStart": true, + "gridRow": true, + "gridRowEnd": true, + "gridRowStart": true, + "lineHeight": true, + "opacity": true, + "order": true, + "orphans": true, + "widows": true, + "zIndex": true, + "zoom": true + }, + + // Add in properties whose names you wish to fix before + // setting or getting the value + cssProps: {}, + + // Get and set the style property on a DOM Node + style: function( elem, name, value, extra ) { + + // Don't set styles on text and comment nodes + if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { + return; + } + + // Make sure that we're working with the right name + var ret, type, hooks, + origName = camelCase( name ), + isCustomProp = rcustomProp.test( name ), + style = elem.style; + + // Make sure that we're working with the right name. We don't + // want to query the value if it is a CSS custom property + // since they are user-defined. 
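+ // NOTE (editor, not upstream jQuery): e.g. "--theme-color" is passed
+ // through untouched, while a name like "userSelect" may resolve via
+ // finalPropName to a vendor-prefixed equivalent (names illustrative).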
+ if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Gets hook for the prefixed version, then unprefixed version + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // Check if we're setting a value + if ( value !== undefined ) { + type = typeof value; + + // Convert "+=" or "-=" to relative numbers (#7345) + if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { + value = adjustCSS( elem, name, ret ); + + // Fixes bug #9237 + type = "number"; + } + + // Make sure that null and NaN values aren't set (#7116) + if ( value == null || value !== value ) { + return; + } + + // If a number was passed in, add the unit (except for certain CSS properties) + // The isCustomProp check can be removed in jQuery 4.0 when we only auto-append + // "px" to a few hardcoded values. + if ( type === "number" && !isCustomProp ) { + value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); + } + + // background-* props affect original clone's values + if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { + style[ name ] = "inherit"; + } + + // If a hook was provided, use that value, otherwise just set the specified value + if ( !hooks || !( "set" in hooks ) || + ( value = hooks.set( elem, value, extra ) ) !== undefined ) { + + if ( isCustomProp ) { + style.setProperty( name, value ); + } else { + style[ name ] = value; + } + } + + } else { + + // If a hook was provided get the non-computed value from there + if ( hooks && "get" in hooks && + ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { + + return ret; + } + + // Otherwise just get the value from the style object + return style[ name ]; + } + }, + + css: function( elem, name, extra, styles ) { + var val, num, hooks, + origName = camelCase( name ), + isCustomProp = rcustomProp.test( name ); + + // Make sure that we're working with the right name. We don't + // want to modify the value if it is a CSS custom property + // since they are user-defined. + if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Try prefixed name followed by the unprefixed name + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // If a hook was provided get the computed value from there + if ( hooks && "get" in hooks ) { + val = hooks.get( elem, true, extra ); + } + + // Otherwise, if a way to get the computed value exists, use that + if ( val === undefined ) { + val = curCSS( elem, name, styles ); + } + + // Convert "normal" to computed value + if ( val === "normal" && name in cssNormalTransform ) { + val = cssNormalTransform[ name ]; + } + + // Make numeric if forced or a qualifier was provided and val looks numeric + if ( extra === "" || extra ) { + num = parseFloat( val ); + return extra === true || isFinite( num ) ? num || 0 : val; + } + + return val; + } +} ); + +jQuery.each( [ "height", "width" ], function( _i, dimension ) { + jQuery.cssHooks[ dimension ] = { + get: function( elem, computed, extra ) { + if ( computed ) { + + // Certain elements can have dimension info if we invisibly show them + // but it must have a current display style that would benefit + return rdisplayswap.test( jQuery.css( elem, "display" ) ) && + + // Support: Safari 8+ + // Table columns in Safari have non-zero offsetWidth & zero + // getBoundingClientRect().width unless display is changed. + // Support: IE <=11 only + // Running getBoundingClientRect on a disconnected node + // in IE throws an error. 
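+ //
+ // NOTE (editor, not upstream jQuery): such elements are measured inside
+ // swap(), which temporarily applies cssShow ( absolute / hidden / block )
+ // and then restores the original inline styles.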
+ ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? + swap( elem, cssShow, function() { + return getWidthOrHeight( elem, dimension, extra ); + } ) : + getWidthOrHeight( elem, dimension, extra ); + } + }, + + set: function( elem, value, extra ) { + var matches, + styles = getStyles( elem ), + + // Only read styles.position if the test has a chance to fail + // to avoid forcing a reflow. + scrollboxSizeBuggy = !support.scrollboxSize() && + styles.position === "absolute", + + // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-3991) + boxSizingNeeded = scrollboxSizeBuggy || extra, + isBorderBox = boxSizingNeeded && + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + subtract = extra ? + boxModelAdjustment( + elem, + dimension, + extra, + isBorderBox, + styles + ) : + 0; + + // Account for unreliable border-box dimensions by comparing offset* to computed and + // faking a content-box to get border and padding (gh-3699) + if ( isBorderBox && scrollboxSizeBuggy ) { + subtract -= Math.ceil( + elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - + parseFloat( styles[ dimension ] ) - + boxModelAdjustment( elem, dimension, "border", false, styles ) - + 0.5 + ); + } + + // Convert to pixels if value adjustment is needed + if ( subtract && ( matches = rcssNum.exec( value ) ) && + ( matches[ 3 ] || "px" ) !== "px" ) { + + elem.style[ dimension ] = value; + value = jQuery.css( elem, dimension ); + } + + return setPositiveNumber( elem, value, subtract ); + } + }; +} ); + +jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, + function( elem, computed ) { + if ( computed ) { + return ( parseFloat( curCSS( elem, "marginLeft" ) ) || + elem.getBoundingClientRect().left - + swap( elem, { marginLeft: 0 }, function() { + return elem.getBoundingClientRect().left; + } ) + ) + "px"; + } + } +); + +// These hooks are used by animate to expand properties +jQuery.each( { + margin: "", + padding: "", + border: "Width" +}, function( prefix, suffix ) { + jQuery.cssHooks[ prefix + suffix ] = { + expand: function( value ) { + var i = 0, + expanded = {}, + + // Assumes a single number if not a string + parts = typeof value === "string" ? value.split( " " ) : [ value ]; + + for ( ; i < 4; i++ ) { + expanded[ prefix + cssExpand[ i ] + suffix ] = + parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; + } + + return expanded; + } + }; + + if ( prefix !== "margin" ) { + jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; + } +} ); + +jQuery.fn.extend( { + css: function( name, value ) { + return access( this, function( elem, name, value ) { + var styles, len, + map = {}, + i = 0; + + if ( Array.isArray( name ) ) { + styles = getStyles( elem ); + len = name.length; + + for ( ; i < len; i++ ) { + map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); + } + + return map; + } + + return value !== undefined ? + jQuery.style( elem, name, value ) : + jQuery.css( elem, name ); + }, name, value, arguments.length > 1 ); + } +} ); + + +function Tween( elem, options, prop, end, easing ) { + return new Tween.prototype.init( elem, options, prop, end, easing ); +} +jQuery.Tween = Tween; + +Tween.prototype = { + constructor: Tween, + init: function( elem, options, prop, end, easing, unit ) { + this.elem = elem; + this.prop = prop; + this.easing = easing || jQuery.easing._default; + this.options = options; + this.start = this.now = this.cur(); + this.end = end; + this.unit = unit || ( jQuery.cssNumber[ prop ] ? 
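+
+ // NOTE (editor, not upstream jQuery): properties in jQuery.cssNumber
+ // (opacity, zIndex, ...) animate unitless; everything else defaults to
+ // "px" when no explicit unit was parsed from the end value.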
"" : "px" ); + }, + cur: function() { + var hooks = Tween.propHooks[ this.prop ]; + + return hooks && hooks.get ? + hooks.get( this ) : + Tween.propHooks._default.get( this ); + }, + run: function( percent ) { + var eased, + hooks = Tween.propHooks[ this.prop ]; + + if ( this.options.duration ) { + this.pos = eased = jQuery.easing[ this.easing ]( + percent, this.options.duration * percent, 0, 1, this.options.duration + ); + } else { + this.pos = eased = percent; + } + this.now = ( this.end - this.start ) * eased + this.start; + + if ( this.options.step ) { + this.options.step.call( this.elem, this.now, this ); + } + + if ( hooks && hooks.set ) { + hooks.set( this ); + } else { + Tween.propHooks._default.set( this ); + } + return this; + } +}; + +Tween.prototype.init.prototype = Tween.prototype; + +Tween.propHooks = { + _default: { + get: function( tween ) { + var result; + + // Use a property on the element directly when it is not a DOM element, + // or when there is no matching style property that exists. + if ( tween.elem.nodeType !== 1 || + tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { + return tween.elem[ tween.prop ]; + } + + // Passing an empty string as a 3rd parameter to .css will automatically + // attempt a parseFloat and fallback to a string if the parse fails. + // Simple values such as "10px" are parsed to Float; + // complex values such as "rotate(1rad)" are returned as-is. + result = jQuery.css( tween.elem, tween.prop, "" ); + + // Empty strings, null, undefined and "auto" are converted to 0. + return !result || result === "auto" ? 0 : result; + }, + set: function( tween ) { + + // Use step hook for back compat. + // Use cssHook if its there. + // Use .style if available and use plain properties where available. + if ( jQuery.fx.step[ tween.prop ] ) { + jQuery.fx.step[ tween.prop ]( tween ); + } else if ( tween.elem.nodeType === 1 && ( + jQuery.cssHooks[ tween.prop ] || + tween.elem.style[ finalPropName( tween.prop ) ] != null ) ) { + jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); + } else { + tween.elem[ tween.prop ] = tween.now; + } + } + } +}; + +// Support: IE <=9 only +// Panic based approach to setting things on disconnected nodes +Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { + set: function( tween ) { + if ( tween.elem.nodeType && tween.elem.parentNode ) { + tween.elem[ tween.prop ] = tween.now; + } + } +}; + +jQuery.easing = { + linear: function( p ) { + return p; + }, + swing: function( p ) { + return 0.5 - Math.cos( p * Math.PI ) / 2; + }, + _default: "swing" +}; + +jQuery.fx = Tween.prototype.init; + +// Back compat <1.8 extension point +jQuery.fx.step = {}; + + + + +var + fxNow, inProgress, + rfxtypes = /^(?:toggle|show|hide)$/, + rrun = /queueHooks$/; + +function schedule() { + if ( inProgress ) { + if ( document.hidden === false && window.requestAnimationFrame ) { + window.requestAnimationFrame( schedule ); + } else { + window.setTimeout( schedule, jQuery.fx.interval ); + } + + jQuery.fx.tick(); + } +} + +// Animations created synchronously will run synchronously +function createFxNow() { + window.setTimeout( function() { + fxNow = undefined; + } ); + return ( fxNow = Date.now() ); +} + +// Generate parameters to create a standard animation +function genFx( type, includeWidth ) { + var which, + i = 0, + attrs = { height: type }; + + // If we include width, step value is 1 to do all cssExpand values, + // otherwise step value is 2 to skip over Left and Right + includeWidth = includeWidth ? 
1 : 0; + for ( ; i < 4; i += 2 - includeWidth ) { + which = cssExpand[ i ]; + attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; + } + + if ( includeWidth ) { + attrs.opacity = attrs.width = type; + } + + return attrs; +} + +function createTween( value, prop, animation ) { + var tween, + collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), + index = 0, + length = collection.length; + for ( ; index < length; index++ ) { + if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { + + // We're done with this property + return tween; + } + } +} + +function defaultPrefilter( elem, props, opts ) { + var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, + isBox = "width" in props || "height" in props, + anim = this, + orig = {}, + style = elem.style, + hidden = elem.nodeType && isHiddenWithinTree( elem ), + dataShow = dataPriv.get( elem, "fxshow" ); + + // Queue-skipping animations hijack the fx hooks + if ( !opts.queue ) { + hooks = jQuery._queueHooks( elem, "fx" ); + if ( hooks.unqueued == null ) { + hooks.unqueued = 0; + oldfire = hooks.empty.fire; + hooks.empty.fire = function() { + if ( !hooks.unqueued ) { + oldfire(); + } + }; + } + hooks.unqueued++; + + anim.always( function() { + + // Ensure the complete handler is called before this completes + anim.always( function() { + hooks.unqueued--; + if ( !jQuery.queue( elem, "fx" ).length ) { + hooks.empty.fire(); + } + } ); + } ); + } + + // Detect show/hide animations + for ( prop in props ) { + value = props[ prop ]; + if ( rfxtypes.test( value ) ) { + delete props[ prop ]; + toggle = toggle || value === "toggle"; + if ( value === ( hidden ? "hide" : "show" ) ) { + + // Pretend to be hidden if this is a "show" and + // there is still data from a stopped show/hide + if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { + hidden = true; + + // Ignore all other no-op show/hide data + } else { + continue; + } + } + orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); + } + } + + // Bail out if this is a no-op like .hide().hide() + propTween = !jQuery.isEmptyObject( props ); + if ( !propTween && jQuery.isEmptyObject( orig ) ) { + return; + } + + // Restrict "overflow" and "display" styles during box animations + if ( isBox && elem.nodeType === 1 ) { + + // Support: IE <=9 - 11, Edge 12 - 15 + // Record all 3 overflow attributes because IE does not infer the shorthand + // from identically-valued overflowX and overflowY and Edge just mirrors + // the overflowX value there. 
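+ // NOTE (editor, not upstream jQuery): all three recorded values are put
+ // back by the anim.always() callback a few lines further down once the
+ // animation completes or is stopped.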
+ opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; + + // Identify a display type, preferring old show/hide data over the CSS cascade + restoreDisplay = dataShow && dataShow.display; + if ( restoreDisplay == null ) { + restoreDisplay = dataPriv.get( elem, "display" ); + } + display = jQuery.css( elem, "display" ); + if ( display === "none" ) { + if ( restoreDisplay ) { + display = restoreDisplay; + } else { + + // Get nonempty value(s) by temporarily forcing visibility + showHide( [ elem ], true ); + restoreDisplay = elem.style.display || restoreDisplay; + display = jQuery.css( elem, "display" ); + showHide( [ elem ] ); + } + } + + // Animate inline elements as inline-block + if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { + if ( jQuery.css( elem, "float" ) === "none" ) { + + // Restore the original display value at the end of pure show/hide animations + if ( !propTween ) { + anim.done( function() { + style.display = restoreDisplay; + } ); + if ( restoreDisplay == null ) { + display = style.display; + restoreDisplay = display === "none" ? "" : display; + } + } + style.display = "inline-block"; + } + } + } + + if ( opts.overflow ) { + style.overflow = "hidden"; + anim.always( function() { + style.overflow = opts.overflow[ 0 ]; + style.overflowX = opts.overflow[ 1 ]; + style.overflowY = opts.overflow[ 2 ]; + } ); + } + + // Implement show/hide animations + propTween = false; + for ( prop in orig ) { + + // General show/hide setup for this element animation + if ( !propTween ) { + if ( dataShow ) { + if ( "hidden" in dataShow ) { + hidden = dataShow.hidden; + } + } else { + dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); + } + + // Store hidden/visible for toggle so `.stop().toggle()` "reverses" + if ( toggle ) { + dataShow.hidden = !hidden; + } + + // Show elements before animating them + if ( hidden ) { + showHide( [ elem ], true ); + } + + /* eslint-disable no-loop-func */ + + anim.done( function() { + + /* eslint-enable no-loop-func */ + + // The final step of a "hide" animation is actually hiding the element + if ( !hidden ) { + showHide( [ elem ] ); + } + dataPriv.remove( elem, "fxshow" ); + for ( prop in orig ) { + jQuery.style( elem, prop, orig[ prop ] ); + } + } ); + } + + // Per-property setup + propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); + if ( !( prop in dataShow ) ) { + dataShow[ prop ] = propTween.start; + if ( hidden ) { + propTween.end = propTween.start; + propTween.start = 0; + } + } + } +} + +function propFilter( props, specialEasing ) { + var index, name, easing, value, hooks; + + // camelCase, specialEasing and expand cssHook pass + for ( index in props ) { + name = camelCase( index ); + easing = specialEasing[ name ]; + value = props[ index ]; + if ( Array.isArray( value ) ) { + easing = value[ 1 ]; + value = props[ index ] = value[ 0 ]; + } + + if ( index !== name ) { + props[ name ] = value; + delete props[ index ]; + } + + hooks = jQuery.cssHooks[ name ]; + if ( hooks && "expand" in hooks ) { + value = hooks.expand( value ); + delete props[ name ]; + + // Not quite $.extend, this won't overwrite existing keys. 
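+ // NOTE (editor, not upstream jQuery): e.g. { padding: 10 } expands to
+ // paddingTop/Right/Bottom/Left here, each inheriting the easing chosen
+ // for the shorthand (values illustrative).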
+ // Reusing 'index' because we have the correct "name" + for ( index in value ) { + if ( !( index in props ) ) { + props[ index ] = value[ index ]; + specialEasing[ index ] = easing; + } + } + } else { + specialEasing[ name ] = easing; + } + } +} + +function Animation( elem, properties, options ) { + var result, + stopped, + index = 0, + length = Animation.prefilters.length, + deferred = jQuery.Deferred().always( function() { + + // Don't match elem in the :animated selector + delete tick.elem; + } ), + tick = function() { + if ( stopped ) { + return false; + } + var currentTime = fxNow || createFxNow(), + remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), + + // Support: Android 2.3 only + // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) + temp = remaining / animation.duration || 0, + percent = 1 - temp, + index = 0, + length = animation.tweens.length; + + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( percent ); + } + + deferred.notifyWith( elem, [ animation, percent, remaining ] ); + + // If there's more to do, yield + if ( percent < 1 && length ) { + return remaining; + } + + // If this was an empty animation, synthesize a final progress notification + if ( !length ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + } + + // Resolve the animation and report its conclusion + deferred.resolveWith( elem, [ animation ] ); + return false; + }, + animation = deferred.promise( { + elem: elem, + props: jQuery.extend( {}, properties ), + opts: jQuery.extend( true, { + specialEasing: {}, + easing: jQuery.easing._default + }, options ), + originalProperties: properties, + originalOptions: options, + startTime: fxNow || createFxNow(), + duration: options.duration, + tweens: [], + createTween: function( prop, end ) { + var tween = jQuery.Tween( elem, animation.opts, prop, end, + animation.opts.specialEasing[ prop ] || animation.opts.easing ); + animation.tweens.push( tween ); + return tween; + }, + stop: function( gotoEnd ) { + var index = 0, + + // If we are going to the end, we want to run all the tweens + // otherwise we skip this part + length = gotoEnd ? 
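+
+ // NOTE (editor, not upstream jQuery): jumping to the end runs every
+ // tween once at percent 1; a plain stop leaves values as they are and
+ // rejects the animation's deferred below.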
animation.tweens.length : 0; + if ( stopped ) { + return this; + } + stopped = true; + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( 1 ); + } + + // Resolve when we played the last frame; otherwise, reject + if ( gotoEnd ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + deferred.resolveWith( elem, [ animation, gotoEnd ] ); + } else { + deferred.rejectWith( elem, [ animation, gotoEnd ] ); + } + return this; + } + } ), + props = animation.props; + + propFilter( props, animation.opts.specialEasing ); + + for ( ; index < length; index++ ) { + result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); + if ( result ) { + if ( isFunction( result.stop ) ) { + jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = + result.stop.bind( result ); + } + return result; + } + } + + jQuery.map( props, createTween, animation ); + + if ( isFunction( animation.opts.start ) ) { + animation.opts.start.call( elem, animation ); + } + + // Attach callbacks from options + animation + .progress( animation.opts.progress ) + .done( animation.opts.done, animation.opts.complete ) + .fail( animation.opts.fail ) + .always( animation.opts.always ); + + jQuery.fx.timer( + jQuery.extend( tick, { + elem: elem, + anim: animation, + queue: animation.opts.queue + } ) + ); + + return animation; +} + +jQuery.Animation = jQuery.extend( Animation, { + + tweeners: { + "*": [ function( prop, value ) { + var tween = this.createTween( prop, value ); + adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); + return tween; + } ] + }, + + tweener: function( props, callback ) { + if ( isFunction( props ) ) { + callback = props; + props = [ "*" ]; + } else { + props = props.match( rnothtmlwhite ); + } + + var prop, + index = 0, + length = props.length; + + for ( ; index < length; index++ ) { + prop = props[ index ]; + Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; + Animation.tweeners[ prop ].unshift( callback ); + } + }, + + prefilters: [ defaultPrefilter ], + + prefilter: function( callback, prepend ) { + if ( prepend ) { + Animation.prefilters.unshift( callback ); + } else { + Animation.prefilters.push( callback ); + } + } +} ); + +jQuery.speed = function( speed, easing, fn ) { + var opt = speed && typeof speed === "object" ? 
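+
+ // NOTE (editor, not upstream jQuery): callers may pass an options object
+ // or positional ( duration, easing, complete ) arguments, e.g.
+ // $( el ).fadeIn( "slow", done ) vs. $( el ).fadeIn( { duration: 600 } )
+ // (el and done illustrative).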
jQuery.extend( {}, speed ) : { + complete: fn || !fn && easing || + isFunction( speed ) && speed, + duration: speed, + easing: fn && easing || easing && !isFunction( easing ) && easing + }; + + // Go to the end state if fx are off + if ( jQuery.fx.off ) { + opt.duration = 0; + + } else { + if ( typeof opt.duration !== "number" ) { + if ( opt.duration in jQuery.fx.speeds ) { + opt.duration = jQuery.fx.speeds[ opt.duration ]; + + } else { + opt.duration = jQuery.fx.speeds._default; + } + } + } + + // Normalize opt.queue - true/undefined/null -> "fx" + if ( opt.queue == null || opt.queue === true ) { + opt.queue = "fx"; + } + + // Queueing + opt.old = opt.complete; + + opt.complete = function() { + if ( isFunction( opt.old ) ) { + opt.old.call( this ); + } + + if ( opt.queue ) { + jQuery.dequeue( this, opt.queue ); + } + }; + + return opt; +}; + +jQuery.fn.extend( { + fadeTo: function( speed, to, easing, callback ) { + + // Show any hidden elements after setting opacity to 0 + return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() + + // Animate to the value specified + .end().animate( { opacity: to }, speed, easing, callback ); + }, + animate: function( prop, speed, easing, callback ) { + var empty = jQuery.isEmptyObject( prop ), + optall = jQuery.speed( speed, easing, callback ), + doAnimation = function() { + + // Operate on a copy of prop so per-property easing won't be lost + var anim = Animation( this, jQuery.extend( {}, prop ), optall ); + + // Empty animations, or finishing resolves immediately + if ( empty || dataPriv.get( this, "finish" ) ) { + anim.stop( true ); + } + }; + + doAnimation.finish = doAnimation; + + return empty || optall.queue === false ? + this.each( doAnimation ) : + this.queue( optall.queue, doAnimation ); + }, + stop: function( type, clearQueue, gotoEnd ) { + var stopQueue = function( hooks ) { + var stop = hooks.stop; + delete hooks.stop; + stop( gotoEnd ); + }; + + if ( typeof type !== "string" ) { + gotoEnd = clearQueue; + clearQueue = type; + type = undefined; + } + if ( clearQueue ) { + this.queue( type || "fx", [] ); + } + + return this.each( function() { + var dequeue = true, + index = type != null && type + "queueHooks", + timers = jQuery.timers, + data = dataPriv.get( this ); + + if ( index ) { + if ( data[ index ] && data[ index ].stop ) { + stopQueue( data[ index ] ); + } + } else { + for ( index in data ) { + if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { + stopQueue( data[ index ] ); + } + } + } + + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && + ( type == null || timers[ index ].queue === type ) ) { + + timers[ index ].anim.stop( gotoEnd ); + dequeue = false; + timers.splice( index, 1 ); + } + } + + // Start the next in the queue if the last step wasn't forced. + // Timers currently will call their complete callbacks, which + // will dequeue but only if they were gotoEnd. + if ( dequeue || !gotoEnd ) { + jQuery.dequeue( this, type ); + } + } ); + }, + finish: function( type ) { + if ( type !== false ) { + type = type || "fx"; + } + return this.each( function() { + var index, + data = dataPriv.get( this ), + queue = data[ type + "queue" ], + hooks = data[ type + "queueHooks" ], + timers = jQuery.timers, + length = queue ? 
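+
+ // NOTE (editor, not upstream jQuery): the old queue and its length are
+ // captured before the queue is emptied below, so the displaced entries
+ // can still have their finish() hooks invoked at the end.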
queue.length : 0; + + // Enable finishing flag on private data + data.finish = true; + + // Empty the queue first + jQuery.queue( this, type, [] ); + + if ( hooks && hooks.stop ) { + hooks.stop.call( this, true ); + } + + // Look for any active animations, and finish them + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && timers[ index ].queue === type ) { + timers[ index ].anim.stop( true ); + timers.splice( index, 1 ); + } + } + + // Look for any animations in the old queue and finish them + for ( index = 0; index < length; index++ ) { + if ( queue[ index ] && queue[ index ].finish ) { + queue[ index ].finish.call( this ); + } + } + + // Turn off finishing flag + delete data.finish; + } ); + } +} ); + +jQuery.each( [ "toggle", "show", "hide" ], function( _i, name ) { + var cssFn = jQuery.fn[ name ]; + jQuery.fn[ name ] = function( speed, easing, callback ) { + return speed == null || typeof speed === "boolean" ? + cssFn.apply( this, arguments ) : + this.animate( genFx( name, true ), speed, easing, callback ); + }; +} ); + +// Generate shortcuts for custom animations +jQuery.each( { + slideDown: genFx( "show" ), + slideUp: genFx( "hide" ), + slideToggle: genFx( "toggle" ), + fadeIn: { opacity: "show" }, + fadeOut: { opacity: "hide" }, + fadeToggle: { opacity: "toggle" } +}, function( name, props ) { + jQuery.fn[ name ] = function( speed, easing, callback ) { + return this.animate( props, speed, easing, callback ); + }; +} ); + +jQuery.timers = []; +jQuery.fx.tick = function() { + var timer, + i = 0, + timers = jQuery.timers; + + fxNow = Date.now(); + + for ( ; i < timers.length; i++ ) { + timer = timers[ i ]; + + // Run the timer and safely remove it when done (allowing for external removal) + if ( !timer() && timers[ i ] === timer ) { + timers.splice( i--, 1 ); + } + } + + if ( !timers.length ) { + jQuery.fx.stop(); + } + fxNow = undefined; +}; + +jQuery.fx.timer = function( timer ) { + jQuery.timers.push( timer ); + jQuery.fx.start(); +}; + +jQuery.fx.interval = 13; +jQuery.fx.start = function() { + if ( inProgress ) { + return; + } + + inProgress = true; + schedule(); +}; + +jQuery.fx.stop = function() { + inProgress = null; +}; + +jQuery.fx.speeds = { + slow: 600, + fast: 200, + + // Default speed + _default: 400 +}; + + +// Based off of the plugin by Clint Helfers, with permission. +// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ +jQuery.fn.delay = function( time, type ) { + time = jQuery.fx ? 
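+
+ // NOTE (editor, not upstream jQuery): named durations resolve through
+ // jQuery.fx.speeds, so $( el ).delay( "fast" ).fadeIn() waits 200ms
+ // (el illustrative).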
jQuery.fx.speeds[ time ] || time : time; + type = type || "fx"; + + return this.queue( type, function( next, hooks ) { + var timeout = window.setTimeout( next, time ); + hooks.stop = function() { + window.clearTimeout( timeout ); + }; + } ); +}; + + +( function() { + var input = document.createElement( "input" ), + select = document.createElement( "select" ), + opt = select.appendChild( document.createElement( "option" ) ); + + input.type = "checkbox"; + + // Support: Android <=4.3 only + // Default value for a checkbox should be "on" + support.checkOn = input.value !== ""; + + // Support: IE <=11 only + // Must access selectedIndex to make default options select + support.optSelected = opt.selected; + + // Support: IE <=11 only + // An input loses its value after becoming a radio + input = document.createElement( "input" ); + input.value = "t"; + input.type = "radio"; + support.radioValue = input.value === "t"; +} )(); + + +var boolHook, + attrHandle = jQuery.expr.attrHandle; + +jQuery.fn.extend( { + attr: function( name, value ) { + return access( this, jQuery.attr, name, value, arguments.length > 1 ); + }, + + removeAttr: function( name ) { + return this.each( function() { + jQuery.removeAttr( this, name ); + } ); + } +} ); + +jQuery.extend( { + attr: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set attributes on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + // Fallback to prop when attributes are not supported + if ( typeof elem.getAttribute === "undefined" ) { + return jQuery.prop( elem, name, value ); + } + + // Attribute hooks are determined by the lowercase version + // Grab necessary hook if one is defined + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + hooks = jQuery.attrHooks[ name.toLowerCase() ] || + ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); + } + + if ( value !== undefined ) { + if ( value === null ) { + jQuery.removeAttr( elem, name ); + return; + } + + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + elem.setAttribute( name, value + "" ); + return value; + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + ret = jQuery.find.attr( elem, name ); + + // Non-existent attributes return null, we normalize to undefined + return ret == null ? 
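+
+ // NOTE (editor, not upstream jQuery): hence $( el ).attr( "missing" )
+ // yields undefined rather than the DOM's null (names illustrative).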
undefined : ret; + }, + + attrHooks: { + type: { + set: function( elem, value ) { + if ( !support.radioValue && value === "radio" && + nodeName( elem, "input" ) ) { + var val = elem.value; + elem.setAttribute( "type", value ); + if ( val ) { + elem.value = val; + } + return value; + } + } + } + }, + + removeAttr: function( elem, value ) { + var name, + i = 0, + + // Attribute names can contain non-HTML whitespace characters + // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 + attrNames = value && value.match( rnothtmlwhite ); + + if ( attrNames && elem.nodeType === 1 ) { + while ( ( name = attrNames[ i++ ] ) ) { + elem.removeAttribute( name ); + } + } + } +} ); + +// Hooks for boolean attributes +boolHook = { + set: function( elem, value, name ) { + if ( value === false ) { + + // Remove boolean attributes when set to false + jQuery.removeAttr( elem, name ); + } else { + elem.setAttribute( name, name ); + } + return name; + } +}; + +jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( _i, name ) { + var getter = attrHandle[ name ] || jQuery.find.attr; + + attrHandle[ name ] = function( elem, name, isXML ) { + var ret, handle, + lowercaseName = name.toLowerCase(); + + if ( !isXML ) { + + // Avoid an infinite loop by temporarily removing this function from the getter + handle = attrHandle[ lowercaseName ]; + attrHandle[ lowercaseName ] = ret; + ret = getter( elem, name, isXML ) != null ? + lowercaseName : + null; + attrHandle[ lowercaseName ] = handle; + } + return ret; + }; +} ); + + + + +var rfocusable = /^(?:input|select|textarea|button)$/i, + rclickable = /^(?:a|area)$/i; + +jQuery.fn.extend( { + prop: function( name, value ) { + return access( this, jQuery.prop, name, value, arguments.length > 1 ); + }, + + removeProp: function( name ) { + return this.each( function() { + delete this[ jQuery.propFix[ name ] || name ]; + } ); + } +} ); + +jQuery.extend( { + prop: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set properties on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + + // Fix name and attach hooks + name = jQuery.propFix[ name ] || name; + hooks = jQuery.propHooks[ name ]; + } + + if ( value !== undefined ) { + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + return ( elem[ name ] = value ); + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + return elem[ name ]; + }, + + propHooks: { + tabIndex: { + get: function( elem ) { + + // Support: IE <=9 - 11 only + // elem.tabIndex doesn't always return the + // correct value when it hasn't been explicitly set + // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ + // Use proper attribute retrieval(#12072) + var tabindex = jQuery.find.attr( elem, "tabindex" ); + + if ( tabindex ) { + return parseInt( tabindex, 10 ); + } + + if ( + rfocusable.test( elem.nodeName ) || + rclickable.test( elem.nodeName ) && + elem.href + ) { + return 0; + } + + return -1; + } + } + }, + + propFix: { + "for": "htmlFor", + "class": "className" + } +} ); + +// Support: IE <=11 only +// Accessing the selectedIndex property +// forces the browser to respect setting selected +// on the option +// The getter ensures a default option is selected +// when in an 
optgroup +// eslint rule "no-unused-expressions" is disabled for this code +// since it considers such accessions noop +if ( !support.optSelected ) { + jQuery.propHooks.selected = { + get: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent && parent.parentNode ) { + parent.parentNode.selectedIndex; + } + return null; + }, + set: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent ) { + parent.selectedIndex; + + if ( parent.parentNode ) { + parent.parentNode.selectedIndex; + } + } + } + }; +} + +jQuery.each( [ + "tabIndex", + "readOnly", + "maxLength", + "cellSpacing", + "cellPadding", + "rowSpan", + "colSpan", + "useMap", + "frameBorder", + "contentEditable" +], function() { + jQuery.propFix[ this.toLowerCase() ] = this; +} ); + + + + + // Strip and collapse whitespace according to HTML spec + // https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace + function stripAndCollapse( value ) { + var tokens = value.match( rnothtmlwhite ) || []; + return tokens.join( " " ); + } + + +function getClass( elem ) { + return elem.getAttribute && elem.getAttribute( "class" ) || ""; +} + +function classesToArray( value ) { + if ( Array.isArray( value ) ) { + return value; + } + if ( typeof value === "string" ) { + return value.match( rnothtmlwhite ) || []; + } + return []; +} + +jQuery.fn.extend( { + addClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + classes = classesToArray( value ); + + if ( classes.length ) { + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + if ( cur.indexOf( " " + clazz + " " ) < 0 ) { + cur += clazz + " "; + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + removeClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( !arguments.length ) { + return this.attr( "class", "" ); + } + + classes = classesToArray( value ); + + if ( classes.length ) { + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + + // This expression is here for better compressibility (see addClass) + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + + // Remove *all* instances + while ( cur.indexOf( " " + clazz + " " ) > -1 ) { + cur = cur.replace( " " + clazz + " ", " " ); + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + toggleClass: function( value, stateVal ) { + var type = typeof value, + isValidValue = type === "string" || Array.isArray( value ); + + if ( typeof stateVal === "boolean" && isValidValue ) { + return stateVal ? 
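+
+ // NOTE (editor, not upstream jQuery): a boolean second argument turns
+ // toggling into a forced add or remove, e.g.
+ // $( el ).toggleClass( "active", isOn ) (names illustrative).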
this.addClass( value ) : this.removeClass( value ); + } + + if ( isFunction( value ) ) { + return this.each( function( i ) { + jQuery( this ).toggleClass( + value.call( this, i, getClass( this ), stateVal ), + stateVal + ); + } ); + } + + return this.each( function() { + var className, i, self, classNames; + + if ( isValidValue ) { + + // Toggle individual class names + i = 0; + self = jQuery( this ); + classNames = classesToArray( value ); + + while ( ( className = classNames[ i++ ] ) ) { + + // Check each className given, space separated list + if ( self.hasClass( className ) ) { + self.removeClass( className ); + } else { + self.addClass( className ); + } + } + + // Toggle whole class name + } else if ( value === undefined || type === "boolean" ) { + className = getClass( this ); + if ( className ) { + + // Store className if set + dataPriv.set( this, "__className__", className ); + } + + // If the element has a class name or if we're passed `false`, + // then remove the whole classname (if there was one, the above saved it). + // Otherwise bring back whatever was previously saved (if anything), + // falling back to the empty string if nothing was stored. + if ( this.setAttribute ) { + this.setAttribute( "class", + className || value === false ? + "" : + dataPriv.get( this, "__className__" ) || "" + ); + } + } + } ); + }, + + hasClass: function( selector ) { + var className, elem, + i = 0; + + className = " " + selector + " "; + while ( ( elem = this[ i++ ] ) ) { + if ( elem.nodeType === 1 && + ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { + return true; + } + } + + return false; + } +} ); + + + + +var rreturn = /\r/g; + +jQuery.fn.extend( { + val: function( value ) { + var hooks, ret, valueIsFunction, + elem = this[ 0 ]; + + if ( !arguments.length ) { + if ( elem ) { + hooks = jQuery.valHooks[ elem.type ] || + jQuery.valHooks[ elem.nodeName.toLowerCase() ]; + + if ( hooks && + "get" in hooks && + ( ret = hooks.get( elem, "value" ) ) !== undefined + ) { + return ret; + } + + ret = elem.value; + + // Handle most common string cases + if ( typeof ret === "string" ) { + return ret.replace( rreturn, "" ); + } + + // Handle cases where value is null/undef or number + return ret == null ? "" : ret; + } + + return; + } + + valueIsFunction = isFunction( value ); + + return this.each( function( i ) { + var val; + + if ( this.nodeType !== 1 ) { + return; + } + + if ( valueIsFunction ) { + val = value.call( this, i, jQuery( this ).val() ); + } else { + val = value; + } + + // Treat null/undefined as ""; convert numbers to string + if ( val == null ) { + val = ""; + + } else if ( typeof val === "number" ) { + val += ""; + + } else if ( Array.isArray( val ) ) { + val = jQuery.map( val, function( value ) { + return value == null ? "" : value + ""; + } ); + } + + hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; + + // If set returns undefined, fall back to normal setting + if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { + this.value = val; + } + } ); + } +} ); + +jQuery.extend( { + valHooks: { + option: { + get: function( elem ) { + + var val = jQuery.find.attr( elem, "value" ); + return val != null ? 
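+
+ // NOTE (editor, not upstream jQuery): per the HTML spec an <option>
+ // without a value attribute submits its text, hence the fallback below.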
+ val : + + // Support: IE <=10 - 11 only + // option.text throws exceptions (#14686, #14858) + // Strip and collapse whitespace + // https://html.spec.whatwg.org/#strip-and-collapse-whitespace + stripAndCollapse( jQuery.text( elem ) ); + } + }, + select: { + get: function( elem ) { + var value, option, i, + options = elem.options, + index = elem.selectedIndex, + one = elem.type === "select-one", + values = one ? null : [], + max = one ? index + 1 : options.length; + + if ( index < 0 ) { + i = max; + + } else { + i = one ? index : 0; + } + + // Loop through all the selected options + for ( ; i < max; i++ ) { + option = options[ i ]; + + // Support: IE <=9 only + // IE8-9 doesn't update selected after form reset (#2551) + if ( ( option.selected || i === index ) && + + // Don't return options that are disabled or in a disabled optgroup + !option.disabled && + ( !option.parentNode.disabled || + !nodeName( option.parentNode, "optgroup" ) ) ) { + + // Get the specific value for the option + value = jQuery( option ).val(); + + // We don't need an array for one selects + if ( one ) { + return value; + } + + // Multi-Selects return an array + values.push( value ); + } + } + + return values; + }, + + set: function( elem, value ) { + var optionSet, option, + options = elem.options, + values = jQuery.makeArray( value ), + i = options.length; + + while ( i-- ) { + option = options[ i ]; + + /* eslint-disable no-cond-assign */ + + if ( option.selected = + jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 + ) { + optionSet = true; + } + + /* eslint-enable no-cond-assign */ + } + + // Force browsers to behave consistently when non-matching value is set + if ( !optionSet ) { + elem.selectedIndex = -1; + } + return values; + } + } + } +} ); + +// Radios and checkboxes getter/setter +jQuery.each( [ "radio", "checkbox" ], function() { + jQuery.valHooks[ this ] = { + set: function( elem, value ) { + if ( Array.isArray( value ) ) { + return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); + } + } + }; + if ( !support.checkOn ) { + jQuery.valHooks[ this ].get = function( elem ) { + return elem.getAttribute( "value" ) === null ? "on" : elem.value; + }; + } +} ); + + + + +// Return jQuery for attributes-only inclusion + + +support.focusin = "onfocusin" in window; + + +var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, + stopPropagationCallback = function( e ) { + e.stopPropagation(); + }; + +jQuery.extend( jQuery.event, { + + trigger: function( event, data, elem, onlyHandlers ) { + + var i, cur, tmp, bubbleType, ontype, handle, special, lastElement, + eventPath = [ elem || document ], + type = hasOwn.call( event, "type" ) ? event.type : event, + namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; + + cur = lastElement = tmp = elem = elem || document; + + // Don't do events on text and comment nodes + if ( elem.nodeType === 3 || elem.nodeType === 8 ) { + return; + } + + // focus/blur morphs to focusin/out; ensure we're not firing them right now + if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { + return; + } + + if ( type.indexOf( "." ) > -1 ) { + + // Namespaced trigger; create a regexp to match event type in handle() + namespaces = type.split( "." ); + type = namespaces.shift(); + namespaces.sort(); + } + ontype = type.indexOf( ":" ) < 0 && "on" + type; + + // Caller can pass in a jQuery.Event object, Object, or just an event type string + event = event[ jQuery.expando ] ? 
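+
+ // NOTE (editor, not upstream jQuery): an object already carrying
+ // jQuery.expando is a jQuery.Event and is reused as-is; a type string or
+ // plain object is wrapped in a fresh jQuery.Event just below.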
+ event : + new jQuery.Event( type, typeof event === "object" && event ); + + // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) + event.isTrigger = onlyHandlers ? 2 : 3; + event.namespace = namespaces.join( "." ); + event.rnamespace = event.namespace ? + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : + null; + + // Clean up the event in case it is being reused + event.result = undefined; + if ( !event.target ) { + event.target = elem; + } + + // Clone any incoming data and prepend the event, creating the handler arg list + data = data == null ? + [ event ] : + jQuery.makeArray( data, [ event ] ); + + // Allow special events to draw outside the lines + special = jQuery.event.special[ type ] || {}; + if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { + return; + } + + // Determine event propagation path in advance, per W3C events spec (#9951) + // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) + if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) { + + bubbleType = special.delegateType || type; + if ( !rfocusMorph.test( bubbleType + type ) ) { + cur = cur.parentNode; + } + for ( ; cur; cur = cur.parentNode ) { + eventPath.push( cur ); + tmp = cur; + } + + // Only add window if we got to document (e.g., not plain obj or detached DOM) + if ( tmp === ( elem.ownerDocument || document ) ) { + eventPath.push( tmp.defaultView || tmp.parentWindow || window ); + } + } + + // Fire handlers on the event path + i = 0; + while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { + lastElement = cur; + event.type = i > 1 ? + bubbleType : + special.bindType || type; + + // jQuery handler + handle = ( dataPriv.get( cur, "events" ) || Object.create( null ) )[ event.type ] && + dataPriv.get( cur, "handle" ); + if ( handle ) { + handle.apply( cur, data ); + } + + // Native handler + handle = ontype && cur[ ontype ]; + if ( handle && handle.apply && acceptData( cur ) ) { + event.result = handle.apply( cur, data ); + if ( event.result === false ) { + event.preventDefault(); + } + } + } + event.type = type; + + // If nobody prevented the default action, do it now + if ( !onlyHandlers && !event.isDefaultPrevented() ) { + + if ( ( !special._default || + special._default.apply( eventPath.pop(), data ) === false ) && + acceptData( elem ) ) { + + // Call a native DOM method on the target with the same name as the event. 
+ // Don't do default actions on window, that's where global variables be (#6170) + if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) { + + // Don't re-trigger an onFOO event when we call its FOO() method + tmp = elem[ ontype ]; + + if ( tmp ) { + elem[ ontype ] = null; + } + + // Prevent re-triggering of the same event, since we already bubbled it above + jQuery.event.triggered = type; + + if ( event.isPropagationStopped() ) { + lastElement.addEventListener( type, stopPropagationCallback ); + } + + elem[ type ](); + + if ( event.isPropagationStopped() ) { + lastElement.removeEventListener( type, stopPropagationCallback ); + } + + jQuery.event.triggered = undefined; + + if ( tmp ) { + elem[ ontype ] = tmp; + } + } + } + } + + return event.result; + }, + + // Piggyback on a donor event to simulate a different one + // Used only for `focus(in | out)` events + simulate: function( type, elem, event ) { + var e = jQuery.extend( + new jQuery.Event(), + event, + { + type: type, + isSimulated: true + } + ); + + jQuery.event.trigger( e, null, elem ); + } + +} ); + +jQuery.fn.extend( { + + trigger: function( type, data ) { + return this.each( function() { + jQuery.event.trigger( type, data, this ); + } ); + }, + triggerHandler: function( type, data ) { + var elem = this[ 0 ]; + if ( elem ) { + return jQuery.event.trigger( type, data, elem, true ); + } + } +} ); + + +// Support: Firefox <=44 +// Firefox doesn't have focus(in | out) events +// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 +// +// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 +// focus(in | out) events fire after focus & blur events, +// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order +// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 +if ( !support.focusin ) { + jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { + + // Attach a single capturing handler on the document while someone wants focusin/focusout + var handler = function( event ) { + jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); + }; + + jQuery.event.special[ fix ] = { + setup: function() { + + // Handle: regular nodes (via `this.ownerDocument`), window + // (via `this.document`) & document (via `this`). + var doc = this.ownerDocument || this.document || this, + attaches = dataPriv.access( doc, fix ); + + if ( !attaches ) { + doc.addEventListener( orig, handler, true ); + } + dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); + }, + teardown: function() { + var doc = this.ownerDocument || this.document || this, + attaches = dataPriv.access( doc, fix ) - 1; + + if ( !attaches ) { + doc.removeEventListener( orig, handler, true ); + dataPriv.remove( doc, fix ); + + } else { + dataPriv.access( doc, fix, attaches ); + } + } + }; + } ); +} +var location = window.location; + +var nonce = { guid: Date.now() }; + +var rquery = ( /\?/ ); + + + +// Cross-browser xml parsing +jQuery.parseXML = function( data ) { + var xml, parserErrorElem; + if ( !data || typeof data !== "string" ) { + return null; + } + + // Support: IE 9 - 11 only + // IE throws on parseFromString with invalid input. + try { + xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); + } catch ( e ) {} + + parserErrorElem = xml && xml.getElementsByTagName( "parsererror" )[ 0 ]; + if ( !xml || parserErrorElem ) { + jQuery.error( "Invalid XML: " + ( + parserErrorElem ? 
+ jQuery.map( parserErrorElem.childNodes, function( el ) { + return el.textContent; + } ).join( "\n" ) : + data + ) ); + } + return xml; +}; + + +var + rbracket = /\[\]$/, + rCRLF = /\r?\n/g, + rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, + rsubmittable = /^(?:input|select|textarea|keygen)/i; + +function buildParams( prefix, obj, traditional, add ) { + var name; + + if ( Array.isArray( obj ) ) { + + // Serialize array item. + jQuery.each( obj, function( i, v ) { + if ( traditional || rbracket.test( prefix ) ) { + + // Treat each array item as a scalar. + add( prefix, v ); + + } else { + + // Item is non-scalar (array or object), encode its numeric index. + buildParams( + prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", + v, + traditional, + add + ); + } + } ); + + } else if ( !traditional && toType( obj ) === "object" ) { + + // Serialize object item. + for ( name in obj ) { + buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); + } + + } else { + + // Serialize scalar item. + add( prefix, obj ); + } +} + +// Serialize an array of form elements or a set of +// key/values into a query string +jQuery.param = function( a, traditional ) { + var prefix, + s = [], + add = function( key, valueOrFunction ) { + + // If value is a function, invoke it and use its return value + var value = isFunction( valueOrFunction ) ? + valueOrFunction() : + valueOrFunction; + + s[ s.length ] = encodeURIComponent( key ) + "=" + + encodeURIComponent( value == null ? "" : value ); + }; + + if ( a == null ) { + return ""; + } + + // If an array was passed in, assume that it is an array of form elements. + if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { + + // Serialize the form elements + jQuery.each( a, function() { + add( this.name, this.value ); + } ); + + } else { + + // If traditional, encode the "old" way (the way 1.3.2 or older + // did it), otherwise encode params recursively. + for ( prefix in a ) { + buildParams( prefix, a[ prefix ], traditional, add ); + } + } + + // Return the resulting serialization + return s.join( "&" ); +}; + +jQuery.fn.extend( { + serialize: function() { + return jQuery.param( this.serializeArray() ); + }, + serializeArray: function() { + return this.map( function() { + + // Can add propHook for "elements" to filter or add form elements + var elements = jQuery.prop( this, "elements" ); + return elements ? 
jQuery.makeArray( elements ) : this; + } ).filter( function() { + var type = this.type; + + // Use .is( ":disabled" ) so that fieldset[disabled] works + return this.name && !jQuery( this ).is( ":disabled" ) && + rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && + ( this.checked || !rcheckableType.test( type ) ); + } ).map( function( _i, elem ) { + var val = jQuery( this ).val(); + + if ( val == null ) { + return null; + } + + if ( Array.isArray( val ) ) { + return jQuery.map( val, function( val ) { + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ); + } + + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ).get(); + } +} ); + + +var + r20 = /%20/g, + rhash = /#.*$/, + rantiCache = /([?&])_=[^&]*/, + rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, + + // #7653, #8125, #8152: local protocol detection + rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, + rnoContent = /^(?:GET|HEAD)$/, + rprotocol = /^\/\//, + + /* Prefilters + * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) + * 2) These are called: + * - BEFORE asking for a transport + * - AFTER param serialization (s.data is a string if s.processData is true) + * 3) key is the dataType + * 4) the catchall symbol "*" can be used + * 5) execution will start with transport dataType and THEN continue down to "*" if needed + */ + prefilters = {}, + + /* Transports bindings + * 1) key is the dataType + * 2) the catchall symbol "*" can be used + * 3) selection will start with transport dataType and THEN go to "*" if needed + */ + transports = {}, + + // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression + allTypes = "*/".concat( "*" ), + + // Anchor tag for parsing the document origin + originAnchor = document.createElement( "a" ); + +originAnchor.href = location.href; + +// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport +function addToPrefiltersOrTransports( structure ) { + + // dataTypeExpression is optional and defaults to "*" + return function( dataTypeExpression, func ) { + + if ( typeof dataTypeExpression !== "string" ) { + func = dataTypeExpression; + dataTypeExpression = "*"; + } + + var dataType, + i = 0, + dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; + + if ( isFunction( func ) ) { + + // For each dataType in the dataTypeExpression + while ( ( dataType = dataTypes[ i++ ] ) ) { + + // Prepend if requested + if ( dataType[ 0 ] === "+" ) { + dataType = dataType.slice( 1 ) || "*"; + ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); + + // Otherwise append + } else { + ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); + } + } + } + }; +} + +// Base inspection function for prefilters and transports +function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { + + var inspected = {}, + seekingTransport = ( structure === transports ); + + function inspect( dataType ) { + var selected; + inspected[ dataType ] = true; + jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { + var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); + if ( typeof dataTypeOrTransport === "string" && + !seekingTransport && !inspected[ dataTypeOrTransport ] ) { + + options.dataTypes.unshift( dataTypeOrTransport ); + inspect( dataTypeOrTransport ); + return false; + } else if ( seekingTransport ) { + return !( selected = dataTypeOrTransport ); + } + } 
); + return selected; + } + + return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); +} + +// A special extend for ajax options +// that takes "flat" options (not to be deep extended) +// Fixes #9887 +function ajaxExtend( target, src ) { + var key, deep, + flatOptions = jQuery.ajaxSettings.flatOptions || {}; + + for ( key in src ) { + if ( src[ key ] !== undefined ) { + ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; + } + } + if ( deep ) { + jQuery.extend( true, target, deep ); + } + + return target; +} + +/* Handles responses to an ajax request: + * - finds the right dataType (mediates between content-type and expected dataType) + * - returns the corresponding response + */ +function ajaxHandleResponses( s, jqXHR, responses ) { + + var ct, type, finalDataType, firstDataType, + contents = s.contents, + dataTypes = s.dataTypes; + + // Remove auto dataType and get content-type in the process + while ( dataTypes[ 0 ] === "*" ) { + dataTypes.shift(); + if ( ct === undefined ) { + ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); + } + } + + // Check if we're dealing with a known content-type + if ( ct ) { + for ( type in contents ) { + if ( contents[ type ] && contents[ type ].test( ct ) ) { + dataTypes.unshift( type ); + break; + } + } + } + + // Check to see if we have a response for the expected dataType + if ( dataTypes[ 0 ] in responses ) { + finalDataType = dataTypes[ 0 ]; + } else { + + // Try convertible dataTypes + for ( type in responses ) { + if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { + finalDataType = type; + break; + } + if ( !firstDataType ) { + firstDataType = type; + } + } + + // Or just use first one + finalDataType = finalDataType || firstDataType; + } + + // If we found a dataType + // We add the dataType to the list if needed + // and return the corresponding response + if ( finalDataType ) { + if ( finalDataType !== dataTypes[ 0 ] ) { + dataTypes.unshift( finalDataType ); + } + return responses[ finalDataType ]; + } +} + +/* Chain conversions given the request and the original response + * Also sets the responseXXX fields on the jqXHR instance + */ +function ajaxConvert( s, response, jqXHR, isSuccess ) { + var conv2, current, conv, tmp, prev, + converters = {}, + + // Work with a copy of dataTypes in case we need to modify it for conversion + dataTypes = s.dataTypes.slice(); + + // Create converters map with lowercased keys + if ( dataTypes[ 1 ] ) { + for ( conv in s.converters ) { + converters[ conv.toLowerCase() ] = s.converters[ conv ]; + } + } + + current = dataTypes.shift(); + + // Convert to each sequential dataType + while ( current ) { + + if ( s.responseFields[ current ] ) { + jqXHR[ s.responseFields[ current ] ] = response; + } + + // Apply the dataFilter if provided + if ( !prev && isSuccess && s.dataFilter ) { + response = s.dataFilter( response, s.dataType ); + } + + prev = current; + current = dataTypes.shift(); + + if ( current ) { + + // There's only work to do if current dataType is non-auto + if ( current === "*" ) { + + current = prev; + + // Convert response if prev dataType is non-auto and differs from current + } else if ( prev !== "*" && prev !== current ) { + + // Seek a direct converter + conv = converters[ prev + " " + current ] || converters[ "* " + current ]; + + // If none found, seek a pair + if ( !conv ) { + for ( conv2 in converters ) { + + // If conv2 outputs current + tmp = conv2.split( " " ); + if ( tmp[ 1 ] === current ) { + + // If prev 
can be converted to accepted input + conv = converters[ prev + " " + tmp[ 0 ] ] || + converters[ "* " + tmp[ 0 ] ]; + if ( conv ) { + + // Condense equivalence converters + if ( conv === true ) { + conv = converters[ conv2 ]; + + // Otherwise, insert the intermediate dataType + } else if ( converters[ conv2 ] !== true ) { + current = tmp[ 0 ]; + dataTypes.unshift( tmp[ 1 ] ); + } + break; + } + } + } + } + + // Apply converter (if not an equivalence) + if ( conv !== true ) { + + // Unless errors are allowed to bubble, catch and return them + if ( conv && s.throws ) { + response = conv( response ); + } else { + try { + response = conv( response ); + } catch ( e ) { + return { + state: "parsererror", + error: conv ? e : "No conversion from " + prev + " to " + current + }; + } + } + } + } + } + } + + return { state: "success", data: response }; +} + +jQuery.extend( { + + // Counter for holding the number of active queries + active: 0, + + // Last-Modified header cache for next request + lastModified: {}, + etag: {}, + + ajaxSettings: { + url: location.href, + type: "GET", + isLocal: rlocalProtocol.test( location.protocol ), + global: true, + processData: true, + async: true, + contentType: "application/x-www-form-urlencoded; charset=UTF-8", + + /* + timeout: 0, + data: null, + dataType: null, + username: null, + password: null, + cache: null, + throws: false, + traditional: false, + headers: {}, + */ + + accepts: { + "*": allTypes, + text: "text/plain", + html: "text/html", + xml: "application/xml, text/xml", + json: "application/json, text/javascript" + }, + + contents: { + xml: /\bxml\b/, + html: /\bhtml/, + json: /\bjson\b/ + }, + + responseFields: { + xml: "responseXML", + text: "responseText", + json: "responseJSON" + }, + + // Data converters + // Keys separate source (or catchall "*") and destination types with a single space + converters: { + + // Convert anything to text + "* text": String, + + // Text to html (true = no transformation) + "text html": true, + + // Evaluate text as a json expression + "text json": JSON.parse, + + // Parse text as xml + "text xml": jQuery.parseXML + }, + + // For options that shouldn't be deep extended: + // you can add your own custom options here if + // and when you create one that shouldn't be + // deep extended (see ajaxExtend) + flatOptions: { + url: true, + context: true + } + }, + + // Creates a full fledged settings object into target + // with both ajaxSettings and settings fields. + // If target is omitted, writes into ajaxSettings. + ajaxSetup: function( target, settings ) { + return settings ? 
+ + // Building a settings object + ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : + + // Extending ajaxSettings + ajaxExtend( jQuery.ajaxSettings, target ); + }, + + ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), + ajaxTransport: addToPrefiltersOrTransports( transports ), + + // Main method + ajax: function( url, options ) { + + // If url is an object, simulate pre-1.5 signature + if ( typeof url === "object" ) { + options = url; + url = undefined; + } + + // Force options to be an object + options = options || {}; + + var transport, + + // URL without anti-cache param + cacheURL, + + // Response headers + responseHeadersString, + responseHeaders, + + // timeout handle + timeoutTimer, + + // Url cleanup var + urlAnchor, + + // Request state (becomes false upon send and true upon completion) + completed, + + // To know if global events are to be dispatched + fireGlobals, + + // Loop variable + i, + + // uncached part of the url + uncached, + + // Create the final options object + s = jQuery.ajaxSetup( {}, options ), + + // Callbacks context + callbackContext = s.context || s, + + // Context for global events is callbackContext if it is a DOM node or jQuery collection + globalEventContext = s.context && + ( callbackContext.nodeType || callbackContext.jquery ) ? + jQuery( callbackContext ) : + jQuery.event, + + // Deferreds + deferred = jQuery.Deferred(), + completeDeferred = jQuery.Callbacks( "once memory" ), + + // Status-dependent callbacks + statusCode = s.statusCode || {}, + + // Headers (they are sent all at once) + requestHeaders = {}, + requestHeadersNames = {}, + + // Default abort message + strAbort = "canceled", + + // Fake xhr + jqXHR = { + readyState: 0, + + // Builds headers hashtable if needed + getResponseHeader: function( key ) { + var match; + if ( completed ) { + if ( !responseHeaders ) { + responseHeaders = {}; + while ( ( match = rheaders.exec( responseHeadersString ) ) ) { + responseHeaders[ match[ 1 ].toLowerCase() + " " ] = + ( responseHeaders[ match[ 1 ].toLowerCase() + " " ] || [] ) + .concat( match[ 2 ] ); + } + } + match = responseHeaders[ key.toLowerCase() + " " ]; + } + return match == null ? null : match.join( ", " ); + }, + + // Raw string + getAllResponseHeaders: function() { + return completed ? 
responseHeadersString : null; + }, + + // Caches the header + setRequestHeader: function( name, value ) { + if ( completed == null ) { + name = requestHeadersNames[ name.toLowerCase() ] = + requestHeadersNames[ name.toLowerCase() ] || name; + requestHeaders[ name ] = value; + } + return this; + }, + + // Overrides response content-type header + overrideMimeType: function( type ) { + if ( completed == null ) { + s.mimeType = type; + } + return this; + }, + + // Status-dependent callbacks + statusCode: function( map ) { + var code; + if ( map ) { + if ( completed ) { + + // Execute the appropriate callbacks + jqXHR.always( map[ jqXHR.status ] ); + } else { + + // Lazy-add the new callbacks in a way that preserves old ones + for ( code in map ) { + statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; + } + } + } + return this; + }, + + // Cancel the request + abort: function( statusText ) { + var finalText = statusText || strAbort; + if ( transport ) { + transport.abort( finalText ); + } + done( 0, finalText ); + return this; + } + }; + + // Attach deferreds + deferred.promise( jqXHR ); + + // Add protocol if not provided (prefilters might expect it) + // Handle falsy url in the settings object (#10093: consistency with old signature) + // We also use the url parameter if available + s.url = ( ( url || s.url || location.href ) + "" ) + .replace( rprotocol, location.protocol + "//" ); + + // Alias method option to type as per ticket #12004 + s.type = options.method || options.type || s.method || s.type; + + // Extract dataTypes list + s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; + + // A cross-domain request is in order when the origin doesn't match the current origin. + if ( s.crossDomain == null ) { + urlAnchor = document.createElement( "a" ); + + // Support: IE <=8 - 11, Edge 12 - 15 + // IE throws exception on accessing the href property if url is malformed, + // e.g. 
http://example.com:80x/ + try { + urlAnchor.href = s.url; + + // Support: IE <=8 - 11 only + // Anchor's host property isn't correctly set when s.url is relative + urlAnchor.href = urlAnchor.href; + s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== + urlAnchor.protocol + "//" + urlAnchor.host; + } catch ( e ) { + + // If there is an error parsing the URL, assume it is crossDomain, + // it can be rejected by the transport if it is invalid + s.crossDomain = true; + } + } + + // Convert data if not already a string + if ( s.data && s.processData && typeof s.data !== "string" ) { + s.data = jQuery.param( s.data, s.traditional ); + } + + // Apply prefilters + inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); + + // If request was aborted inside a prefilter, stop there + if ( completed ) { + return jqXHR; + } + + // We can fire global events as of now if asked to + // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) + fireGlobals = jQuery.event && s.global; + + // Watch for a new set of requests + if ( fireGlobals && jQuery.active++ === 0 ) { + jQuery.event.trigger( "ajaxStart" ); + } + + // Uppercase the type + s.type = s.type.toUpperCase(); + + // Determine if request has content + s.hasContent = !rnoContent.test( s.type ); + + // Save the URL in case we're toying with the If-Modified-Since + // and/or If-None-Match header later on + // Remove hash to simplify url manipulation + cacheURL = s.url.replace( rhash, "" ); + + // More options handling for requests with no content + if ( !s.hasContent ) { + + // Remember the hash so we can put it back + uncached = s.url.slice( cacheURL.length ); + + // If data is available and should be processed, append data to url + if ( s.data && ( s.processData || typeof s.data === "string" ) ) { + cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; + + // #9682: remove data so that it's not used in an eventual retry + delete s.data; + } + + // Add or update anti-cache param if needed + if ( s.cache === false ) { + cacheURL = cacheURL.replace( rantiCache, "$1" ); + uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce.guid++ ) + + uncached; + } + + // Put hash and anti-cache on the URL that will be requested (gh-1732) + s.url = cacheURL + uncached; + + // Change '%20' to '+' if this is encoded form body content (gh-2658) + } else if ( s.data && s.processData && + ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { + s.data = s.data.replace( r20, "+" ); + } + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. + if ( s.ifModified ) { + if ( jQuery.lastModified[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); + } + if ( jQuery.etag[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); + } + } + + // Set the correct header, if data is being sent + if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { + jqXHR.setRequestHeader( "Content-Type", s.contentType ); + } + + // Set the Accepts header for the server, depending on the dataType + jqXHR.setRequestHeader( + "Accept", + s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? + s.accepts[ s.dataTypes[ 0 ] ] + + ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : + s.accepts[ "*" ] + ); + + // Check for headers option + for ( i in s.headers ) { + jqXHR.setRequestHeader( i, s.headers[ i ] ); + } + + // Allow custom headers/mimetypes and early abort + if ( s.beforeSend && + ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { + + // Abort if not done already and return + return jqXHR.abort(); + } + + // Aborting is no longer a cancellation + strAbort = "abort"; + + // Install callbacks on deferreds + completeDeferred.add( s.complete ); + jqXHR.done( s.success ); + jqXHR.fail( s.error ); + + // Get transport + transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); + + // If no transport, we auto-abort + if ( !transport ) { + done( -1, "No Transport" ); + } else { + jqXHR.readyState = 1; + + // Send global event + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); + } + + // If request was aborted inside ajaxSend, stop there + if ( completed ) { + return jqXHR; + } + + // Timeout + if ( s.async && s.timeout > 0 ) { + timeoutTimer = window.setTimeout( function() { + jqXHR.abort( "timeout" ); + }, s.timeout ); + } + + try { + completed = false; + transport.send( requestHeaders, done ); + } catch ( e ) { + + // Rethrow post-completion exceptions + if ( completed ) { + throw e; + } + + // Propagate others as results + done( -1, e ); + } + } + + // Callback for when everything is done + function done( status, nativeStatusText, responses, headers ) { + var isSuccess, success, error, response, modified, + statusText = nativeStatusText; + + // Ignore repeat invocations + if ( completed ) { + return; + } + + completed = true; + + // Clear timeout if it exists + if ( timeoutTimer ) { + window.clearTimeout( timeoutTimer ); + } + + // Dereference transport for early garbage collection + // (no matter how long the jqXHR object will be used) + transport = undefined; + + // Cache response headers + responseHeadersString = headers || ""; + + // Set readyState + jqXHR.readyState = status > 0 ? 4 : 0; + + // Determine if successful + isSuccess = status >= 200 && status < 300 || status === 304; + + // Get response data + if ( responses ) { + response = ajaxHandleResponses( s, jqXHR, responses ); + } + + // Use a noop converter for missing script but not if jsonp + if ( !isSuccess && + jQuery.inArray( "script", s.dataTypes ) > -1 && + jQuery.inArray( "json", s.dataTypes ) < 0 ) { + s.converters[ "text script" ] = function() {}; + } + + // Convert no matter what (that way responseXXX fields are always set) + response = ajaxConvert( s, response, jqXHR, isSuccess ); + + // If successful, handle type chaining + if ( isSuccess ) { + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + modified = jqXHR.getResponseHeader( "Last-Modified" ); + if ( modified ) { + jQuery.lastModified[ cacheURL ] = modified; + } + modified = jqXHR.getResponseHeader( "etag" ); + if ( modified ) { + jQuery.etag[ cacheURL ] = modified; + } + } + + // if no content + if ( status === 204 || s.type === "HEAD" ) { + statusText = "nocontent"; + + // if not modified + } else if ( status === 304 ) { + statusText = "notmodified"; + + // If we have data, let's convert it + } else { + statusText = response.state; + success = response.data; + error = response.error; + isSuccess = !error; + } + } else { + + // Extract error from statusText and normalize for non-aborts + error = statusText; + if ( status || !statusText ) { + statusText = "error"; + if ( status < 0 ) { + status = 0; + } + } + } + + // Set data for the fake xhr object + jqXHR.status = status; + jqXHR.statusText = ( nativeStatusText || statusText ) + ""; + + // Success/Error + if ( isSuccess ) { + deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); + } else { + deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); + } + + // Status-dependent callbacks + jqXHR.statusCode( statusCode ); + statusCode = undefined; + + if ( fireGlobals ) { + globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", + [ jqXHR, s, isSuccess ? success : error ] ); + } + + // Complete + completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); + + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); + + // Handle the global AJAX counter + if ( !( --jQuery.active ) ) { + jQuery.event.trigger( "ajaxStop" ); + } + } + } + + return jqXHR; + }, + + getJSON: function( url, data, callback ) { + return jQuery.get( url, data, callback, "json" ); + }, + + getScript: function( url, callback ) { + return jQuery.get( url, undefined, callback, "script" ); + } +} ); + +jQuery.each( [ "get", "post" ], function( _i, method ) { + jQuery[ method ] = function( url, data, callback, type ) { + + // Shift arguments if data argument was omitted + if ( isFunction( data ) ) { + type = type || callback; + callback = data; + data = undefined; + } + + // The url can be an options object (which then must have .url) + return jQuery.ajax( jQuery.extend( { + url: url, + type: method, + dataType: type, + data: data, + success: callback + }, jQuery.isPlainObject( url ) && url ) ); + }; +} ); + +jQuery.ajaxPrefilter( function( s ) { + var i; + for ( i in s.headers ) { + if ( i.toLowerCase() === "content-type" ) { + s.contentType = s.headers[ i ] || ""; + } + } +} ); + + +jQuery._evalUrl = function( url, options, doc ) { + return jQuery.ajax( { + url: url, + + // Make this explicit, since user can override this through ajaxSetup (#11264) + type: "GET", + dataType: "script", + cache: true, + async: false, + global: false, + + // Only evaluate the response if it is successful (gh-4126) + // dataFilter is not invoked for failure responses, so using it instead + // of the default converter is kludgy but it works. 
+ converters: { + "text script": function() {} + }, + dataFilter: function( response ) { + jQuery.globalEval( response, options, doc ); + } + } ); +}; + + +jQuery.fn.extend( { + wrapAll: function( html ) { + var wrap; + + if ( this[ 0 ] ) { + if ( isFunction( html ) ) { + html = html.call( this[ 0 ] ); + } + + // The elements to wrap the target around + wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); + + if ( this[ 0 ].parentNode ) { + wrap.insertBefore( this[ 0 ] ); + } + + wrap.map( function() { + var elem = this; + + while ( elem.firstElementChild ) { + elem = elem.firstElementChild; + } + + return elem; + } ).append( this ); + } + + return this; + }, + + wrapInner: function( html ) { + if ( isFunction( html ) ) { + return this.each( function( i ) { + jQuery( this ).wrapInner( html.call( this, i ) ); + } ); + } + + return this.each( function() { + var self = jQuery( this ), + contents = self.contents(); + + if ( contents.length ) { + contents.wrapAll( html ); + + } else { + self.append( html ); + } + } ); + }, + + wrap: function( html ) { + var htmlIsFunction = isFunction( html ); + + return this.each( function( i ) { + jQuery( this ).wrapAll( htmlIsFunction ? html.call( this, i ) : html ); + } ); + }, + + unwrap: function( selector ) { + this.parent( selector ).not( "body" ).each( function() { + jQuery( this ).replaceWith( this.childNodes ); + } ); + return this; + } +} ); + + +jQuery.expr.pseudos.hidden = function( elem ) { + return !jQuery.expr.pseudos.visible( elem ); +}; +jQuery.expr.pseudos.visible = function( elem ) { + return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); +}; + + + + +jQuery.ajaxSettings.xhr = function() { + try { + return new window.XMLHttpRequest(); + } catch ( e ) {} +}; + +var xhrSuccessStatus = { + + // File protocol always yields status code 0, assume 200 + 0: 200, + + // Support: IE <=9 only + // #1450: sometimes IE returns 1223 when it should be 204 + 1223: 204 + }, + xhrSupported = jQuery.ajaxSettings.xhr(); + +support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); +support.ajax = xhrSupported = !!xhrSupported; + +jQuery.ajaxTransport( function( options ) { + var callback, errorCallback; + + // Cross domain only allowed if supported through XMLHttpRequest + if ( support.cors || xhrSupported && !options.crossDomain ) { + return { + send: function( headers, complete ) { + var i, + xhr = options.xhr(); + + xhr.open( + options.type, + options.url, + options.async, + options.username, + options.password + ); + + // Apply custom fields if provided + if ( options.xhrFields ) { + for ( i in options.xhrFields ) { + xhr[ i ] = options.xhrFields[ i ]; + } + } + + // Override mime type if needed + if ( options.mimeType && xhr.overrideMimeType ) { + xhr.overrideMimeType( options.mimeType ); + } + + // X-Requested-With header + // For cross-domain requests, seeing as conditions for a preflight are + // akin to a jigsaw puzzle, we simply never set it to be sure. + // (it can always be set on a per-request basis or even using ajaxSetup) + // For same-domain requests, won't change header if already provided. 
+ if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { + headers[ "X-Requested-With" ] = "XMLHttpRequest"; + } + + // Set headers + for ( i in headers ) { + xhr.setRequestHeader( i, headers[ i ] ); + } + + // Callback + callback = function( type ) { + return function() { + if ( callback ) { + callback = errorCallback = xhr.onload = + xhr.onerror = xhr.onabort = xhr.ontimeout = + xhr.onreadystatechange = null; + + if ( type === "abort" ) { + xhr.abort(); + } else if ( type === "error" ) { + + // Support: IE <=9 only + // On a manual native abort, IE9 throws + // errors on any property access that is not readyState + if ( typeof xhr.status !== "number" ) { + complete( 0, "error" ); + } else { + complete( + + // File: protocol always yields status 0; see #8605, #14207 + xhr.status, + xhr.statusText + ); + } + } else { + complete( + xhrSuccessStatus[ xhr.status ] || xhr.status, + xhr.statusText, + + // Support: IE <=9 only + // IE9 has no XHR2 but throws on binary (trac-11426) + // For XHR2 non-text, let the caller handle it (gh-2498) + ( xhr.responseType || "text" ) !== "text" || + typeof xhr.responseText !== "string" ? + { binary: xhr.response } : + { text: xhr.responseText }, + xhr.getAllResponseHeaders() + ); + } + } + }; + }; + + // Listen to events + xhr.onload = callback(); + errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" ); + + // Support: IE 9 only + // Use onreadystatechange to replace onabort + // to handle uncaught aborts + if ( xhr.onabort !== undefined ) { + xhr.onabort = errorCallback; + } else { + xhr.onreadystatechange = function() { + + // Check readyState before timeout as it changes + if ( xhr.readyState === 4 ) { + + // Allow onerror to be called first, + // but that will not handle a native abort + // Also, save errorCallback to a variable + // as xhr.onerror cannot be accessed + window.setTimeout( function() { + if ( callback ) { + errorCallback(); + } + } ); + } + }; + } + + // Create the abort callback + callback = callback( "abort" ); + + try { + + // Do send the request (this may raise an exception) + xhr.send( options.hasContent && options.data || null ); + } catch ( e ) { + + // #14683: Only rethrow if this hasn't been notified as an error yet + if ( callback ) { + throw e; + } + } + }, + + abort: function() { + if ( callback ) { + callback(); + } + } + }; + } +} ); + + + + +// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) +jQuery.ajaxPrefilter( function( s ) { + if ( s.crossDomain ) { + s.contents.script = false; + } +} ); + +// Install script dataType +jQuery.ajaxSetup( { + accepts: { + script: "text/javascript, application/javascript, " + + "application/ecmascript, application/x-ecmascript" + }, + contents: { + script: /\b(?:java|ecma)script\b/ + }, + converters: { + "text script": function( text ) { + jQuery.globalEval( text ); + return text; + } + } +} ); + +// Handle cache's special case and crossDomain +jQuery.ajaxPrefilter( "script", function( s ) { + if ( s.cache === undefined ) { + s.cache = false; + } + if ( s.crossDomain ) { + s.type = "GET"; + } +} ); + +// Bind script tag hack transport +jQuery.ajaxTransport( "script", function( s ) { + + // This transport only deals with cross domain or forced-by-attrs requests + if ( s.crossDomain || s.scriptAttrs ) { + var script, callback; + return { + send: function( _, complete ) { + script = jQuery( " - - - - + + + + + - + @@ -52,8 +52,11 @@
  • API Reference
  • Command Line Interface
  • Advanced User Guide
  • CHANGELOG
  • @@ -85,16 +88,80 @@
    -

    Advanced User Guide

    -
    +

    Advanced User Guide

    +
    +

    Define Custom Protocol

    +

Megfile supports custom protocols. You can define your own protocol class like this:

    +
    # custom.py
    +import io
    +from typing import IO, AnyStr
    +
    +from megfile.interfaces import URIPath
    +from megfile.smart_path import SmartPath
    +
    +@SmartPath.register
    +class CustomPath(URIPath):
    +
    +    protocol = "custom"
    +
    +    def open(self, mode: str = 'rb', **kwargs) -> IO[AnyStr]:
    +        return io.BytesIO(b'test')
    +
    +    ...
    +
    +
    +
      +
    • protocol = "custom" is the name of your custom protocol. Then your path will be like custom://path/to/file.

    • +
    • Implement methods

        -
      • Define Custom Protocol
      • -
      • Glob Pattern
          -
        • Patterns are Unix shell style:
        • +
• URIPath provides properties and methods such as path_with_protocol, path_without_protocol, parts and parents, which your class inherits and can use directly (a short usage sketch follows this section). You can read more about them in megfile.pathlike.URIPath.

        • +
• smart methods dispatch to your CustomPath's methods automatically once you have implemented the corresponding method. For example, if you implement CustomPath.open, smart_open will call it whenever the path is custom://path/to/file. You can find the class methods each smart method requires in megfile.smart_path.SmartPath.

      • +
• You must import your custom Python module before using smart methods, so that the @SmartPath.register decorator actually runs. Like this:

      +
      from custom import CustomPath
      +from megfile import smart_open
      +
      +with smart_open("custom://path/to/file", "rb") as f:
      +    assert f.read() == b'test'
      +
      +
    +
    +
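The inherited URIPath helpers can be exercised directly on your class. Below is a minimal sketch, assuming the CustomPath class from custom.py above; the commented values are illustrative of what the inherited properties typically return, not authoritative output.
+from custom import CustomPath
+
+path = CustomPath("custom://path/to/file")
+print(path.path_with_protocol)     # custom://path/to/file
+print(path.path_without_protocol)  # path/to/file
+print(path.parts)                  # the path split into components
+print(path.parents)                # the chain of parent paths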

    Glob Pattern

    +

The glob module finds all the pathnames matching a specified pattern according to the rules below.

    +
    +

Patterns are Unix shell style (a usage example follows the table):

    + + + + + + + + + + + + + + + + + + + + + + + + + + +

    pattern

    meaning

    *

    matches any characters but ‘/’

    **

    matches everything

    ?

    matches any single character

    [seq]

    matches any character in seq

    [!seq]

    matches any char not in seq

    {seq1,seq2}

    matches seq1 or seq2

    +
    +
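Combining these rules, a single pattern can match nested files of several types. A minimal sketch, assuming megfile's smart_glob and a purely illustrative s3 bucket layout:
+from megfile import smart_glob
+
+# '**' recurses into subdirectories; '{json,csv}' matches either suffix
+for path in smart_glob("s3://bucket/logs/**/*.{json,csv}"):
+    print(path)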
    @@ -102,7 +169,7 @@

    Advanced User Guide - +


    diff --git a/advanced/custom_protocol.html b/advanced/custom_protocol.html index 065896eb..75f403f4 100644 --- a/advanced/custom_protocol.html +++ b/advanced/custom_protocol.html @@ -1,28 +1,26 @@ - + - + - Define Custom Protocol — megfile documentation - - + <no title> — megfile documentation + + - - - - - + + + + + - - - + @@ -45,17 +43,13 @@